aiclient

package module
v0.0.0-...-a538039 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 19, 2025 License: MIT Imports: 12 Imported by: 0

README ¶

AI Client - Ultra-Configurable Multi-Provider AI Library

CI Security Documentation Go Report Card Go Reference GitHub release

A production-ready Go library for interfacing with multiple AI providers through a unified, simple API. Designed to be used in any Go project requiring AI provider integration.

📋 Table of Contents

📦 Installation

Requirements
  • Go 1.21 or later
  • Git (for go get)
Install
go get github.com/flowcmd/ai-client
Verify Installation
go mod tidy
go build ./...

🚀 Quick Start

Simple Usage
package main

import (
    "fmt"
    "github.com/flowcmd/ai-client"
)

func main() {
    response, err := aiclient.New(aiclient.Config{
        Token:   "sk-proj-1234567890",
        Model:   "gpt-4o",
        BaseURL: "https://api.openai.com/v1",
        SystemPrompt: "You are a helpful assistant.",
        Messages: []aiclient.Message{
            {Role: "user", Content: "What is Go programming language?"},
        },
        Options: map[string]interface{}{
            "temperature": 0.7,
            "max_tokens":  100,
        },
    })

    if err != nil {
        panic(err)
    }

    fmt.Printf("Response: %s\n", response.Content)
}
Multi-Provider Client
client := aiclient.NewClient()

// Register multiple providers
client.RegisterProvider("openai", aiclient.NewOpenAIProvider())
client.RegisterProvider("claude", aiclient.NewAnthropicProvider())
client.RegisterProvider("ollama", aiclient.NewOllamaProvider())

// Use with automatic failover
response, err := client.GenerateWithFallback(
    context.Background(),
    []string{"openai", "claude"}, // try OpenAI first, then Claude
    config,
)

🌟 Supported Providers

Provider Status Authentication Base URL Features Tests
OpenAI ✅ Production Bearer Token https://api.openai.com/v1 GPT-4o, GPT-3.5, structured outputs, streaming ✅
Anthropic ✅ Production API Key https://api.anthropic.com Claude 3.5, Constitutional AI, streaming ✅
Ollama ✅ Production None http://localhost:11434 Local models, privacy-first, streaming ✅
Google ✅ Production API Key/OAuth https://generativelanguage.googleapis.com Gemini, Vertex AI, multimodal ✅
Groq ✅ Production Bearer Token https://api.groq.com/openai/v1 Fast inference, OpenAI compatible ✅
OpenRouter ✅ Production Bearer Token https://openrouter.ai/api/v1 Multi-provider routing ✅
Provider Features Comparison
Feature OpenAI Anthropic Ollama Google Groq OpenRouter
Streaming ✅ ✅ ✅ ✅ ✅ ✅
JSON Mode ✅ ❌ ✅ ✅ ✅ ✅
Function Calling ✅ ✅ ❌ ✅ ✅ ✅
Image Input ✅ ✅ ✅ ✅ ❌ ✅
Local Models ❌ ❌ ✅ ❌ ❌ ❌
Free Tier ❌ ❌ ✅ ✅ ✅ ❌

📋 Configuration Options

Core Configuration
type Config struct {
    Token          string                 // API token/key
    Model          string                 // Model name (e.g., "gpt-4o")
    BaseURL        string                 // Provider API base URL
    ResponseFormat string                 // "json" or "text"
    Schema         map[string]interface{} // JSON schema for structured output
    SystemPrompt   string                 // System prompt for the conversation
    Messages       []Message              // Conversation messages
    Options        map[string]interface{} // Provider-specific options
    Timeout        time.Duration          // Request timeout
}
Universal Options

All providers support these common options (mapped appropriately):

Options: map[string]interface{}{
    "temperature":        0.7,     // Randomness (0.0-2.0)
    "max_tokens":         100,     // Maximum output tokens
    "top_p":              0.9,     // Nucleus sampling
    "frequency_penalty":  0.0,     // Reduce repetition
    "presence_penalty":   0.0,     // Encourage new topics
    "stopwords":          []string{"STOP"}, // Stop sequences
}

🔄 Streaming Support

stream, err := client.GenerateStream(context.Background(), config)
if err != nil {
    panic(err)
}

for chunk := range stream {
    if chunk.Error != nil {
        fmt.Printf("Error: %v\n", chunk.Error)
        break
    }

    fmt.Print(chunk.Delta) // Print incremental content

    if chunk.Done {
        fmt.Printf("\nFinal content: %s\n", chunk.Content)
        break
    }
}

🎯 Provider-Specific Examples

OpenAI with Structured Output
response, err := aiclient.New(aiclient.Config{
    Token:          "sk-proj-1234567890",
    Model:          "gpt-4o",
    BaseURL:        "https://api.openai.com/v1",
    ResponseFormat: "json",
    Schema: map[string]interface{}{
        "type": "object",
        "properties": map[string]interface{}{
            "title":       map[string]string{"type": "string"},
            "description": map[string]string{"type": "string"},
        },
    },
    SystemPrompt: "Extract title and description from the following text.",
    Messages: []aiclient.Message{
        {Role: "user", Content: "The new AI client supports multiple providers with a unified interface."},
    },
})
Anthropic Claude
response, err := aiclient.New(aiclient.Config{
    Token:   "ant-api-key-12345",
    Model:   "claude-3-sonnet-20240229",
    BaseURL: "https://api.anthropic.com",
    SystemPrompt: "You are Claude, created by Anthropic. You're helpful, harmless, and honest.",
    Messages: []aiclient.Message{
        {Role: "user", Content: "Explain quantum computing briefly."},
    },
    Options: map[string]interface{}{
        "max_tokens":  200,
        "temperature": 0.5,
    },
})
Ollama (Local)
response, err := aiclient.New(aiclient.Config{
    Model:   "llama3.2",
    BaseURL: "http://localhost:11434",
    SystemPrompt: "You are a local AI assistant.",
    Messages: []aiclient.Message{
        {Role: "user", Content: "What's the advantage of running models locally?"},
    },
    Options: map[string]interface{}{
        "temperature": 0.8,
        "max_tokens":  100,
    },
})

πŸ›‘οΈ Error Handling & Resilience

Automatic Failover
// Try multiple providers in order
response, err := client.GenerateWithFallback(
    ctx,
    []string{"openai", "anthropic", "ollama"},
    config,
)
Manual Error Handling
response, err := client.Generate(ctx, config)
if err != nil {
    // Handle specific error types
    switch {
    case strings.Contains(err.Error(), "rate limit"):
        // Wait and retry
    case strings.Contains(err.Error(), "401"):
        // Check API key
    default:
        // Generic error handling
    }
}

πŸ—οΈ Architecture

The library is built with a clean provider abstraction:

ai-client/
β”œβ”€β”€ aiclient.go          # Main client interface
β”œβ”€β”€ types.go             # Core types and interfaces
β”œβ”€β”€ adapters.go          # Provider adapters
└── providers/
    β”œβ”€β”€ openai/          # OpenAI implementation
    β”œβ”€β”€ anthropic/       # Anthropic implementation
    β”œβ”€β”€ ollama/          # Ollama implementation
    └── ...              # More providers
Adding New Providers
  1. Create a new provider package in providers/
  2. Implement the provider interface:
type Provider interface {
    Name() string
    Generate(ctx context.Context, config Config) (*Response, error)
    GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)
    ValidateConfig(config Config) error
}
  3. Add provider detection logic in detectAndCreateProvider()

🚦 Production Considerations

Rate Limiting

The client respects provider rate limits and implements exponential backoff:

config.Timeout = 30 * time.Second  // Per-request timeout
Token Management

Store tokens securely and rotate them regularly:

// Never hardcode tokens
config.Token = os.Getenv("OPENAI_API_KEY")
Monitoring

Monitor usage and costs:

fmt.Printf("Tokens used: %v\n", response.Usage)
fmt.Printf("Provider: %s\n", response.Provider)

📖 Documentation

API Reference
Guides

🤝 Contributing

We welcome contributions! Please see our Contributing Guide for details.

Quick Start for Contributors
  1. Read our Contributing Guide
  2. Check out Good First Issues
  3. Fork the repository and create a feature branch
  4. Add tests for new functionality
  5. Ensure all tests pass: go test ./...
  6. Submit a pull request
Community

πŸ›‘οΈ Security

Security is important to us. Please see our Security Policy for:

  • Reporting vulnerabilities
  • Security best practices
  • Supported versions

📄 License

This project is licensed under the MIT License - see the LICENSE file for details.

Project
Documentation

Built with ❤️ for the Go community

Documentation ¶

Index ¶

Constants ¶

This section is empty.

Variables ¶

This section is empty.

Functions ¶

func AnthropicResponse ΒΆ

func AnthropicResponse(content, model string) string

AnthropicResponse creates a mock Anthropic response

func AssertResponse ΒΆ

func AssertResponse(t interface {
	Helper()
	Errorf(format string, args ...interface{})
}, response *Response, expectedContent, expectedModel, expectedProvider string)

AssertResponse validates a response matches expected values

func AssertStreamChunk ΒΆ

func AssertStreamChunk(t interface {
	Helper()
	Errorf(format string, args ...interface{})
}, chunk StreamChunk, expectedContent, expectedDelta string, expectedDone bool)

AssertStreamChunk validates a stream chunk

func ConcurrentTestHelper ΒΆ

func ConcurrentTestHelper(t interface {
	Helper()
	Errorf(format string, args ...interface{})
}, numGoroutines int, operation func(int) error)

ConcurrentTestHelper helps test concurrent operations

func ErrorResponse ΒΆ

func ErrorResponse(code int, message string) string

ErrorResponse creates a mock error response

func OllamaResponse ΒΆ

func OllamaResponse(content, model string) string

OllamaResponse creates a mock Ollama response

func OpenAIResponse ΒΆ

func OpenAIResponse(content, model string) string

OpenAIResponse creates a mock OpenAI response

func OpenAIStreamResponse ΒΆ

func OpenAIStreamResponse(deltas []string, model string) string

OpenAIStreamResponse creates a mock OpenAI streaming response

func WithTimeout ΒΆ

func WithTimeout(timeout time.Duration) (context.Context, context.CancelFunc)

WithTimeout creates a context with timeout for testing

Types ΒΆ

type AnthropicAdapter ΒΆ

type AnthropicAdapter struct {
	// contains filtered or unexported fields
}

func (*AnthropicAdapter) Generate ΒΆ

func (a *AnthropicAdapter) Generate(ctx context.Context, config Config) (*Response, error)

func (*AnthropicAdapter) GenerateStream ΒΆ

func (a *AnthropicAdapter) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*AnthropicAdapter) Name ΒΆ

func (a *AnthropicAdapter) Name() string

func (*AnthropicAdapter) ValidateConfig ΒΆ

func (a *AnthropicAdapter) ValidateConfig(config Config) error

type Client ΒΆ

type Client struct {
	// contains filtered or unexported fields
}

func NewClient ΒΆ

func NewClient() *Client

func (*Client) Generate ΒΆ

func (c *Client) Generate(ctx context.Context, config Config) (*Response, error)

func (*Client) GenerateStream ΒΆ

func (c *Client) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*Client) GenerateStreamWithProvider ΒΆ

func (c *Client) GenerateStreamWithProvider(ctx context.Context, providerName string, config Config) (<-chan StreamChunk, error)

func (*Client) GenerateWithFallback ΒΆ

func (c *Client) GenerateWithFallback(ctx context.Context, providerNames []string, config Config) (*Response, error)

func (*Client) GenerateWithProvider ΒΆ

func (c *Client) GenerateWithProvider(ctx context.Context, providerName string, config Config) (*Response, error)

func (*Client) ListProviders ΒΆ

func (c *Client) ListProviders() []string

func (*Client) RegisterProvider ΒΆ

func (c *Client) RegisterProvider(name string, provider Provider) error

func (*Client) SetDefaultProvider ΒΆ

func (c *Client) SetDefaultProvider(name string) error

type Config ΒΆ

type Config struct {
	Token          string                 `json:"token"`
	Model          string                 `json:"model"`
	BaseURL        string                 `json:"base_url"`
	ResponseFormat string                 `json:"response_format,omitempty"`
	Schema         map[string]interface{} `json:"schema,omitempty"`
	SystemPrompt   string                 `json:"system_prompt,omitempty"`
	Messages       []Message              `json:"messages"`
	Options        map[string]interface{} `json:"options,omitempty"`
	Timeout        time.Duration          `json:"timeout,omitempty"`
}

func TestConfig ΒΆ

func TestConfig() Config

TestConfig creates a basic test configuration

func TestConfigWithJSONSchema ΒΆ

func TestConfigWithJSONSchema(schema map[string]interface{}) Config

TestConfigWithJSONSchema creates a test config with JSON response format

func TestConfigWithSystemPrompt ΒΆ

func TestConfigWithSystemPrompt(systemPrompt string) Config

TestConfigWithSystemPrompt creates a test config with system prompt

type GoogleAdapter ΒΆ

type GoogleAdapter struct {
	// contains filtered or unexported fields
}

func (*GoogleAdapter) Generate ΒΆ

func (a *GoogleAdapter) Generate(ctx context.Context, config Config) (*Response, error)

func (*GoogleAdapter) GenerateStream ΒΆ

func (a *GoogleAdapter) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*GoogleAdapter) Name ΒΆ

func (a *GoogleAdapter) Name() string

func (*GoogleAdapter) ValidateConfig ΒΆ

func (a *GoogleAdapter) ValidateConfig(config Config) error

type Message ΒΆ

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

func TestMessage ΒΆ

func TestMessage(role, content string) Message

TestMessage creates a test message

type MockHTTPServer ΒΆ

type MockHTTPServer struct {
	*httptest.Server
	RequestCount int
	LastRequest  *http.Request
	LastBody     string
	Responses    []MockResponse
}

MockHTTPServer creates a mock HTTP server for testing providers

func NewMockHTTPServer ΒΆ

func NewMockHTTPServer(responses []MockResponse) *MockHTTPServer

NewMockHTTPServer creates a new mock HTTP server

func (*MockHTTPServer) Close ΒΆ

func (m *MockHTTPServer) Close()

Close closes the mock server

func (*MockHTTPServer) URL ΒΆ

func (m *MockHTTPServer) URL() string

URL returns the server URL

type MockProvider ΒΆ

type MockProvider struct {
	// contains filtered or unexported fields
}

MockProvider implements the Provider interface for testing

func FailingMockProvider ΒΆ

func FailingMockProvider(errorMessage string) *MockProvider

FailingMockProvider creates a provider that always fails

func SlowMockProvider ΒΆ

func SlowMockProvider(delay time.Duration) *MockProvider

SlowMockProvider creates a provider that responds slowly (for timeout testing)

func StreamingMockProvider ΒΆ

func StreamingMockProvider(chunks []StreamChunk) *MockProvider

StreamingMockProvider creates a provider that returns streaming responses

func (*MockProvider) Generate ΒΆ

func (m *MockProvider) Generate(ctx context.Context, config Config) (*Response, error)

func (*MockProvider) GenerateStream ΒΆ

func (m *MockProvider) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*MockProvider) Name ΒΆ

func (m *MockProvider) Name() string

func (*MockProvider) ValidateConfig ΒΆ

func (m *MockProvider) ValidateConfig(config Config) error

type MockResponse ΒΆ

type MockResponse struct {
	StatusCode int
	Body       string
	Headers    map[string]string
	Delay      time.Duration
}

type OllamaAdapter ΒΆ

type OllamaAdapter struct {
	// contains filtered or unexported fields
}

func (*OllamaAdapter) Generate ΒΆ

func (a *OllamaAdapter) Generate(ctx context.Context, config Config) (*Response, error)

func (*OllamaAdapter) GenerateStream ΒΆ

func (a *OllamaAdapter) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*OllamaAdapter) Name ΒΆ

func (a *OllamaAdapter) Name() string

func (*OllamaAdapter) ValidateConfig ΒΆ

func (a *OllamaAdapter) ValidateConfig(config Config) error

type OpenAIAdapter ΒΆ

type OpenAIAdapter struct {
	// contains filtered or unexported fields
}

func (*OpenAIAdapter) Generate ΒΆ

func (a *OpenAIAdapter) Generate(ctx context.Context, config Config) (*Response, error)

func (*OpenAIAdapter) GenerateStream ΒΆ

func (a *OpenAIAdapter) GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)

func (*OpenAIAdapter) Name ΒΆ

func (a *OpenAIAdapter) Name() string

func (*OpenAIAdapter) ValidateConfig ΒΆ

func (a *OpenAIAdapter) ValidateConfig(config Config) error

type Provider ΒΆ

type Provider interface {
	Name() string
	Generate(ctx context.Context, config Config) (*Response, error)
	GenerateStream(ctx context.Context, config Config) (<-chan StreamChunk, error)
	ValidateConfig(config Config) error
}

func NewAnthropicProvider ΒΆ

func NewAnthropicProvider() Provider

func NewGoogleProvider ΒΆ

func NewGoogleProvider(config Config) Provider

func NewGroqProvider ΒΆ

func NewGroqProvider() Provider

func NewOllamaProvider ΒΆ

func NewOllamaProvider() Provider

func NewOpenAIProvider ΒΆ

func NewOpenAIProvider() Provider

func NewOpenRouterProvider ΒΆ

func NewOpenRouterProvider() Provider

type Response ΒΆ

type Response struct {
	Content      string                 `json:"content"`
	Usage        map[string]interface{} `json:"usage,omitempty"`
	Model        string                 `json:"model"`
	FinishReason string                 `json:"finish_reason,omitempty"`
	Provider     string                 `json:"provider,omitempty"`
	Raw          map[string]interface{} `json:"raw,omitempty"`
}

func New ΒΆ

func New(config Config) (*Response, error)

func TestResponse ΒΆ

func TestResponse(content, model, provider string) *Response

TestResponse creates a test response

type StreamChunk ΒΆ

type StreamChunk struct {
	Content      string `json:"content"`
	Delta        string `json:"delta"`
	Done         bool   `json:"done"`
	FinishReason string `json:"finish_reason,omitempty"`
	Error        error  `json:"error,omitempty"`
}

func TestStreamChunk ΒΆ

func TestStreamChunk(content, delta string, done bool) StreamChunk

TestStreamChunk creates a test stream chunk

Directories ΒΆ

Path Synopsis
examples
basic command
providers

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL