Documentation
¶
Index ¶
- Constants
- Variables
- func DisableLogs()
- func String(s string) *string
- type APIError
- type APIKey
- type APIKeyCreateRequest
- type APIKeyCreateResponse
- type APIKeyCurrent
- type APIKeyCurrentResponse
- type APIKeyDeleteResponse
- type APIKeyResponse
- type APIKeyUpdateRequest
- type APIKeysListResponse
- type APIRateLimit
- type Annotation
- type AnnotationType
- type AudioFormat
- type CacheControl
- type ChatCompletionAspectRatio
- type ChatCompletionChoice
- type ChatCompletionImage
- type ChatCompletionImageConfig
- type ChatCompletionImageSize
- type ChatCompletionImageType
- type ChatCompletionImageURL
- type ChatCompletionMessage
- func AssistantMessage(content string) ChatCompletionMessage
- func SystemMessage(content string) ChatCompletionMessage
- func ToolMessage(callID string, content string) ChatCompletionMessage
- func UserMessage(content string) ChatCompletionMessage
- func UserMessageWithAudio(promptText string, audio []byte, format AudioFormat) ChatCompletionMessage
- func UserMessageWithAudioFromFile(promptText, filePath string) (ChatCompletionMessage, error)
- func UserMessageWithImage(text, imageURL string) ChatCompletionMessage
- func UserMessageWithPDF(text, filename, fileData string) ChatCompletionMessage
- func UserMessageWithPDFFromFile(text, filePath string) (ChatCompletionMessage, error)
- type ChatCompletionModality
- type ChatCompletionPlugin
- type ChatCompletionReasoning
- type ChatCompletionReasoningDetails
- type ChatCompletionReasoningDetailsType
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionResponseFormat
- type ChatCompletionResponseFormatJSONSchema
- type ChatCompletionResponseFormatType
- type ChatCompletionStream
- type ChatCompletionStreamChoice
- type ChatCompletionStreamChoiceDelta
- type ChatCompletionStreamChoiceLogprobs
- type ChatCompletionStreamResponse
- type ChatCompletionTokenLogprob
- type ChatCompletionTokenLogprobTopLogprob
- type ChatCompletionTrace
- type ChatMessageImageURL
- type ChatMessageInputAudio
- type ChatMessagePart
- type ChatMessagePartType
- type ChatProvider
- type Client
- func (c *Client) CreateAPIKey(ctx context.Context, request APIKeyCreateRequest) (APIKeyCreateResponse, error)
- func (c *Client) CreateChatCompletion(ctx context.Context, request ChatCompletionRequest) (response ChatCompletionResponse, err error)
- func (c *Client) CreateChatCompletionStream(ctx context.Context, request ChatCompletionRequest) (*ChatCompletionStream, error)
- func (c *Client) CreateCompletion(ctx context.Context, request CompletionRequest) (response CompletionResponse, err error)
- func (c *Client) CreateCompletionStream(ctx context.Context, request CompletionRequest) (*CompletionStream, error)
- func (c *Client) CreateEmbeddings(ctx context.Context, request EmbeddingsRequest) (EmbeddingsResponse, error)
- func (c *Client) DeleteAPIKey(ctx context.Context, hash string) (APIKeyDeleteResponse, error)
- func (c *Client) GetAPIKey(ctx context.Context, hash string) (APIKeyResponse, error)
- func (c *Client) GetCurrentAPIKey(ctx context.Context) (APIKeyCurrentResponse, error)
- func (c *Client) GetGeneration(ctx context.Context, id string) (generation Generation, err error)
- func (c *Client) ListAPIKeys(ctx context.Context) (APIKeysListResponse, error)
- func (c *Client) ListEmbeddingsModels(ctx context.Context) ([]Model, error)
- func (c *Client) ListModels(ctx context.Context) (models []Model, err error)
- func (c *Client) ListUserModels(ctx context.Context) (models []Model, err error)
- func (c *Client) UpdateAPIKey(ctx context.Context, hash string, request APIKeyUpdateRequest) (APIKeyResponse, error)
- type ClientConfig
- type CompletionChoice
- type CompletionRequest
- type CompletionResponse
- type CompletionStream
- type CompletionTokenDetails
- type Content
- type ContentFilterResults
- type CostDetails
- type DataCollection
- type EmbeddingData
- type EmbeddingValue
- type EmbeddingsEncodingFormat
- type EmbeddingsRequest
- type EmbeddingsResponse
- type EmbeddingsUsage
- type ErrorResponse
- type FileContent
- type FinishReason
- type FunctionCall
- type FunctionDefinition
- type Generation
- type HTTPDoer
- type HTTPRequestBuilder
- type Hate
- type ImageURLDetail
- type IncludeUsage
- type JSONMarshaller
- type JailBreak
- type KeyLimitReset
- type LogProb
- type LogProbs
- type Marshaller
- type Metadata
- type Model
- type ModelArchitecture
- type ModelPricing
- type ModelTopProvider
- type Option
- type PDFEngine
- type PDFPlugin
- type PluginID
- type Profanity
- type PromptAnnotation
- type PromptFilterResult
- type PromptTokenDetails
- type ProviderError
- type ProviderSorting
- type RequestBuilder
- type RequestError
- type SearchContextSize
- type SelfHarm
- type Sexual
- type StreamOptions
- type Tool
- type ToolCall
- type ToolType
- type TopLogProbs
- type URLCitation
- type Usage
- type Violence
- type WebSearchOptions
Constants ¶
const ( GPT4o = "openai/chatgpt-4o-latest" DeepseekV3 = "deepseek/deepseek-chat" DeepseekR1 = "deepseek/deepseek-r1" DeepseekR1DistillLlama = "deepseek/deepseek-r1-distill-llama-70b" LiquidLFM7B = "liquid/lfm-7b" Phi3Mini = "microsoft/phi-3-mini-128k-instruct:free" GeminiFlashExp = "google/gemini-2.0-flash-exp:free" GeminiProExp = "google/gemini-pro-1.5-exp" GeminiFlash8B = "google/gemini-flash-1.5-8b" GPT4oMini = "openai/gpt-4o-mini" )
const ( ChatMessageRoleSystem = "system" ChatMessageRoleUser = "user" ChatMessageRoleAssistant = "assistant" ChatMessageRoleFunction = "function" ChatMessageRoleTool = "tool" )
Chat message role defined by the Openrouter API.
Variables ¶
var ( ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletion") //nolint:lll ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") )
var ( ErrCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletion") //nolint:lll )
Functions ¶
func DisableLogs ¶ added in v0.1.9
func DisableLogs()
DisableLogs disables the internally used logger.
Types ¶
type APIError ¶
type APIError struct {
Code any `json:"code,omitempty"`
Message string `json:"message"`
Metadata *Metadata `json:"metadata,omitempty"`
// Internal fields
HTTPStatusCode int `json:"-"`
ProviderError *ProviderError `json:"-"`
}
APIError provides error information returned by the Openrouter API.
func (*APIError) UnmarshalJSON ¶
type APIKey ¶ added in v1.1.6
type APIKey struct {
Hash string `json:"hash,omitempty"`
Name string `json:"name,omitempty"`
Label string `json:"label,omitempty"`
Disabled bool `json:"disabled,omitempty"`
Limit float64 `json:"limit,omitempty"`
LimitRemaining float64 `json:"limit_remaining,omitempty"`
LimitReset KeyLimitReset `json:"limit_reset,omitempty"`
IncludeByokInLimit bool `json:"include_byok_in_limit,omitempty"`
Usage float64 `json:"usage,omitempty"`
UsageDaily float64 `json:"usage_daily,omitempty"`
UsageWeekly float64 `json:"usage_weekly,omitempty"`
UsageMonthly float64 `json:"usage_monthly,omitempty"`
ByokUsage float64 `json:"byok_usage,omitempty"`
ByokUsageDaily float64 `json:"byok_usage_daily,omitempty"`
ByokUsageWeekly float64 `json:"byok_usage_weekly,omitempty"`
ByokUsageMonthly float64 `json:"byok_usage_monthly,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
ExpiresAt *time.Time `json:"expires_at,omitempty"`
}
type APIKeyCreateRequest ¶ added in v1.1.6
type APIKeyCreateResponse ¶ added in v1.1.6
type APIKeyCurrent ¶ added in v1.1.6
type APIKeyCurrent struct {
Label string `json:"label,omitempty"`
Limit float64 `json:"limit,omitempty"`
Usage float64 `json:"usage,omitempty"`
UsageDaily float64 `json:"usage_daily,omitempty"`
UsageWeekly float64 `json:"usage_weekly,omitempty"`
UsageMonthly float64 `json:"usage_monthly,omitempty"`
ByokUsage float64 `json:"byok_usage,omitempty"`
ByokUsageDaily float64 `json:"byok_usage_daily,omitempty"`
ByokUsageWeekly float64 `json:"byok_usage_weekly,omitempty"`
ByokUsageMonthly float64 `json:"byok_usage_monthly,omitempty"`
IsFreeTier bool `json:"is_free_tier,omitempty"`
IsProvisioningKey bool `json:"is_provisioning_key,omitempty"`
LimitRemaining float64 `json:"limit_remaining,omitempty"`
LimitReset KeyLimitReset `json:"limit_reset,omitempty"`
IncludeByokInLimit bool `json:"include_byok_in_limit,omitempty"`
RateLimit *APIRateLimit `json:"rate_limit,omitempty"`
ExpiresAt *time.Time `json:"expires_at,omitempty"`
}
type APIKeyCurrentResponse ¶ added in v1.1.6
type APIKeyCurrentResponse struct {
Data APIKeyCurrent `json:"data"`
}
type APIKeyDeleteResponse ¶ added in v1.1.6
type APIKeyDeleteResponse struct {
Deleted bool `json:"deleted"`
}
type APIKeyResponse ¶ added in v1.1.6
type APIKeyResponse struct {
Data APIKey `json:"data"`
}
type APIKeyUpdateRequest ¶ added in v1.1.6
type APIKeyUpdateRequest struct {
Name *string `json:"name,omitempty"`
Disabled *bool `json:"disabled,omitempty"`
Limit *float64 `json:"limit,omitempty"`
LimitReset *KeyLimitReset `json:"limit_reset,omitempty"`
IncludeByokInLimit *bool `json:"include_byok_in_limit,omitempty"`
ExpiresAt *time.Time `json:"expires_at,omitempty"`
}
type APIKeysListResponse ¶ added in v1.1.6
type APIKeysListResponse struct {
Data []APIKey `json:"data"`
}
type APIRateLimit ¶ added in v1.1.6
type Annotation ¶ added in v0.1.3
type Annotation struct {
Type AnnotationType `json:"type"`
URLCitation URLCitation `json:"url_citation"`
}
type AnnotationType ¶ added in v0.1.3
type AnnotationType string
const (
AnnotationTypeUrlCitation AnnotationType = "url_citation"
)
type AudioFormat ¶ added in v0.2.3
type AudioFormat string
const ( AudioFormatMp3 AudioFormat = AudioFormat("mp3") AudioFormatWav AudioFormat = AudioFormat("wav") )
type CacheControl ¶
type ChatCompletionAspectRatio ¶ added in v1.0.4
type ChatCompletionAspectRatio string
const ( AspectRatio1x1 ChatCompletionAspectRatio = "1:1" AspectRatio2x3 ChatCompletionAspectRatio = "2:3" AspectRatio3x2 ChatCompletionAspectRatio = "3:2" AspectRatio3x4 ChatCompletionAspectRatio = "3:4" AspectRatio4x3 ChatCompletionAspectRatio = "4:3" AspectRatio4x5 ChatCompletionAspectRatio = "4:5" AspectRatio5x4 ChatCompletionAspectRatio = "5:4" AspectRatio9x16 ChatCompletionAspectRatio = "9:16" AspectRatio16x9 ChatCompletionAspectRatio = "16:9" AspectRatio21x9 ChatCompletionAspectRatio = "21:9" )
type ChatCompletionChoice ¶
type ChatCompletionChoice struct {
Index int `json:"index"`
Message ChatCompletionMessage `json:"message"`
Reasoning *string `json:"reasoning,omitempty"`
ReasoningDetails []ChatCompletionReasoningDetails `json:"reasoning_details,omitempty"`
// FinishReason
// stop: API returned complete message,
// or a message terminated by one of the stop sequences provided via the stop parameter
// length: Incomplete model output due to max_tokens parameter or token limit
// function_call: The model decided to call a function
// content_filter: Omitted content due to a flag from our content filters
// null: API response still in progress or incomplete
FinishReason FinishReason `json:"finish_reason"`
NativeFinishReason string `json:"native_finish_reason"`
LogProbs *LogProbs `json:"logprobs,omitempty"`
}
type ChatCompletionImage ¶ added in v0.2.4
type ChatCompletionImage struct {
Index int `json:"index"`
Type ChatCompletionImageType `json:"type"`
ImageURL ChatCompletionImageURL `json:"image_url"`
}
Image generation: https://openrouter.ai/docs/features/multimodal/image-generation
type ChatCompletionImageConfig ¶ added in v1.0.0
type ChatCompletionImageConfig struct {
AspectRatio ChatCompletionAspectRatio `json:"aspect_ratio,omitempty"`
ImageSize ChatCompletionImageSize `json:"image_size,omitempty"`
}
ChatCompletionImageConfig is used to configure the image generation. https://openrouter.ai/docs/features/multimodal/image-generation#image-aspect-ratio-configuration Default: '1:1' → 1024×1024
type ChatCompletionImageSize ¶ added in v1.0.4
type ChatCompletionImageSize string
const ( ImageSize1K ChatCompletionImageSize = "1K" ImageSize2K ChatCompletionImageSize = "2K" ImageSize4K ChatCompletionImageSize = "4K" )
type ChatCompletionImageType ¶ added in v0.2.4
type ChatCompletionImageType string
const (
StreamImageTypeImageURL ChatCompletionImageType = "image_url"
)
type ChatCompletionImageURL ¶ added in v0.2.4
type ChatCompletionImageURL struct {
URL string `json:"url"`
}
type ChatCompletionMessage ¶
type ChatCompletionMessage struct {
Role string `json:"role"`
Content Content `json:"content,omitzero"`
Refusal string `json:"refusal,omitempty"`
// This property is used for the "reasoning" feature supported by deepseek-reasoner
// - https://api-docs.deepseek.com/api/create-chat-completion#responses
ReasoningContent *string `json:"reasoning_content,omitempty"`
// Reasoning Used by all the other models
Reasoning *string `json:"reasoning,omitempty"`
// Required to preserve reasoning blocks https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
ReasoningDetails []ChatCompletionReasoningDetails `json:"reasoning_details,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
// For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
// For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
ToolCallID string `json:"tool_call_id,omitempty"`
// Web Search Annotations
Annotations []Annotation `json:"annotations,omitempty"`
// Multi-modal image generation (only in responses)
Images []ChatCompletionImage `json:"images,omitempty"`
}
func AssistantMessage ¶ added in v0.1.6
func AssistantMessage(content string) ChatCompletionMessage
AssistantMessage creates a new assistant message with the given text content.
func SystemMessage ¶ added in v0.1.6
func SystemMessage(content string) ChatCompletionMessage
SystemMessage creates a new system message with the given text content.
func ToolMessage ¶ added in v0.1.6
func ToolMessage(callID string, content string) ChatCompletionMessage
ToolMessage creates a new tool (response) message with a call ID and content.
func UserMessage ¶ added in v0.1.6
func UserMessage(content string) ChatCompletionMessage
UserMessage creates a new user message with the given text content.
func UserMessageWithAudio ¶ added in v0.2.3
func UserMessageWithAudio(promptText string, audio []byte, format AudioFormat) ChatCompletionMessage
UserMessageWithAudio creates a user message with the given prompt text and audio content. Creates a message with the embedded audio data.
func UserMessageWithAudioFromFile ¶ added in v0.2.3
func UserMessageWithAudioFromFile(promptText, filePath string) (ChatCompletionMessage, error)
UserMessageWithAudioFromFile creates a user message with the given prompt text and audio file. It reads the audio file (mp3 or wav) and creates a message with the embedded audio data.
func UserMessageWithImage ¶ added in v0.1.7
func UserMessageWithImage(text, imageURL string) ChatCompletionMessage
UserMessageWithImage creates a new user message with text and image URL.
func UserMessageWithPDF ¶ added in v0.1.7
func UserMessageWithPDF(text, filename, fileData string) ChatCompletionMessage
UserMessageWithPDF creates a new user message with text and PDF file content.
func UserMessageWithPDFFromFile ¶ added in v0.1.7
func UserMessageWithPDFFromFile(text, filePath string) (ChatCompletionMessage, error)
UserMessageWithPDFFromFile creates a user message with text and PDF content from a file. It reads the PDF file and creates a message with the embedded PDF data.
type ChatCompletionModality ¶ added in v0.2.4
type ChatCompletionModality string
const ( ModalityText ChatCompletionModality = "text" ModalityImage ChatCompletionModality = "image" )
type ChatCompletionPlugin ¶ added in v0.1.6
type ChatCompletionPlugin struct {
ID PluginID `json:"id"`
PDF *PDFPlugin `json:"pdf,omitempty"`
MaxResults *int `json:"max_results,omitempty"`
}
func CreatePDFPlugin ¶ added in v0.1.7
func CreatePDFPlugin(engine PDFEngine) ChatCompletionPlugin
CreatePDFPlugin creates a completion plugin to process PDFs using the specified engine. The engine can be: "mistral-ocr" (for scanned documents/PDFs with images), "pdf-text" (for well-structured PDFs - free), or "native" (only for models that support file input).
type ChatCompletionReasoning ¶
type ChatCompletionReasoning struct {
// Effort sets the reasoning effort level. One of: high, medium, low.
Effort *string `json:"prompt,omitempty"`
// MaxTokens cannot be simultaneously used with effort.
MaxTokens *int `json:"max_tokens,omitempty"`
// Exclude defaults to false.
Exclude *bool `json:"exclude,omitempty"`
// Or enable reasoning with the default parameters:
// Default: inferred from `effort` or `max_tokens`
Enabled *bool `json:"enabled,omitempty"`
}
type ChatCompletionReasoningDetails ¶ added in v0.2.4
type ChatCompletionReasoningDetails struct {
ID string `json:"id,omitempty"`
Index int `json:"index"`
Type ChatCompletionReasoningDetailsType `json:"type"`
Text string `json:"text,omitempty"`
Summary string `json:"summary,omitempty"`
Data string `json:"data,omitempty"`
Format string `json:"format,omitempty"`
}
type ChatCompletionReasoningDetailsType ¶ added in v0.2.4
type ChatCompletionReasoningDetailsType string
const ( ReasoningDetailsTypeText ChatCompletionReasoningDetailsType = "reasoning.text" ReasoningDetailsTypeSummary ChatCompletionReasoningDetailsType = "reasoning.summary" ReasoningDetailsTypeEncrypted ChatCompletionReasoningDetailsType = "reasoning.encrypted" )
type ChatCompletionRequest ¶
type ChatCompletionRequest struct {
Model string `json:"model,omitempty"`
// Optional model fallbacks: https://openrouter.ai/docs/features/model-routing#the-models-parameter
Models []string `json:"models,omitempty"`
Provider *ChatProvider `json:"provider,omitempty"`
Messages []ChatCompletionMessage `json:"messages"`
Reasoning *ChatCompletionReasoning `json:"reasoning,omitempty"`
Plugins []ChatCompletionPlugin `json:"plugins,omitempty"`
Modalities []ChatCompletionModality `json:"modalities,omitempty"`
ImageConfig *ChatCompletionImageConfig `json:"image_config,omitempty"`
// MaxTokens The maximum number of tokens that can be generated in the chat completion.
// This value can be used to control costs for text generated via API.
MaxTokens int `json:"max_tokens,omitempty"`
// MaxCompletionTokens Upper bound for completion tokens, supported for OpenAI API compliance.
// Prefer "max_tokens" for limiting output in new integrations.
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_completion_tokens
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
TopA float32 `json:"top_a,omitempty"`
N int `json:"n,omitempty"`
Stream bool `json:"stream,omitempty"`
Stop []string `json:"stop,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
RepetitionPenalty float32 `json:"repetition_penalty,omitempty"`
ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"`
Seed *int `json:"seed,omitempty"`
MinP float32 `json:"min_p,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
// LogitBias must use token ID strings (specified by their token ID in the tokenizer), not word strings.
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
LogitBias map[string]int `json:"logit_bias,omitempty"`
// LogProbs indicates whether to return log probabilities of the output tokens or not.
// If true, returns the log probabilities of each output token returned in the content of message.
// This option is currently not available on the gpt-4-vision-preview model.
LogProbs bool `json:"logprobs,omitempty"`
// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
// token position, each with an associated log probability.
// logprobs must be set to true if this parameter is used.
TopLogProbs int `json:"top_logprobs,omitempty"`
User string `json:"user,omitempty"`
// For usage with the broadcast feature. Group related requests together (such as a conversation or agent workflow) by including the session_id field (up to 128 characters).
// https://openrouter.ai/docs/guides/features/broadcast/overview#optional-trace-data
SessionId string `json:"session_id,omitempty"`
// Deprecated: use Tools instead.
Functions []FunctionDefinition `json:"functions,omitempty"`
// Deprecated: use ToolChoice instead.
FunctionCall any `json:"function_call,omitempty"`
Tools []Tool `json:"tools,omitempty"`
// This can be either a string or a ToolChoice object.
ToolChoice any `json:"tool_choice,omitempty"`
// Options for streaming response. Only set this when you set stream: true.
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
// Disable the default behavior of parallel tool calls by setting it to false.
ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
// Store can be set to true to store the output of this completion request for use in distillations and evals.
// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
Store bool `json:"store,omitempty"`
// Metadata to store with the completion.
Metadata map[string]string `json:"metadata,omitempty"`
// Trace provides structured tracing metadata for observability integrations.
// https://openrouter.ai/docs/guides/features/broadcast/overview#custom-metadata
Trace *ChatCompletionTrace `json:"trace,omitempty"`
// Apply message transforms
// https://openrouter.ai/docs/features/message-transforms
Transforms []string `json:"transforms,omitempty"`
// Optional web search options
// https://openrouter.ai/docs/features/web-search#specifying-search-context-size
WebSearchOptions *WebSearchOptions `json:"web_search_options,omitempty"`
Usage *IncludeUsage `json:"usage,omitempty"`
}
type ChatCompletionResponse ¶
type ChatCompletionResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Provider string `json:"provider"`
Choices []ChatCompletionChoice `json:"choices"`
Citations []string `json:"citations"`
Usage *Usage `json:"usage,omitempty"`
SystemFingerprint string `json:"system_fingerprint"`
}
ChatCompletionResponse represents a response structure for chat completion API.
type ChatCompletionResponseFormat ¶
type ChatCompletionResponseFormat struct {
Type ChatCompletionResponseFormatType `json:"type,omitempty"`
JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"`
}
type ChatCompletionResponseFormatType ¶
type ChatCompletionResponseFormatType string
const ( ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object" ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema" ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text" )
type ChatCompletionStream ¶
type ChatCompletionStream struct {
// contains filtered or unexported fields
}
func (*ChatCompletionStream) Close ¶
func (s *ChatCompletionStream) Close()
Close terminates the stream and cleans up resources.
func (*ChatCompletionStream) Recv ¶
func (s *ChatCompletionStream) Recv() (ChatCompletionStreamResponse, error)
Recv reads the next chunk from the stream.
type ChatCompletionStreamChoice ¶
type ChatCompletionStreamChoice struct {
Index int `json:"index"`
Delta ChatCompletionStreamChoiceDelta `json:"delta"`
Logprobs *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
FinishReason FinishReason `json:"finish_reason"`
NativeFinishReason string `json:"native_finish_reason"`
ContentFilterResults *ContentFilterResults `json:"content_filter_results,omitempty"`
}
type ChatCompletionStreamChoiceDelta ¶
type ChatCompletionStreamChoiceDelta struct {
Content string `json:"content,omitempty"`
Role string `json:"role,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
Refusal string `json:"refusal,omitempty"`
Annotations []Annotation `json:"annotations,omitempty"`
Images []ChatCompletionImage `json:"images,omitempty"`
Reasoning *string `json:"reasoning,omitempty"`
ReasoningDetails []ChatCompletionReasoningDetails `json:"reasoning_details,omitempty"`
// This property is used for the "reasoning" feature supported by deepseek-reasoner
// which is not in the official documentation.
// the doc from deepseek:
// - https://api-docs.deepseek.com/api/create-chat-completion#responses
ReasoningContent string `json:"reasoning_content,omitempty"`
}
type ChatCompletionStreamChoiceLogprobs ¶
type ChatCompletionStreamChoiceLogprobs struct {
Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
}
type ChatCompletionStreamResponse ¶
type ChatCompletionStreamResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Provider string `json:"provider"`
Choices []ChatCompletionStreamChoice `json:"choices"`
SystemFingerprint string `json:"system_fingerprint"`
PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"`
PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"`
// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
// When present, it contains a null value except for the last chunk which contains the token usage statistics
// for the entire request.
Usage *Usage `json:"usage,omitempty"`
}
type ChatCompletionTokenLogprob ¶
type ChatCompletionTokenLogprob struct {
Token string `json:"token"`
Bytes []int64 `json:"bytes,omitempty"`
Logprob float64 `json:"logprob,omitempty"`
TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
}
type ChatCompletionTrace ¶ added in v1.1.7
type ChatCompletionTrace struct {
// TraceID groups multiple API requests into a single trace.
TraceID string `json:"trace_id,omitempty"`
// TraceName is a custom identifier for the root trace; defaults to the model name.
TraceName string `json:"trace_name,omitempty"`
// SpanName creates a parent span that groups LLM operations.
SpanName string `json:"span_name,omitempty"`
// GenerationName is a custom identifier for this specific LLM call.
GenerationName string `json:"generation_name,omitempty"`
// ParentSpanID links this trace to an existing span in your own tracing system (e.g. OpenTelemetry).
ParentSpanID string `json:"parent_span_id,omitempty"`
}
ChatCompletionTrace provides structured tracing metadata for observability integrations. Use it as the value for the "trace" key in ChatCompletionRequest.Metadata. https://openrouter.ai/docs/guides/features/broadcast/overview#custom-metadata
type ChatMessageImageURL ¶
type ChatMessageImageURL struct {
URL string `json:"url,omitempty"`
Detail ImageURLDetail `json:"detail,omitempty"`
}
type ChatMessageInputAudio ¶ added in v0.2.3
type ChatMessageInputAudio struct {
Data string `json:"data,omitempty"`
Format AudioFormat `json:"format,omitempty"`
}
type ChatMessagePart ¶
type ChatMessagePart struct {
Type ChatMessagePartType `json:"type,omitempty"`
Text string `json:"text,omitempty"`
// Prompt caching
// https://openrouter.ai/docs/features/prompt-caching
CacheControl *CacheControl `json:"cache_control,omitempty"`
ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
InputAudio *ChatMessageInputAudio `json:"input_audio,omitempty"`
File *FileContent `json:"file,omitempty"`
}
type ChatMessagePartType ¶
type ChatMessagePartType string
const ( ChatMessagePartTypeText ChatMessagePartType = "text" ChatMessagePartTypeImageURL ChatMessagePartType = "image_url" ChatMessagePartTypeFile ChatMessagePartType = "file" ChatMessagePartTypeInputAudio ChatMessagePartType = "input_audio" )
type ChatProvider ¶
type ChatProvider struct {
// The order of the providers in the list determines the order in which they are called.
Order []string `json:"order,omitempty"`
// Allow fallbacks to other providers if the primary provider fails. Default: true
AllowFallbacks *bool `json:"allow_fallbacks,omitempty"`
// Only use providers that support all parameters in your request.
RequireParameters bool `json:"require_parameters,omitempty"`
// Control whether to use providers that may store data.
DataCollection DataCollection `json:"data_collection,omitempty"`
// List of provider slugs to allow for this request.
Only []string `json:"only,omitempty"`
// List of provider slugs to skip for this request.
Ignore []string `json:"ignore,omitempty"`
// List of quantization levels to filter by (e.g. ["int4", "int8"]).
Quantizations []string `json:"quantizations,omitempty"`
// Sort providers by price or throughput. (e.g. "price" or "throughput").
Sort ProviderSorting `json:"sort,omitempty"`
}
Provider Routing: https://openrouter.ai/docs/features/provider-routing
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func NewClientWithConfig ¶
func NewClientWithConfig(config ClientConfig) *Client
func (*Client) CreateAPIKey ¶ added in v1.1.6
func (c *Client) CreateAPIKey( ctx context.Context, request APIKeyCreateRequest, ) (APIKeyCreateResponse, error)
CreateAPIKey creates a new API key.
func (*Client) CreateChatCompletion ¶
func (c *Client) CreateChatCompletion( ctx context.Context, request ChatCompletionRequest, ) (response ChatCompletionResponse, err error)
CreateChatCompletion — API call to Create a completion for the chat message.
func (*Client) CreateChatCompletionStream ¶
func (c *Client) CreateChatCompletionStream( ctx context.Context, request ChatCompletionRequest, ) (*ChatCompletionStream, error)
CreateChatCompletionStream — API call to Create a completion for the chat message with streaming.
func (*Client) CreateCompletion ¶ added in v0.2.4
func (c *Client) CreateCompletion( ctx context.Context, request CompletionRequest, ) (response CompletionResponse, err error)
CreateCompletion — API call to Create a completion for the prompt.
func (*Client) CreateCompletionStream ¶ added in v0.2.4
func (c *Client) CreateCompletionStream( ctx context.Context, request CompletionRequest, ) (*CompletionStream, error)
CreateCompletionStream — API call to Create a completion for the prompt with streaming.
func (*Client) CreateEmbeddings ¶ added in v1.0.3
func (c *Client) CreateEmbeddings( ctx context.Context, request EmbeddingsRequest, ) (EmbeddingsResponse, error)
CreateEmbeddings submits an embedding request to the embeddings router.
API reference: https://openrouter.ai/docs/api/api-reference/embeddings/create-embeddings
func (*Client) DeleteAPIKey ¶ added in v1.1.6
DeleteAPIKey deletes a single API key by hash.
func (*Client) GetCurrentAPIKey ¶ added in v1.1.6
func (c *Client) GetCurrentAPIKey(ctx context.Context) (APIKeyCurrentResponse, error)
GetCurrentAPIKey returns information about the API key used for this request.
func (*Client) GetGeneration ¶ added in v0.2.1
func (*Client) ListAPIKeys ¶ added in v1.1.6
func (c *Client) ListAPIKeys(ctx context.Context) (APIKeysListResponse, error)
ListAPIKeys lists all API keys for the current account.
func (*Client) ListEmbeddingsModels ¶ added in v1.0.3
ListEmbeddingsModels returns all available embeddings models and their properties. API reference: https://openrouter.ai/docs/api/api-reference/embeddings/list-embeddings-models
func (*Client) ListModels ¶ added in v0.2.0
func (*Client) ListUserModels ¶ added in v0.2.0
func (*Client) UpdateAPIKey ¶ added in v1.1.6
func (c *Client) UpdateAPIKey( ctx context.Context, hash string, request APIKeyUpdateRequest, ) (APIKeyResponse, error)
UpdateAPIKey updates an API key by hash.
type ClientConfig ¶
type ClientConfig struct {
BaseURL string
OrgID string
AssistantVersion string
HTTPClient HTTPDoer
HttpReferer string
XTitle string
EmptyMessagesLimit uint
// contains filtered or unexported fields
}
ClientConfig is a configuration for the openrouter client.
func DefaultConfig ¶
func DefaultConfig(authToken string) *ClientConfig
type CompletionChoice ¶ added in v0.2.4
type CompletionChoice struct {
Index int `json:"index"`
Text string `json:"text"`
// Reasoning is used by all the other models.
Reasoning *string `json:"reasoning,omitempty"`
// FinishReason
// stop: API returned complete message,
// or a message terminated by one of the stop sequences provided via the stop parameter
// length: Incomplete model output due to max_tokens parameter or token limit
// function_call: The model decided to call a function
// content_filter: Omitted content due to a flag from our content filters
// null: API response still in progress or incomplete
FinishReason FinishReason `json:"finish_reason"`
LogProbs *LogProbs `json:"logprobs,omitempty"`
}
type CompletionRequest ¶ added in v0.2.4
type CompletionRequest struct {
Model string `json:"model,omitempty"`
// The prompt to complete
Prompt string `json:"prompt"`
// Optional model fallbacks: https://openrouter.ai/docs/features/model-routing#the-models-parameter
Models []string `json:"models,omitempty"`
Provider *ChatProvider `json:"provider,omitempty"`
Reasoning *ChatCompletionReasoning `json:"reasoning,omitempty"`
Usage *IncludeUsage `json:"usage,omitempty"`
// Apply message transforms
// https://openrouter.ai/docs/features/message-transforms
Transforms []string `json:"transforms,omitempty"`
Stream bool `json:"stream,omitempty"`
// MaxTokens The maximum number of tokens that can be generated in the chat completion.
// This value can be used to control costs for text generated via API.
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
Seed *int `json:"seed,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
RepetitionPenalty float32 `json:"repetition_penalty,omitempty"`
// LogitBias must be keyed by token ID strings (specified by their token ID in the tokenizer), not word strings.
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
LogitBias map[string]int `json:"logit_bias,omitempty"`
TopLogProbs int `json:"top_logprobs,omitempty"`
MinP float32 `json:"min_p,omitempty"`
TopA float32 `json:"top_a,omitempty"`
User string `json:"user,omitempty"`
// For usage with the broadcast feature. Group related requests together (such as a conversation or agent workflow) by including the session_id field (up to 128 characters).
// https://openrouter.ai/docs/guides/features/broadcast/overview#optional-trace-data
SessionId string `json:"session_id,omitempty"`
}
type CompletionResponse ¶ added in v0.2.4
type CompletionResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Choices []CompletionChoice `json:"choices"`
Citations []string `json:"citations"`
Usage *Usage `json:"usage,omitempty"`
SystemFingerprint string `json:"system_fingerprint"`
}
CompletionResponse represents a response structure for completion API.
type CompletionStream ¶ added in v0.2.4
type CompletionStream struct {
// contains filtered or unexported fields
}
func (*CompletionStream) Close ¶ added in v0.2.4
func (s *CompletionStream) Close()
Close terminates the stream and cleans up resources.
func (*CompletionStream) Recv ¶ added in v0.2.4
func (s *CompletionStream) Recv() (CompletionResponse, error)
Recv reads the next chunk from the stream.
type CompletionTokenDetails ¶ added in v0.1.8
type Content ¶
type Content struct {
Text string
Multi []ChatMessagePart
}
Content handles both string and multi-part content.
func (Content) MarshalJSON ¶
MarshalJSON serializes Content as a string or array.
func (*Content) UnmarshalJSON ¶
UnmarshalJSON deserializes Content from a string or array.
type ContentFilterResults ¶
type ContentFilterResults struct {
Hate Hate `json:"hate,omitempty"`
SelfHarm SelfHarm `json:"self_harm,omitempty"`
Sexual Sexual `json:"sexual,omitempty"`
Violence Violence `json:"violence,omitempty"`
JailBreak JailBreak `json:"jailbreak,omitempty"`
Profanity Profanity `json:"profanity,omitempty"`
}
type CostDetails ¶ added in v0.1.8
type DataCollection ¶ added in v0.1.6
type DataCollection string
const ( DataCollectionAllow DataCollection = "allow" DataCollectionDeny DataCollection = "deny" )
type EmbeddingData ¶ added in v1.0.3
type EmbeddingData struct {
Object string `json:"object"`
Embedding EmbeddingValue `json:"embedding"`
Index int `json:"index"`
}
EmbeddingData represents a single embedding entry in the response.
type EmbeddingValue ¶ added in v1.0.3
EmbeddingValue represents a single embedding, which can be returned either as a vector of floats or as a base64 string depending on encoding_format.
func (*EmbeddingValue) UnmarshalJSON ¶ added in v1.0.3
func (e *EmbeddingValue) UnmarshalJSON(data []byte) error
type EmbeddingsEncodingFormat ¶ added in v1.0.3
type EmbeddingsEncodingFormat string
EmbeddingsEncodingFormat controls how embeddings are returned by the API. See: https://openrouter.ai/docs/api/api-reference/embeddings/create-embeddings
const ( EmbeddingsEncodingFormatFloat EmbeddingsEncodingFormat = "float" EmbeddingsEncodingFormatBase64 EmbeddingsEncodingFormat = "base64" )
type EmbeddingsRequest ¶ added in v1.0.3
type EmbeddingsRequest struct {
// Model is the model slug to use for embeddings.
Model string `json:"model"`
// Input is the content to embed. See the API docs for supported formats.
Input any `json:"input"`
// EncodingFormat controls how the embedding is returned: "float" or "base64".
EncodingFormat EmbeddingsEncodingFormat `json:"encoding_format,omitempty"`
// Dimensions optionally truncates the embedding to the given number of dimensions.
Dimensions *int `json:"dimensions,omitempty"`
// User is an optional identifier for the end-user making the request.
User string `json:"user,omitempty"`
// Provider configuration for provider routing. This reuses the same structure
// as chat/completions provider routing, which is compatible with the embeddings API.
Provider *ChatProvider `json:"provider,omitempty"`
// InputType is an optional hint describing the type of input, e.g. "text" or "image".
InputType string `json:"input_type,omitempty"`
}
EmbeddingsRequest represents a request to the /embeddings endpoint.
The input field is intentionally typed as any to support the flexible input types accepted by the OpenRouter API:
- string
- []string
- []float64
- [][]float64
- structured content blocks
For examples, see: https://openrouter.ai/docs/api/api-reference/embeddings/create-embeddings
type EmbeddingsResponse ¶ added in v1.0.3
type EmbeddingsResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Data []EmbeddingData `json:"data"`
Model string `json:"model"`
Usage *EmbeddingsUsage `json:"usage,omitempty"`
}
EmbeddingsResponse represents the response from the /embeddings endpoint.
type EmbeddingsUsage ¶ added in v1.0.3
type EmbeddingsUsage struct {
PromptTokens int `json:"prompt_tokens"`
TotalTokens int `json:"total_tokens"`
Cost float64 `json:"cost"`
}
EmbeddingsUsage represents the token and cost statistics for an embeddings request.
type ErrorResponse ¶
type ErrorResponse struct {
Error *APIError `json:"error,omitempty"`
}
type FileContent ¶ added in v0.1.7
FileContent represents file content for PDF processing
type FinishReason ¶
type FinishReason string
const ( FinishReasonStop FinishReason = "stop" FinishReasonLength FinishReason = "length" FinishReasonFunctionCall FinishReason = "function_call" FinishReasonToolCalls FinishReason = "tool_calls" FinishReasonContentFilter FinishReason = "content_filter" FinishReasonNull FinishReason = "null" )
func (FinishReason) MarshalJSON ¶
func (r FinishReason) MarshalJSON() ([]byte, error)
type FunctionCall ¶
type FunctionDefinition ¶
type FunctionDefinition struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
Strict bool `json:"strict,omitempty"`
// Parameters is an object describing the function.
// You can pass json.RawMessage to describe the schema,
// or you can pass in a struct which serializes to the proper JSON schema.
// The jsonschema package is provided for convenience, but you should
// consider another specialized library if you require more complex schemas.
Parameters any `json:"parameters"`
}
type Generation ¶ added in v0.2.1
type Generation struct {
ID string `json:"id"`
TotalCost float64 `json:"total_cost"`
CreatedAt string `json:"created_at"`
Model string `json:"model"`
Origin string `json:"origin"`
Usage float64 `json:"usage"`
IsBYOK bool `json:"is_byok"`
UpstreamID *string `json:"upstream_id,omitempty"`
CacheDiscount *float64 `json:"cache_discount,omitempty"`
UpstreamInferenceCost *float64 `json:"upstream_inference_cost,omitempty"`
AppID *int `json:"app_id,omitempty"`
Streamed *bool `json:"streamed,omitempty"`
Cancelled *bool `json:"cancelled,omitempty"`
ProviderName *string `json:"provider_name,omitempty"`
Latency *int `json:"latency,omitempty"`
ModerationLatency *int `json:"moderation_latency,omitempty"`
GenerationTime *int `json:"generation_time,omitempty"`
FinishReason *string `json:"finish_reason,omitempty"`
NativeFinishReason *string `json:"native_finish_reason,omitempty"`
TokensPrompt *int `json:"tokens_prompt,omitempty"`
TokensCompletion *int `json:"tokens_completion,omitempty"`
NativeTokensPrompt *int `json:"native_tokens_prompt,omitempty"`
NativeTokensCompletion *int `json:"native_tokens_completion,omitempty"`
NativeTokensReasoning *int `json:"native_tokens_reasoning,omitempty"`
NumMediaPrompt *int `json:"num_media_prompt,omitempty"`
NumMediaCompletion *int `json:"num_media_completion,omitempty"`
NumSearchResults *int `json:"num_search_results,omitempty"`
}
type HTTPRequestBuilder ¶
type HTTPRequestBuilder struct {
// contains filtered or unexported fields
}
func NewRequestBuilder ¶
func NewRequestBuilder() *HTTPRequestBuilder
type ImageURLDetail ¶
type ImageURLDetail string
const ( ImageURLDetailHigh ImageURLDetail = "high" ImageURLDetailLow ImageURLDetail = "low" ImageURLDetailAuto ImageURLDetail = "auto" )
type IncludeUsage ¶
type IncludeUsage struct {
Include bool `json:"include"`
}
type JSONMarshaller ¶
type JSONMarshaller struct{}
type KeyLimitReset ¶ added in v1.1.6
type KeyLimitReset string
const ( KeyLimitResetDaily KeyLimitReset = "daily" KeyLimitResetWeekly KeyLimitReset = "weekly" KeyLimitResetMonthly KeyLimitReset = "monthly" )
type LogProb ¶
type LogProb struct {
Token string `json:"token"`
LogProb float64 `json:"logprob"`
Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null
// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
// In rare cases, there may be fewer than the number of requested top_logprobs returned.
TopLogProbs []TopLogProbs `json:"top_logprobs"`
}
LogProb represents the probability information for a token.
type LogProbs ¶
type LogProbs struct {
// Content is a list of message content tokens with log probability information.
Content []LogProb `json:"content"`
}
LogProbs is the top-level structure containing the log probability information.
type Marshaller ¶
type Model ¶ added in v0.2.0
type Model struct {
ID string `json:"id"`
Name string `json:"name"`
Created int64 `json:"created"`
Description string `json:"description"`
Architecture ModelArchitecture `json:"architecture"`
TopProvider ModelTopProvider `json:"top_provider"`
Pricing ModelPricing `json:"pricing"`
CanonicalSlug *string `json:"canonical_slug,omitempty"`
ContextLength *int64 `json:"context_length,omitempty"`
HuggingFaceID *string `json:"hugging_face_id,omitempty"`
PerRequestLimits any `json:"per_request_limits,omitempty"`
SupportedParameters []string `json:"supported_parameters,omitempty"`
}
type ModelArchitecture ¶ added in v0.2.0
type ModelPricing ¶ added in v0.2.0
type ModelPricing struct {
Prompt string `json:"prompt"`
Completion string `json:"completion"`
Image string `json:"image"`
Request string `json:"request"`
WebSearch string `json:"web_search"`
InternalReasoning string `json:"internal_reasoning"`
InputCacheRead *string `json:"input_cache_read,omitempty"`
InputCacheWrite *string `json:"input_cache_write,omitempty"`
}
type ModelTopProvider ¶ added in v0.2.0
type PDFEngine ¶ added in v0.1.6
type PDFEngine string
const ( // Best for scanned documents or PDFs with images ($2 per 1,000 pages). PDFEngineMistralOCR PDFEngine = "mistral-ocr" // Best for well-structured PDFs with clear text content (Free). PDFEnginePDFText PDFEngine = "pdf-text" // Only available for models that support file input natively (charged as input tokens). PDFEngineNative PDFEngine = "native" )
type PluginID ¶ added in v0.1.6
type PluginID string
const ( // Processing PDFs: https://openrouter.ai/docs/features/images-and-pdfs#processing-pdfs PluginIDFileParser PluginID = "file-parser" // Web search plugin: https://openrouter.ai/docs/features/web-search PluginIDWeb PluginID = "web" )
type PromptAnnotation ¶
type PromptAnnotation struct {
PromptIndex int `json:"prompt_index,omitempty"`
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}
type PromptFilterResult ¶
type PromptFilterResult struct {
Index int `json:"index"`
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}
type PromptTokenDetails ¶ added in v0.1.8
type ProviderError ¶ added in v0.2.1
ProviderError provides the provider error (if available).
func (*ProviderError) Message ¶ added in v0.2.1
func (e *ProviderError) Message() any
type ProviderSorting ¶ added in v0.1.6
type ProviderSorting string
const ( ProviderSortingPrice ProviderSorting = "price" ProviderSortingThroughput ProviderSorting = "throughput" ProviderSortingLatency ProviderSorting = "latency" )
type RequestBuilder ¶
type RequestError ¶
RequestError provides information about generic request errors.
func (*RequestError) Error ¶
func (e *RequestError) Error() string
func (*RequestError) Unwrap ¶
func (e *RequestError) Unwrap() error
type SearchContextSize ¶ added in v0.1.3
type SearchContextSize string
const ( SearchContextSizeLow SearchContextSize = "low" SearchContextSizeMedium SearchContextSize = "medium" SearchContextSizeHigh SearchContextSize = "high" )
type StreamOptions ¶
type StreamOptions struct {
// If set, an additional chunk will be streamed before the data: [DONE] message.
// The usage field on this chunk shows the token usage statistics for the entire request,
// and the choices field will always be an empty array.
// All other chunks will also include a usage field, but with a null value.
IncludeUsage bool `json:"include_usage,omitempty"`
}
type Tool ¶
type Tool struct {
Type ToolType `json:"type"`
Function *FunctionDefinition `json:"function,omitempty"`
}
type ToolCall ¶
type ToolCall struct {
// Index is not nil only in chat completion chunk object
Index *int `json:"index,omitempty"`
ID string `json:"id,omitempty"`
Type ToolType `json:"type"`
Function FunctionCall `json:"function"`
}
type TopLogProbs ¶
type URLCitation ¶ added in v0.1.3
type Usage ¶
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
CompletionTokenDetails CompletionTokenDetails `json:"completion_token_details"`
TotalTokens int `json:"total_tokens"`
IsBYOK bool `json:"is_byok"`
Cost float64 `json:"cost"`
CostDetails CostDetails `json:"cost_details"`
PromptTokenDetails PromptTokenDetails `json:"prompt_token_details"`
}
Usage represents the total token usage per request to OpenRouter.
type WebSearchOptions ¶ added in v0.1.3
type WebSearchOptions struct {
SearchContextSize SearchContextSize `json:"search_context_size"`
}
Source Files
¶
Directories
¶
| Path | Synopsis |
|---|---|
|
examples
|
|
|
completion
command
|
|
|
completion-tool
command
|
|
|
embeddings
command
|
|
|
structured
command
|
|
|
structured-deepseek
command
|
|
|
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.
|
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct. |