Diffstat (limited to 'models/models.go')
-rw-r--r--  models/models.go  296
1 file changed, 0 insertions, 296 deletions
diff --git a/models/models.go b/models/models.go
index f0f4130..0a10da1 100644
--- a/models/models.go
+++ b/models/models.go
@@ -127,139 +127,6 @@ func (cb *ChatBody) MakeStopSlice() []string {
return ss
}
-type DSChatReq struct {
- Messages []RoleMsg `json:"messages"`
- Model string `json:"model"`
- Stream bool `json:"stream"`
- FrequencyPenalty int `json:"frequency_penalty"`
- MaxTokens int `json:"max_tokens"`
- PresencePenalty int `json:"presence_penalty"`
- Temperature float32 `json:"temperature"`
- TopP float32 `json:"top_p"`
- // ResponseFormat struct {
- // Type string `json:"type"`
- // } `json:"response_format"`
- // Stop any `json:"stop"`
- // StreamOptions any `json:"stream_options"`
- // Tools any `json:"tools"`
- // ToolChoice string `json:"tool_choice"`
- // Logprobs bool `json:"logprobs"`
- // TopLogprobs any `json:"top_logprobs"`
-}
-
-func NewDSCharReq(cb ChatBody) DSChatReq {
- return DSChatReq{
- Messages: cb.Messages,
- Model: cb.Model,
- Stream: cb.Stream,
- MaxTokens: 2048,
- PresencePenalty: 0,
- FrequencyPenalty: 0,
- Temperature: 1.0,
- TopP: 1.0,
- }
-}
-
-type DSCompletionReq struct {
- Model string `json:"model"`
- Prompt string `json:"prompt"`
- Echo bool `json:"echo"`
- FrequencyPenalty int `json:"frequency_penalty"`
- // Logprobs int `json:"logprobs"`
- MaxTokens int `json:"max_tokens"`
- PresencePenalty int `json:"presence_penalty"`
- Stop any `json:"stop"`
- Stream bool `json:"stream"`
- StreamOptions any `json:"stream_options"`
- Suffix any `json:"suffix"`
- Temperature float32 `json:"temperature"`
- TopP float32 `json:"top_p"`
-}
-
-func NewDSCompletionReq(prompt, model string, temp float32, stopSlice []string) DSCompletionReq {
- return DSCompletionReq{
- Model: model,
- Prompt: prompt,
- Temperature: temp,
- Stream: true,
- Echo: false,
- MaxTokens: 2048,
- PresencePenalty: 0,
- FrequencyPenalty: 0,
- TopP: 1.0,
- Stop: stopSlice,
- }
-}
-
-type DSCompletionResp struct {
- ID string `json:"id"`
- Choices []struct {
- FinishReason string `json:"finish_reason"`
- Index int `json:"index"`
- Logprobs struct {
- TextOffset []int `json:"text_offset"`
- TokenLogprobs []int `json:"token_logprobs"`
- Tokens []string `json:"tokens"`
- TopLogprobs []struct {
- } `json:"top_logprobs"`
- } `json:"logprobs"`
- Text string `json:"text"`
- } `json:"choices"`
- Created int `json:"created"`
- Model string `json:"model"`
- SystemFingerprint string `json:"system_fingerprint"`
- Object string `json:"object"`
- Usage struct {
- CompletionTokens int `json:"completion_tokens"`
- PromptTokens int `json:"prompt_tokens"`
- PromptCacheHitTokens int `json:"prompt_cache_hit_tokens"`
- PromptCacheMissTokens int `json:"prompt_cache_miss_tokens"`
- TotalTokens int `json:"total_tokens"`
- CompletionTokensDetails struct {
- ReasoningTokens int `json:"reasoning_tokens"`
- } `json:"completion_tokens_details"`
- } `json:"usage"`
-}
-
-type DSChatResp struct {
- Choices []struct {
- Delta struct {
- Content string `json:"content"`
- Role any `json:"role"`
- } `json:"delta"`
- FinishReason string `json:"finish_reason"`
- Index int `json:"index"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
- Created int `json:"created"`
- ID string `json:"id"`
- Model string `json:"model"`
- Object string `json:"object"`
- SystemFingerprint string `json:"system_fingerprint"`
- Usage struct {
- CompletionTokens int `json:"completion_tokens"`
- PromptTokens int `json:"prompt_tokens"`
- TotalTokens int `json:"total_tokens"`
- } `json:"usage"`
-}
-
-type DSChatStreamResp struct {
- ID string `json:"id"`
- Object string `json:"object"`
- Created int `json:"created"`
- Model string `json:"model"`
- SystemFingerprint string `json:"system_fingerprint"`
- Choices []struct {
- Index int `json:"index"`
- Delta struct {
- Content string `json:"content"`
- ReasoningContent string `json:"reasoning_content"`
- } `json:"delta"`
- Logprobs any `json:"logprobs"`
- FinishReason string `json:"finish_reason"`
- } `json:"choices"`
-}
-
type EmbeddingResp struct {
Embedding []float32 `json:"embedding"`
Index uint32 `json:"index"`
@@ -374,166 +241,3 @@ type LlamaCPPResp struct {
Content string `json:"content"`
Stop bool `json:"stop"`
}
-
-type DSBalance struct {
- IsAvailable bool `json:"is_available"`
- BalanceInfos []struct {
- Currency string `json:"currency"`
- TotalBalance string `json:"total_balance"`
- GrantedBalance string `json:"granted_balance"`
- ToppedUpBalance string `json:"topped_up_balance"`
- } `json:"balance_infos"`
-}
-
-// openrouter
-// https://openrouter.ai/docs/api-reference/completion
-type OpenRouterCompletionReq struct {
- Model string `json:"model"`
- Prompt string `json:"prompt"`
- Stream bool `json:"stream"`
- Temperature float32 `json:"temperature"`
- Stop []string `json:"stop"` // not present in docs
- MinP float32 `json:"min_p"`
- NPredict int32 `json:"max_tokens"`
-}
-
-func NewOpenRouterCompletionReq(model, prompt string, props map[string]float32, stopStrings []string) OpenRouterCompletionReq {
- return OpenRouterCompletionReq{
- Stream: true,
- Prompt: prompt,
- Temperature: props["temperature"],
- MinP: props["min_p"],
- NPredict: int32(props["n_predict"]),
- Stop: stopStrings,
- Model: model,
- }
-}
-
-type OpenRouterChatReq struct {
- Messages []RoleMsg `json:"messages"`
- Model string `json:"model"`
- Stream bool `json:"stream"`
- Temperature float32 `json:"temperature"`
- MinP float32 `json:"min_p"`
- NPredict int32 `json:"max_tokens"`
-}
-
-func NewOpenRouterChatReq(cb ChatBody, props map[string]float32) OpenRouterChatReq {
- return OpenRouterChatReq{
- Messages: cb.Messages,
- Model: cb.Model,
- Stream: cb.Stream,
- Temperature: props["temperature"],
- MinP: props["min_p"],
- NPredict: int32(props["n_predict"]),
- }
-}
-
-type OpenRouterChatRespNonStream struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Logprobs any `json:"logprobs"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Index int `json:"index"`
- Message struct {
- Role string `json:"role"`
- Content string `json:"content"`
- Refusal any `json:"refusal"`
- Reasoning any `json:"reasoning"`
- } `json:"message"`
- } `json:"choices"`
- Usage struct {
- PromptTokens int `json:"prompt_tokens"`
- CompletionTokens int `json:"completion_tokens"`
- TotalTokens int `json:"total_tokens"`
- } `json:"usage"`
-}
-
-type OpenRouterChatResp struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Index int `json:"index"`
- Delta struct {
- Role string `json:"role"`
- Content string `json:"content"`
- } `json:"delta"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
-}
-
-type OpenRouterCompletionResp struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Text string `json:"text"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
-}
-
-type ORModel struct {
- ID string `json:"id"`
- CanonicalSlug string `json:"canonical_slug"`
- HuggingFaceID string `json:"hugging_face_id"`
- Name string `json:"name"`
- Created int `json:"created"`
- Description string `json:"description"`
- ContextLength int `json:"context_length"`
- Architecture struct {
- Modality string `json:"modality"`
- InputModalities []string `json:"input_modalities"`
- OutputModalities []string `json:"output_modalities"`
- Tokenizer string `json:"tokenizer"`
- InstructType any `json:"instruct_type"`
- } `json:"architecture"`
- Pricing struct {
- Prompt string `json:"prompt"`
- Completion string `json:"completion"`
- Request string `json:"request"`
- Image string `json:"image"`
- Audio string `json:"audio"`
- WebSearch string `json:"web_search"`
- InternalReasoning string `json:"internal_reasoning"`
- } `json:"pricing,omitempty"`
- TopProvider struct {
- ContextLength int `json:"context_length"`
- MaxCompletionTokens int `json:"max_completion_tokens"`
- IsModerated bool `json:"is_moderated"`
- } `json:"top_provider"`
- PerRequestLimits any `json:"per_request_limits"`
- SupportedParameters []string `json:"supported_parameters"`
-}
-
-type ORModels struct {
- Data []ORModel `json:"data"`
-}
-
-func (orm *ORModels) ListModels(free bool) []string {
- resp := []string{}
- for _, model := range orm.Data {
- if free {
- if model.Pricing.Prompt == "0" && model.Pricing.Request == "0" &&
- model.Pricing.Completion == "0" {
- resp = append(resp, model.ID)
- }
- } else {
- resp = append(resp, model.ID)
- }
- }
- return resp
-}
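
For reference, below is a minimal, self-contained sketch of the pricing-based filtering that the removed ORModels.ListModels helper performed. The trimmed lowercase structs, the hard-coded sample payload, and the model IDs are assumptions added purely for illustration; they are not part of this repository or of the OpenRouter API definition beyond the fields shown in the deleted code above.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins for the removed ORModel/ORModels types
// (assumed shapes; only the fields the filter needs are kept).
type orModel struct {
	ID      string `json:"id"`
	Pricing struct {
		Prompt     string `json:"prompt"`
		Completion string `json:"completion"`
		Request    string `json:"request"`
	} `json:"pricing"`
}

type orModels struct {
	Data []orModel `json:"data"`
}

// listModels mirrors the removed ORModels.ListModels: with free=true it
// keeps only models whose prompt, completion, and request prices are "0";
// otherwise it returns every model ID.
func listModels(m orModels, free bool) []string {
	ids := []string{}
	for _, model := range m.Data {
		if !free || (model.Pricing.Prompt == "0" &&
			model.Pricing.Completion == "0" &&
			model.Pricing.Request == "0") {
			ids = append(ids, model.ID)
		}
	}
	return ids
}

func main() {
	// Example payload in the general shape of an OpenRouter models listing
	// (hypothetical model IDs, used only to exercise the filter).
	raw := `{"data":[
		{"id":"free/model-a","pricing":{"prompt":"0","completion":"0","request":"0"}},
		{"id":"paid/model-b","pricing":{"prompt":"0.001","completion":"0.002","request":"0"}}
	]}`
	var m orModels
	if err := json.Unmarshal([]byte(raw), &m); err != nil {
		panic(err)
	}
	fmt.Println(listModels(m, true)) // prints: [free/model-a]
}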