author     Grail Finder <wohilas@gmail.com>  2025-08-07 12:18:01 +0300
committer  Grail Finder <wohilas@gmail.com>  2025-08-07 12:18:01 +0300
commit     813cb49d36edc987ecfad13291f58b6b044df3ba (patch)
tree       16135431c421c5f1a06a10972805b20beb3dc2b8
parent     9b2558ffe88993e36d19cfb951ef8eb6016cec36 (diff)
Feat: open router impl
-rw-r--r--  .golangci.yml         1
-rw-r--r--  bot.go               41
-rw-r--r--  config.example.toml   2
-rw-r--r--  config/config.go     17
-rw-r--r--  llm.go              178
-rw-r--r--  models/models.go    160
-rw-r--r--  tables.go             1
-rw-r--r--  tui.go               13
8 files changed, 385 insertions, 28 deletions
diff --git a/.golangci.yml b/.golangci.yml
index d377c38..2c7e552 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -10,7 +10,6 @@ linters:
- fatcontext
- govet
- ineffassign
- - noctx
- perfsprint
- prealloc
- staticcheck
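
Dropping noctx is presumably what allows the new fetchORModels in bot.go (below) to use a bare http.Get. For reference, a minimal sketch of the context-aware form the linter would accept instead; fetchORModelsCtx is a hypothetical name and the caller-supplied ctx is assumed:

func fetchORModelsCtx(ctx context.Context, free bool) ([]string, error) {
	// http.NewRequestWithContext is what noctx asks for in place of http.Get
	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		"https://openrouter.ai/api/v1/models", nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to fetch OpenRouter models; status: %s", resp.Status)
	}
	data := &models.ORModels{}
	if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
		return nil, err
	}
	return data.ListModels(free), nil
}
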
diff --git a/bot.go b/bot.go
index 0503548..79b2804 100644
--- a/bot.go
+++ b/bot.go
@@ -52,6 +52,14 @@ var (
"min_p": 0.05,
"n_predict": -1.0,
}
+ ORFreeModels = []string{
+ "google/gemini-2.0-flash-exp:free",
+ "deepseek/deepseek-chat-v3-0324:free",
+ "mistralai/mistral-small-3.2-24b-instruct:free",
+ "qwen/qwen3-14b:free",
+ "google/gemma-3-27b-it:free",
+ "meta-llama/llama-3.3-70b-instruct:free",
+ }
)
func createClient(connectTimeout time.Duration) *http.Client {
@@ -124,6 +132,24 @@ func fetchDSBalance() *models.DSBalance {
return &resp
}
+func fetchORModels(free bool) ([]string, error) {
+ resp, err := http.Get("https://openrouter.ai/api/v1/models")
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to fetch OpenRouter models; status: %s", resp.Status)
+ }
+ data := &models.ORModels{}
+ if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
+ return nil, err
+ }
+ freeModels := data.ListModels(free)
+ return freeModels, nil
+}
+
func sendMsgToLLM(body io.Reader) {
choseChunkParser()
bodyBytes, _ := io.ReadAll(body)
@@ -143,7 +169,7 @@ func sendMsgToLLM(body io.Reader) {
}
req.Header.Add("Accept", "application/json")
req.Header.Add("Content-Type", "application/json")
- req.Header.Add("Authorization", "Bearer "+cfg.DeepSeekToken)
+ req.Header.Add("Authorization", "Bearer "+chunkParser.GetToken())
req.Header.Set("Content-Length", strconv.Itoa(len(bodyBytes)))
req.Header.Set("Accept-Encoding", "gzip")
// nolint
@@ -196,6 +222,9 @@ func sendMsgToLLM(body io.Reader) {
streamDone <- true
break
}
+ // OpenRouter streams keep-alive SSE comments (": OPENROUTER PROCESSING");
+ // match on substring, since the raw line may carry the comment prefix
+ if bytes.Contains(line, []byte("ROUTER PROCESSING")) {
+ continue
+ }
content, stop, err = chunkParser.ParseChunk(line)
if err != nil {
logger.Error("error parsing response body", "error", err,
@@ -408,7 +437,7 @@ func chatToTextSlice(showSys bool) []string {
if !showSys && (msg.Role != cfg.AssistantRole && msg.Role != cfg.UserRole) {
continue
}
- resp[i] = msg.ToText(i, cfg)
+ resp[i] = msg.ToText(i)
}
return resp
}
@@ -524,6 +553,14 @@ func init() {
playerOrder = []string{cfg.UserRole, cfg.AssistantRole, cfg.CluedoRole2}
cluedoState = extra.CluedoPrepCards(playerOrder)
}
+ if cfg.OpenRouterToken != "" {
+ orModels, err := fetchORModels(true)
+ if err != nil {
+ logger.Error("failed to fetch OpenRouter models", "error", err)
+ } else {
+ ORFreeModels = orModels
+ }
+ }
choseChunkParser()
httpClient = createClient(time.Second * 15)
if cfg.TTS_ENABLED {
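
For reference, the skip added to sendMsgToLLM above handles OpenRouter's SSE keep-alives. A small sketch of that loop shape on hypothetical raw lines; the exact comment text is an assumption based on OpenRouter's streaming behavior:

lines := [][]byte{
	[]byte(": OPENROUTER PROCESSING\n"), // keep-alive comment, no JSON payload
	[]byte(`data: {"choices":[{"delta":{"content":"Hi"}}]}` + "\n"),
}
for _, line := range lines {
	if bytes.Contains(line, []byte("ROUTER PROCESSING")) {
		continue // nothing to parse in a keep-alive
	}
	payload := bytes.TrimPrefix(line, []byte("data: "))
	_ = payload // the real loop hands this to chunkParser.ParseChunk
}
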
diff --git a/config.example.toml b/config.example.toml
index 229f657..731383b 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -1,5 +1,7 @@
ChatAPI = "http://localhost:8080/v1/chat/completions"
CompletionAPI = "http://localhost:8080/completion"
+OpenRouterCompletionAPI = "https://openrouter.ai/api/v1/completions"
+OpenRouterChatAPI = "https://openrouter.ai/api/v1/chat/completions"
EmbedURL = "http://localhost:8080/v1/embeddings"
ShowSys = true
LogFile = "log.txt"
diff --git a/config/config.go b/config/config.go
index 39f0208..110cd75 100644
--- a/config/config.go
+++ b/config/config.go
@@ -41,6 +41,11 @@ type Config struct {
DeepSeekToken string `toml:"DeepSeekToken"`
DeepSeekModel string `toml:"DeepSeekModel"`
ApiLinks []string
+ // openrouter
+ OpenRouterChatAPI string `toml:"OpenRouterChatAPI"`
+ OpenRouterCompletionAPI string `toml:"OpenRouterCompletionAPI"`
+ OpenRouterToken string `toml:"OpenRouterToken"`
+ OpenRouterModel string `toml:"OpenRouterModel"`
// TTS
TTS_URL string `toml:"TTS_URL"`
TTS_ENABLED bool `toml:"TTS_ENABLED"`
@@ -62,6 +67,8 @@ func LoadConfigOrDefault(fn string) *Config {
config.CompletionAPI = "http://localhost:8080/completion"
config.DeepSeekCompletionAPI = "https://api.deepseek.com/beta/completions"
config.DeepSeekChatAPI = "https://api.deepseek.com/chat/completions"
+ config.OpenRouterCompletionAPI = "https://openrouter.ai/api/v1/completions"
+ config.OpenRouterChatAPI = "https://openrouter.ai/api/v1/chat/completions"
config.RAGEnabled = false
config.EmbedURL = "http://localhost:8080/v1/embeddings"
config.ShowSys = true
@@ -81,10 +88,12 @@ func LoadConfigOrDefault(fn string) *Config {
}
config.CurrentAPI = config.ChatAPI
config.APIMap = map[string]string{
- config.ChatAPI: config.CompletionAPI,
- config.CompletionAPI: config.DeepSeekChatAPI,
- config.DeepSeekChatAPI: config.DeepSeekCompletionAPI,
- config.DeepSeekCompletionAPI: config.ChatAPI,
+ config.ChatAPI: config.CompletionAPI,
+ config.CompletionAPI: config.DeepSeekChatAPI,
+ config.DeepSeekChatAPI: config.DeepSeekCompletionAPI,
+ config.DeepSeekCompletionAPI: config.OpenRouterCompletionAPI,
+ config.OpenRouterCompletionAPI: config.OpenRouterChatAPI,
+ config.OpenRouterChatAPI: config.ChatAPI,
}
for _, el := range []string{config.ChatAPI, config.CompletionAPI, config.DeepSeekChatAPI, config.DeepSeekCompletionAPI} {
if el != "" {
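
With the two OpenRouter endpoints spliced in, APIMap forms a six-entry ring: each switch keypress replaces cfg.CurrentAPI with its successor and eventually wraps back to ChatAPI. A minimal sketch of the rotation, with nextAPI as a hypothetical helper:

// nextAPI steps CurrentAPI one link around the APIMap ring; six steps
// bring it back to ChatAPI.
func nextAPI(cfg *config.Config) string {
	if next, ok := cfg.APIMap[cfg.CurrentAPI]; ok {
		cfg.CurrentAPI = next
	}
	return cfg.CurrentAPI
}
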
diff --git a/llm.go b/llm.go
index 046d28d..6d1cdbf 100644
--- a/llm.go
+++ b/llm.go
@@ -11,6 +11,7 @@ import (
type ChunkParser interface {
ParseChunk([]byte) (string, bool, error)
FormMsg(msg, role string, cont bool) (io.Reader, error)
+ GetToken() string
}
func choseChunkParser() {
@@ -32,15 +33,17 @@ func choseChunkParser() {
chunkParser = DeepSeekerChat{}
logger.Debug("chosen deepseekerchat", "link", cfg.CurrentAPI)
return
+ case "https://openrouter.ai/api/v1/completions":
+ chunkParser = OpenRouterCompletion{}
+ logger.Debug("chosen openroutercompletion", "link", cfg.CurrentAPI)
+ return
+ case "https://openrouter.ai/api/v1/chat/completions":
+ chunkParser = OpenRouterChat{}
+ logger.Debug("chosen openrouterchat", "link", cfg.CurrentAPI)
+ return
default:
chunkParser = LlamaCPPeer{}
}
- // if strings.Contains(cfg.CurrentAPI, "chat") {
- // logger.Debug("chosen chat parser")
- // chunkParser = OpenAIer{}
- // return
- // }
- // logger.Debug("chosen llamacpp /completion parser")
}
type LlamaCPPeer struct {
@@ -51,6 +54,16 @@ type DeepSeekerCompletion struct {
}
type DeepSeekerChat struct {
}
+type OpenRouterCompletion struct {
+ Model string
+}
+type OpenRouterChat struct {
+ Model string
+}
+
+func (lcp LlamaCPPeer) GetToken() string {
+ return ""
+}
func (lcp LlamaCPPeer) FormMsg(msg, role string, resume bool) (io.Reader, error) {
logger.Debug("formmsg llamacppeer", "link", cfg.CurrentAPI)
@@ -88,10 +101,10 @@ func (lcp LlamaCPPeer) FormMsg(msg, role string, resume bool) (io.Reader, error)
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt)
var payload any
- payload = models.NewLCPReq(prompt, cfg, defaultLCPProps, chatBody.MakeStopSlice())
- if strings.Contains(chatBody.Model, "deepseek") {
+ payload = models.NewLCPReq(prompt, defaultLCPProps, chatBody.MakeStopSlice())
+ if strings.Contains(chatBody.Model, "deepseek") { // TODO: why?
payload = models.NewDSCompletionReq(prompt, chatBody.Model,
- defaultLCPProps["temp"], cfg, chatBody.MakeStopSlice())
+ defaultLCPProps["temp"], chatBody.MakeStopSlice())
}
data, err := json.Marshal(payload)
if err != nil {
@@ -116,6 +129,10 @@ func (lcp LlamaCPPeer) ParseChunk(data []byte) (string, bool, error) {
return llmchunk.Content, false, nil
}
+func (op OpenAIer) GetToken() string {
+ return ""
+}
+
func (op OpenAIer) ParseChunk(data []byte) (string, bool, error) {
llmchunk := models.LLMRespChunk{}
if err := json.Unmarshal(data, &llmchunk); err != nil {
@@ -177,6 +194,10 @@ func (ds DeepSeekerCompletion) ParseChunk(data []byte) (string, bool, error) {
return llmchunk.Choices[0].Text, false, nil
}
+func (ds DeepSeekerCompletion) GetToken() string {
+ return cfg.DeepSeekToken
+}
+
func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader, error) {
logger.Debug("formmsg deepseekercompletion", "link", cfg.CurrentAPI)
if msg != "" { // otherwise let the bot to continue
@@ -213,7 +234,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt)
payload := models.NewDSCompletionReq(prompt, chatBody.Model,
- defaultLCPProps["temp"], cfg, chatBody.MakeStopSlice())
+ defaultLCPProps["temp"], chatBody.MakeStopSlice())
data, err := json.Marshal(payload)
if err != nil {
logger.Error("failed to form a msg", "error", err)
@@ -240,6 +261,10 @@ func (ds DeepSeekerChat) ParseChunk(data []byte) (string, bool, error) {
return llmchunk.Choices[0].Delta.Content, false, nil
}
+func (ds DeepSeekerChat) GetToken() string {
+ return cfg.DeepSeekToken
+}
+
func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, error) {
logger.Debug("formmsg deepseekerchat", "link", cfg.CurrentAPI)
if cfg.ToolUse && !resume {
@@ -286,3 +311,136 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro
}
return bytes.NewReader(data), nil
}
+
+// openrouter
+func (or OpenRouterCompletion) ParseChunk(data []byte) (string, bool, error) {
+ llmchunk := models.OpenRouterCompletionResp{}
+ if err := json.Unmarshal(data, &llmchunk); err != nil {
+ logger.Error("failed to decode", "error", err, "line", string(data))
+ return "", false, err
+ }
+ content := llmchunk.Choices[len(llmchunk.Choices)-1].Text
+ if llmchunk.Choices[len(llmchunk.Choices)-1].FinishReason == "stop" {
+ if content != "" {
+ logger.Error("text inside of finish llmchunk", "chunk", llmchunk)
+ }
+ return content, true, nil
+ }
+ return content, false, nil
+}
+
+func (or OpenRouterCompletion) GetToken() string {
+ return cfg.OpenRouterToken
+}
+
+func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader, error) {
+ logger.Debug("formmsg openroutercompletion", "link", cfg.CurrentAPI)
+ if msg != "" { // otherwise let the bot to continue
+ newMsg := models.RoleMsg{Role: role, Content: msg}
+ chatBody.Messages = append(chatBody.Messages, newMsg)
+ // if rag
+ if cfg.RAGEnabled {
+ ragResp, err := chatRagUse(newMsg.Content)
+ if err != nil {
+ logger.Error("failed to form a rag msg", "error", err)
+ return nil, err
+ }
+ ragMsg := models.RoleMsg{Role: cfg.ToolRole, Content: ragResp}
+ chatBody.Messages = append(chatBody.Messages, ragMsg)
+ }
+ }
+ if cfg.ToolUse && !resume {
+ // add to chat body
+ chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
+ }
+ messages := make([]string, len(chatBody.Messages))
+ for i, m := range chatBody.Messages {
+ messages[i] = m.ToPrompt()
+ }
+ prompt := strings.Join(messages, "\n")
+ // strings builder?
+ if !resume {
+ botMsgStart := "\n" + cfg.AssistantRole + ":\n"
+ prompt += botMsgStart
+ }
+ if cfg.ThinkUse && !cfg.ToolUse {
+ prompt += "<think>"
+ }
+ logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
+ "msg", msg, "resume", resume, "prompt", prompt)
+ payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt, defaultLCPProps, chatBody.MakeStopSlice())
+ data, err := json.Marshal(payload)
+ if err != nil {
+ logger.Error("failed to form a msg", "error", err)
+ return nil, err
+ }
+ return bytes.NewReader(data), nil
+}
+
+// chat
+func (or OpenRouterChat) ParseChunk(data []byte) (string, bool, error) {
+ llmchunk := models.OpenRouterChatResp{}
+ if err := json.Unmarshal(data, &llmchunk); err != nil {
+ logger.Error("failed to decode", "error", err, "line", string(data))
+ return "", false, err
+ }
+ content := llmchunk.Choices[len(llmchunk.Choices)-1].Delta.Content
+ if llmchunk.Choices[len(llmchunk.Choices)-1].FinishReason == "stop" {
+ if content != "" {
+ logger.Error("text inside of finish llmchunk", "chunk", llmchunk)
+ }
+ return content, true, nil
+ }
+ return content, false, nil
+}
+
+func (or OpenRouterChat) GetToken() string {
+ return cfg.OpenRouterToken
+}
+
+func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, error) {
+ logger.Debug("formmsg open router completion", "link", cfg.CurrentAPI)
+ if cfg.ToolUse && !resume {
+ // prompt += "\n" + cfg.ToolRole + ":\n" + toolSysMsg
+ // add to chat body
+ chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
+ }
+ if msg != "" { // otherwise let the bot continue
+ newMsg := models.RoleMsg{Role: role, Content: msg}
+ chatBody.Messages = append(chatBody.Messages, newMsg)
+ // if rag
+ if cfg.RAGEnabled {
+ ragResp, err := chatRagUse(newMsg.Content)
+ if err != nil {
+ logger.Error("failed to form a rag msg", "error", err)
+ return nil, err
+ }
+ ragMsg := models.RoleMsg{Role: cfg.ToolRole, Content: ragResp}
+ chatBody.Messages = append(chatBody.Messages, ragMsg)
+ }
+ }
+ // Create a copy of the chat body with the user role standardized to "user"
+ bodyCopy := &models.ChatBody{
+ Messages: make([]models.RoleMsg, len(chatBody.Messages)),
+ Model: chatBody.Model,
+ Stream: chatBody.Stream,
+ }
+ for i, m := range chatBody.Messages {
+ logger.Debug("checking roles", "#", i, "role", m.Role)
+ if m.Role == cfg.UserRole || i == 1 {
+ // copy the whole message and swap only the role, so content is not lost
+ m.Role = "user"
+ logger.Debug("replaced role in body", "#", i)
+ }
+ bodyCopy.Messages[i] = m
+ }
+ // use the OpenRouter request type declared in models so sampling props apply
+ orBody := models.NewOpenRouterChatReq(*bodyCopy, defaultLCPProps)
+ data, err := json.Marshal(orBody)
+ if err != nil {
+ logger.Error("failed to form a msg", "error", err)
+ return nil, err
+ }
+ return bytes.NewReader(data), nil
+}
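
A quick sketch of the new parsers on a single streamed chunk; the JSON line is made up but matches the OpenRouterCompletionResp shape:

raw := []byte(`{"id":"gen-1","choices":[{"text":"Hello","finish_reason":""}]}`)
content, stop, err := OpenRouterCompletion{}.ParseChunk(raw)
if err != nil {
	logger.Error("parse failed", "error", err)
}
// content == "Hello", stop == false; stop flips to true once a chunk
// arrives with finish_reason set to "stop"
fmt.Println(content, stop)
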
diff --git a/models/models.go b/models/models.go
index 9514741..bb3fa1b 100644
--- a/models/models.go
+++ b/models/models.go
@@ -2,7 +2,6 @@ package models
import (
"fmt"
- "gf-lt/config"
"strings"
)
@@ -56,7 +55,7 @@ type RoleMsg struct {
Content string `json:"content"`
}
-func (m RoleMsg) ToText(i int, cfg *config.Config) string {
+func (m RoleMsg) ToText(i int) string {
icon := fmt.Sprintf("(%d)", i)
// check if already has role annotation (/completion makes them)
if !strings.HasPrefix(m.Content, m.Role+":") {
@@ -185,7 +184,7 @@ type DSCompletionReq struct {
TopP float32 `json:"top_p"`
}
-func NewDSCompletionReq(prompt, model string, temp float32, cfg *config.Config, stopSlice []string) DSCompletionReq {
+func NewDSCompletionReq(prompt, model string, temp float32, stopSlice []string) DSCompletionReq {
return DSCompletionReq{
Model: model,
Prompt: prompt,
@@ -334,7 +333,7 @@ type LlamaCPPReq struct {
// Samplers string `json:"samplers"`
}
-func NewLCPReq(prompt string, cfg *config.Config, props map[string]float32, stopStrings []string) LlamaCPPReq {
+func NewLCPReq(prompt string, props map[string]float32, stopStrings []string) LlamaCPPReq {
return LlamaCPPReq{
Stream: true,
Prompt: prompt,
@@ -362,3 +361,156 @@ type DSBalance struct {
ToppedUpBalance string `json:"topped_up_balance"`
} `json:"balance_infos"`
}
+
+// openrouter
+// https://openrouter.ai/docs/api-reference/completion
+type OpenRouterCompletionReq struct {
+ Model string `json:"model"`
+ Prompt string `json:"prompt"`
+ Stream bool `json:"stream"`
+ Temperature float32 `json:"temperature"`
+ Stop []string `json:"stop"` // not present in docs
+ MinP float32 `json:"min_p"`
+ NPredict int32 `json:"max_tokens"`
+}
+
+func NewOpenRouterCompletionReq(model, prompt string, props map[string]float32, stopStrings []string) OpenRouterCompletionReq {
+ return OpenRouterCompletionReq{
+ Stream: true,
+ Prompt: prompt,
+ Temperature: props["temp"], // defaultLCPProps stores this under "temp"
+ MinP: props["min_p"],
+ NPredict: int32(props["n_predict"]),
+ Stop: stopStrings,
+ Model: model,
+ }
+}
+
+type OpenRouterChatReq struct {
+ Messages []RoleMsg `json:"messages"`
+ Model string `json:"model"`
+ Stream bool `json:"stream"`
+ Temperature float32 `json:"temperature"`
+ MinP float32 `json:"min_p"`
+ NPredict int32 `json:"max_tokens"`
+}
+
+func NewOpenRouterChatReq(cb ChatBody, props map[string]float32) OpenRouterChatReq {
+ return OpenRouterChatReq{
+ Messages: cb.Messages,
+ Model: cb.Model,
+ Stream: cb.Stream,
+ Temperature: props["temp"],
+ MinP: props["min_p"],
+ NPredict: int32(props["n_predict"]),
+ }
+}
+
+type OpenRouterChatRespNonStream struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Logprobs any `json:"logprobs"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Index int `json:"index"`
+ Message struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ Refusal any `json:"refusal"`
+ Reasoning any `json:"reasoning"`
+ } `json:"message"`
+ } `json:"choices"`
+ Usage struct {
+ PromptTokens int `json:"prompt_tokens"`
+ CompletionTokens int `json:"completion_tokens"`
+ TotalTokens int `json:"total_tokens"`
+ } `json:"usage"`
+}
+
+type OpenRouterChatResp struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Index int `json:"index"`
+ Delta struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ } `json:"delta"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Logprobs any `json:"logprobs"`
+ } `json:"choices"`
+}
+
+type OpenRouterCompletionResp struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Text string `json:"text"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Logprobs any `json:"logprobs"`
+ } `json:"choices"`
+}
+
+type ORModel struct {
+ ID string `json:"id"`
+ CanonicalSlug string `json:"canonical_slug"`
+ HuggingFaceID string `json:"hugging_face_id"`
+ Name string `json:"name"`
+ Created int `json:"created"`
+ Description string `json:"description"`
+ ContextLength int `json:"context_length"`
+ Architecture struct {
+ Modality string `json:"modality"`
+ InputModalities []string `json:"input_modalities"`
+ OutputModalities []string `json:"output_modalities"`
+ Tokenizer string `json:"tokenizer"`
+ InstructType any `json:"instruct_type"`
+ } `json:"architecture"`
+ Pricing struct {
+ Prompt string `json:"prompt"`
+ Completion string `json:"completion"`
+ Request string `json:"request"`
+ Image string `json:"image"`
+ Audio string `json:"audio"`
+ WebSearch string `json:"web_search"`
+ InternalReasoning string `json:"internal_reasoning"`
+ } `json:"pricing,omitempty"`
+ TopProvider struct {
+ ContextLength int `json:"context_length"`
+ MaxCompletionTokens int `json:"max_completion_tokens"`
+ IsModerated bool `json:"is_moderated"`
+ } `json:"top_provider"`
+ PerRequestLimits any `json:"per_request_limits"`
+ SupportedParameters []string `json:"supported_parameters"`
+}
+
+type ORModels struct {
+ Data []ORModel `json:"data"`
+}
+
+func (orm *ORModels) ListModels(free bool) []string {
+ resp := []string{}
+ for _, model := range orm.Data {
+ if free {
+ if model.Pricing.Prompt == "0" && model.Pricing.Request == "0" &&
+ model.Pricing.Completion == "0" {
+ resp = append(resp, model.ID)
+ }
+ } else {
+ resp = append(resp, model.ID)
+ }
+ }
+ return resp
+}
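
ListModels keeps a model only when its prompt, request, and completion prices are all the string "0". A small sketch with made-up catalog entries:

catalog := ORModels{Data: make([]ORModel, 2)}
catalog.Data[0].ID = "example/free-model:free"
catalog.Data[0].Pricing.Prompt = "0"
catalog.Data[0].Pricing.Request = "0"
catalog.Data[0].Pricing.Completion = "0"
catalog.Data[1].ID = "example/paid-model"
catalog.Data[1].Pricing.Prompt = "0.000002" // any non-"0" price fails the free filter
fmt.Println(catalog.ListModels(true))  // [example/free-model:free]
fmt.Println(catalog.ListModels(false)) // both IDs
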
diff --git a/tables.go b/tables.go
index ad39396..4090c8a 100644
--- a/tables.go
+++ b/tables.go
@@ -144,7 +144,6 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table {
return
}
// Reload card from disk
- newCard := &models.CharCard{}
newCard, err := pngmeta.ReadCard(cc.FilePath, cfg.UserRole)
if err != nil {
logger.Error("failed to reload charcard", "path", cc.FilePath, "error", err)
diff --git a/tui.go b/tui.go
index 95b93f2..ee0e5e6 100644
--- a/tui.go
+++ b/tui.go
@@ -241,6 +241,8 @@ func setLogLevel(sl string) {
func makePropsForm(props map[string]float32) *tview.Form {
// https://github.com/rivo/tview/commit/0a18dea458148770d212d348f656988df75ff341
// no way to close a form by a key press; a shame.
+ modelList := []string{chatBody.Model, "deepseek-chat", "deepseek-reasoner"}
+ modelList = append(modelList, ORFreeModels...)
form := tview.NewForm().
AddTextView("Notes", "Props for llamacpp completion call", 40, 2, true, false).
AddCheckbox("Insert <think> (/completion only)", cfg.ThinkUse, func(checked bool) {
@@ -253,7 +255,7 @@ func makePropsForm(props map[string]float32) *tview.Form {
}).AddDropDown("Select an api: ", slices.Insert(cfg.ApiLinks, 0, cfg.CurrentAPI), 0,
func(option string, optionIndex int) {
cfg.CurrentAPI = option
- }).AddDropDown("Select a model: ", []string{chatBody.Model, "deepseek-chat", "deepseek-reasoner"}, 0,
+ }).AddDropDown("Select a model: ", modelList, 0,
func(option string, optionIndex int) {
chatBody.Model = option
}).AddDropDown("Write next message as: ", chatBody.ListRoles(), 0,
@@ -694,11 +696,10 @@ func init() {
return nil
}
cfg.CurrentAPI = newAPI
- if strings.Contains(cfg.CurrentAPI, "deepseek") {
- chatBody.Model = "deepseek-chat"
- } else {
- chatBody.Model = "local"
- }
+ // // TODO: implement model pick
+ // if strings.Contains(cfg.CurrentAPI, "deepseek") {
+ // chatBody.Model = "deepseek-chat"
+ // }
choseChunkParser()
updateStatusLine()
return nil