From ac8c8bb0558a00cf0d025ab8522aaa57b8cba7de Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Thu, 5 Mar 2026 19:20:21 +0300
Subject: Enha: onnx config vars

---
 bot.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index 13ee074..5463800 100644
--- a/bot.go
+++ b/bot.go
@@ -1393,12 +1393,13 @@ func updateModelLists() {
         }
     }
     // if llama.cpp started after gf-lt?
-    localModelsMu.Lock()
-    LocalModels, err = fetchLCPModelsWithLoadStatus()
-    localModelsMu.Unlock()
+    ml, err := fetchLCPModelsWithLoadStatus()
     if err != nil {
         logger.Warn("failed to fetch llama.cpp models", "error", err)
     }
+    localModelsMu.Lock()
+    LocalModels = ml
+    localModelsMu.Unlock()
     // set already loaded model in llama.cpp
     if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
         localModelsMu.Lock()
-- cgit v1.2.3

From efc92d884c36498220e2b8d5ad9e02f84e42d953 Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Thu, 5 Mar 2026 20:02:46 +0300
Subject: Chore: onnx library lookup

---
 bot.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index 5463800..20ffeb2 100644
--- a/bot.go
+++ b/bot.go
@@ -1501,7 +1501,13 @@ func init() {
         os.Exit(1)
         return
     }
-    ragger = rag.New(logger, store, cfg)
+    ragger, err = rag.New(logger, store, cfg)
+    if err != nil {
+        logger.Error("failed to create RAG", "error", err)
+    }
+    if ragger != nil && ragger.FallbackMessage() != "" && app != nil {
+        showToast("RAG", "ONNX unavailable, using API: "+ragger.FallbackMessage())
+    }
     // https://github.com/coreydaley/ggerganov-llama.cpp/blob/master/examples/server/README.md
     // load all chats in memory
     if _, err := loadHistoryChats(); err != nil {
-- cgit v1.2.3

From 822cc48834f5f1908f619b5441ae40946aceb86d Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Fri, 6 Mar 2026 10:37:08 +0300
Subject: Fix: avoid panic if statuslinewidget not loaded yet

---
 bot.go | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index 20ffeb2..ad52059 100644
--- a/bot.go
+++ b/bot.go
@@ -1400,6 +1400,9 @@ func updateModelLists() {
     localModelsMu.Lock()
     LocalModels = ml
     localModelsMu.Unlock()
+    for statusLineWidget == nil {
+        time.Sleep(time.Millisecond * 100)
+    }
     // set already loaded model in llama.cpp
     if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
         localModelsMu.Lock()
-- cgit v1.2.3

From bf655a10875630a6fe5f283340b6d390a1920b58 Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Sat, 7 Mar 2026 18:42:12 +0300
Subject: Enha: llama.cpp on non localhost

---
 bot.go | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index ad52059..663dd0b 100644
--- a/bot.go
+++ b/bot.go
@@ -16,7 +16,6 @@ import (
     "log/slog"
     "net"
     "net/http"
-    "net/url"
     "os"
     "regexp"
     "slices"
@@ -253,12 +252,7 @@ func createClient(connectTimeout time.Duration) *http.Client {
 }

 func warmUpModel() {
-    u, err := url.Parse(cfg.CurrentAPI)
-    if err != nil {
-        return
-    }
-    host := u.Hostname()
-    if host != "localhost" && host != "127.0.0.1" && host != "::1" {
+    if !isLocalLlamacpp() {
         return
     }
     // Check if model is already loaded
@@ -1404,20 +1398,21 @@ func updateModelLists() {
         time.Sleep(time.Millisecond * 100)
     }
     // set already loaded model in llama.cpp
-    if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
-        localModelsMu.Lock()
-        defer localModelsMu.Unlock()
-        for i := range LocalModels {
-            if strings.Contains(LocalModels[i], models.LoadedMark) {
-                m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
-                cfg.CurrentModel = m
-                chatBody.Model = m
-                cachedModelColor = "green"
-                updateStatusLine()
-                updateToolCapabilities()
-                app.Draw()
-                return
-            }
-        }
+    if !isLocalLlamacpp() {
+        return
+    }
+    localModelsMu.Lock()
+    defer localModelsMu.Unlock()
+    for i := range LocalModels {
+        if strings.Contains(LocalModels[i], models.LoadedMark) {
+            m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
+            cfg.CurrentModel = m
+            chatBody.Model = m
+            cachedModelColor = "green"
+            updateStatusLine()
+            updateToolCapabilities()
+            app.Draw()
+            return
+        }
     }
 }
-- cgit v1.2.3

From 4f0bce50c53267a9f53938ad1b264d5094a08ce4 Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Sat, 7 Mar 2026 19:11:13 +0300
Subject: Chore: one init for clear call order

---
 bot.go | 52 ++--------------------------------------------------
 1 file changed, 2 insertions(+), 50 deletions(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index 663dd0b..0b4328f 100644
--- a/bot.go
+++ b/bot.go
@@ -1548,55 +1548,7 @@ func init() {
     }
     // Initialize scrollToEndEnabled based on config
     scrollToEndEnabled = cfg.AutoScrollEnabled
-    go updateModelLists()
     go chatWatcher(ctx)
-}
-
-func getValidKnowToRecipient(msg *models.RoleMsg) (string, bool) {
-    if cfg == nil || !cfg.CharSpecificContextEnabled {
-        return "", false
-    }
-    // case where all roles are in the tag => public message
-    cr := listChatRoles()
-    slices.Sort(cr)
-    slices.Sort(msg.KnownTo)
-    if slices.Equal(cr, msg.KnownTo) {
-        logger.Info("got msg with tag mentioning every role")
-        return "", false
-    }
-    // Check each character in the KnownTo list
-    for _, recipient := range msg.KnownTo {
-        if recipient == msg.Role || recipient == cfg.ToolRole {
-            // weird cases, skip
-            continue
-        }
-        // Skip if this is the user character (user handles their own turn)
-        // If user is in KnownTo, stop processing - it's the user's turn
-        if recipient == cfg.UserRole || recipient == cfg.WriteNextMsgAs {
-            return "", false
-        }
-        return recipient, true
-    }
-    return "", false
-}
-
-// triggerPrivateMessageResponses checks if a message was sent privately to specific characters
-// and triggers those non-user characters to respond
-func triggerPrivateMessageResponses(msg *models.RoleMsg) {
-    recipient, ok := getValidKnowToRecipient(msg)
-    if !ok || recipient == "" {
-        return
-    }
-    // Trigger the recipient character to respond
-    triggerMsg := recipient + ":\n"
-    // Send empty message so LLM continues naturally from the conversation
-    crr := &models.ChatRoundReq{
-        UserMsg: triggerMsg,
-        Role: recipient,
-        Resume: true,
-    }
-    fmt.Fprintf(textView, "\n[-:-:b](%d) ", len(chatBody.Messages))
-    fmt.Fprint(textView, roleToIcon(recipient))
-    fmt.Fprint(textView, "[-:-:-]\n")
-    chatRoundChan <- crr
+    initTUI()
+    initTools()
 }
-- cgit v1.2.3

From 23cb8f2578540e698f590bed35f973a22a8c2f90 Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Sun, 8 Mar 2026 06:45:51 +0300
Subject: Chore: remove AutoCleanToolCallsFromCtx, atomic model color

---
 bot.go | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index 0b4328f..315491a 100644
--- a/bot.go
+++ b/bot.go
@@ -851,7 +851,7 @@ out:
             if thinkingCollapsed {
                 // Show placeholder immediately when thinking starts in collapsed mode
                 fmt.Fprint(textView, "[yellow::i][thinking... (press Alt+T to expand)][-:-:-]")
(press Alt+T to expand)][-:-:-]") - if scrollToEndEnabled { + if cfg.AutoScrollEnabled { textView.ScrollToEnd() } respText.WriteString(chunk) @@ -866,7 +866,7 @@ out: // Thinking already displayed as placeholder, just update respText respText.WriteString(chunk) justExitedThinkingCollapsed = true - if scrollToEndEnabled { + if cfg.AutoScrollEnabled { textView.ScrollToEnd() } continue @@ -888,7 +888,7 @@ out: respText.WriteString(chunk) // Update the message in chatBody.Messages so it persists during Alt+T chatBody.Messages[msgIdx].Content = respText.String() - if scrollToEndEnabled { + if cfg.AutoScrollEnabled { textView.ScrollToEnd() } // Send chunk to audio stream handler @@ -898,7 +898,7 @@ out: case toolChunk := <-openAIToolChan: fmt.Fprint(textView, toolChunk) toolResp.WriteString(toolChunk) - if scrollToEndEnabled { + if cfg.AutoScrollEnabled { textView.ScrollToEnd() } case <-streamDone: @@ -906,7 +906,7 @@ out: chunk := <-chunkChan fmt.Fprint(textView, chunk) respText.WriteString(chunk) - if scrollToEndEnabled { + if cfg.AutoScrollEnabled { textView.ScrollToEnd() } if cfg.TTS_ENABLED { @@ -1394,9 +1394,6 @@ func updateModelLists() { localModelsMu.Lock() LocalModels = ml localModelsMu.Unlock() - for statusLineWidget == nil { - time.Sleep(time.Millisecond * 100) - } // set already loaded model in llama.cpp if !isLocalLlamacpp() { return @@ -1408,7 +1405,7 @@ func updateModelLists() { m := strings.TrimPrefix(LocalModels[i], models.LoadedMark) cfg.CurrentModel = m chatBody.Model = m - cachedModelColor = "green" + cachedModelColor.Store("green") updateStatusLine() updateToolCapabilities() app.Draw() @@ -1546,8 +1543,8 @@ func init() { } } } - // Initialize scrollToEndEnabled based on config - scrollToEndEnabled = cfg.AutoScrollEnabled + // atomic default values + cachedModelColor.Store("orange") go chatWatcher(ctx) initTUI() initTools() -- cgit v1.2.3 From c200c9328c4aa7654dc41c0eac02fe1cc267d666 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Mar 2026 07:13:27 +0300 Subject: Enha: botresp, toolresp to atomic --- bot.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'bot.go') diff --git a/bot.go b/bot.go index 315491a..d01ebb9 100644 --- a/bot.go +++ b/bot.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -40,7 +41,7 @@ var ( store storage.FullRepo defaultFirstMsg = "Hello! What can I do for you?" 
     defaultStarter = []models.RoleMsg{}
-    interruptResp = false
+    interruptResp atomic.Bool
     ragger *rag.RAG
     chunkParser ChunkParser
     lastToolCall *models.FuncCall
@@ -643,7 +644,7 @@ func sendMsgToLLM(body io.Reader) {
             // continue
         }
         if len(line) <= 1 {
-            if interruptResp {
+            if interruptResp.Load() {
                 goto interrupt // get unstuck from bad connection
             }
             continue // skip \n
@@ -736,8 +737,7 @@ func sendMsgToLLM(body io.Reader) {
             lastToolCall.ID = chunk.ToolID
         }
     interrupt:
-        if interruptResp { // read bytes, so it would not get into beginning of the next req
-            // interruptResp = false
+        if interruptResp.Load() { // read bytes, so it would not get into beginning of the next req
            logger.Info("interrupted bot response", "chunk_counter", counter)
            streamDone <- true
            break
@@ -770,14 +770,14 @@ func showSpinner() {
     if cfg.WriteNextMsgAsCompletionAgent != "" {
         botPersona = cfg.WriteNextMsgAsCompletionAgent
     }
-    for botRespMode || toolRunningMode {
+    for botRespMode.Load() || toolRunningMode.Load() {
         time.Sleep(400 * time.Millisecond)
         spin := i % len(spinners)
         app.QueueUpdateDraw(func() {
             switch {
-            case toolRunningMode:
+            case toolRunningMode.Load():
                 textArea.SetTitle(spinners[spin] + " tool")
-            case botRespMode:
+            case botRespMode.Load():
                 textArea.SetTitle(spinners[spin] + " " + botPersona + " (F6 to interrupt)")
             default:
                 textArea.SetTitle(spinners[spin] + " input")
@@ -791,8 +791,8 @@ func showSpinner() {
 }

 func chatRound(r *models.ChatRoundReq) error {
-    interruptResp = false
-    botRespMode = true
+    interruptResp.Store(false)
+    botRespMode.Store(true)
     go showSpinner()
     updateStatusLine()
     botPersona := cfg.AssistantRole
@@ -800,7 +800,7 @@ func chatRound(r *models.ChatRoundReq) error {
         botPersona = cfg.WriteNextMsgAsCompletionAgent
     }
     defer func() {
-        botRespMode = false
+        botRespMode.Store(false)
         ClearImageAttachment()
     }()
     // check that there is a model set to use if is not local
@@ -928,7 +928,7 @@ out:
         }
         lastRespStats = nil
     }
-    botRespMode = false
+    botRespMode.Store(false)
     if r.Resume {
         chatBody.Messages[len(chatBody.Messages)-1].Content += respText.String()
         updatedMsg := chatBody.Messages[len(chatBody.Messages)-1]
@@ -957,7 +957,7 @@ out:
     }
     // Strip think blocks before parsing for tool calls
     respTextNoThink := thinkBlockRE.ReplaceAllString(respText.String(), "")
-    if interruptResp {
+    if interruptResp.Load() {
         return nil
     }
     if findCall(respTextNoThink, toolResp.String()) {
@@ -1192,9 +1192,9 @@ func findCall(msg, toolCall string) bool {
     }
     // Show tool call progress indicator before execution
     fmt.Fprintf(textView, "\n[yellow::i][tool: %s...][-:-:-]", fc.Name)
-    toolRunningMode = true
+    toolRunningMode.Store(true)
     resp := callToolWithAgent(fc.Name, fc.Args)
-    toolRunningMode = false
+    toolRunningMode.Store(false)
     toolMsg := string(resp)
     logger.Info("llm used a tool call", "tool_name", fc.Name, "too_args", fc.Args, "id", fc.ID, "tool_resp", toolMsg)
     // Create tool response message with the proper tool_call_id
-- cgit v1.2.3

From 6ed96c9bd3cb2cd7afb980cf023a0f969651acbe Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Sun, 8 Mar 2026 09:42:07 +0300
Subject: Fix (ctrl+w): avoid msg duplication

---
 bot.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'bot.go')

diff --git a/bot.go b/bot.go
index d01ebb9..cb75a7b 100644
--- a/bot.go
+++ b/bot.go
@@ -887,7 +887,9 @@ out:
             fmt.Fprint(textView, chunk)
             respText.WriteString(chunk)
             // Update the message in chatBody.Messages so it persists during Alt+T
-            chatBody.Messages[msgIdx].Content = respText.String()
+            if !r.Resume {
+                chatBody.Messages[msgIdx].Content += respText.String()
+            }
             if cfg.AutoScrollEnabled {
                 textView.ScrollToEnd()
             }
-- cgit v1.2.3
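
Note: the "Enha: llama.cpp on non localhost" patch replaces the inline url.Parse check in warmUpModel (and the strings.Contains check in updateModelLists) with a call to isLocalLlamacpp(), whose definition is not part of the bot.go diffs above. A minimal sketch of what such a helper could look like, reconstructed from the removed check; the exact name, location, and signature of the real helper are not shown here, so treat this as an assumption:

package main

import (
    "fmt"
    "net/url"
)

// isLocalAPI reports whether apiURL points at a server on the local machine.
// Sketch of the check the patch factors out of warmUpModel; the real
// isLocalLlamacpp() reads cfg.CurrentAPI instead of taking a parameter.
func isLocalAPI(apiURL string) bool {
    u, err := url.Parse(apiURL)
    if err != nil {
        return false
    }
    host := u.Hostname()
    return host == "localhost" || host == "127.0.0.1" || host == "::1"
}

func main() {
    fmt.Println(isLocalAPI("http://localhost:8080/v1/chat/completions")) // true
    fmt.Println(isLocalAPI("https://api.example.com/v1"))                // false
}

With the early return "if !isLocalLlamacpp() { return }", remote llama.cpp endpoints simply skip the local-only warm-up and loaded-model scan instead of being misdetected.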
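The two atomic-conversion patches ("Chore: remove AutoCleanToolCallsFromCtx, atomic model color" and "Enha: botresp, toolresp to atomic") only touch call sites in bot.go; the declarations of botRespMode, toolRunningMode, and cachedModelColor are not in these diffs. For the .Load()/.Store() calls above to compile, those package-level variables would need roughly the following shape — a sketch under that assumption, since the real declarations may live elsewhere and may differ:

package main

import (
    "fmt"
    "sync/atomic"
)

var (
    // Flags toggled from the streaming loop and read by the spinner goroutine,
    // hence lock-free atomics instead of plain bools.
    botRespMode     atomic.Bool
    toolRunningMode atomic.Bool
    // cachedModelColor holds a color name such as "green" or "orange";
    // atomic.Value lets concurrent readers see a consistent string without a mutex.
    cachedModelColor atomic.Value
)

func main() {
    cachedModelColor.Store("orange")
    botRespMode.Store(true)
    fmt.Println(botRespMode.Load(), cachedModelColor.Load()) // true orange
}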