From f5d76eb60587564648e9f5084469a27cef5765b8 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 16 Jan 2026 10:11:01 +0300 Subject: Doc: feature concept --- char-specific-context.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 char-specific-context.md diff --git a/char-specific-context.md b/char-specific-context.md new file mode 100644 index 0000000..d56a0cf --- /dev/null +++ b/char-specific-context.md @@ -0,0 +1,31 @@ +say we have a chat (system card) with three or more characters: +Alice, Bob and Carl. +the chat uses /completion endpoint (as oposed to /v1/chat/completion of openai) to the same llm on all chars. +Alice needs to pass info to Bob without Carl knowing the content (or perhaps even that communication occured at all). +Issue is that being in the same chat history (chatBody), llm shares context for each char. +Even if message passed through the tool calls, Carl can see a tool call with the arguments. +If we delete tool calls and their responses, then both Bob and Alice would have to re-request that secret info each time it is their turn, which is absurd. + +concept of char specific context: +let every message to have a `KnownTo` field (type []string); +which could be empty (to everyone) or have speicifc names ([]string{"Alice", "Bob"}) +so when that's character turn (which we track in `WriteNextMsgAsCompletionAgent`, then that message is injected at its proper index position (means every message should know it's index?) into chatBody (chat history). + +indexes are tricky. +what happens if msg is deleted? will every following message decrement their index? so far edit/copy functionality take in consideration position of existing messages in order. +how to avoid two messages with the same index? if Alices letter is send as secret and assigned index: 5. Then Carl's turn we have that secret message excluded, so his action would get also index 5. 
+Perhaps instead of indexes we should only keep message order by timestamps (time.Time)? + +so we need to think of some sort of tag that llm could add into the message, to make sure it is to be known by that specific target char, some weird string that would not occur naturally, that we could parse: +__known_to_chars__Alice,Bob__ + + +for ex. +Alice: __known_to_chars__Bob__ Can you keep a secret? +Bob: I also have a secret for you Alice __known_to_chars__Alice__ + +tag can be anywhere in the message. Sender should be also included in KnownTo, so we should parse sender name and add them to KnownTo. + +also need to consider user case (as in human chatting with llm). User also can assume any char identity to write the message and ideally the same rules should affect user's chars. + +Again, this is not going to work with openais /v1/chat endpoint since it converts all characters to user/assistant; so it is completion only feature. It also might cause unwanted effects, so we better have an option in config to switch this context editing on/off. -- cgit v1.2.3 From eb44b1e4b244e5a93e7d465b14df39819d8dfaba Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 16 Jan 2026 16:53:19 +0300 Subject: Feat: impl attempt --- bot.go | 106 ++++++++++++++++++ bot_test.go | 318 ++++++++++++++++++++++++++++++++++++++++++++++++++++ config.example.toml | 2 + config/config.go | 10 +- llm.go | 66 +++++++---- models/models.go | 14 ++- 6 files changed, 486 insertions(+), 30 deletions(-) diff --git a/bot.go b/bot.go index 4d6da58..967c060 100644 --- a/bot.go +++ b/bot.go @@ -18,6 +18,7 @@ import ( "net/url" "os" "path" + "regexp" "strconv" "strings" "sync" @@ -68,6 +69,111 @@ var ( LocalModels = []string{} ) +// parseKnownToTag extracts known_to list from content using configured tag. +// Returns cleaned content and list of character names. 
+func parseKnownToTag(content string) (string, []string) { + if cfg == nil || !cfg.CharSpecificContextEnabled { + return content, nil + } + tag := cfg.CharSpecificContextTag + if tag == "" { + tag = "__known_to_chars__" + } + // Pattern: tag + list + "__" + pattern := regexp.QuoteMeta(tag) + `(.*?)__` + re := regexp.MustCompile(pattern) + matches := re.FindAllStringSubmatch(content, -1) + if len(matches) == 0 { + return content, nil + } + // There may be multiple tags; we combine all. + var knownTo []string + cleaned := content + for _, match := range matches { + if len(match) < 2 { + continue + } + // Remove the entire matched tag from content + cleaned = strings.Replace(cleaned, match[0], "", 1) + + list := strings.TrimSpace(match[1]) + if list == "" { + continue + } + parts := strings.Split(list, ",") + for _, p := range parts { + p = strings.TrimSpace(p) + if p != "" { + knownTo = append(knownTo, p) + } + } + } + // Also remove any leftover trailing "__" that might be orphaned? Not needed. + return strings.TrimSpace(cleaned), knownTo +} + +// processMessageTag processes a message for known_to tag and sets KnownTo field. +// It also ensures the sender's role is included in KnownTo. +// If KnownTo already set (e.g., from DB), preserves it unless new tag found. +func processMessageTag(msg models.RoleMsg) models.RoleMsg { + if cfg == nil || !cfg.CharSpecificContextEnabled { + return msg + } + // If KnownTo already set, assume tag already processed (content cleaned). + // However, we still check for new tags (maybe added later). + cleaned, knownTo := parseKnownToTag(msg.Content) + if cleaned != msg.Content { + msg.Content = cleaned + } + // If tag found, replace KnownTo with new list (merge with existing?) + // For simplicity, if knownTo is not nil, replace. 
+ if knownTo != nil { + msg.KnownTo = knownTo + } + // Ensure sender role is in KnownTo + if msg.Role != "" { + senderAdded := false + for _, k := range msg.KnownTo { + if k == msg.Role { + senderAdded = true + break + } + } + if !senderAdded { + msg.KnownTo = append(msg.KnownTo, msg.Role) + } + } + return msg +} + +// filterMessagesForCharacter returns messages visible to the specified character. +// If CharSpecificContextEnabled is false, returns all messages. +func filterMessagesForCharacter(messages []models.RoleMsg, character string) []models.RoleMsg { + if cfg == nil || !cfg.CharSpecificContextEnabled || character == "" { + return messages + } + filtered := make([]models.RoleMsg, 0, len(messages)) + for _, msg := range messages { + // If KnownTo is nil or empty, message is visible to all + if len(msg.KnownTo) == 0 { + filtered = append(filtered, msg) + continue + } + // Check if character is in KnownTo list + found := false + for _, k := range msg.KnownTo { + if k == character { + found = true + break + } + } + if found { + filtered = append(filtered, msg) + } + } + return filtered +} + // cleanNullMessages removes messages with null or empty content to prevent API issues func cleanNullMessages(messages []models.RoleMsg) []models.RoleMsg { // // deletes tool calls which we don't want for now diff --git a/bot_test.go b/bot_test.go index d2956a9..7496175 100644 --- a/bot_test.go +++ b/bot_test.go @@ -286,4 +286,322 @@ func TestConvertJSONToMapStringString(t *testing.T) { } }) } +} + +func TestParseKnownToTag(t *testing.T) { + tests := []struct { + name string + content string + enabled bool + tag string + wantCleaned string + wantKnownTo []string + }{ + { + name: "feature disabled returns original", + content: "Hello __known_to_chars__Alice__", + enabled: false, + tag: "__known_to_chars__", + wantCleaned: "Hello __known_to_chars__Alice__", + wantKnownTo: nil, + }, + { + name: "no tag returns original", + content: "Hello Alice", + enabled: true, + tag: 
"__known_to_chars__", + wantCleaned: "Hello Alice", + wantKnownTo: nil, + }, + { + name: "single tag with one char", + content: "Hello __known_to_chars__Alice__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, + }, + { + name: "single tag with two chars", + content: "Secret __known_to_chars__Alice,Bob__ message", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Secret message", + wantKnownTo: []string{"Alice", "Bob"}, + }, + { + name: "tag at beginning", + content: "__known_to_chars__Alice__ Hello", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, + }, + { + name: "tag at end", + content: "Hello __known_to_chars__Alice__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, + }, + { + name: "multiple tags", + content: "First __known_to_chars__Alice__ then __known_to_chars__Bob__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "First then", + wantKnownTo: []string{"Alice", "Bob"}, + }, + { + name: "custom tag", + content: "Secret __secret__Alice,Bob__ message", + enabled: true, + tag: "__secret__", + wantCleaned: "Secret message", + wantKnownTo: []string{"Alice", "Bob"}, + }, + { + name: "empty list", + content: "Secret __known_to_chars____", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Secret", + wantKnownTo: nil, + }, + { + name: "whitespace around commas", + content: "__known_to_chars__ Alice , Bob , Carl __", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "", + wantKnownTo: []string{"Alice", "Bob", "Carl"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set up config + testCfg := &config.Config{ + CharSpecificContextEnabled: tt.enabled, + CharSpecificContextTag: tt.tag, + } + cfg = testCfg + + cleaned, knownTo := parseKnownToTag(tt.content) + + if cleaned != tt.wantCleaned { + t.Errorf("parseKnownToTag() 
cleaned = %q, want %q", cleaned, tt.wantCleaned) + } + + if len(knownTo) != len(tt.wantKnownTo) { + t.Errorf("parseKnownToTag() knownTo length = %v, want %v", len(knownTo), len(tt.wantKnownTo)) + t.Logf("got: %v", knownTo) + t.Logf("want: %v", tt.wantKnownTo) + } else { + for i, got := range knownTo { + if got != tt.wantKnownTo[i] { + t.Errorf("parseKnownToTag() knownTo[%d] = %q, want %q", i, got, tt.wantKnownTo[i]) + } + } + } + }) + } +} + +func TestProcessMessageTag(t *testing.T) { + tests := []struct { + name string + msg models.RoleMsg + enabled bool + tag string + wantMsg models.RoleMsg + }{ + { + name: "feature disabled returns unchanged", + msg: models.RoleMsg{ + Role: "Alice", + Content: "Secret __known_to_chars__Bob__", + }, + enabled: false, + tag: "__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "Secret __known_to_chars__Bob__", + KnownTo: nil, + }, + }, + { + name: "no tag, no knownTo", + msg: models.RoleMsg{ + Role: "Alice", + Content: "Hello everyone", + }, + enabled: true, + tag: "__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "Hello everyone", + KnownTo: []string{"Alice"}, + }, + }, + { + name: "tag with Bob, adds Alice automatically", + msg: models.RoleMsg{ + Role: "Alice", + Content: "Secret __known_to_chars__Bob__", + }, + enabled: true, + tag: "__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "Secret", + KnownTo: []string{"Bob", "Alice"}, + }, + }, + { + name: "tag already includes sender", + msg: models.RoleMsg{ + Role: "Alice", + Content: "__known_to_chars__Alice,Bob__", + }, + enabled: true, + tag: "__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "", + KnownTo: []string{"Alice", "Bob"}, + }, + }, + { + name: "knownTo already set (from DB), tag still processed", + msg: models.RoleMsg{ + Role: "Alice", + Content: "Secret __known_to_chars__Bob__", + KnownTo: []string{"Alice"}, // from previous processing + }, + enabled: true, + tag: 
"__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "Secret", + KnownTo: []string{"Bob", "Alice"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testCfg := &config.Config{ + CharSpecificContextEnabled: tt.enabled, + CharSpecificContextTag: tt.tag, + } + cfg = testCfg + + got := processMessageTag(tt.msg) + + if got.Content != tt.wantMsg.Content { + t.Errorf("processMessageTag() content = %q, want %q", got.Content, tt.wantMsg.Content) + } + + if len(got.KnownTo) != len(tt.wantMsg.KnownTo) { + t.Errorf("processMessageTag() KnownTo length = %v, want %v", len(got.KnownTo), len(tt.wantMsg.KnownTo)) + t.Logf("got: %v", got.KnownTo) + t.Logf("want: %v", tt.wantMsg.KnownTo) + } else { + // order may differ; check membership + for _, want := range tt.wantMsg.KnownTo { + found := false + for _, gotVal := range got.KnownTo { + if gotVal == want { + found = true + break + } + } + if !found { + t.Errorf("processMessageTag() missing KnownTo entry %q, got %v", want, got.KnownTo) + } + } + } + }) + } +} + +func TestFilterMessagesForCharacter(t *testing.T) { + messages := []models.RoleMsg{ + {Role: "system", Content: "System message", KnownTo: nil}, // visible to all + {Role: "Alice", Content: "Hello everyone", KnownTo: nil}, // visible to all + {Role: "Alice", Content: "Secret for Bob", KnownTo: []string{"Alice", "Bob"}}, + {Role: "Bob", Content: "Reply to Alice", KnownTo: []string{"Alice", "Bob"}}, + {Role: "Alice", Content: "Private to Carl", KnownTo: []string{"Alice", "Carl"}}, + {Role: "Carl", Content: "Hi all", KnownTo: nil}, // visible to all + } + + tests := []struct { + name string + enabled bool + character string + wantIndices []int // indices from original messages that should be included + }{ + { + name: "feature disabled returns all", + enabled: false, + character: "Alice", + wantIndices: []int{0,1,2,3,4,5}, + }, + { + name: "character empty returns all", + enabled: true, + character: "", + wantIndices: 
[]int{0,1,2,3,4,5}, + }, + { + name: "Alice sees all including Carl-private", + enabled: true, + character: "Alice", + wantIndices: []int{0,1,2,3,4,5}, + }, + { + name: "Bob sees Alice-Bob secrets and all public", + enabled: true, + character: "Bob", + wantIndices: []int{0,1,2,3,5}, + }, + { + name: "Carl sees Alice-Carl secret and public", + enabled: true, + character: "Carl", + wantIndices: []int{0,1,4,5}, + }, + { + name: "David sees only public messages", + enabled: true, + character: "David", + wantIndices: []int{0,1,5}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testCfg := &config.Config{ + CharSpecificContextEnabled: tt.enabled, + CharSpecificContextTag: "__known_to_chars__", + } + cfg = testCfg + + got := filterMessagesForCharacter(messages, tt.character) + + if len(got) != len(tt.wantIndices) { + t.Errorf("filterMessagesForCharacter() returned %d messages, want %d", len(got), len(tt.wantIndices)) + t.Logf("got: %v", got) + return + } + + for i, idx := range tt.wantIndices { + if got[i].Content != messages[idx].Content { + t.Errorf("filterMessagesForCharacter() message %d content = %q, want %q", i, got[i].Content, messages[idx].Content) + } + } + }) + } } \ No newline at end of file diff --git a/config.example.toml b/config.example.toml index 3a5401b..85b2662 100644 --- a/config.example.toml +++ b/config.example.toml @@ -43,3 +43,5 @@ DBPATH = "gflt.db" FilePickerDir = "." 
# Directory where file picker should start FilePickerExts = "png,jpg,jpeg,gif,webp" # Comma-separated list of allowed file extensions for file picker EnableMouse = false # Enable mouse support in the UI +CharSpecificContextEnabled = false +CharSpecificContextTag = "__known_to_chars__" diff --git a/config/config.go b/config/config.go index 20935a2..62c8331 100644 --- a/config/config.go +++ b/config/config.go @@ -61,10 +61,12 @@ type Config struct { WhisperBinaryPath string `toml:"WhisperBinaryPath"` WhisperModelPath string `toml:"WhisperModelPath"` STT_LANG string `toml:"STT_LANG"` - DBPATH string `toml:"DBPATH"` - FilePickerDir string `toml:"FilePickerDir"` - FilePickerExts string `toml:"FilePickerExts"` - EnableMouse bool `toml:"EnableMouse"` + DBPATH string `toml:"DBPATH"` + FilePickerDir string `toml:"FilePickerDir"` + FilePickerExts string `toml:"FilePickerExts"` + EnableMouse bool `toml:"EnableMouse"` + CharSpecificContextEnabled bool `toml:"CharSpecificContextEnabled"` + CharSpecificContextTag string `toml:"CharSpecificContextTag"` } func LoadConfig(fn string) (*Config, error) { diff --git a/llm.go b/llm.go index 5621ecf..5599d21 100644 --- a/llm.go +++ b/llm.go @@ -34,6 +34,24 @@ func ClearImageAttachment() { imageAttachmentPath = "" } +// filterMessagesForCurrentCharacter filters messages based on char-specific context. +// Returns filtered messages and the bot persona role (target character). 
+func filterMessagesForCurrentCharacter(messages []models.RoleMsg) ([]models.RoleMsg, string) { + if cfg == nil || !cfg.CharSpecificContextEnabled { + botPersona := cfg.AssistantRole + if cfg.WriteNextMsgAsCompletionAgent != "" { + botPersona = cfg.WriteNextMsgAsCompletionAgent + } + return messages, botPersona + } + botPersona := cfg.AssistantRole + if cfg.WriteNextMsgAsCompletionAgent != "" { + botPersona = cfg.WriteNextMsgAsCompletionAgent + } + filtered := filterMessagesForCharacter(messages, botPersona) + return filtered, botPersona +} + type ChunkParser interface { ParseChunk([]byte) (*models.TextChunk, error) FormMsg(msg, role string, cont bool) (io.Reader, error) @@ -113,6 +131,7 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro } if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -136,17 +155,14 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro // add to chat body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } - messages := make([]string, len(chatBody.Messages)) - for i, m := range chatBody.Messages { + filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + messages := make([]string, len(filteredMessages)) + for i, m := range filteredMessages { messages[i] = m.ToPrompt() } prompt := strings.Join(messages, "\n") // strings builder? 
if !resume { - botPersona := cfg.AssistantRole - if cfg.WriteNextMsgAsCompletionAgent != "" { - botPersona = cfg.WriteNextMsgAsCompletionAgent - } botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } @@ -270,6 +286,7 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { // Create a simple text message newMsg = models.NewRoleMsg(role, msg) } + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role, "content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages)) } @@ -291,12 +308,13 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } } // openai /v1/chat does not support custom roles; needs to be user, assistant, system + filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) bodyCopy := &models.ChatBody{ - Messages: make([]models.RoleMsg, len(chatBody.Messages)), + Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, Stream: chatBody.Stream, } - for i, msg := range chatBody.Messages { + for i, msg := range filteredMessages { if msg.Role == cfg.UserRole { bodyCopy.Messages[i] = msg bodyCopy.Messages[i].Role = "user" @@ -348,6 +366,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Debug("formmsg deepseekercompletion", "link", cfg.CurrentAPI) if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -372,17 +391,14 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader // add to chat body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } - messages := make([]string, len(chatBody.Messages)) - for i, m := range chatBody.Messages { + 
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + messages := make([]string, len(filteredMessages)) + for i, m := range filteredMessages { messages[i] = m.ToPrompt() } prompt := strings.Join(messages, "\n") // strings builder? if !resume { - botPersona := cfg.AssistantRole - if cfg.WriteNextMsgAsCompletionAgent != "" { - botPersona = cfg.WriteNextMsgAsCompletionAgent - } botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } @@ -432,6 +448,7 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Debug("formmsg deepseekerchat", "link", cfg.CurrentAPI) if msg != "" { // otherwise let the bot continue newMsg := models.RoleMsg{Role: role, Content: msg} + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -451,12 +468,13 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } } + filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) bodyCopy := &models.ChatBody{ - Messages: make([]models.RoleMsg, len(chatBody.Messages)), + Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, Stream: chatBody.Stream, } - for i, msg := range chatBody.Messages { + for i, msg := range filteredMessages { if msg.Role == cfg.UserRole || i == 1 { bodyCopy.Messages[i] = msg bodyCopy.Messages[i].Role = "user" @@ -502,6 +520,7 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Debug("formmsg openroutercompletion", "link", cfg.CurrentAPI) if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -525,17 +544,14 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader // add to chat 
body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } - messages := make([]string, len(chatBody.Messages)) - for i, m := range chatBody.Messages { + filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + messages := make([]string, len(filteredMessages)) + for i, m := range filteredMessages { messages[i] = m.ToPrompt() } prompt := strings.Join(messages, "\n") // strings builder? if !resume { - botPersona := cfg.AssistantRole - if cfg.WriteNextMsgAsCompletionAgent != "" { - botPersona = cfg.WriteNextMsgAsCompletionAgent - } botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } @@ -619,6 +635,7 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro // Create a simple text message newMsg = models.NewRoleMsg(role, msg) } + newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -639,12 +656,13 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } } // Create copy of chat body with standardized user role + filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) bodyCopy := &models.ChatBody{ - Messages: make([]models.RoleMsg, len(chatBody.Messages)), + Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, Stream: chatBody.Stream, } - for i, msg := range chatBody.Messages { + for i, msg := range filteredMessages { bodyCopy.Messages[i] = msg // Standardize role if it's a user role if bodyCopy.Messages[i].Role == cfg.UserRole { diff --git a/models/models.go b/models/models.go index 912f72b..88ba144 100644 --- a/models/models.go +++ b/models/models.go @@ -93,6 +93,7 @@ type RoleMsg struct { Content string `json:"-"` ContentParts []interface{} `json:"-"` ToolCallID string `json:"tool_call_id,omitempty"` // For tool response messages + KnownTo []string `json:"known_to,omitempty"` hasContentParts bool // Flag to indicate which 
content type to marshal } @@ -104,10 +105,12 @@ func (m RoleMsg) MarshalJSON() ([]byte, error) { Role string `json:"role"` Content []interface{} `json:"content"` ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string `json:"known_to,omitempty"` }{ Role: m.Role, Content: m.ContentParts, ToolCallID: m.ToolCallID, + KnownTo: m.KnownTo, } return json.Marshal(aux) } else { @@ -116,10 +119,12 @@ func (m RoleMsg) MarshalJSON() ([]byte, error) { Role string `json:"role"` Content string `json:"content"` ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string `json:"known_to,omitempty"` }{ Role: m.Role, Content: m.Content, ToolCallID: m.ToolCallID, + KnownTo: m.KnownTo, } return json.Marshal(aux) } @@ -132,11 +137,13 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { Role string `json:"role"` Content []interface{} `json:"content"` ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string `json:"known_to,omitempty"` } if err := json.Unmarshal(data, &structured); err == nil && len(structured.Content) > 0 { m.Role = structured.Role m.ContentParts = structured.Content m.ToolCallID = structured.ToolCallID + m.KnownTo = structured.KnownTo m.hasContentParts = true return nil } @@ -146,6 +153,7 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { Role string `json:"role"` Content string `json:"content"` ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string `json:"known_to,omitempty"` } if err := json.Unmarshal(data, &simple); err != nil { return err @@ -153,6 +161,7 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { m.Role = simple.Role m.Content = simple.Content m.ToolCallID = simple.ToolCallID + m.KnownTo = simple.KnownTo m.hasContentParts = false return nil } @@ -363,7 +372,8 @@ func (cb *ChatBody) MakeStopSlice() []string { for _, m := range cb.Messages { namesMap[m.Role] = struct{}{} } - ss := []string{"<|im_end|>"} + ss := make([]string, 0, 1+len(namesMap)) + ss = append(ss, "<|im_end|>") for k := range namesMap 
{ ss = append(ss, k+":\n") } @@ -523,7 +533,7 @@ type LCPModels struct { } func (lcp *LCPModels) ListModels() []string { - resp := []string{} + resp := make([]string, 0, len(lcp.Data)) for _, model := range lcp.Data { resp = append(resp, model.ID) } -- cgit v1.2.3 From 12be6036902ba9fa1b403310422e5f6f3e6c1875 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 10:23:06 +0300 Subject: Chore: add alice-bob-carl card --- .gitignore | 1 + char-specific-context.md | 5 +++++ sysprompts/alice_bob_carl.json | 8 ++++++++ 3 files changed, 14 insertions(+) create mode 100644 sysprompts/alice_bob_carl.json diff --git a/.gitignore b/.gitignore index c366cf2..472ea7d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ history/ config.toml sysprompts/* !sysprompts/cluedo.json +!sysprompts/alice_bob_carl.json history_bak/ .aider* tags diff --git a/char-specific-context.md b/char-specific-context.md index d56a0cf..93c08c5 100644 --- a/char-specific-context.md +++ b/char-specific-context.md @@ -27,5 +27,10 @@ Bob: I also have a secret for you Alice __known_to_chars__Alice__ tag can be anywhere in the message. Sender should be also included in KnownTo, so we should parse sender name and add them to KnownTo. also need to consider user case (as in human chatting with llm). User also can assume any char identity to write the message and ideally the same rules should affect user's chars. +user has "Writing as {char}" (vars: persona and cfg.UserRole) +on persona change we should update tui text view to have atual for that character chat history Again, this is not going to work with openais /v1/chat endpoint since it converts all characters to user/assistant; so it is completion only feature. It also might cause unwanted effects, so we better have an option in config to switch this context editing on/off. + + +alternative approach to the tag string would be to have a judge agent to determine after each message what characters should hae access to it. 
but it means to make an additional call to llm after each msg. diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json new file mode 100644 index 0000000..abde0f2 --- /dev/null +++ b/sysprompts/alice_bob_carl.json @@ -0,0 +1,8 @@ +{ + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '__known_to_chars__{CharName1,CharName2,CharName3}__'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... (ooc: __known_to_chars__Bob__)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", + "role": "Alice", + "role2": "Bob", + "role3": "Carl", + "filepath": "sysprompts/alice_bob_carl.json", + "first_msg": "Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?" +} -- cgit v1.2.3 From 8b162ef34f0755e2224c43499218def16d4b6845 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 11:42:35 +0300 Subject: Enha: change textview chat history based on current user persona --- bot.go | 15 ++++++++------- char-specific-context.md | 3 +++ helpfuncs.go | 2 +- llm.go | 1 - tables.go | 10 +++++----- tools.go | 4 ++-- tui.go | 37 ++++++++++++++++++++++--------------- 7 files changed, 41 insertions(+), 31 deletions(-) diff --git a/bot.go b/bot.go index 967c060..a5d3b12 100644 --- a/bot.go +++ b/bot.go @@ -153,7 +153,8 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m return messages } filtered := make([]models.RoleMsg, 0, len(messages)) - for _, msg := range messages { + for i, msg := range messages { + logger.Info("filtering messages", "character", character, "index", i, "known_to", msg.KnownTo) // If KnownTo is nil or empty, message is visible to all if len(msg.KnownTo) == 0 { filtered = append(filtered, msg) @@ -1003,9 +1004,9 @@ func 
findCall(msg, toolCall string, tv *tview.TextView) { chatRound("", cfg.AssistantRole, tv, false, false) } -func chatToTextSlice(showSys bool) []string { - resp := make([]string, len(chatBody.Messages)) - for i, msg := range chatBody.Messages { +func chatToTextSlice(messages []models.RoleMsg, showSys bool) []string { + resp := make([]string, len(messages)) + for i, msg := range messages { // INFO: skips system msg and tool msg if !showSys && (msg.Role == cfg.ToolRole || msg.Role == "system") { continue @@ -1015,8 +1016,8 @@ func chatToTextSlice(showSys bool) []string { return resp } -func chatToText(showSys bool) string { - s := chatToTextSlice(showSys) +func chatToText(messages []models.RoleMsg, showSys bool) string { + s := chatToTextSlice(messages, showSys) return strings.Join(s, "\n") } @@ -1140,7 +1141,7 @@ func summarizeAndStartNewChat() { } chatBody.Messages = append(chatBody.Messages, toolMsg) // Update UI - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() // Update storage if err := updateStorageChat(activeChatName, chatBody.Messages); err != nil { diff --git a/char-specific-context.md b/char-specific-context.md index 93c08c5..c1a7bd6 100644 --- a/char-specific-context.md +++ b/char-specific-context.md @@ -34,3 +34,6 @@ Again, this is not going to work with openais /v1/chat endpoint since it convert alternative approach to the tag string would be to have a judge agent to determine after each message what characters should hae access to it. but it means to make an additional call to llm after each msg. 
+ + +need to update character card loader to support multiple characters diff --git a/helpfuncs.go b/helpfuncs.go index 73f8fb0..ccb5858 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -109,7 +109,7 @@ func startNewChat() { } // set chat body chatBody.Messages = chatBody.Messages[:2] - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) newChat := &models.Chat{ ID: id + 1, Name: fmt.Sprintf("%d_%s", id+1, cfg.AssistantRole), diff --git a/llm.go b/llm.go index 5599d21..cd5a3fe 100644 --- a/llm.go +++ b/llm.go @@ -180,7 +180,6 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro } prompt = sb.String() } - logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, defaultLCPProps, chatBody.MakeStopSlice()) diff --git a/tables.go b/tables.go index a88b501..4783cf6 100644 --- a/tables.go +++ b/tables.go @@ -119,7 +119,7 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { return } chatBody.Messages = history - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) activeChatName = selectedChat pages.RemovePage(historyPage) return @@ -142,7 +142,7 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { } // load last chat chatBody.Messages = loadOldChatOrGetNew() - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) pages.RemovePage(historyPage) return case "update card": @@ -175,7 +175,7 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { case "move sysprompt onto 1st msg": chatBody.Messages[1].Content = chatBody.Messages[0].Content + chatBody.Messages[1].Content chatBody.Messages[0].Content = rpDefenitionSysMsg - textView.SetText(chatToText(cfg.ShowSys)) + 
textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) activeChatName = selectedChat pages.RemovePage(historyPage) return @@ -546,7 +546,7 @@ func makeAgentTable(agentList []string) *tview.Table { return } // replace textview - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() updateStatusLine() // sysModal.ClearButtons() @@ -715,7 +715,7 @@ func makeImportChatTable(filenames []string) *tview.Table { colorText() updateStatusLine() // redraw the text in text area - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) pages.RemovePage(historyPage) app.SetFocus(textArea) return diff --git a/tools.go b/tools.go index fddbffe..2eafaf5 100644 --- a/tools.go +++ b/tools.go @@ -24,7 +24,7 @@ var ( starRE = regexp.MustCompile(`(\*.*?\*)`) thinkRE = regexp.MustCompile(`(\s*([\s\S]*?))`) codeBlockRE = regexp.MustCompile(`(?s)\x60{3}(?:.*?)\n(.*?)\n\s*\x60{3}\s*`) - singleBacktickRE = regexp.MustCompile(`\x60([^\x60]*)\x60`) + singleBacktickRE = regexp.MustCompile(`\x60([^\x60]*)\x60`) roleRE = regexp.MustCompile(`^(\w+):`) rpDefenitionSysMsg = ` For this roleplay immersion is at most importance. 
@@ -945,7 +945,7 @@ func summarizeChat(args map[string]string) []byte { return []byte("No chat history to summarize.") } // Format chat history for the agent - chatText := chatToText(true) // include system and tool messages + chatText := chatToText(chatBody.Messages, true) // include system and tool messages return []byte(chatText) } diff --git a/tui.go b/tui.go index a7570cf..aa9972a 100644 --- a/tui.go +++ b/tui.go @@ -310,7 +310,7 @@ func performSearch(term string) { searchResultLengths = nil originalTextForSearch = "" // Re-render text without highlights - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() return } @@ -517,8 +517,8 @@ func init() { searchResults = nil // Clear search results searchResultLengths = nil // Clear search result lengths originalTextForSearch = "" - textView.SetText(chatToText(cfg.ShowSys)) // Reset text without search regions - colorText() // Apply normal chat coloring + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) // Reset text without search regions + colorText() // Apply normal chat coloring } else { // Original logic if no search is active currentSelection := textView.GetHighlights() @@ -594,7 +594,7 @@ func init() { } chatBody.Messages[selectedIndex].Content = editedMsg // change textarea - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) pages.RemovePage(editMsgPage) editMode = false return nil @@ -627,7 +627,7 @@ func init() { } if selectedIndex >= 0 && selectedIndex < len(chatBody.Messages) { chatBody.Messages[selectedIndex].Role = newRole - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() pages.RemovePage(roleEditPage) } @@ -739,7 +739,7 @@ func init() { searchResults = nil searchResultLengths = nil originalTextForSearch = "" - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, 
cfg.ShowSys)) colorText() return } else { @@ -787,7 +787,7 @@ func init() { // textArea.SetMovedFunc(updateStatusLine) updateStatusLine() - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() if scrollToEndEnabled { textView.ScrollToEnd() @@ -801,7 +801,7 @@ func init() { if event.Key() == tcell.KeyRune && event.Rune() == '5' && event.Modifiers()&tcell.ModAlt != 0 { // switch cfg.ShowSys cfg.ShowSys = !cfg.ShowSys - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() } if event.Key() == tcell.KeyRune && event.Rune() == '3' && event.Modifiers()&tcell.ModAlt != 0 { @@ -866,7 +866,7 @@ func init() { chatBody.Messages = chatBody.Messages[:len(chatBody.Messages)-1] // there is no case where user msg is regenerated // lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) go chatRound("", cfg.UserRole, textView, true, false) return nil } @@ -888,7 +888,7 @@ func init() { return nil } chatBody.Messages = chatBody.Messages[:len(chatBody.Messages)-1] - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() return nil } @@ -1052,7 +1052,7 @@ func init() { // clear context // remove tools and thinking removeThinking(chatBody) - textView.SetText(chatToText(cfg.ShowSys)) + textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) colorText() return nil } @@ -1184,20 +1184,26 @@ func init() { if strings.EqualFold(role, persona) { if i == len(roles)-1 { cfg.WriteNextMsgAs = roles[0] // reached last, get first + persona = cfg.WriteNextMsgAs break } cfg.WriteNextMsgAs = roles[i+1] // get next role + persona = cfg.WriteNextMsgAs logger.Info("picked role", "roles", roles, "index", i+1) break } } + // role got switch, update textview with character specific context for user + 
filtered := filterMessagesForCharacter(chatBody.Messages, persona) + textView.SetText(chatToText(filtered, cfg.ShowSys)) updateStatusLine() + colorText() return nil } if event.Key() == tcell.KeyCtrlX { - persona := cfg.AssistantRole + botPersona := cfg.AssistantRole if cfg.WriteNextMsgAsCompletionAgent != "" { - persona = cfg.WriteNextMsgAsCompletionAgent + botPersona = cfg.WriteNextMsgAsCompletionAgent } roles := chatBody.ListRoles() if len(roles) == 0 { @@ -1207,12 +1213,14 @@ func init() { roles = append(roles, cfg.AssistantRole) } for i, role := range roles { - if strings.EqualFold(role, persona) { + if strings.EqualFold(role, botPersona) { if i == len(roles)-1 { cfg.WriteNextMsgAsCompletionAgent = roles[0] // reached last, get first + botPersona = cfg.WriteNextMsgAsCompletionAgent break } cfg.WriteNextMsgAsCompletionAgent = roles[i+1] // get next role + botPersona = cfg.WriteNextMsgAsCompletionAgent logger.Info("picked role", "roles", roles, "index", i+1) break } @@ -1295,7 +1303,6 @@ func init() { // cannot send msg in editMode or botRespMode if event.Key() == tcell.KeyEscape && !editMode && !botRespMode { msgText := textArea.GetText() - // TODO: add shellmode command -> output to the chat history, or at least have an option if shellMode && msgText != "" { // In shell mode, execute command instead of sending to LLM executeCommandAndDisplay(msgText) -- cgit v1.2.3 From ec2d1c05ace9905e0ff88e25d5d65f0d7ff58274 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 11:54:52 +0300 Subject: Fix: do not skip system msgs --- bot.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bot.go b/bot.go index a5d3b12..fc878d5 100644 --- a/bot.go +++ b/bot.go @@ -95,7 +95,6 @@ func parseKnownToTag(content string) (string, []string) { } // Remove the entire matched tag from content cleaned = strings.Replace(cleaned, match[0], "", 1) - list := strings.TrimSpace(match[1]) if list == "" { continue @@ -122,6 +121,7 @@ func processMessageTag(msg 
models.RoleMsg) models.RoleMsg { // If KnownTo already set, assume tag already processed (content cleaned). // However, we still check for new tags (maybe added later). cleaned, knownTo := parseKnownToTag(msg.Content) + logger.Info("processing tags", "msg", msg.Content, "known_to", knownTo) if cleaned != msg.Content { msg.Content = cleaned } @@ -156,7 +156,8 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m for i, msg := range messages { logger.Info("filtering messages", "character", character, "index", i, "known_to", msg.KnownTo) // If KnownTo is nil or empty, message is visible to all - if len(msg.KnownTo) == 0 { + // system msg cannot be filtered + if len(msg.KnownTo) == 0 || msg.Role == "system" { filtered = append(filtered, msg) continue } -- cgit v1.2.3 From fd84dd58266bdeff498f939721e9a1998318473b Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 12:28:19 +0300 Subject: Enha: do not remove tag --- bot.go | 26 ++++--- bot_test.go | 241 ++++++++++++++++++++++++++++++------------------------------ 2 files changed, 136 insertions(+), 131 deletions(-) diff --git a/bot.go b/bot.go index fc878d5..3af0ac9 100644 --- a/bot.go +++ b/bot.go @@ -71,9 +71,9 @@ var ( // parseKnownToTag extracts known_to list from content using configured tag. // Returns cleaned content and list of character names. -func parseKnownToTag(content string) (string, []string) { +func parseKnownToTag(content string) []string { if cfg == nil || !cfg.CharSpecificContextEnabled { - return content, nil + return nil } tag := cfg.CharSpecificContextTag if tag == "" { @@ -84,17 +84,15 @@ func parseKnownToTag(content string) (string, []string) { re := regexp.MustCompile(pattern) matches := re.FindAllStringSubmatch(content, -1) if len(matches) == 0 { - return content, nil + return nil } // There may be multiple tags; we combine all. 
var knownTo []string - cleaned := content for _, match := range matches { if len(match) < 2 { continue } // Remove the entire matched tag from content - cleaned = strings.Replace(cleaned, match[0], "", 1) list := strings.TrimSpace(match[1]) if list == "" { continue @@ -108,7 +106,7 @@ func parseKnownToTag(content string) (string, []string) { } } // Also remove any leftover trailing "__" that might be orphaned? Not needed. - return strings.TrimSpace(cleaned), knownTo + return knownTo } // processMessageTag processes a message for known_to tag and sets KnownTo field. @@ -120,11 +118,8 @@ func processMessageTag(msg models.RoleMsg) models.RoleMsg { } // If KnownTo already set, assume tag already processed (content cleaned). // However, we still check for new tags (maybe added later). - cleaned, knownTo := parseKnownToTag(msg.Content) + knownTo := parseKnownToTag(msg.Content) logger.Info("processing tags", "msg", msg.Content, "known_to", knownTo) - if cleaned != msg.Content { - msg.Content = cleaned - } // If tag found, replace KnownTo with new list (merge with existing?) // For simplicity, if knownTo is not nil, replace. 
if knownTo != nil { @@ -789,10 +784,17 @@ out: if resume { chatBody.Messages[len(chatBody.Messages)-1].Content += respText.String() // lastM.Content = lastM.Content + respText.String() + // Process the updated message to check for known_to tags in resumed response + updatedMsg := chatBody.Messages[len(chatBody.Messages)-1] + processedMsg := processMessageTag(updatedMsg) + chatBody.Messages[len(chatBody.Messages)-1] = processedMsg } else { - chatBody.Messages = append(chatBody.Messages, models.RoleMsg{ + newMsg := models.RoleMsg{ Role: botPersona, Content: respText.String(), - }) + } + // Process the new message to check for known_to tags in LLM response + newMsg = processMessageTag(newMsg) + chatBody.Messages = append(chatBody.Messages, newMsg) } logger.Debug("chatRound: before cleanChatBody", "messages_before_clean", len(chatBody.Messages)) for i, msg := range chatBody.Messages { diff --git a/bot_test.go b/bot_test.go index 7496175..a745dde 100644 --- a/bot_test.go +++ b/bot_test.go @@ -118,34 +118,34 @@ func TestConsolidateConsecutiveAssistantMessages(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := consolidateConsecutiveAssistantMessages(tt.input) - + if len(result) != len(tt.expected) { t.Errorf("Expected %d messages, got %d", len(tt.expected), len(result)) t.Logf("Result: %+v", result) t.Logf("Expected: %+v", tt.expected) return } - + for i, expectedMsg := range tt.expected { if i >= len(result) { t.Errorf("Result has fewer messages than expected at index %d", i) continue } - + actualMsg := result[i] if actualMsg.Role != expectedMsg.Role { t.Errorf("Message %d: expected role '%s', got '%s'", i, expectedMsg.Role, actualMsg.Role) } - + if actualMsg.Content != expectedMsg.Content { t.Errorf("Message %d: expected content '%s', got '%s'", i, expectedMsg.Content, actualMsg.Content) } - + if actualMsg.ToolCallID != expectedMsg.ToolCallID { t.Errorf("Message %d: expected ToolCallID '%s', got '%s'", i, expectedMsg.ToolCallID, 
actualMsg.ToolCallID) } } - + // Additional check: ensure no messages were lost if !reflect.DeepEqual(result, tt.expected) { t.Errorf("Result does not match expected:\nResult: %+v\nExpected: %+v", result, tt.expected) @@ -290,92 +290,92 @@ func TestConvertJSONToMapStringString(t *testing.T) { func TestParseKnownToTag(t *testing.T) { tests := []struct { - name string - content string - enabled bool - tag string - wantCleaned string - wantKnownTo []string + name string + content string + enabled bool + tag string + wantCleaned string + wantKnownTo []string }{ { - name: "feature disabled returns original", - content: "Hello __known_to_chars__Alice__", - enabled: false, - tag: "__known_to_chars__", - wantCleaned: "Hello __known_to_chars__Alice__", - wantKnownTo: nil, + name: "feature disabled returns original", + content: "Hello __known_to_chars__Alice__", + enabled: false, + tag: "__known_to_chars__", + wantCleaned: "Hello __known_to_chars__Alice__", + wantKnownTo: nil, }, { - name: "no tag returns original", - content: "Hello Alice", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Hello Alice", - wantKnownTo: nil, + name: "no tag returns original", + content: "Hello Alice", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello Alice", + wantKnownTo: nil, }, { - name: "single tag with one char", - content: "Hello __known_to_chars__Alice__", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Hello", - wantKnownTo: []string{"Alice"}, + name: "single tag with one char", + content: "Hello __known_to_chars__Alice__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, }, { - name: "single tag with two chars", - content: "Secret __known_to_chars__Alice,Bob__ message", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Secret message", - wantKnownTo: []string{"Alice", "Bob"}, + name: "single tag with two chars", + content: "Secret __known_to_chars__Alice,Bob__ message", + 
enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Secret message", + wantKnownTo: []string{"Alice", "Bob"}, }, { - name: "tag at beginning", - content: "__known_to_chars__Alice__ Hello", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Hello", - wantKnownTo: []string{"Alice"}, + name: "tag at beginning", + content: "__known_to_chars__Alice__ Hello", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, }, { - name: "tag at end", - content: "Hello __known_to_chars__Alice__", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Hello", - wantKnownTo: []string{"Alice"}, + name: "tag at end", + content: "Hello __known_to_chars__Alice__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Hello", + wantKnownTo: []string{"Alice"}, }, { - name: "multiple tags", - content: "First __known_to_chars__Alice__ then __known_to_chars__Bob__", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "First then", - wantKnownTo: []string{"Alice", "Bob"}, + name: "multiple tags", + content: "First __known_to_chars__Alice__ then __known_to_chars__Bob__", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "First then", + wantKnownTo: []string{"Alice", "Bob"}, }, { - name: "custom tag", - content: "Secret __secret__Alice,Bob__ message", - enabled: true, - tag: "__secret__", - wantCleaned: "Secret message", - wantKnownTo: []string{"Alice", "Bob"}, + name: "custom tag", + content: "Secret __secret__Alice,Bob__ message", + enabled: true, + tag: "__secret__", + wantCleaned: "Secret message", + wantKnownTo: []string{"Alice", "Bob"}, }, { - name: "empty list", - content: "Secret __known_to_chars____", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "Secret", - wantKnownTo: nil, + name: "empty list", + content: "Secret __known_to_chars____", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "Secret", + wantKnownTo: nil, }, { - name: "whitespace around commas", - 
content: "__known_to_chars__ Alice , Bob , Carl __", - enabled: true, - tag: "__known_to_chars__", - wantCleaned: "", - wantKnownTo: []string{"Alice", "Bob", "Carl"}, + name: "whitespace around commas", + content: "__known_to_chars__ Alice , Bob , Carl __", + enabled: true, + tag: "__known_to_chars__", + wantCleaned: "", + wantKnownTo: []string{"Alice", "Bob", "Carl"}, }, } @@ -387,13 +387,7 @@ func TestParseKnownToTag(t *testing.T) { CharSpecificContextTag: tt.tag, } cfg = testCfg - - cleaned, knownTo := parseKnownToTag(tt.content) - - if cleaned != tt.wantCleaned { - t.Errorf("parseKnownToTag() cleaned = %q, want %q", cleaned, tt.wantCleaned) - } - + knownTo := parseKnownToTag(tt.content) if len(knownTo) != len(tt.wantKnownTo) { t.Errorf("parseKnownToTag() knownTo length = %v, want %v", len(knownTo), len(tt.wantKnownTo)) t.Logf("got: %v", knownTo) @@ -411,11 +405,11 @@ func TestParseKnownToTag(t *testing.T) { func TestProcessMessageTag(t *testing.T) { tests := []struct { - name string - msg models.RoleMsg - enabled bool - tag string - wantMsg models.RoleMsg + name string + msg models.RoleMsg + enabled bool + tag string + wantMsg models.RoleMsg }{ { name: "feature disabled returns unchanged", @@ -488,6 +482,21 @@ func TestProcessMessageTag(t *testing.T) { KnownTo: []string{"Bob", "Alice"}, }, }, + { + name: "example from real use", + msg: models.RoleMsg{ + Role: "Alice", + Content: "I'll start with a simple one! The word is 'banana'. (ooc: __known_to_chars__Bob__)", + KnownTo: []string{"Alice"}, // from previous processing + }, + enabled: true, + tag: "__known_to_chars__", + wantMsg: models.RoleMsg{ + Role: "Alice", + Content: "I'll start with a simple one! The word is 'banana'. 
(ooc: __known_to_chars__Bob__)", + KnownTo: []string{"Bob", "Alice"}, + }, + }, } for _, tt := range tests { @@ -497,13 +506,7 @@ func TestProcessMessageTag(t *testing.T) { CharSpecificContextTag: tt.tag, } cfg = testCfg - got := processMessageTag(tt.msg) - - if got.Content != tt.wantMsg.Content { - t.Errorf("processMessageTag() content = %q, want %q", got.Content, tt.wantMsg.Content) - } - if len(got.KnownTo) != len(tt.wantMsg.KnownTo) { t.Errorf("processMessageTag() KnownTo length = %v, want %v", len(got.KnownTo), len(tt.wantMsg.KnownTo)) t.Logf("got: %v", got.KnownTo) @@ -530,7 +533,7 @@ func TestProcessMessageTag(t *testing.T) { func TestFilterMessagesForCharacter(t *testing.T) { messages := []models.RoleMsg{ {Role: "system", Content: "System message", KnownTo: nil}, // visible to all - {Role: "Alice", Content: "Hello everyone", KnownTo: nil}, // visible to all + {Role: "Alice", Content: "Hello everyone", KnownTo: nil}, // visible to all {Role: "Alice", Content: "Secret for Bob", KnownTo: []string{"Alice", "Bob"}}, {Role: "Bob", Content: "Reply to Alice", KnownTo: []string{"Alice", "Bob"}}, {Role: "Alice", Content: "Private to Carl", KnownTo: []string{"Alice", "Carl"}}, @@ -538,46 +541,46 @@ func TestFilterMessagesForCharacter(t *testing.T) { } tests := []struct { - name string - enabled bool - character string + name string + enabled bool + character string wantIndices []int // indices from original messages that should be included }{ { - name: "feature disabled returns all", - enabled: false, - character: "Alice", - wantIndices: []int{0,1,2,3,4,5}, + name: "feature disabled returns all", + enabled: false, + character: "Alice", + wantIndices: []int{0, 1, 2, 3, 4, 5}, }, { - name: "character empty returns all", - enabled: true, - character: "", - wantIndices: []int{0,1,2,3,4,5}, + name: "character empty returns all", + enabled: true, + character: "", + wantIndices: []int{0, 1, 2, 3, 4, 5}, }, { - name: "Alice sees all including Carl-private", - enabled: true, - 
character: "Alice", - wantIndices: []int{0,1,2,3,4,5}, + name: "Alice sees all including Carl-private", + enabled: true, + character: "Alice", + wantIndices: []int{0, 1, 2, 3, 4, 5}, }, { - name: "Bob sees Alice-Bob secrets and all public", - enabled: true, - character: "Bob", - wantIndices: []int{0,1,2,3,5}, + name: "Bob sees Alice-Bob secrets and all public", + enabled: true, + character: "Bob", + wantIndices: []int{0, 1, 2, 3, 5}, }, { - name: "Carl sees Alice-Carl secret and public", - enabled: true, - character: "Carl", - wantIndices: []int{0,1,4,5}, + name: "Carl sees Alice-Carl secret and public", + enabled: true, + character: "Carl", + wantIndices: []int{0, 1, 4, 5}, }, { - name: "David sees only public messages", - enabled: true, - character: "David", - wantIndices: []int{0,1,5}, + name: "David sees only public messages", + enabled: true, + character: "David", + wantIndices: []int{0, 1, 5}, }, } @@ -604,4 +607,4 @@ func TestFilterMessagesForCharacter(t *testing.T) { } }) } -} \ No newline at end of file +} -- cgit v1.2.3 From 0fb59210045792433a7a3796046c8383f2bb8824 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 12:44:18 +0300 Subject: Fix: copy with knownto --- bot_test.go | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ models/models.go | 1 + 2 files changed, 63 insertions(+) diff --git a/bot_test.go b/bot_test.go index a745dde..d5b4b0a 100644 --- a/bot_test.go +++ b/bot_test.go @@ -608,3 +608,65 @@ func TestFilterMessagesForCharacter(t *testing.T) { }) } } + +func TestRoleMsgCopyPreservesKnownTo(t *testing.T) { + // Test that the Copy() method preserves the KnownTo field + originalMsg := models.RoleMsg{ + Role: "Alice", + Content: "Test message", + KnownTo: []string{"Bob", "Charlie"}, + } + + copiedMsg := originalMsg.Copy() + + if copiedMsg.Role != originalMsg.Role { + t.Errorf("Copy() failed to preserve Role: got %q, want %q", copiedMsg.Role, originalMsg.Role) + } + if copiedMsg.Content != originalMsg.Content { + 
t.Errorf("Copy() failed to preserve Content: got %q, want %q", copiedMsg.Content, originalMsg.Content) + } + if !reflect.DeepEqual(copiedMsg.KnownTo, originalMsg.KnownTo) { + t.Errorf("Copy() failed to preserve KnownTo: got %v, want %v", copiedMsg.KnownTo, originalMsg.KnownTo) + } + if copiedMsg.ToolCallID != originalMsg.ToolCallID { + t.Errorf("Copy() failed to preserve ToolCallID: got %q, want %q", copiedMsg.ToolCallID, originalMsg.ToolCallID) + } + if copiedMsg.IsContentParts() != originalMsg.IsContentParts() { + t.Errorf("Copy() failed to preserve hasContentParts flag") + } +} + +func TestKnownToFieldPreservationScenario(t *testing.T) { + // Test the specific scenario from the log where KnownTo field was getting lost + originalMsg := models.RoleMsg{ + Role: "Alice", + Content: `Alice: "Okay, Bob. The word is... **'Ephemeral'**. (ooc: __known_to_chars__Bob__)"`, + KnownTo: []string{"Bob"}, // This was detected in the log + } + + t.Logf("Original message - Role: %s, Content: %s, KnownTo: %v", + originalMsg.Role, originalMsg.Content, originalMsg.KnownTo) + + // Simulate what happens when the message gets copied during processing + copiedMsg := originalMsg.Copy() + + t.Logf("Copied message - Role: %s, Content: %s, KnownTo: %v", + copiedMsg.Role, copiedMsg.Content, copiedMsg.KnownTo) + + // Check if KnownTo field survived the copy + if len(copiedMsg.KnownTo) == 0 { + t.Error("ERROR: KnownTo field was lost during copy!") + } else { + t.Log("SUCCESS: KnownTo field was preserved during copy!") + } + + // Verify the content is the same + if copiedMsg.Content != originalMsg.Content { + t.Errorf("Content was changed during copy: got %s, want %s", copiedMsg.Content, originalMsg.Content) + } + + // Verify the KnownTo slice is properly copied + if !reflect.DeepEqual(copiedMsg.KnownTo, originalMsg.KnownTo) { + t.Errorf("KnownTo was not properly copied: got %v, want %v", copiedMsg.KnownTo, originalMsg.KnownTo) + } +} diff --git a/models/models.go b/models/models.go index 
88ba144..69bdf02 100644 --- a/models/models.go +++ b/models/models.go @@ -267,6 +267,7 @@ func (m RoleMsg) Copy() RoleMsg { Content: m.Content, ContentParts: m.ContentParts, ToolCallID: m.ToolCallID, + KnownTo: m.KnownTo, hasContentParts: m.hasContentParts, } } -- cgit v1.2.3 From 3e2a1b6f9975aaa2b9cb45bcb77aac146a37fd3c Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 13:03:30 +0300 Subject: Fix: KnowTo is added only if tag present --- bot.go | 23 ++++++++++++----------- bot_test.go | 2 +- tui.go | 13 +++++++++++++ 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/bot.go b/bot.go index 3af0ac9..f3fef8c 100644 --- a/bot.go +++ b/bot.go @@ -124,18 +124,19 @@ func processMessageTag(msg models.RoleMsg) models.RoleMsg { // For simplicity, if knownTo is not nil, replace. if knownTo != nil { msg.KnownTo = knownTo - } - // Ensure sender role is in KnownTo - if msg.Role != "" { - senderAdded := false - for _, k := range msg.KnownTo { - if k == msg.Role { - senderAdded = true - break + // Only ensure sender role is in KnownTo if there was a tag + // This means the message is intended for specific characters + if msg.Role != "" { + senderAdded := false + for _, k := range msg.KnownTo { + if k == msg.Role { + senderAdded = true + break + } + } + if !senderAdded { + msg.KnownTo = append(msg.KnownTo, msg.Role) } - } - if !senderAdded { - msg.KnownTo = append(msg.KnownTo, msg.Role) } } return msg diff --git a/bot_test.go b/bot_test.go index d5b4b0a..3dabc15 100644 --- a/bot_test.go +++ b/bot_test.go @@ -436,7 +436,7 @@ func TestProcessMessageTag(t *testing.T) { wantMsg: models.RoleMsg{ Role: "Alice", Content: "Hello everyone", - KnownTo: []string{"Alice"}, + KnownTo: nil, }, }, { diff --git a/tui.go b/tui.go index aa9972a..8454d45 100644 --- a/tui.go +++ b/tui.go @@ -93,6 +93,7 @@ var ( [yellow]Alt+4[white]: edit msg role [yellow]Alt+5[white]: toggle system and tool messages display [yellow]Alt+6[white]: toggle status line visibility 
+[yellow]Alt+7[white]: toggle role injection (inject role in messages) [yellow]Alt+8[white]: show char img or last picked img [yellow]Alt+9[white]: warm up (load) selected llama.cpp model @@ -828,6 +829,18 @@ func init() { } updateStatusLine() } + // Handle Alt+7 to toggle injectRole + if event.Key() == tcell.KeyRune && event.Rune() == '7' && event.Modifiers()&tcell.ModAlt != 0 { + injectRole = !injectRole + status := "disabled" + if injectRole { + status = "enabled" + } + if err := notifyUser("injectRole", fmt.Sprintf("Role injection %s", status)); err != nil { + logger.Error("failed to send notification", "error", err) + } + updateStatusLine() + } if event.Key() == tcell.KeyF1 { // chatList, err := loadHistoryChats() chatList, err := store.GetChatByChar(cfg.AssistantRole) -- cgit v1.2.3 From 4e597e944eacbeb5269dfdf586dd4a2163762a17 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 17 Jan 2026 13:07:14 +0300 Subject: Chore: log cleanup --- bot.go | 5 ++--- tui.go | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/bot.go b/bot.go index f3fef8c..2c59f07 100644 --- a/bot.go +++ b/bot.go @@ -119,7 +119,7 @@ func processMessageTag(msg models.RoleMsg) models.RoleMsg { // If KnownTo already set, assume tag already processed (content cleaned). // However, we still check for new tags (maybe added later). knownTo := parseKnownToTag(msg.Content) - logger.Info("processing tags", "msg", msg.Content, "known_to", knownTo) + // logger.Info("processing tags", "msg", msg.Content, "known_to", knownTo) // If tag found, replace KnownTo with new list (merge with existing?) // For simplicity, if knownTo is not nil, replace. 
if knownTo != nil { @@ -149,8 +149,7 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m return messages } filtered := make([]models.RoleMsg, 0, len(messages)) - for i, msg := range messages { - logger.Info("filtering messages", "character", character, "index", i, "known_to", msg.KnownTo) + for _, msg := range messages { // If KnownTo is nil or empty, message is visible to all // system msg cannot be filtered if len(msg.KnownTo) == 0 || msg.Role == "system" { diff --git a/tui.go b/tui.go index 8454d45..dc90db9 100644 --- a/tui.go +++ b/tui.go @@ -1202,7 +1202,7 @@ func init() { } cfg.WriteNextMsgAs = roles[i+1] // get next role persona = cfg.WriteNextMsgAs - logger.Info("picked role", "roles", roles, "index", i+1) + // logger.Info("picked role", "roles", roles, "index", i+1) break } } @@ -1234,7 +1234,7 @@ func init() { } cfg.WriteNextMsgAsCompletionAgent = roles[i+1] // get next role botPersona = cfg.WriteNextMsgAsCompletionAgent - logger.Info("picked role", "roles", roles, "index", i+1) + // logger.Info("picked role", "roles", roles, "index", i+1) break } } -- cgit v1.2.3 From a28e8ef9e250ace5c9624393da308c189c0839f6 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Wed, 21 Jan 2026 21:01:01 +0300 Subject: Enha: charlist in cards --- bot.go | 1 - helpfuncs.go | 25 ++++++++++++++++++++++++- models/card.go | 18 ++++++++++-------- sysprompts/alice_bob_carl.json | 1 + tui.go | 7 +++---- 5 files changed, 38 insertions(+), 14 deletions(-) diff --git a/bot.go b/bot.go index 2c59f07..112af07 100644 --- a/bot.go +++ b/bot.go @@ -1063,7 +1063,6 @@ func addNewChat(chatName string) { func applyCharCard(cc *models.CharCard) { cfg.AssistantRole = cc.Role - // FIXME: remove history, err := loadAgentsLastChat(cfg.AssistantRole) if err != nil { // too much action for err != nil; loadAgentsLastChat needs to be split up diff --git a/helpfuncs.go b/helpfuncs.go index ccb5858..f74cd13 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -164,7 +164,7 @@ 
func setLogLevel(sl string) { } func listRolesWithUser() []string { - roles := chatBody.ListRoles() + roles := listChatRoles() // Remove user role if it exists in the list (to avoid duplicates and ensure it's at position 0) filteredRoles := make([]string, 0, len(roles)) for _, role := range roles { @@ -250,3 +250,26 @@ func randString(n int) string { } return string(b) } + +// set of roles within card definition and mention in chat history +func listChatRoles() []string { + currentChat, ok := chatMap[activeChatName] + cbc := chatBody.ListRoles() + if !ok { + return cbc + } + currentCard, ok := sysMap[currentChat.Agent] + if !ok { + // log error + logger.Warn("failed to find current card in sysMap", "agent", currentChat.Agent, "sysMap", sysMap) + return cbc + } + charset := []string{} + for _, name := range currentCard.Characters { + if !strInSlice(name, cbc) { + charset = append(charset, name) + } + } + charset = append(charset, cbc...) + return charset +} diff --git a/models/card.go b/models/card.go index adfb030..9bf6665 100644 --- a/models/card.go +++ b/models/card.go @@ -31,18 +31,20 @@ func (c *CharCardSpec) Simplify(userName, fpath string) *CharCard { fm := strings.ReplaceAll(strings.ReplaceAll(c.FirstMes, "{{char}}", c.Name), "{{user}}", userName) sysPr := strings.ReplaceAll(strings.ReplaceAll(c.Description, "{{char}}", c.Name), "{{user}}", userName) return &CharCard{ - SysPrompt: sysPr, - FirstMsg: fm, - Role: c.Name, - FilePath: fpath, + SysPrompt: sysPr, + FirstMsg: fm, + Role: c.Name, + FilePath: fpath, + Characters: []string{c.Name, userName}, } } type CharCard struct { - SysPrompt string `json:"sys_prompt"` - FirstMsg string `json:"first_msg"` - Role string `json:"role"` - FilePath string `json:"filepath"` + SysPrompt string `json:"sys_prompt"` + FirstMsg string `json:"first_msg"` + Role string `json:"role"` + Characters []string `json:"chars"` + FilePath string `json:"filepath"` } func (cc *CharCard) ToSpec(userName string) *CharCardSpec { diff --git 
a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index abde0f2..8c7b8e2 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -4,5 +4,6 @@ "role2": "Bob", "role3": "Carl", "filepath": "sysprompts/alice_bob_carl.json", + "chars": ["Alice", "Bob", "Carl"], "first_msg": "Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?" } diff --git a/tui.go b/tui.go index dc90db9..54a4e32 100644 --- a/tui.go +++ b/tui.go @@ -836,7 +836,7 @@ func init() { if injectRole { status = "enabled" } - if err := notifyUser("injectRole", fmt.Sprintf("Role injection %s", status)); err != nil { + if err := notifyUser("injectRole", "Role injection "+status); err != nil { logger.Error("failed to send notification", "error", err) } updateStatusLine() @@ -1218,7 +1218,8 @@ func init() { if cfg.WriteNextMsgAsCompletionAgent != "" { botPersona = cfg.WriteNextMsgAsCompletionAgent } - roles := chatBody.ListRoles() + // roles := chatBody.ListRoles() + roles := listChatRoles() if len(roles) == 0 { logger.Warn("empty roles in chat") } @@ -1229,11 +1230,9 @@ func init() { if strings.EqualFold(role, botPersona) { if i == len(roles)-1 { cfg.WriteNextMsgAsCompletionAgent = roles[0] // reached last, get first - botPersona = cfg.WriteNextMsgAsCompletionAgent break } cfg.WriteNextMsgAsCompletionAgent = roles[i+1] // get next role - botPersona = cfg.WriteNextMsgAsCompletionAgent // logger.Info("picked role", "roles", roles, "index", i+1) break } -- cgit v1.2.3 From 98138728542d0ed529d9d3a389c3531945d971f3 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 22 Jan 2026 09:29:56 +0300 Subject: Chore: bool colors for statusline --- bot.go | 30 ++++++++++++------------------ helpfuncs.go | 18 ++++++++++++------ main.go | 6 +++--- tui.go | 7 ------- 4 files changed, 27 insertions(+), 34 deletions(-) diff --git a/bot.go b/bot.go index 112af07..cd35445 100644 --- a/bot.go +++ b/bot.go @@ -35,19 
+35,18 @@ var ( logLevel = new(slog.LevelVar) ) var ( - activeChatName string - chunkChan = make(chan string, 10) - openAIToolChan = make(chan string, 10) - streamDone = make(chan bool, 1) - chatBody *models.ChatBody - store storage.FullRepo - defaultFirstMsg = "Hello! What can I do for you?" - defaultStarter = []models.RoleMsg{} - defaultStarterBytes = []byte{} - interruptResp = false - ragger *rag.RAG - chunkParser ChunkParser - lastToolCall *models.FuncCall + activeChatName string + chunkChan = make(chan string, 10) + openAIToolChan = make(chan string, 10) + streamDone = make(chan bool, 1) + chatBody *models.ChatBody + store storage.FullRepo + defaultFirstMsg = "Hello! What can I do for you?" + defaultStarter = []models.RoleMsg{} + interruptResp = false + ragger *rag.RAG + chunkParser ChunkParser + lastToolCall *models.FuncCall //nolint:unused // TTS_ENABLED conditionally uses this orator Orator asr STT @@ -1170,11 +1169,6 @@ func init() { slog.Error("failed to open log file", "error", err, "filename", cfg.LogFile) return } - defaultStarterBytes, err = json.Marshal(defaultStarter) - if err != nil { - slog.Error("failed to marshal defaultStarter", "error", err) - return - } // load cards basicCard.Role = cfg.AssistantRole // toolCard.Role = cfg.AssistantRole diff --git a/helpfuncs.go b/helpfuncs.go index f74cd13..28e7962 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -111,9 +111,11 @@ func startNewChat() { chatBody.Messages = chatBody.Messages[:2] textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) newChat := &models.Chat{ - ID: id + 1, - Name: fmt.Sprintf("%d_%s", id+1, cfg.AssistantRole), - Msgs: string(defaultStarterBytes), + ID: id + 1, + Name: fmt.Sprintf("%d_%s", id+1, cfg.AssistantRole), + // chat is written to db when we get first llm response (or any) + // actual chat history (messages) would be parsed then + Msgs: "", Agent: cfg.AssistantRole, } activeChatName = newChat.Name @@ -235,9 +237,10 @@ func makeStatusLine() string { } else { 
shellModeInfo = "" } - statusLine := fmt.Sprintf(indexLineCompletion, botRespMode, activeChatName, - cfg.ToolUse, chatBody.Model, cfg.SkipLLMResp, cfg.CurrentAPI, - isRecording, persona, botPersona, injectRole) + statusLine := fmt.Sprintf(indexLineCompletion, boolColors[botRespMode], botRespMode, activeChatName, + boolColors[cfg.ToolUse], cfg.ToolUse, chatBody.Model, boolColors[cfg.SkipLLMResp], + cfg.SkipLLMResp, cfg.CurrentAPI, boolColors[isRecording], isRecording, persona, + botPersona, boolColors[injectRole], injectRole) return statusLine + imageInfo + shellModeInfo } @@ -260,6 +263,9 @@ func listChatRoles() []string { } currentCard, ok := sysMap[currentChat.Agent] if !ok { + // case which won't let to switch roles: + // started new chat (basic_sys or any other), at the start it yet be saved or have chatbody + // if it does not have a card or chars, it'll return an empty slice // log error logger.Warn("failed to find current card in sysMap", "agent", currentChat.Agent, "sysMap", sysMap) return cbc diff --git a/main.go b/main.go index c375d95..0f2df2e 100644 --- a/main.go +++ b/main.go @@ -8,6 +8,7 @@ import ( ) var ( + boolColors = map[bool]string{true: "green", false: "red"} botRespMode = false editMode = false roleEditMode = false @@ -17,9 +18,8 @@ var ( currentORModelIndex = 0 // Index to track current OpenRouter model in ORFreeModels slice currentLocalModelIndex = 0 // Index to track current llama.cpp model shellMode = false - // indexLine = "F12 to show keys help | bot resp mode: [orange:-:b]%v[-:-:-] (F6) | card's char: [orange:-:b]%s[-:-:-] (ctrl+s) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI_URL: [orange:-:b]%s[-:-:-] (ctrl+v) | ThinkUse: [orange:-:b]%v[-:-:-] (ctrl+p) | Log Level: [orange:-:b]%v[-:-:-] (ctrl+p) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q)" - indexLineCompletion = 
"F12 to show keys help | bot resp mode: [orange:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI: [orange:-:b]%s[-:-:-] (ctrl+v) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | Bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role_inject [orange:-:b]%v[-:-:-]" - focusSwitcher = map[tview.Primitive]tview.Primitive{} + indexLineCompletion = "F12 to show keys help | llm turn: [%s:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [%s:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [%s:-:b]%v[-:-:-] (F10)\nAPI: [orange:-:b]%s[-:-:-] (ctrl+v) | recording: [%s:-:b]%v[-:-:-] (ctrl+r) | writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role injection (alt+7) [%s:-:b]%v[-:-:-]" + focusSwitcher = map[tview.Primitive]tview.Primitive{} ) func main() { diff --git a/tui.go b/tui.go index 54a4e32..d222d15 100644 --- a/tui.go +++ b/tui.go @@ -832,13 +832,6 @@ func init() { // Handle Alt+7 to toggle injectRole if event.Key() == tcell.KeyRune && event.Rune() == '7' && event.Modifiers()&tcell.ModAlt != 0 { injectRole = !injectRole - status := "disabled" - if injectRole { - status = "enabled" - } - if err := notifyUser("injectRole", "Role injection "+status); err != nil { - logger.Error("failed to send notification", "error", err) - } updateStatusLine() } if event.Key() == tcell.KeyF1 { -- cgit v1.2.3 From fa192a262410eb98b42ff8fb9e0f4e1111240514 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 25 Jan 2026 09:59:07 +0300 Subject: Feat: autoturn --- bot.go | 35 +++++++++++++++++++++++++++++++++++ config/config.go | 27 ++++++++++++++------------- 2 files changed, 49 insertions(+), 13 deletions(-) diff --git a/bot.go b/bot.go index cd35445..1a2cebb 100644 --- a/bot.go +++ b/bot.go @@ -813,7 +813,18 
@@ out: if err := updateStorageChat(activeChatName, chatBody.Messages); err != nil { logger.Warn("failed to update storage", "error", err, "name", activeChatName) } + // FIXME: recursive calls findCall(respText.String(), toolResp.String(), tv) + // TODO: have a config attr + // Check if this message was sent privately to specific characters + // If so, trigger those characters to respond if that char is not controlled by user + // perhaps we should have narrator role to determine which char is next to act + if cfg.AutoTurn { + lastMsg := chatBody.Messages[len(chatBody.Messages)-1] + if len(lastMsg.KnownTo) > 0 { + triggerPrivateMessageResponses(lastMsg, tv) + } + } } // cleanChatBody removes messages with null or empty content to prevent API issues @@ -1205,3 +1216,27 @@ func init() { scrollToEndEnabled = cfg.AutoScrollEnabled go updateModelLists() } + +// triggerPrivateMessageResponses checks if a message was sent privately to specific characters +// and triggers those non-user characters to respond +func triggerPrivateMessageResponses(msg models.RoleMsg, tv *tview.TextView) { + if cfg == nil || !cfg.CharSpecificContextEnabled { + return + } + userCharacter := cfg.UserRole + if cfg.WriteNextMsgAs != "" { + userCharacter = cfg.WriteNextMsgAs + } + // Check each character in the KnownTo list + for _, recipient := range msg.KnownTo { + // Skip if this is the user character or the sender of the message + if recipient == cfg.UserRole || recipient == userCharacter || recipient == msg.Role || recipient == cfg.ToolRole { + continue + } + // Trigger the recipient character to respond by simulating a prompt + // that indicates it's their turn + triggerMsg := recipient + ":\n" + // Call chatRound with the trigger message to make the recipient respond + chatRound(triggerMsg, recipient, tv, false, false) + } +} diff --git a/config/config.go b/config/config.go index 62c8331..381fa72 100644 --- a/config/config.go +++ b/config/config.go @@ -54,19 +54,20 @@ type Config struct { 
TTS_PROVIDER string `toml:"TTS_PROVIDER"` TTS_LANGUAGE string `toml:"TTS_LANGUAGE"` // STT - STT_TYPE string `toml:"STT_TYPE"` // WHISPER_SERVER, WHISPER_BINARY - STT_URL string `toml:"STT_URL"` - STT_SR int `toml:"STT_SR"` - STT_ENABLED bool `toml:"STT_ENABLED"` - WhisperBinaryPath string `toml:"WhisperBinaryPath"` - WhisperModelPath string `toml:"WhisperModelPath"` - STT_LANG string `toml:"STT_LANG"` - DBPATH string `toml:"DBPATH"` - FilePickerDir string `toml:"FilePickerDir"` - FilePickerExts string `toml:"FilePickerExts"` - EnableMouse bool `toml:"EnableMouse"` - CharSpecificContextEnabled bool `toml:"CharSpecificContextEnabled"` - CharSpecificContextTag string `toml:"CharSpecificContextTag"` + STT_TYPE string `toml:"STT_TYPE"` // WHISPER_SERVER, WHISPER_BINARY + STT_URL string `toml:"STT_URL"` + STT_SR int `toml:"STT_SR"` + STT_ENABLED bool `toml:"STT_ENABLED"` + WhisperBinaryPath string `toml:"WhisperBinaryPath"` + WhisperModelPath string `toml:"WhisperModelPath"` + STT_LANG string `toml:"STT_LANG"` + DBPATH string `toml:"DBPATH"` + FilePickerDir string `toml:"FilePickerDir"` + FilePickerExts string `toml:"FilePickerExts"` + EnableMouse bool `toml:"EnableMouse"` + CharSpecificContextEnabled bool `toml:"CharSpecificContextEnabled"` + CharSpecificContextTag string `toml:"CharSpecificContextTag"` + AutoTurn bool } func LoadConfig(fn string) (*Config, error) { -- cgit v1.2.3 From 3a11210f52a850f84771e1642cafcc3027b85075 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sat, 31 Jan 2026 12:57:53 +0300 Subject: Enha: avoid recursion in llm calls --- bot.go | 145 ++++++++++++++++++++++++++++++++++++------------------- helpfuncs.go | 13 +++++ llm.go | 6 +++ models/models.go | 19 +++++--- tui.go | 9 ++-- 5 files changed, 132 insertions(+), 60 deletions(-) diff --git a/bot.go b/bot.go index 1a2cebb..6e7d094 100644 --- a/bot.go +++ b/bot.go @@ -25,17 +25,16 @@ import ( "time" "github.com/neurosnap/sentences/english" - "github.com/rivo/tview" ) var ( - httpClient = 
&http.Client{} - cfg *config.Config - logger *slog.Logger - logLevel = new(slog.LevelVar) -) -var ( + httpClient = &http.Client{} + cfg *config.Config + logger *slog.Logger + logLevel = new(slog.LevelVar) + ctx, cancel = context.WithCancel(context.Background()) activeChatName string + chatRoundChan = make(chan *models.ChatRoundReq, 1) chunkChan = make(chan string, 10) openAIToolChan = make(chan string, 10) streamDone = make(chan bool, 1) @@ -699,7 +698,23 @@ func roleToIcon(role string) string { return "<" + role + ">: " } -func chatRound(userMsg, role string, tv *tview.TextView, regen, resume bool) { +func chatWatcher(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case chatRoundReq := <-chatRoundChan: + if err := chatRound(chatRoundReq); err != nil { + logger.Error("failed to chatRound", "err", err) + } + } + } +} + +func chatRound(r *models.ChatRoundReq) error { + // chunkChan := make(chan string, 10) + // openAIToolChan := make(chan string, 10) + // streamDone := make(chan bool, 1) botRespMode = true botPersona := cfg.AssistantRole if cfg.WriteNextMsgAsCompletionAgent != "" { @@ -707,32 +722,23 @@ func chatRound(userMsg, role string, tv *tview.TextView, regen, resume bool) { } defer func() { botRespMode = false }() // check that there is a model set to use if is not local - if cfg.CurrentAPI == cfg.DeepSeekChatAPI || cfg.CurrentAPI == cfg.DeepSeekCompletionAPI { - if chatBody.Model != "deepseek-chat" && chatBody.Model != "deepseek-reasoner" { - if err := notifyUser("bad request", "wrong deepseek model name"); err != nil { - logger.Warn("failed ot notify user", "error", err) - return - } - return - } - } choseChunkParser() - reader, err := chunkParser.FormMsg(userMsg, role, resume) + reader, err := chunkParser.FormMsg(r.UserMsg, r.Role, r.Resume) if reader == nil || err != nil { - logger.Error("empty reader from msgs", "role", role, "error", err) - return + logger.Error("empty reader from msgs", "role", r.Role, "error", err) + return 
err } if cfg.SkipLLMResp { - return + return nil } go sendMsgToLLM(reader) - logger.Debug("looking at vars in chatRound", "msg", userMsg, "regen", regen, "resume", resume) - if !resume { - fmt.Fprintf(tv, "\n[-:-:b](%d) ", len(chatBody.Messages)) - fmt.Fprint(tv, roleToIcon(botPersona)) - fmt.Fprint(tv, "[-:-:-]\n") + logger.Debug("looking at vars in chatRound", "msg", r.UserMsg, "regen", r.Regen, "resume", r.Resume) + if !r.Resume { + fmt.Fprintf(textView, "\n[-:-:b](%d) ", len(chatBody.Messages)) + fmt.Fprint(textView, roleToIcon(botPersona)) + fmt.Fprint(textView, "[-:-:-]\n") if cfg.ThinkUse && !strings.Contains(cfg.CurrentAPI, "v1") { - // fmt.Fprint(tv, "") + // fmt.Fprint(textView, "") chunkChan <- "" } } @@ -742,29 +748,29 @@ out: for { select { case chunk := <-chunkChan: - fmt.Fprint(tv, chunk) + fmt.Fprint(textView, chunk) respText.WriteString(chunk) if scrollToEndEnabled { - tv.ScrollToEnd() + textView.ScrollToEnd() } // Send chunk to audio stream handler if cfg.TTS_ENABLED { TTSTextChan <- chunk } case toolChunk := <-openAIToolChan: - fmt.Fprint(tv, toolChunk) + fmt.Fprint(textView, toolChunk) toolResp.WriteString(toolChunk) if scrollToEndEnabled { - tv.ScrollToEnd() + textView.ScrollToEnd() } case <-streamDone: // drain any remaining chunks from chunkChan before exiting for len(chunkChan) > 0 { chunk := <-chunkChan - fmt.Fprint(tv, chunk) + fmt.Fprint(textView, chunk) respText.WriteString(chunk) if scrollToEndEnabled { - tv.ScrollToEnd() + textView.ScrollToEnd() } if cfg.TTS_ENABLED { // Send chunk to audio stream handler @@ -780,7 +786,7 @@ out: } botRespMode = false // numbers in chatbody and displayed must be the same - if resume { + if r.Resume { chatBody.Messages[len(chatBody.Messages)-1].Content += respText.String() // lastM.Content = lastM.Content + respText.String() // Process the updated message to check for known_to tags in resumed response @@ -797,7 +803,9 @@ out: } logger.Debug("chatRound: before cleanChatBody", "messages_before_clean", 
len(chatBody.Messages)) for i, msg := range chatBody.Messages { - logger.Debug("chatRound: before cleaning", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) + logger.Debug("chatRound: before cleaning", "index", i, + "role", msg.Role, "content_len", len(msg.Content), + "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) } // // Clean null/empty messages to prevent API issues with endpoints like llama.cpp jinja template cleanChatBody() @@ -813,8 +821,9 @@ out: if err := updateStorageChat(activeChatName, chatBody.Messages); err != nil { logger.Warn("failed to update storage", "error", err, "name", activeChatName) } - // FIXME: recursive calls - findCall(respText.String(), toolResp.String(), tv) + if findCall(respText.String(), toolResp.String()) { + return nil + } // TODO: have a config attr // Check if this message was sent privately to specific characters // If so, trigger those characters to respond if that char is not controlled by user @@ -822,9 +831,10 @@ out: if cfg.AutoTurn { lastMsg := chatBody.Messages[len(chatBody.Messages)-1] if len(lastMsg.KnownTo) > 0 { - triggerPrivateMessageResponses(lastMsg, tv) + triggerPrivateMessageResponses(lastMsg) } } + return nil } // cleanChatBody removes messages with null or empty content to prevent API issues @@ -909,7 +919,8 @@ func unmarshalFuncCall(jsonStr string) (*models.FuncCall, error) { return fc, nil } -func findCall(msg, toolCall string, tv *tview.TextView) { +// findCall: adds chatRoundReq into the chatRoundChan and returns true if does +func findCall(msg, toolCall string) bool { fc := &models.FuncCall{} if toolCall != "" { // HTML-decode the tool call string to handle encoded characters like < -> <= @@ -927,8 +938,13 @@ func findCall(msg, toolCall string, tv *tview.TextView) { chatBody.Messages = append(chatBody.Messages, toolResponseMsg) // Clear the stored tool call ID after using it (no longer needed) // Trigger the 
assistant to continue processing with the error message - chatRound("", cfg.AssistantRole, tv, false, false) - return + crr := &models.ChatRoundReq{ + Role: cfg.AssistantRole, + } + // provoke next llm msg after failed tool call + chatRoundChan <- crr + // chatRound("", cfg.AssistantRole, tv, false, false) + return true } lastToolCall.Args = openAIToolMap fc = lastToolCall @@ -940,8 +956,8 @@ func findCall(msg, toolCall string, tv *tview.TextView) { } } else { jsStr := toolCallRE.FindString(msg) - if jsStr == "" { - return + if jsStr == "" { // no tool call case + return false } prefix := "__tool_call__\n" suffix := "\n__tool_call__" @@ -960,8 +976,13 @@ func findCall(msg, toolCall string, tv *tview.TextView) { chatBody.Messages = append(chatBody.Messages, toolResponseMsg) logger.Debug("findCall: added tool error response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "message_count_after_add", len(chatBody.Messages)) // Trigger the assistant to continue processing with the error message - chatRound("", cfg.AssistantRole, tv, false, false) - return + // chatRound("", cfg.AssistantRole, tv, false, false) + crr := &models.ChatRoundReq{ + Role: cfg.AssistantRole, + } + // provoke next llm msg after failed tool call + chatRoundChan <- crr + return true } // Update lastToolCall with parsed function call lastToolCall.ID = fc.ID @@ -994,13 +1015,17 @@ func findCall(msg, toolCall string, tv *tview.TextView) { lastToolCall.ID = "" // Trigger the assistant to continue processing with the new tool response // by calling chatRound with empty content to continue the assistant's response - chatRound("", cfg.AssistantRole, tv, false, false) - return + crr := &models.ChatRoundReq{ + Role: cfg.AssistantRole, + } + // failed to find tool + chatRoundChan <- crr + return true } resp := callToolWithAgent(fc.Name, fc.Args) toolMsg := string(resp) // Remove the "tool response: " prefix and %+v formatting logger.Info("llm used tool call", "tool_resp", 
toolMsg, "tool_attrs", fc) - fmt.Fprintf(tv, "%s[-:-:b](%d) <%s>: [-:-:-]\n%s\n", + fmt.Fprintf(textView, "%s[-:-:b](%d) <%s>: [-:-:-]\n%s\n", "\n\n", len(chatBody.Messages), cfg.ToolRole, toolMsg) // Create tool response message with the proper tool_call_id toolResponseMsg := models.RoleMsg{ @@ -1014,7 +1039,11 @@ func findCall(msg, toolCall string, tv *tview.TextView) { lastToolCall.ID = "" // Trigger the assistant to continue processing with the new tool response // by calling chatRound with empty content to continue the assistant's response - chatRound("", cfg.AssistantRole, tv, false, false) + crr := &models.ChatRoundReq{ + Role: cfg.AssistantRole, + } + chatRoundChan <- crr + return true } func chatToTextSlice(messages []models.RoleMsg, showSys bool) []string { @@ -1163,10 +1192,12 @@ func summarizeAndStartNewChat() { } func init() { + // ctx, cancel := context.WithCancel(context.Background()) var err error cfg, err = config.LoadConfig("config.toml") if err != nil { fmt.Println("failed to load config.toml") + cancel() os.Exit(1) return } @@ -1178,6 +1209,8 @@ func init() { os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { slog.Error("failed to open log file", "error", err, "filename", cfg.LogFile) + cancel() + os.Exit(1) return } // load cards @@ -1188,13 +1221,17 @@ func init() { logger = slog.New(slog.NewTextHandler(logfile, &slog.HandlerOptions{Level: logLevel})) store = storage.NewProviderSQL(cfg.DBPATH, logger) if store == nil { + cancel() os.Exit(1) + return } ragger = rag.New(logger, store, cfg) // https://github.com/coreydaley/ggerganov-llama.cpp/blob/master/examples/server/README.md // load all chats in memory if _, err := loadHistoryChats(); err != nil { logger.Error("failed to load chat", "error", err) + cancel() + os.Exit(1) return } lastToolCall = &models.FuncCall{} @@ -1215,11 +1252,12 @@ func init() { // Initialize scrollToEndEnabled based on config scrollToEndEnabled = cfg.AutoScrollEnabled go updateModelLists() + go chatWatcher(ctx) 
} // triggerPrivateMessageResponses checks if a message was sent privately to specific characters // and triggers those non-user characters to respond -func triggerPrivateMessageResponses(msg models.RoleMsg, tv *tview.TextView) { +func triggerPrivateMessageResponses(msg models.RoleMsg) { if cfg == nil || !cfg.CharSpecificContextEnabled { return } @@ -1237,6 +1275,11 @@ func triggerPrivateMessageResponses(msg models.RoleMsg, tv *tview.TextView) { // that indicates it's their turn triggerMsg := recipient + ":\n" // Call chatRound with the trigger message to make the recipient respond - chatRound(triggerMsg, recipient, tv, false, false) + // chatRound(triggerMsg, recipient, tv, false, false) + crr := &models.ChatRoundReq{ + UserMsg: triggerMsg, + Role: recipient, + } + chatRoundChan <- crr } } diff --git a/helpfuncs.go b/helpfuncs.go index 28e7962..849b0a0 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -279,3 +279,16 @@ func listChatRoles() []string { charset = append(charset, cbc...) return charset } + +func deepseekModelValidator() error { + if cfg.CurrentAPI == cfg.DeepSeekChatAPI || cfg.CurrentAPI == cfg.DeepSeekCompletionAPI { + if chatBody.Model != "deepseek-chat" && chatBody.Model != "deepseek-reasoner" { + if err := notifyUser("bad request", "wrong deepseek model name"); err != nil { + logger.Warn("failed to notify user", "error", err) + return err + } + return nil + } + } + return nil +} diff --git a/llm.go b/llm.go index cd5a3fe..5bd7554 100644 --- a/llm.go +++ b/llm.go @@ -363,6 +363,9 @@ func (ds DeepSeekerCompletion) GetToken() string { func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader, error) { logger.Debug("formmsg deepseekercompletion", "link", cfg.CurrentAPI) + if err := deepseekModelValidator(); err != nil { + return nil, err + } if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} newMsg = processMessageTag(newMsg) @@ -445,6 +448,9 @@ func (ds DeepSeekerChat) 
GetToken() string { func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { logger.Debug("formmsg deepseekerchat", "link", cfg.CurrentAPI) + if err := deepseekModelValidator(); err != nil { + return nil, err + } if msg != "" { // otherwise let the bot continue newMsg := models.RoleMsg{Role: role, Content: msg} newMsg = processMessageTag(newMsg) diff --git a/models/models.go b/models/models.go index 69bdf02..76ef183 100644 --- a/models/models.go +++ b/models/models.go @@ -116,9 +116,9 @@ func (m RoleMsg) MarshalJSON() ([]byte, error) { } else { // Use simple content format aux := struct { - Role string `json:"role"` - Content string `json:"content"` - ToolCallID string `json:"tool_call_id,omitempty"` + Role string `json:"role"` + Content string `json:"content"` + ToolCallID string `json:"tool_call_id,omitempty"` KnownTo []string `json:"known_to,omitempty"` }{ Role: m.Role, @@ -150,9 +150,9 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { // Otherwise, unmarshal as simple content format var simple struct { - Role string `json:"role"` - Content string `json:"content"` - ToolCallID string `json:"tool_call_id,omitempty"` + Role string `json:"role"` + Content string `json:"content"` + ToolCallID string `json:"tool_call_id,omitempty"` KnownTo []string `json:"known_to,omitempty"` } if err := json.Unmarshal(data, &simple); err != nil { @@ -540,3 +540,10 @@ func (lcp *LCPModels) ListModels() []string { } return resp } + +type ChatRoundReq struct { + UserMsg string + Role string + Regen bool + Resume bool +} diff --git a/tui.go b/tui.go index d222d15..e164423 100644 --- a/tui.go +++ b/tui.go @@ -873,7 +873,8 @@ func init() { // there is no case where user msg is regenerated // lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) - go chatRound("", cfg.UserRole, textView, true, false) + // go chatRound("", cfg.UserRole, textView, true, false) + chatRoundChan <- 
&models.ChatRoundReq{Role: cfg.UserRole} return nil } if event.Key() == tcell.KeyF3 && !botRespMode { @@ -1176,7 +1177,8 @@ func init() { // INFO: continue bot/text message // without new role lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role - go chatRound("", lastRole, textView, false, true) + // go chatRound("", lastRole, textView, false, true) + chatRoundChan <- &models.ChatRoundReq{Role: lastRole, Resume: true} return nil } if event.Key() == tcell.KeyCtrlQ { @@ -1347,7 +1349,8 @@ func init() { } colorText() } - go chatRound(msgText, persona, textView, false, false) + // go chatRound(msgText, persona, textView, false, false) + chatRoundChan <- &models.ChatRoundReq{Role: persona, UserMsg: msgText} // Also clear any image attachment after sending the message go func() { // Wait a short moment for the message to be processed, then clear the image attachment -- cgit v1.2.3 From 6f6a35459ef4de340c0c6825da20828e7f579207 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 1 Feb 2026 11:38:51 +0300 Subject: Chore: cleaning --- bot.go | 28 ++++++---------------------- bot_test.go | 2 +- llm.go | 6 +++--- 3 files changed, 10 insertions(+), 26 deletions(-) diff --git a/bot.go b/bot.go index 6e7d094..f55fc6d 100644 --- a/bot.go +++ b/bot.go @@ -169,26 +169,10 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m return filtered } -// cleanNullMessages removes messages with null or empty content to prevent API issues -func cleanNullMessages(messages []models.RoleMsg) []models.RoleMsg { - // // deletes tool calls which we don't want for now - // cleaned := make([]models.RoleMsg, 0, len(messages)) - // for _, msg := range messages { - // // is there a sense for this check at all? 
- // if msg.HasContent() || msg.ToolCallID != "" || msg.Role == cfg.AssistantRole || msg.Role == cfg.WriteNextMsgAsCompletionAgent { - // cleaned = append(cleaned, msg) - // } else { - // // Log filtered messages for debugging - // logger.Warn("filtering out message during cleaning", "role", msg.Role, "content", msg.Content, "tool_call_id", msg.ToolCallID, "has_content", msg.HasContent()) - // } - // } - return consolidateConsecutiveAssistantMessages(messages) -} - func cleanToolCalls(messages []models.RoleMsg) []models.RoleMsg { // If AutoCleanToolCallsFromCtx is false, keep tool call messages in context if cfg != nil && !cfg.AutoCleanToolCallsFromCtx { - return consolidateConsecutiveAssistantMessages(messages) + return consolidateAssistantMessages(messages) } cleaned := make([]models.RoleMsg, 0, len(messages)) for i, msg := range messages { @@ -198,11 +182,11 @@ func cleanToolCalls(messages []models.RoleMsg) []models.RoleMsg { cleaned = append(cleaned, msg) } } - return consolidateConsecutiveAssistantMessages(cleaned) + return consolidateAssistantMessages(cleaned) } -// consolidateConsecutiveAssistantMessages merges consecutive assistant messages into a single message -func consolidateConsecutiveAssistantMessages(messages []models.RoleMsg) []models.RoleMsg { +// consolidateAssistantMessages merges consecutive assistant messages into a single message +func consolidateAssistantMessages(messages []models.RoleMsg) []models.RoleMsg { if len(messages) == 0 { return messages } @@ -211,6 +195,7 @@ func consolidateConsecutiveAssistantMessages(messages []models.RoleMsg) []models isBuildingAssistantMsg := false for i := 0; i < len(messages); i++ { msg := messages[i] + // what about the case with multiplpe assistant roles? 
if msg.Role == cfg.AssistantRole || msg.Role == cfg.WriteNextMsgAsCompletionAgent { // If this is an assistant message, start or continue building if !isBuildingAssistantMsg { @@ -824,7 +809,6 @@ out: if findCall(respText.String(), toolResp.String()) { return nil } - // TODO: have a config attr // Check if this message was sent privately to specific characters // If so, trigger those characters to respond if that char is not controlled by user // perhaps we should have narrator role to determine which char is next to act @@ -850,7 +834,7 @@ func cleanChatBody() { // Tool request cleaning is now configurable via AutoCleanToolCallsFromCtx (default false) // /completion msg where part meant for user and other part tool call chatBody.Messages = cleanToolCalls(chatBody.Messages) - chatBody.Messages = cleanNullMessages(chatBody.Messages) + chatBody.Messages = consolidateAssistantMessages(chatBody.Messages) logger.Debug("cleanChatBody: after cleaning", "original_len", originalLen, "new_len", len(chatBody.Messages)) for i, msg := range chatBody.Messages { logger.Debug("cleanChatBody: after clean", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) diff --git a/bot_test.go b/bot_test.go index 3dabc15..4cbe953 100644 --- a/bot_test.go +++ b/bot_test.go @@ -117,7 +117,7 @@ func TestConsolidateConsecutiveAssistantMessages(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := consolidateConsecutiveAssistantMessages(tt.input) + result := consolidateAssistantMessages(tt.input) if len(result) != len(tt.expected) { t.Errorf("Expected %d messages, got %d", len(tt.expected), len(result)) diff --git a/llm.go b/llm.go index 5bd7554..7651a19 100644 --- a/llm.go +++ b/llm.go @@ -322,7 +322,7 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } } // Clean null/empty messages to prevent API issues - bodyCopy.Messages = 
cleanNullMessages(bodyCopy.Messages) + bodyCopy.Messages = consolidateAssistantMessages(bodyCopy.Messages) req := models.OpenAIReq{ ChatBody: bodyCopy, Tools: nil, @@ -488,7 +488,7 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } } // Clean null/empty messages to prevent API issues - bodyCopy.Messages = cleanNullMessages(bodyCopy.Messages) + bodyCopy.Messages = consolidateAssistantMessages(bodyCopy.Messages) dsBody := models.NewDSChatReq(*bodyCopy) data, err := json.Marshal(dsBody) if err != nil { @@ -676,7 +676,7 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } } // Clean null/empty messages to prevent API issues - bodyCopy.Messages = cleanNullMessages(bodyCopy.Messages) + bodyCopy.Messages = consolidateAssistantMessages(bodyCopy.Messages) orBody := models.NewOpenRouterChatReq(*bodyCopy, defaultLCPProps) if cfg.ToolUse && !resume && role != cfg.ToolRole { orBody.Tools = baseTools // set tools to use -- cgit v1.2.3 From c1b04303ef91709e6a0f2ec93f5ae5a1dac610ce Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 1 Feb 2026 12:53:06 +0300 Subject: Enha: persona suffix for /chat endpoints --- llm.go | 51 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 3 deletions(-) diff --git a/llm.go b/llm.go index 7651a19..d6e4d13 100644 --- a/llm.go +++ b/llm.go @@ -13,6 +13,27 @@ var imageAttachmentPath string // Global variable to track image attachment for var lastImg string // for ctrl+j var RAGMsg = "Retrieved context for user's query:\n" +// addPersonaSuffixToLastUserMessage adds the persona suffix to the last user message +// to indicate to the assistant who it should reply as +func addPersonaSuffixToLastUserMessage(messages []models.RoleMsg, persona string) []models.RoleMsg { + if len(messages) == 0 { + return messages + } + + // Find the last user message to modify + for i := len(messages) - 1; i >= 0; i-- { + if messages[i].Role == cfg.UserRole || 
messages[i].Role == "user" { + // Create a copy of the message to avoid modifying the original + modifiedMsg := messages[i] + modifiedMsg.Content = modifiedMsg.Content + "\n" + persona + ":" + messages[i] = modifiedMsg + break + } + } + + return messages +} + // containsToolSysMsg checks if the toolSysMsg already exists in the chat body func containsToolSysMsg() bool { for _, msg := range chatBody.Messages { @@ -307,7 +328,15 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } } // openai /v1/chat does not support custom roles; needs to be user, assistant, system - filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) + filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + + // Add persona suffix to the last user message to indicate who the assistant should reply as + if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) + } else if !resume { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) + } + bodyCopy := &models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, @@ -473,7 +502,15 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } } - filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) + filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + + // Add persona suffix to the last user message to indicate who the assistant should reply as + if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) + } else if !resume { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) + } + bodyCopy := 
&models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, @@ -661,7 +698,15 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } } // Create copy of chat body with standardized user role - filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages) + filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + + // Add persona suffix to the last user message to indicate who the assistant should reply as + if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) + } else if !resume { + filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) + } + bodyCopy := &models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, -- cgit v1.2.3 From e52e8ce2cc44b4e8cc950fe6811810db4142921d Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 2 Feb 2026 08:18:49 +0300 Subject: Enha: consolidate assistant messages only --- bot.go | 4 ++-- llm.go | 31 ++++++++++++++----------------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/bot.go b/bot.go index f55fc6d..9a991d6 100644 --- a/bot.go +++ b/bot.go @@ -195,8 +195,8 @@ func consolidateAssistantMessages(messages []models.RoleMsg) []models.RoleMsg { isBuildingAssistantMsg := false for i := 0; i < len(messages); i++ { msg := messages[i] - // what about the case with multiplpe assistant roles? 
- if msg.Role == cfg.AssistantRole || msg.Role == cfg.WriteNextMsgAsCompletionAgent { + // assistant role only + if msg.Role == cfg.AssistantRole { // If this is an assistant message, start or continue building if !isBuildingAssistantMsg { // Start accumulating assistant message diff --git a/llm.go b/llm.go index d6e4d13..a4162b7 100644 --- a/llm.go +++ b/llm.go @@ -19,18 +19,19 @@ func addPersonaSuffixToLastUserMessage(messages []models.RoleMsg, persona string if len(messages) == 0 { return messages } - - // Find the last user message to modify - for i := len(messages) - 1; i >= 0; i-- { - if messages[i].Role == cfg.UserRole || messages[i].Role == "user" { - // Create a copy of the message to avoid modifying the original - modifiedMsg := messages[i] - modifiedMsg.Content = modifiedMsg.Content + "\n" + persona + ":" - messages[i] = modifiedMsg - break - } - } - + // // Find the last user message to modify + // for i := len(messages) - 1; i >= 0; i-- { + // if messages[i].Role == cfg.UserRole || messages[i].Role == "user" { + // // Create a copy of the message to avoid modifying the original + // modifiedMsg := messages[i] + // modifiedMsg.Content = modifiedMsg.Content + "\n" + persona + ":" + // messages[i] = modifiedMsg + // break + // } + // } + modifiedMsg := messages[len(messages)-1] + modifiedMsg.Content = modifiedMsg.Content + "\n" + persona + ":\n" + messages[len(messages)-1] = modifiedMsg return messages } @@ -329,14 +330,10 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } // openai /v1/chat does not support custom roles; needs to be user, assistant, system filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) - // Add persona suffix to the last user message to indicate who the assistant should reply as - if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { - filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) - } else if !resume { + if 
!resume { filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) } - bodyCopy := &models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, -- cgit v1.2.3 From 343e045095419522a388aa0aa7d66ec1eced1803 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 2 Feb 2026 08:23:05 +0300 Subject: Enha: role suffix for /chat only if AutoTurn is enabled --- llm.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/llm.go b/llm.go index a4162b7..b7f28de 100644 --- a/llm.go +++ b/llm.go @@ -328,10 +328,10 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { logger.Debug("LCPChat: RAG message added to chat body", "role", ragMsg.Role, "rag_content_len", len(ragMsg.Content), "message_count_after_rag", len(chatBody.Messages)) } } - // openai /v1/chat does not support custom roles; needs to be user, assistant, system filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) + // openai /v1/chat does not support custom roles; needs to be user, assistant, system // Add persona suffix to the last user message to indicate who the assistant should reply as - if !resume { + if cfg.AutoTurn && !resume { filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) } bodyCopy := &models.ChatBody{ @@ -499,15 +499,12 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } } + // Create copy of chat body with standardized user role filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) - // Add persona suffix to the last user message to indicate who the assistant should reply as - if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { - filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) - } else if !resume { + if 
cfg.AutoTurn && !resume { filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) } - bodyCopy := &models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, @@ -696,14 +693,10 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } // Create copy of chat body with standardized user role filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) - // Add persona suffix to the last user message to indicate who the assistant should reply as - if !resume && cfg.WriteNextMsgAsCompletionAgent != "" { - filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, cfg.WriteNextMsgAsCompletionAgent) - } else if !resume { + if cfg.AutoTurn && !resume { filteredMessages = addPersonaSuffixToLastUserMessage(filteredMessages, botPersona) } - bodyCopy := &models.ChatBody{ Messages: make([]models.RoleMsg, len(filteredMessages)), Model: chatBody.Model, -- cgit v1.2.3 From 0e6d2747cde8485d4d1ce7e2dd866e03f77467fc Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 2 Feb 2026 08:29:38 +0300 Subject: Enha: auto turn config switch --- props_table.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/props_table.go b/props_table.go index a235d7c..1b8e894 100644 --- a/props_table.go +++ b/props_table.go @@ -137,6 +137,9 @@ func makePropsTable(props map[string]float32) *tview.Table { // Reconfigure the app's mouse setting app.EnableMouse(cfg.EnableMouse) }) + addCheckboxRow("Auto turn (for cards with many chars)", cfg.AutoTurn, func(checked bool) { + cfg.AutoTurn = checked + }) // Add dropdowns logLevels := []string{"Debug", "Info", "Warn"} addListPopupRow("Set log level", logLevels, GetLogLevel(), func(option string) { -- cgit v1.2.3 From fcb4b99332b78abb9517378e83379c2e5faed2ab Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 2 Feb 2026 11:11:07 +0300 Subject: Fix(tts): mutex use --- extra/tts.go | 60 
++++++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 8 deletions(-) diff --git a/extra/tts.go b/extra/tts.go index fd4a235..dcc811e 100644 --- a/extra/tts.go +++ b/extra/tts.go @@ -16,6 +16,7 @@ import ( "regexp" "strings" "time" + "sync" google_translate_tts "github.com/GrailFinder/google-translate-tts" "github.com/GrailFinder/google-translate-tts/handlers" @@ -77,6 +78,7 @@ type Orator interface { // impl https://github.com/remsky/Kokoro-FastAPI type KokoroOrator struct { logger *slog.Logger + mu sync.Mutex URL string Format models.AudioFormat Stream bool @@ -93,6 +95,7 @@ type KokoroOrator struct { // Google Translate TTS implementation type GoogleTranslateOrator struct { logger *slog.Logger + mu sync.Mutex speech *google_translate_tts.Speech currentStream *beep.Ctrl currentDone chan bool @@ -109,6 +112,7 @@ func (o *KokoroOrator) stoproutine() { for len(TTSTextChan) > 0 { <-TTSTextChan } + o.mu.Lock() o.textBuffer.Reset() if o.currentDone != nil { select { @@ -118,6 +122,7 @@ func (o *KokoroOrator) stoproutine() { } } o.interrupt = true + o.mu.Unlock() } } @@ -128,21 +133,24 @@ func (o *KokoroOrator) readroutine() { for { select { case chunk := <-TTSTextChan: + o.mu.Lock() o.interrupt = false - // sentenceBuf.WriteString(chunk) - // text := sentenceBuf.String() _, err := o.textBuffer.WriteString(chunk) if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) + o.mu.Unlock() continue } text := o.textBuffer.String() + o.mu.Unlock() sentences := tokenizer.Tokenize(text) o.logger.Debug("adding chunk", "chunk", chunk, "text", text, "sen-len", len(sentences)) for i, sentence := range sentences { if i == len(sentences)-1 { // last sentence + o.mu.Lock() o.textBuffer.Reset() _, err := o.textBuffer.WriteString(sentence.Text) + o.mu.Unlock() if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) continue @@ -163,7 +171,9 @@ func (o *KokoroOrator) readroutine() { // lln is done get 
the whole message out if len(TTSTextChan) > 0 { // otherwise might get stuck for chunk := range TTSTextChan { + o.mu.Lock() _, err := o.textBuffer.WriteString(chunk) + o.mu.Unlock() if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) continue @@ -174,16 +184,21 @@ func (o *KokoroOrator) readroutine() { } } // flush remaining text + o.mu.Lock() remaining := o.textBuffer.String() remaining = cleanText(remaining) o.textBuffer.Reset() + o.mu.Unlock() if remaining == "" { continue } o.logger.Debug("calling Speak with remainder", "rem", remaining) sentencesRem := tokenizer.Tokenize(remaining) for _, rs := range sentencesRem { // to avoid dumping large volume of text - if o.interrupt { + o.mu.Lock() + interrupt := o.interrupt + o.mu.Unlock() + if interrupt { break } if err := o.Speak(rs.Text); err != nil { @@ -240,6 +255,9 @@ func (o *KokoroOrator) GetLogger() *slog.Logger { } func (o *KokoroOrator) requestSound(text string) (io.ReadCloser, error) { + if o.URL == "" { + return nil, fmt.Errorf("TTS URL is empty") + } payload := map[string]interface{}{ "input": text, "voice": o.Voice, @@ -291,14 +309,18 @@ func (o *KokoroOrator) Speak(text string) error { o.logger.Debug("failed to init speaker", "error", err) } done := make(chan bool) + o.mu.Lock() o.currentDone = done - // Create controllable stream and store reference o.currentStream = &beep.Ctrl{Streamer: beep.Seq(streamer, beep.Callback(func() { + o.mu.Lock() close(done) o.currentStream = nil + o.currentDone = nil + o.mu.Unlock() })), Paused: false} + o.mu.Unlock() speaker.Play(o.currentStream) - <-o.currentDone + <-done return nil } @@ -307,6 +329,8 @@ func (o *KokoroOrator) Stop() { o.logger.Debug("attempted to stop orator", "orator", o) speaker.Lock() defer speaker.Unlock() + o.mu.Lock() + defer o.mu.Unlock() if o.currentStream != nil { // o.currentStream.Paused = true o.currentStream.Streamer = nil @@ -322,6 +346,7 @@ func (o *GoogleTranslateOrator) stoproutine() { for len(TTSTextChan) > 
0 { <-TTSTextChan } + o.mu.Lock() o.textBuffer.Reset() if o.currentDone != nil { select { @@ -331,6 +356,7 @@ func (o *GoogleTranslateOrator) stoproutine() { } } o.interrupt = true + o.mu.Unlock() } } @@ -339,19 +365,24 @@ func (o *GoogleTranslateOrator) readroutine() { for { select { case chunk := <-TTSTextChan: + o.mu.Lock() o.interrupt = false _, err := o.textBuffer.WriteString(chunk) if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) + o.mu.Unlock() continue } text := o.textBuffer.String() + o.mu.Unlock() sentences := tokenizer.Tokenize(text) o.logger.Debug("adding chunk", "chunk", chunk, "text", text, "sen-len", len(sentences)) for i, sentence := range sentences { if i == len(sentences)-1 { // last sentence + o.mu.Lock() o.textBuffer.Reset() _, err := o.textBuffer.WriteString(sentence.Text) + o.mu.Unlock() if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) continue @@ -372,7 +403,9 @@ func (o *GoogleTranslateOrator) readroutine() { // lln is done get the whole message out if len(TTSTextChan) > 0 { // otherwise might get stuck for chunk := range TTSTextChan { + o.mu.Lock() _, err := o.textBuffer.WriteString(chunk) + o.mu.Unlock() if err != nil { o.logger.Warn("failed to write to stringbuilder", "error", err) continue @@ -382,16 +415,21 @@ func (o *GoogleTranslateOrator) readroutine() { } } } + o.mu.Lock() remaining := o.textBuffer.String() remaining = cleanText(remaining) o.textBuffer.Reset() + o.mu.Unlock() if remaining == "" { continue } o.logger.Debug("calling Speak with remainder", "rem", remaining) sentencesRem := tokenizer.Tokenize(remaining) for _, rs := range sentencesRem { // to avoid dumping large volume of text - if o.interrupt { + o.mu.Lock() + interrupt := o.interrupt + o.mu.Unlock() + if interrupt { break } if err := o.Speak(rs.Text); err != nil { @@ -434,14 +472,18 @@ func (o *GoogleTranslateOrator) Speak(text string) error { o.logger.Debug("failed to init speaker", "error", err) } done := 
make(chan bool) + o.mu.Lock() o.currentDone = done - // Create controllable stream and store reference o.currentStream = &beep.Ctrl{Streamer: beep.Seq(playbackStreamer, beep.Callback(func() { + o.mu.Lock() close(done) o.currentStream = nil + o.currentDone = nil + o.mu.Unlock() })), Paused: false} + o.mu.Unlock() speaker.Play(o.currentStream) - <-o.currentDone // wait for playback to complete + <-done // wait for playback to complete return nil } @@ -449,6 +491,8 @@ func (o *GoogleTranslateOrator) Stop() { o.logger.Debug("attempted to stop google translate orator") speaker.Lock() defer speaker.Unlock() + o.mu.Lock() + defer o.mu.Unlock() if o.currentStream != nil { o.currentStream.Streamer = nil } -- cgit v1.2.3 From e3be45b023686e9f03b70481ac26054a720d38f3 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 2 Feb 2026 14:29:31 +0300 Subject: Fix: openrouter model list --- main_test.go | 2 +- models/openrouter.go | 8 ++-- models/openrouter_test.go | 97 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 models/openrouter_test.go diff --git a/main_test.go b/main_test.go index 84d23ba..998778c 100644 --- a/main_test.go +++ b/main_test.go @@ -1,9 +1,9 @@ package main import ( - "gf-lt/models" "fmt" "gf-lt/config" + "gf-lt/models" "strings" "testing" ) diff --git a/models/openrouter.go b/models/openrouter.go index 50f26b6..29ba0d8 100644 --- a/models/openrouter.go +++ b/models/openrouter.go @@ -145,9 +145,11 @@ func (orm *ORModels) ListModels(free bool) []string { resp := []string{} for _, model := range orm.Data { if free { - if model.Pricing.Prompt == "0" && model.Pricing.Request == "0" && - model.Pricing.Completion == "0" { - resp = append(resp, model.ID) + if model.Pricing.Prompt == "0" && model.Pricing.Completion == "0" { + // treat missing request as free + if model.Pricing.Request == "" || model.Pricing.Request == "0" { + resp = append(resp, model.ID) + } } } else { resp = append(resp, model.ID) 
diff --git a/models/openrouter_test.go b/models/openrouter_test.go new file mode 100644 index 0000000..dd38d23 --- /dev/null +++ b/models/openrouter_test.go @@ -0,0 +1,97 @@ +package models + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" +) + +func TestORModelsListModels(t *testing.T) { + t.Run("unit test with hardcoded data", func(t *testing.T) { + jsonData := `{ + "data": [ + { + "id": "model/free", + "pricing": { + "prompt": "0", + "completion": "0" + } + }, + { + "id": "model/paid", + "pricing": { + "prompt": "0.001", + "completion": "0.002" + } + }, + { + "id": "model/request-zero", + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0" + } + }, + { + "id": "model/request-nonzero", + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0.5" + } + } + ] + }` + var models ORModels + if err := json.Unmarshal([]byte(jsonData), &models); err != nil { + t.Fatalf("failed to unmarshal test data: %v", err) + } + freeModels := models.ListModels(true) + if len(freeModels) != 2 { + t.Errorf("expected 2 free models, got %d: %v", len(freeModels), freeModels) + } + expectedFree := map[string]bool{"model/free": true, "model/request-zero": true} + for _, id := range freeModels { + if !expectedFree[id] { + t.Errorf("unexpected free model ID: %s", id) + } + } + allModels := models.ListModels(false) + if len(allModels) != 4 { + t.Errorf("expected 4 total models, got %d", len(allModels)) + } + }) + + t.Run("integration with or_models.json", func(t *testing.T) { + // Attempt to load the real data file from the project root + path := filepath.Join("..", "or_models.json") + data, err := os.ReadFile(path) + if err != nil { + t.Skip("or_models.json not found, skipping integration test") + } + var models ORModels + if err := json.Unmarshal(data, &models); err != nil { + t.Fatalf("failed to unmarshal %s: %v", path, err) + } + freeModels := models.ListModels(true) + if len(freeModels) == 0 { + t.Error("expected at least one free model, got 
none") + } + allModels := models.ListModels(false) + if len(allModels) == 0 { + t.Error("expected at least one model") + } + // Ensure free models are subset of all models + freeSet := make(map[string]bool) + for _, id := range freeModels { + freeSet[id] = true + } + for _, id := range freeModels { + if !freeSet[id] { + t.Errorf("free model %s not found in all models", id) + } + } + t.Logf("found %d free models out of %d total models", len(freeModels), len(allModels)) + }) +} \ No newline at end of file -- cgit v1.2.3 From 65b4f01177a38497b0ecb82b09f9dcded55c5acb Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 3 Feb 2026 11:00:12 +0300 Subject: Doc: char context doc --- bot.go | 9 ++- char-specific-context.md | 162 ++++++++++++++++++++++++++++++++++------- sysprompts/alice_bob_carl.json | 2 - 3 files changed, 142 insertions(+), 31 deletions(-) diff --git a/bot.go b/bot.go index 9a991d6..b836824 100644 --- a/bot.go +++ b/bot.go @@ -1251,15 +1251,18 @@ func triggerPrivateMessageResponses(msg models.RoleMsg) { } // Check each character in the KnownTo list for _, recipient := range msg.KnownTo { - // Skip if this is the user character or the sender of the message - if recipient == cfg.UserRole || recipient == userCharacter || recipient == msg.Role || recipient == cfg.ToolRole { + if recipient == msg.Role || recipient == cfg.ToolRole { + // weird cases, skip continue } + // Skip if this is the user character or the sender of the message + if recipient == cfg.UserRole || recipient == userCharacter { + return // user in known_to => users turn + } // Trigger the recipient character to respond by simulating a prompt // that indicates it's their turn triggerMsg := recipient + ":\n" // Call chatRound with the trigger message to make the recipient respond - // chatRound(triggerMsg, recipient, tv, false, false) crr := &models.ChatRoundReq{ UserMsg: triggerMsg, Role: recipient, diff --git a/char-specific-context.md b/char-specific-context.md index c1a7bd6..423572b 100644 
--- a/char-specific-context.md +++ b/char-specific-context.md @@ -1,39 +1,149 @@ -say we have a chat (system card) with three or more characters: -Alice, Bob and Carl. -the chat uses /completion endpoint (as oposed to /v1/chat/completion of openai) to the same llm on all chars. -Alice needs to pass info to Bob without Carl knowing the content (or perhaps even that communication occured at all). -Issue is that being in the same chat history (chatBody), llm shares context for each char. -Even if message passed through the tool calls, Carl can see a tool call with the arguments. -If we delete tool calls and their responses, then both Bob and Alice would have to re-request that secret info each time it is their turn, which is absurd. +# Character-Specific Context -concept of char specific context: -let every message to have a `KnownTo` field (type []string); -which could be empty (to everyone) or have speicifc names ([]string{"Alice", "Bob"}) -so when that's character turn (which we track in `WriteNextMsgAsCompletionAgent`, then that message is injected at its proper index position (means every message should know it's index?) into chatBody (chat history). +**/completion only feature; won't work with /v1/chat** -indexes are tricky. -what happens if msg is deleted? will every following message decrement their index? so far edit/copy functionality take in consideration position of existing messages in order. -how to avoid two messages with the same index? if Alices letter is send as secret and assigned index: 5. Then Carl's turn we have that secret message excluded, so his action would get also index 5. -Perhaps instead of indexes we should only keep message order by timestamps (time.Time)? 
+## Overview -so we need to think of some sort of tag that llm could add into the message, to make sure it is to be known by that specific target char, some weird string that would not occur naturally, that we could parse: -__known_to_chars__Alice,Bob__ +Character-Specific Context is a feature that enables private communication between characters in a multi-character chat. When enabled, messages can be tagged with a special marker indicating which characters should "know" about (see) that message. This allows for secret conversations, private information sharing, and roleplaying scenarios where certain characters are not privy to all communications. +(This feature works by filtering the chat history for each character based on the `KnownTo` field associated with each message. Only messages that are intended for a particular character (or are public) are included in that character's view of the conversation.) -for ex. +## How It Works + +### Tagging Messages + +Messages can be tagged with a special string (by default `__known_to_chars__`) followed by a comma-separated list of character names. The tag can appear anywhere in the message content. **After csv of characters tag should be closed with `__` (for regexp to know where it ends).** + +**Example:** +``` Alice: __known_to_chars__Bob__ Can you keep a secret? -Bob: I also have a secret for you Alice __known_to_chars__Alice__ +``` + +**To avoid breaking immersion, it is better to place the tag in (ooc:)** +``` +Alice: (ooc: __known_to_chars__Bob__) Can you keep a secret? +``` + +This message will be visible only to Alice (the sender) and Bob. The tag is parsed by `parseKnownToTag` and the resulting list of character names is stored in the `KnownTo` field of the message (`RoleMsg`). The sender is automatically added to the `KnownTo` list (if not already present) by `processMessageTag`. + +Multiple tags can be used in a single message; all mentioned characters are combined into the `KnownTo` list. 
+ +### Filtering Chat History + +When it's a character's turn to respond, the function `filterMessagesForCharacter` filters the full message list, returning only those messages where: + +- `KnownTo` is empty (message is public), OR +- `KnownTo` contains the character's name. + +System messages (`role == "system"`) are always visible to all characters. + +The filtered history is then used to construct the prompt sent to the LLM. This ensures each character only sees messages they are supposed to know about. + +### Configuration + +Two configuration settings control this feature: + +- `CharSpecificContextEnabled` – boolean; enables or disables the feature globally. +- `CharSpecificContextTag` – string; the tag used to mark private messages. Default is `__known_to_chars__`. + +These are set in `config.toml` (see `config.example.toml` for the default values). + +### Processing Pipeline + +1. **Message Creation** – When a message is added to the chat (by a user or LLM), `processMessageTag` scans its content for the known‑to tag. +2. **Storage** – The parsed `KnownTo` list is stored with the message in the database. +3. **Filtering** – Whenever the chat history is needed (e.g., for an LLM request), `filterMessagesForCharacter` is called with the target character (the one whose turn it is). The filtered list is used for the prompt. +4. **Display** – The TUI also uses the same filtering when showing the conversation for a selected character (see “Writing as…”). + +## Usage Examples + +### Basic Private Message + +Alice wants to tell Bob something without Carl knowing: + +``` +Alice: __known_to_chars__Bob__ Meet me at the library tonight. +``` + +Result: +- Alice (sender) sees the message. +- Bob sees the message. +- Carl does **not** see the message in his chat history. + +### Multi‑recipient Secret + +Alice shares a secret with Bob and Carl, but not David: + +``` +Alice: (ooc: __known_to_chars__Bob,Carl__) The treasure is hidden under the old oak. 
+``` + +### Public Message + +A message without any tag (or with an empty `KnownTo`) is visible to all characters. + +``` +Alice: Hello everyone! +``` + +### User‑Role Considerations + +The human user can assume any character’s identity via the “Writing as…” feature (`cfg.UserRole` and `cfg.WriteNextMsgAs`). When the user writes as a character, the same filtering rules apply: the user will see only the messages that character would see. + +## Interaction with AutoTurn and WriteNextMsgAsCompletionAgent + +### WriteNextMsgAsCompletionAgent + +This configuration variable determines which character the LLM should respond as. It is used by `filterMessagesForCurrentCharacter` to select the target character for filtering. If `WriteNextMsgAsCompletionAgent` is set, the LLM will reply in the voice of that character, and only messages visible to that character will be included in the prompt. + +### AutoTurn + +Normally llm and user (human) take turns writting messages. With private messages there is an issue, where llm can write a private message that will not be visible for character who user controls, so for a human it would appear that llm did not respond. It is desirable in this case, for llm to answer to itself, larping as target character for that private message. + +When `AutoTurn` is enabled, the system can automatically trigger responses from llm as characters who have received a private message. The logic in `triggerPrivateMessageResponses` checks the `KnownTo` list of the last message and, for each recipient that is not the user (or the sender), queues a chat round for that character. This creates a chain of private replies without user intervention. + +**Example flow:** +1. Alice (llm) sends a private message to Bob (llm) (`KnownTo = ["Alice","Bob"]`). +2. Carl (user) sees nothing. +3. `AutoTurn` detects this and queues a response from Bob. +4. Bob replies (potentially also privately). +5. 
The conversation continues automatically until public message is made, or Carl (user) was included in `KnownTo`. + + +## Cardmaking with multiple characters + +So far only json format supports multiple characters. +Card example: +``` +{ + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '__known_to_chars__{CharName1,CharName2,CharName3}__'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... (ooc: __known_to_chars__Bob__)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", + "role": "Alice", + "filepath": "sysprompts/alice_bob_carl.json", + "chars": ["Alice", "Bob", "Carl"], + "first_msg": "Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?" +} +``` + +## Limitations & Caveats + +### Endpoint Compatibility -tag can be anywhere in the message. Sender should be also included in KnownTo, so we should parse sender name and add them to KnownTo. +Character‑specific context relies on the `/completion` endpoint (or other completion‑style endpoints) where the LLM is presented with a raw text prompt containing the entire filtered history. It does **not** work with OpenAI‑style `/v1/chat/completions` endpoints, because those endpoints enforce a fixed role set (`user`/`assistant`/`system`) and strip custom role names and metadata. -also need to consider user case (as in human chatting with llm). User also can assume any char identity to write the message and ideally the same rules should affect user's chars. 
-user has "Writing as {char}" (vars: persona and cfg.UserRole) -on persona change we should update tui text view to have atual for that character chat history +### Tag Parsing -Again, this is not going to work with openais /v1/chat endpoint since it converts all characters to user/assistant; so it is completion only feature. It also might cause unwanted effects, so we better have an option in config to switch this context editing on/off. +- The tag is case‑sensitive. +- Whitespace around character names is trimmed. +- If the tag appears multiple times, all mentioned characters are combined. +### Database Storage -alternative approach to the tag string would be to have a judge agent to determine after each message what characters should hae access to it. but it means to make an additional call to llm after each msg. +The `KnownTo` field is stored as a JSON array in the database. Existing messages that were created before enabling the feature will have an empty `KnownTo` and thus be visible to all characters. +## Relevant Configuration -need to update character card loader to support multiple characters +```toml +CharSpecificContextEnabled = true +CharSpecificContextTag = "__known_to_chars__" +AutoTurn = false +``` diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index 8c7b8e2..b2a0ac5 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -1,8 +1,6 @@ { "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '__known_to_chars__{CharName1,CharName2,CharName3}__'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... 
(ooc: __known_to_chars__Bob__)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", "role": "Alice", - "role2": "Bob", - "role3": "Carl", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], "first_msg": "Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?" -- cgit v1.2.3 From 0f5bbaa94390cd4d11facc8b2e7fb825b128ef31 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 3 Feb 2026 12:04:20 +0300 Subject: Enha: update config --- config.example.toml | 7 +++++-- config/config.go | 27 ++++++++++++++------------- props_table.go | 3 +++ 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/config.example.toml b/config.example.toml index 85b2662..82aa5f5 100644 --- a/config.example.toml +++ b/config.example.toml @@ -19,7 +19,7 @@ AssistantRole = "assistant" SysDir = "sysprompts" ChunkLimit = 100000 AutoScrollEnabled = true -# AutoCleanToolCallsFromCtx = false +AutoCleanToolCallsFromCtx = false # rag settings RAGBatchSize = 1 RAGWordLimit = 80 @@ -39,9 +39,12 @@ WhisperBinaryPath = "./batteries/whisper.cpp/build/bin/whisper-cli" # Path to wh WhisperModelPath = "./batteries/whisper.cpp/ggml-large-v3-turbo-q5_0.bin" # Path to whisper model file (for WHISPER_BINARY mode) STT_LANG = "en" # Language for speech recognition (for WHISPER_BINARY mode) STT_SR = 16000 # Sample rate for audio recording +# DBPATH = "gflt.db" FilePickerDir = "." 
# Directory where file picker should start FilePickerExts = "png,jpg,jpeg,gif,webp" # Comma-separated list of allowed file extensions for file picker EnableMouse = false # Enable mouse support in the UI -CharSpecificContextEnabled = false +# character specific context +CharSpecificContextEnabled = true CharSpecificContextTag = "__known_to_chars__" +AutoTurn = true diff --git a/config/config.go b/config/config.go index 381fa72..bed24f6 100644 --- a/config/config.go +++ b/config/config.go @@ -26,7 +26,11 @@ type Config struct { WriteNextMsgAs string WriteNextMsgAsCompletionAgent string SkipLLMResp bool - AutoCleanToolCallsFromCtx bool `toml:"AutoCleanToolCallsFromCtx"` + AutoCleanToolCallsFromCtx bool `toml:"AutoCleanToolCallsFromCtx"` + DBPATH string `toml:"DBPATH"` + FilePickerDir string `toml:"FilePickerDir"` + FilePickerExts string `toml:"FilePickerExts"` + EnableMouse bool `toml:"EnableMouse"` // embeddings RAGEnabled bool `toml:"RAGEnabled"` EmbedURL string `toml:"EmbedURL"` @@ -54,20 +58,17 @@ type Config struct { TTS_PROVIDER string `toml:"TTS_PROVIDER"` TTS_LANGUAGE string `toml:"TTS_LANGUAGE"` // STT - STT_TYPE string `toml:"STT_TYPE"` // WHISPER_SERVER, WHISPER_BINARY - STT_URL string `toml:"STT_URL"` - STT_SR int `toml:"STT_SR"` - STT_ENABLED bool `toml:"STT_ENABLED"` - WhisperBinaryPath string `toml:"WhisperBinaryPath"` - WhisperModelPath string `toml:"WhisperModelPath"` - STT_LANG string `toml:"STT_LANG"` - DBPATH string `toml:"DBPATH"` - FilePickerDir string `toml:"FilePickerDir"` - FilePickerExts string `toml:"FilePickerExts"` - EnableMouse bool `toml:"EnableMouse"` + STT_TYPE string `toml:"STT_TYPE"` // WHISPER_SERVER, WHISPER_BINARY + STT_URL string `toml:"STT_URL"` + STT_SR int `toml:"STT_SR"` + STT_ENABLED bool `toml:"STT_ENABLED"` + WhisperBinaryPath string `toml:"WhisperBinaryPath"` + WhisperModelPath string `toml:"WhisperModelPath"` + STT_LANG string `toml:"STT_LANG"` + // character spefic contetx CharSpecificContextEnabled bool 
`toml:"CharSpecificContextEnabled"` CharSpecificContextTag string `toml:"CharSpecificContextTag"` - AutoTurn bool + AutoTurn bool `toml:"AutoTurn"` } func LoadConfig(fn string) (*Config, error) { diff --git a/props_table.go b/props_table.go index 1b8e894..d037bb0 100644 --- a/props_table.go +++ b/props_table.go @@ -140,6 +140,9 @@ func makePropsTable(props map[string]float32) *tview.Table { addCheckboxRow("Auto turn (for cards with many chars)", cfg.AutoTurn, func(checked bool) { cfg.AutoTurn = checked }) + addCheckboxRow("Char specific context", cfg.CharSpecificContextEnabled, func(checked bool) { + cfg.CharSpecificContextEnabled = checked + }) // Add dropdowns logLevels := []string{"Debug", "Info", "Warn"} addListPopupRow("Set log level", logLevels, GetLogLevel(), func(option string) { -- cgit v1.2.3 From 76f14ce4a376bbbb99c79cc2090c067b5ba28484 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 3 Feb 2026 16:56:31 +0300 Subject: Enha: detailed error --- bot.go | 116 ++++++++++++++++++++++++++++++++++++++++++++++++------- helpfuncs.go | 22 +++++++++++ llm.go | 11 ++++-- models/models.go | 24 ++++++++---- 4 files changed, 148 insertions(+), 25 deletions(-) diff --git a/bot.go b/bot.go index b836824..120a3fb 100644 --- a/bot.go +++ b/bot.go @@ -498,6 +498,58 @@ func monitorModelLoad(modelID string) { }() } + +// extractDetailedErrorFromBytes extracts detailed error information from response body bytes +func extractDetailedErrorFromBytes(body []byte, statusCode int) string { + // Try to parse as JSON to extract detailed error information + var errorResponse map[string]interface{} + if err := json.Unmarshal(body, &errorResponse); err == nil { + // Check if it's an error response with detailed information + if errorData, ok := errorResponse["error"]; ok { + if errorMap, ok := errorData.(map[string]interface{}); ok { + var errorMsg string + if msg, ok := errorMap["message"]; ok { + errorMsg = fmt.Sprintf("%v", msg) + } + + var details []string + if code, ok := 
errorMap["code"]; ok { + details = append(details, fmt.Sprintf("Code: %v", code)) + } + + if metadata, ok := errorMap["metadata"]; ok { + // Handle metadata which might contain raw error details + if metadataMap, ok := metadata.(map[string]interface{}); ok { + if raw, ok := metadataMap["raw"]; ok { + // Parse the raw error string if it's JSON + var rawError map[string]interface{} + if rawStr, ok := raw.(string); ok && json.Unmarshal([]byte(rawStr), &rawError) == nil { + if rawErrorData, ok := rawError["error"]; ok { + if rawErrorMap, ok := rawErrorData.(map[string]interface{}); ok { + if rawMsg, ok := rawErrorMap["message"]; ok { + return fmt.Sprintf("API Error: %s", rawMsg) + } + } + } + } + } + } + details = append(details, fmt.Sprintf("Metadata: %v", metadata)) + } + + if len(details) > 0 { + return fmt.Sprintf("API Error: %s (%s)", errorMsg, strings.Join(details, ", ")) + } + + return "API Error: " + errorMsg + } + } + } + + // If not a structured error response, return the raw body with status + return fmt.Sprintf("HTTP Status: %d, Response Body: %s", statusCode, string(body)) +} + // sendMsgToLLM expects streaming resp func sendMsgToLLM(body io.Reader) { choseChunkParser() @@ -524,6 +576,33 @@ func sendMsgToLLM(body io.Reader) { streamDone <- true return } + + // Check if the initial response is an error before starting to stream + if resp.StatusCode >= 400 { + // Read the response body to get detailed error information + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + logger.Error("failed to read error response body", "error", err, "status_code", resp.StatusCode) + detailedError := fmt.Sprintf("HTTP Status: %d, Failed to read response body: %v", resp.StatusCode, err) + if err := notifyUser("API Error", detailedError); err != nil { + logger.Error("failed to notify", "error", err) + } + resp.Body.Close() + streamDone <- true + return + } + + // Parse the error response for detailed information + detailedError := 
extractDetailedErrorFromBytes(bodyBytes, resp.StatusCode) + logger.Error("API returned error status", "status_code", resp.StatusCode, "detailed_error", detailedError) + if err := notifyUser("API Error", detailedError); err != nil { + logger.Error("failed to notify", "error", err) + } + resp.Body.Close() + streamDone <- true + return + } + defer resp.Body.Close() reader := bufio.NewReader(resp.Body) counter := uint32(0) @@ -541,11 +620,23 @@ func sendMsgToLLM(body io.Reader) { } line, err := reader.ReadBytes('\n') if err != nil { - logger.Error("error reading response body", "error", err, "line", string(line), - "user_role", cfg.UserRole, "parser", chunkParser, "link", cfg.CurrentAPI) - // if err.Error() != "EOF" { - if err := notifyUser("API error", err.Error()); err != nil { - logger.Error("failed to notify", "error", err) + // Check if this is an EOF error and if the response contains detailed error information + if err == io.EOF { + // For streaming responses, we may have already consumed the error body + // So we'll use the original status code to provide context + detailedError := fmt.Sprintf("Streaming connection closed unexpectedly (Status: %d). This may indicate an API error. 
Check your API provider and model settings.", resp.StatusCode) + logger.Error("error reading response body", "error", err, "detailed_error", detailedError, + "status_code", resp.StatusCode, "user_role", cfg.UserRole, "parser", chunkParser, "link", cfg.CurrentAPI) + if err := notifyUser("API Error", detailedError); err != nil { + logger.Error("failed to notify", "error", err) + } + } else { + logger.Error("error reading response body", "error", err, "line", string(line), + "user_role", cfg.UserRole, "parser", chunkParser, "link", cfg.CurrentAPI) + // if err.Error() != "EOF" { + if err := notifyUser("API error", err.Error()); err != nil { + logger.Error("failed to notify", "error", err) + } } streamDone <- true break @@ -798,7 +889,7 @@ out: for i, msg := range chatBody.Messages { logger.Debug("chatRound: after cleaning", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) } - colorText() + refreshChatDisplay() updateStatusLine() // bot msg is done; // now check it for func call @@ -1255,16 +1346,15 @@ func triggerPrivateMessageResponses(msg models.RoleMsg) { // weird cases, skip continue } - // Skip if this is the user character or the sender of the message + // Skip if this is the user character (user handles their own turn) + // If user is in KnownTo, stop processing - it's the user's turn if recipient == cfg.UserRole || recipient == userCharacter { - return // user in known_to => users turn + return // user in known_to => user's turn } - // Trigger the recipient character to respond by simulating a prompt - // that indicates it's their turn - triggerMsg := recipient + ":\n" - // Call chatRound with the trigger message to make the recipient respond + // Trigger the recipient character to respond + // Send empty message so LLM continues naturally from the conversation crr := &models.ChatRoundReq{ - UserMsg: triggerMsg, + UserMsg: "", // Empty message - LLM will continue the conversation 
Role: recipient, } chatRoundChan <- crr diff --git a/helpfuncs.go b/helpfuncs.go index 849b0a0..49069a2 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -22,6 +22,28 @@ func isASCII(s string) bool { return true } +// refreshChatDisplay updates the chat display based on current character view +// It filters messages for the character the user is currently "writing as" +// and updates the textView with the filtered conversation +func refreshChatDisplay() { + // Determine which character's view to show + viewingAs := cfg.UserRole + if cfg.WriteNextMsgAs != "" { + viewingAs = cfg.WriteNextMsgAs + } + // Filter messages for this character + filteredMessages := filterMessagesForCharacter(chatBody.Messages, viewingAs) + displayText := chatToText(filteredMessages, cfg.ShowSys) + // Use QueueUpdate for thread-safe UI updates + app.QueueUpdate(func() { + textView.SetText(displayText) + colorText() + if scrollToEndEnabled { + textView.ScrollToEnd() + } + }) +} + func colorText() { text := textView.GetText(false) quoteReplacer := strings.NewReplacer( diff --git a/llm.go b/llm.go index b7f28de..e43cc71 100644 --- a/llm.go +++ b/llm.go @@ -204,7 +204,8 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro } logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) - payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, defaultLCPProps, chatBody.MakeStopSlice()) + payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, + defaultLCPProps, chatBody.MakeStopSliceExcluding(botPersona, listChatRoles())) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) @@ -436,7 +437,8 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt) 
payload := models.NewDSCompletionReq(prompt, chatBody.Model, - defaultLCPProps["temp"], chatBody.MakeStopSlice()) + defaultLCPProps["temp"], + chatBody.MakeStopSliceExcluding(botPersona, listChatRoles())) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) @@ -594,10 +596,11 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader if cfg.ThinkUse && !cfg.ToolUse { prompt += "" } - ss := chatBody.MakeStopSlice() + ss := chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()) logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "stop_strings", ss) - payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt, defaultLCPProps, ss) + payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt, + defaultLCPProps, ss) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) diff --git a/models/models.go b/models/models.go index 76ef183..340cb42 100644 --- a/models/models.go +++ b/models/models.go @@ -369,14 +369,22 @@ func (cb *ChatBody) ListRoles() []string { } func (cb *ChatBody) MakeStopSlice() []string { - namesMap := make(map[string]struct{}) - for _, m := range cb.Messages { - namesMap[m.Role] = struct{}{} - } - ss := make([]string, 0, 1+len(namesMap)) - ss = append(ss, "<|im_end|>") - for k := range namesMap { - ss = append(ss, k+":\n") + return cb.MakeStopSliceExcluding("", cb.ListRoles()) +} + +func (cb *ChatBody) MakeStopSliceExcluding( + excludeRole string, roleList []string, +) []string { + ss := []string{} + for _, role := range roleList { + // Skip the excluded role (typically the current speaker) + if role == excludeRole { + continue + } + // Add multiple variations to catch different formatting + ss = append(ss, role+":\n") // Most common: role with newline + ss = append(ss, role+":") // Role with colon but no newline + ss = append(ss, role+": ") 
// Role with colon and space } return ss } -- cgit v1.2.3 From 654d6a47ec2d991277e87ca5b2144076eb9f7458 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 3 Feb 2026 19:06:09 +0300 Subject: Fix: trigger auto turn cannot be empty empty message means to continue merging new reply to the last message --- bot.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot.go b/bot.go index 120a3fb..13d488a 100644 --- a/bot.go +++ b/bot.go @@ -498,7 +498,6 @@ func monitorModelLoad(modelID string) { }() } - // extractDetailedErrorFromBytes extracts detailed error information from response body bytes func extractDetailedErrorFromBytes(body []byte, statusCode int) string { // Try to parse as JSON to extract detailed error information @@ -1352,9 +1351,10 @@ func triggerPrivateMessageResponses(msg models.RoleMsg) { return // user in known_to => user's turn } // Trigger the recipient character to respond + triggerMsg := recipient + ":\n" // Send empty message so LLM continues naturally from the conversation crr := &models.ChatRoundReq{ - UserMsg: "", // Empty message - LLM will continue the conversation + UserMsg: triggerMsg, Role: recipient, } chatRoundChan <- crr -- cgit v1.2.3 From e3965db3c7e7f5e3cdbf5d03ac06103c2709c0d8 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Wed, 4 Feb 2026 08:26:30 +0300 Subject: Enha: use slices methods --- bot.go | 12 +++--------- helpfuncs.go | 2 ++ tui.go | 3 --- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/bot.go b/bot.go index 13d488a..a28097f 100644 --- a/bot.go +++ b/bot.go @@ -19,6 +19,7 @@ import ( "os" "path" "regexp" + "slices" "strconv" "strings" "sync" @@ -154,15 +155,8 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m filtered = append(filtered, msg) continue } - // Check if character is in KnownTo list - found := false - for _, k := range msg.KnownTo { - if k == character { - found = true - break - } - } - if found { + if slices.Contains(msg.KnownTo, 
character) { + // Check if character is in KnownTo lis filtered = append(filtered, msg) } } diff --git a/helpfuncs.go b/helpfuncs.go index 49069a2..7033f04 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -7,6 +7,7 @@ import ( "image" "os" "path" + "slices" "strings" "unicode" @@ -198,6 +199,7 @@ func listRolesWithUser() []string { } // Prepend user role to the beginning of the list result := append([]string{cfg.UserRole}, filteredRoles...) + slices.Sort(result) return result } diff --git a/tui.go b/tui.go index e164423..8b1c520 100644 --- a/tui.go +++ b/tui.go @@ -1187,7 +1187,6 @@ func init() { persona = cfg.WriteNextMsgAs } roles := listRolesWithUser() - logger.Info("list roles", "roles", roles) for i, role := range roles { if strings.EqualFold(role, persona) { if i == len(roles)-1 { @@ -1197,7 +1196,6 @@ func init() { } cfg.WriteNextMsgAs = roles[i+1] // get next role persona = cfg.WriteNextMsgAs - // logger.Info("picked role", "roles", roles, "index", i+1) break } } @@ -1228,7 +1226,6 @@ func init() { break } cfg.WriteNextMsgAsCompletionAgent = roles[i+1] // get next role - // logger.Info("picked role", "roles", roles, "index", i+1) break } } -- cgit v1.2.3 From 79861e7c2bc6f2ed95309ca6e83577ddc4e2c63a Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Wed, 4 Feb 2026 11:22:17 +0300 Subject: Enha: privateMessageResp with resume --- bot.go | 36 ++++++++++++++---------------------- char-specific-context.md | 3 +++ llm.go | 39 ++++++++++++++++++++++++--------------- 3 files changed, 41 insertions(+), 37 deletions(-) diff --git a/bot.go b/bot.go index a28097f..d195431 100644 --- a/bot.go +++ b/bot.go @@ -96,8 +96,10 @@ func parseKnownToTag(content string) []string { if list == "" { continue } - parts := strings.Split(list, ",") - for _, p := range parts { + strings.SplitSeq(list, ",") + // parts := strings.Split(list, ",") + // for _, p := range parts { + for p := range strings.SplitSeq(list, ",") { p = strings.TrimSpace(p) if p != "" { knownTo = append(knownTo, p) 
@@ -118,25 +120,17 @@ func processMessageTag(msg models.RoleMsg) models.RoleMsg { // If KnownTo already set, assume tag already processed (content cleaned). // However, we still check for new tags (maybe added later). knownTo := parseKnownToTag(msg.Content) - // logger.Info("processing tags", "msg", msg.Content, "known_to", knownTo) // If tag found, replace KnownTo with new list (merge with existing?) // For simplicity, if knownTo is not nil, replace. - if knownTo != nil { - msg.KnownTo = knownTo - // Only ensure sender role is in KnownTo if there was a tag - // This means the message is intended for specific characters - if msg.Role != "" { - senderAdded := false - for _, k := range msg.KnownTo { - if k == msg.Role { - senderAdded = true - break - } - } - if !senderAdded { - msg.KnownTo = append(msg.KnownTo, msg.Role) - } - } + if knownTo == nil { + return msg + } + msg.KnownTo = knownTo + if msg.Role == "" { + return msg + } + if !slices.Contains(msg.KnownTo, msg.Role) { + msg.KnownTo = append(msg.KnownTo, msg.Role) } return msg } @@ -781,9 +775,6 @@ func chatWatcher(ctx context.Context) { } func chatRound(r *models.ChatRoundReq) error { - // chunkChan := make(chan string, 10) - // openAIToolChan := make(chan string, 10) - // streamDone := make(chan bool, 1) botRespMode = true botPersona := cfg.AssistantRole if cfg.WriteNextMsgAsCompletionAgent != "" { @@ -1350,6 +1341,7 @@ func triggerPrivateMessageResponses(msg models.RoleMsg) { crr := &models.ChatRoundReq{ UserMsg: triggerMsg, Role: recipient, + Resume: true, } chatRoundChan <- crr } diff --git a/char-specific-context.md b/char-specific-context.md index 423572b..f06fd75 100644 --- a/char-specific-context.md +++ b/char-specific-context.md @@ -130,6 +130,9 @@ Card example: Character‑specific context relies on the `/completion` endpoint (or other completion‑style endpoints) where the LLM is presented with a raw text prompt containing the entire filtered history. 
It does **not** work with OpenAI‑style `/v1/chat/completions` endpoints, because those endpoints enforce a fixed role set (`user`/`assistant`/`system`) and strip custom role names and metadata. +### TTS +Although text message might be hidden from user character. If TTS is enabled it will be read. + ### Tag Parsing - The tag is case‑sensitive. diff --git a/llm.go b/llm.go index e43cc71..30fc0ec 100644 --- a/llm.go +++ b/llm.go @@ -138,7 +138,8 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro if localImageAttachmentPath != "" { imageURL, err := models.CreateImageURLFromPath(localImageAttachmentPath) if err != nil { - logger.Error("failed to create image URL from path for completion", "error", err, "path", localImageAttachmentPath) + logger.Error("failed to create image URL from path for completion", + "error", err, "path", localImageAttachmentPath) return nil, err } // Extract base64 part from data URL (e.g., "data:image/jpeg;base64,...") @@ -166,15 +167,16 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Error("failed to form a rag msg", "error", err) return nil, err } - logger.Debug("RAG response received", "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } } + // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { - // add to chat body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } filteredMessages, botPersona := 
filterMessagesForCurrentCharacter(chatBody.Messages) @@ -310,7 +312,8 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) - logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role, "content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages)) + logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role, + "content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages)) } if !resume { // if rag - add as system message to avoid conflicts with tool usage @@ -322,11 +325,13 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { logger.Error("LCPChat: failed to form a rag msg", "error", err) return nil, err } - logger.Debug("LCPChat: RAG response received", "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("LCPChat: RAG response received", + "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("LCPChat: RAG message added to chat body", "role", ragMsg.Role, "rag_content_len", len(ragMsg.Content), "message_count_after_rag", len(chatBody.Messages)) + logger.Debug("LCPChat: RAG message added to chat body", "role", ragMsg.Role, + "rag_content_len", len(ragMsg.Content), "message_count_after_rag", len(chatBody.Messages)) } } filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) @@ -409,15 +414,16 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Error("DeepSeekerCompletion: failed to form a rag msg", "error", err) return nil, err } - logger.Debug("DeepSeekerCompletion: RAG response received", 
"response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("DeepSeekerCompletion: RAG response received", + "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) logger.Debug("DeepSeekerCompletion: RAG message added to chat body", "message_count", len(chatBody.Messages)) } } + // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { - // add to chat body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) @@ -494,7 +500,8 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Error("failed to form a rag msg", "error", err) return nil, err } - logger.Debug("RAG response received", "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) @@ -571,15 +578,16 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Error("failed to form a rag msg", "error", err) return nil, err } - logger.Debug("RAG response received", "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("RAG response received", "response_len", + len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage 
ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } } + // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { - // add to chat body chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg}) } filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) @@ -596,11 +604,11 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader if cfg.ThinkUse && !cfg.ToolUse { prompt += "" } - ss := chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()) + stopSlice := chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()) logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, - "msg", msg, "resume", resume, "prompt", prompt, "stop_strings", ss) + "msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice) payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt, - defaultLCPProps, ss) + defaultLCPProps, stopSlice) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) @@ -687,7 +695,8 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Error("failed to form a rag msg", "error", err) return nil, err } - logger.Debug("RAG response received", "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) // Use system role for RAG context to avoid conflicts with tool usage ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} chatBody.Messages = append(chatBody.Messages, ragMsg) -- cgit v1.2.3 From 7187df509fe9cc506695a1036b840e03eeb25cff Mon Sep 17 00:00:00 
2001 From: Grail Finder Date: Wed, 4 Feb 2026 12:47:54 +0300 Subject: Enha: stricter stop string --- bot.go | 20 -------------------- llm.go | 6 +++--- models/models.go | 9 ++++++--- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/bot.go b/bot.go index d195431..c396d07 100644 --- a/bot.go +++ b/bot.go @@ -861,18 +861,7 @@ out: newMsg = processMessageTag(newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - logger.Debug("chatRound: before cleanChatBody", "messages_before_clean", len(chatBody.Messages)) - for i, msg := range chatBody.Messages { - logger.Debug("chatRound: before cleaning", "index", i, - "role", msg.Role, "content_len", len(msg.Content), - "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) - } - // // Clean null/empty messages to prevent API issues with endpoints like llama.cpp jinja template cleanChatBody() - logger.Debug("chatRound: after cleanChatBody", "messages_after_clean", len(chatBody.Messages)) - for i, msg := range chatBody.Messages { - logger.Debug("chatRound: after cleaning", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) - } refreshChatDisplay() updateStatusLine() // bot msg is done; @@ -901,19 +890,10 @@ func cleanChatBody() { if chatBody == nil || chatBody.Messages == nil { return } - originalLen := len(chatBody.Messages) - logger.Debug("cleanChatBody: before cleaning", "message_count", originalLen) - for i, msg := range chatBody.Messages { - logger.Debug("cleanChatBody: before clean", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) - } // Tool request cleaning is now configurable via AutoCleanToolCallsFromCtx (default false) // /completion msg where part meant for user and other part tool call chatBody.Messages = cleanToolCalls(chatBody.Messages) chatBody.Messages = consolidateAssistantMessages(chatBody.Messages) - 
logger.Debug("cleanChatBody: after cleaning", "original_len", originalLen, "new_len", len(chatBody.Messages)) - for i, msg := range chatBody.Messages { - logger.Debug("cleanChatBody: after clean", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID) - } } // convertJSONToMapStringString unmarshals JSON into map[string]interface{} and converts all values to strings. diff --git a/llm.go b/llm.go index 30fc0ec..95de1d8 100644 --- a/llm.go +++ b/llm.go @@ -207,7 +207,7 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, - defaultLCPProps, chatBody.MakeStopSliceExcluding(botPersona, listChatRoles())) + defaultLCPProps, chatBody.MakeStopSliceExcluding("", listChatRoles())) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) @@ -444,7 +444,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader "msg", msg, "resume", resume, "prompt", prompt) payload := models.NewDSCompletionReq(prompt, chatBody.Model, defaultLCPProps["temp"], - chatBody.MakeStopSliceExcluding(botPersona, listChatRoles())) + chatBody.MakeStopSliceExcluding("", listChatRoles())) data, err := json.Marshal(payload) if err != nil { logger.Error("failed to form a msg", "error", err) @@ -604,7 +604,7 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader if cfg.ThinkUse && !cfg.ToolUse { prompt += "" } - stopSlice := chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()) + stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles()) logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, 
"stop_strings", stopSlice) payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt, diff --git a/models/models.go b/models/models.go index 340cb42..e99832a 100644 --- a/models/models.go +++ b/models/models.go @@ -382,9 +382,12 @@ func (cb *ChatBody) MakeStopSliceExcluding( continue } // Add multiple variations to catch different formatting - ss = append(ss, role+":\n") // Most common: role with newline - ss = append(ss, role+":") // Role with colon but no newline - ss = append(ss, role+": ") // Role with colon and space + ss = append(ss, role+":\n") // Most common: role with newline + ss = append(ss, role+":") // Role with colon but no newline + ss = append(ss, role+": ") // Role with colon and single space + ss = append(ss, role+": ") // Role with colon and double space (common tokenization) + ss = append(ss, role+": \n") // Role with colon and double space (common tokenization) + ss = append(ss, role+": ") // Role with colon and triple space } return ss } -- cgit v1.2.3 From 685738a5a4f7488a0f1b87d360c71912aa575d65 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Wed, 4 Feb 2026 13:54:54 +0300 Subject: Enha: force stop string on client side --- bot.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bot.go b/bot.go index c396d07..bbb3f65 100644 --- a/bot.go +++ b/bot.go @@ -540,6 +540,8 @@ func extractDetailedErrorFromBytes(body []byte, statusCode int) string { // sendMsgToLLM expects streaming resp func sendMsgToLLM(body io.Reader) { choseChunkParser() + // openrouter does not respect stop strings, so we have to cut the message ourselves + stopStrings := chatBody.MakeStopSliceExcluding("", listChatRoles()) req, err := http.NewRequest("POST", cfg.CurrentAPI, body) if err != nil { logger.Error("newreq error", "error", err) @@ -678,6 +680,12 @@ func sendMsgToLLM(body io.Reader) { } // bot sends way too many \n answerText = strings.ReplaceAll(chunk.Chunk, "\n\n", "\n") + // Accumulate text to check for stop strings that might span across chunks 
+ // check if chunk is in stopstrings => stop + if slices.Contains(stopStrings, answerText) { + logger.Debug("Stop string detected and handled", "stop_string", answerText) + streamDone <- true + } chunkChan <- answerText openAIToolChan <- chunk.ToolChunk if chunk.FuncName != "" { -- cgit v1.2.3 From d0722c6f98aa4755f271aefdd8af1cca28fb6f35 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Feb 2026 08:24:51 +0300 Subject: Fix: add regen param for f2 --- helpfuncs.go | 1 - tui.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/helpfuncs.go b/helpfuncs.go index 7033f04..dff53b9 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -91,7 +91,6 @@ func colorText() { for i, cb := range codeBlocks { text = strings.Replace(text, fmt.Sprintf(placeholder, i), cb, 1) } - logger.Debug("thinking debug", "blocks", thinkBlocks) for i, tb := range thinkBlocks { text = strings.Replace(text, fmt.Sprintf(placeholderThink, i), tb, 1) } diff --git a/tui.go b/tui.go index 8b1c520..c1f2917 100644 --- a/tui.go +++ b/tui.go @@ -874,7 +874,7 @@ func init() { // lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) // go chatRound("", cfg.UserRole, textView, true, false) - chatRoundChan <- &models.ChatRoundReq{Role: cfg.UserRole} + chatRoundChan <- &models.ChatRoundReq{Role: cfg.UserRole, Regen: true} return nil } if event.Key() == tcell.KeyF3 && !botRespMode { -- cgit v1.2.3 From 478a505869bf26b15dcbc77feb2c09c1f2ff4aac Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Feb 2026 11:32:06 +0300 Subject: Enha: client stop string for completion only --- .golangci.yml | 9 ++++++++- bot.go | 6 ++++-- llm.go | 27 ++++++++++++++++++++++++++- models/models.go | 7 +++++++ 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 2c7e552..ce57300 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,7 @@ version: "2" run: - concurrency: 2 + timeout: 1m + 
concurrency: 4 tests: false linters: default: none @@ -14,7 +15,13 @@ linters: - prealloc - staticcheck - unused + - gocritic + - unconvert + - wastedassign settings: + gocritic: + enabled-tags: + - performance funlen: lines: 80 statements: 50 diff --git a/bot.go b/bot.go index bbb3f65..c6c1e77 100644 --- a/bot.go +++ b/bot.go @@ -682,8 +682,10 @@ func sendMsgToLLM(body io.Reader) { answerText = strings.ReplaceAll(chunk.Chunk, "\n\n", "\n") // Accumulate text to check for stop strings that might span across chunks // check if chunk is in stopstrings => stop - if slices.Contains(stopStrings, answerText) { - logger.Debug("Stop string detected and handled", "stop_string", answerText) + // this check is needed only for openrouter /v1/completion, since it does not respect stop slice + if chunkParser.GetAPIType() == models.APITypeCompletion && + slices.Contains(stopStrings, answerText) { + logger.Debug("stop string detected on client side for completion endpoint", "stop_string", answerText) streamDone <- true } chunkChan <- answerText diff --git a/llm.go b/llm.go index 95de1d8..b2cd5e2 100644 --- a/llm.go +++ b/llm.go @@ -78,6 +78,7 @@ type ChunkParser interface { ParseChunk([]byte) (*models.TextChunk, error) FormMsg(msg, role string, cont bool) (io.Reader, error) GetToken() string + GetAPIType() models.APIType } func choseChunkParser() { @@ -127,6 +128,10 @@ type OpenRouterChat struct { Model string } +func (lcp LCPCompletion) GetAPIType() models.APIType { + return models.APITypeCompletion +} + func (lcp LCPCompletion) GetToken() string { return "" } @@ -233,7 +238,11 @@ func (lcp LCPCompletion) ParseChunk(data []byte) (*models.TextChunk, error) { return resp, nil } -func (op LCPChat) GetToken() string { +func (lcp LCPChat) GetAPIType() models.APIType { + return models.APITypeChat +} + +func (lcp LCPChat) GetToken() string { return "" } @@ -371,6 +380,10 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { } // deepseek +func (ds 
DeepSeekerCompletion) GetAPIType() models.APIType { + return models.APITypeCompletion +} + func (ds DeepSeekerCompletion) ParseChunk(data []byte) (*models.TextChunk, error) { llmchunk := models.DSCompletionResp{} if err := json.Unmarshal(data, &llmchunk); err != nil { @@ -453,6 +466,10 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader return bytes.NewReader(data), nil } +func (ds DeepSeekerChat) GetAPIType() models.APIType { + return models.APITypeChat +} + func (ds DeepSeekerChat) ParseChunk(data []byte) (*models.TextChunk, error) { llmchunk := models.DSChatStreamResp{} if err := json.Unmarshal(data, &llmchunk); err != nil { @@ -539,6 +556,10 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } // openrouter +func (or OpenRouterCompletion) GetAPIType() models.APIType { + return models.APITypeCompletion +} + func (or OpenRouterCompletion) ParseChunk(data []byte) (*models.TextChunk, error) { llmchunk := models.OpenRouterCompletionResp{} if err := json.Unmarshal(data, &llmchunk); err != nil { @@ -618,6 +639,10 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader } // chat +func (or OpenRouterChat) GetAPIType() models.APIType { + return models.APITypeChat +} + func (or OpenRouterChat) ParseChunk(data []byte) (*models.TextChunk, error) { llmchunk := models.OpenRouterChatResp{} if err := json.Unmarshal(data, &llmchunk); err != nil { diff --git a/models/models.go b/models/models.go index e99832a..4133a7c 100644 --- a/models/models.go +++ b/models/models.go @@ -558,3 +558,10 @@ type ChatRoundReq struct { Regen bool Resume bool } + +type APIType int + +const ( + APITypeChat APIType = iota + APITypeCompletion +) -- cgit v1.2.3 From 4af866079c3f21eab12b02c3158567539ca40c50 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Feb 2026 12:42:06 +0300 Subject: Chore: linter complaints --- agent/request.go | 14 ++++++----- bot.go | 21 +++++++++-------- bot_test.go | 2 +- llm.go 
| 12 +++++----- models/db.go | 2 +- models/models.go | 32 ++++++++++++++------------ models/openrouter.go | 3 ++- pngmeta/altwriter.go | 2 +- props_table.go | 7 +++--- rag/rag.go | 4 ++-- tables.go | 65 +++++++++++++++++++++++++++------------------------- tui.go | 25 +++++++++----------- 12 files changed, 98 insertions(+), 91 deletions(-) diff --git a/agent/request.go b/agent/request.go index bb4a80d..14009dd 100644 --- a/agent/request.go +++ b/agent/request.go @@ -77,17 +77,18 @@ func (ag *AgentClient) buildRequest(sysprompt, msg string) ([]byte, error) { } prompt := strings.TrimSpace(sb.String()) - if isDeepSeek { + switch { + case isDeepSeek: // DeepSeek completion req := models.NewDSCompletionReq(prompt, model, defaultProps["temperature"], []string{}) req.Stream = false // Agents don't need streaming return json.Marshal(req) - } else if isOpenRouter { + case isOpenRouter: // OpenRouter completion req := models.NewOpenRouterCompletionReq(model, prompt, defaultProps, []string{}) req.Stream = false // Agents don't need streaming return json.Marshal(req) - } else { + default: // Assume llama.cpp completion req := models.NewLCPReq(prompt, model, nil, defaultProps, []string{}) req.Stream = false // Agents don't need streaming @@ -103,15 +104,16 @@ func (ag *AgentClient) buildRequest(sysprompt, msg string) ([]byte, error) { Messages: messages, } - if isDeepSeek { + switch { + case isDeepSeek: // DeepSeek chat req := models.NewDSChatReq(*chatBody) return json.Marshal(req) - } else if isOpenRouter { + case isOpenRouter: // OpenRouter chat req := models.NewOpenRouterChatReq(*chatBody, defaultProps) return json.Marshal(req) - } else { + default: // Assume llama.cpp chat (OpenAI format) req := models.OpenAIReq{ ChatBody: chatBody, diff --git a/bot.go b/bot.go index c6c1e77..2af0453 100644 --- a/bot.go +++ b/bot.go @@ -113,7 +113,7 @@ func parseKnownToTag(content string) []string { // processMessageTag processes a message for known_to tag and sets KnownTo field. 
// It also ensures the sender's role is included in KnownTo. // If KnownTo already set (e.g., from DB), preserves it unless new tag found. -func processMessageTag(msg models.RoleMsg) models.RoleMsg { +func processMessageTag(msg *models.RoleMsg) *models.RoleMsg { if cfg == nil || !cfg.CharSpecificContextEnabled { return msg } @@ -297,7 +297,8 @@ func warmUpModel() { go func() { var data []byte var err error - if strings.HasSuffix(cfg.CurrentAPI, "/completion") { + switch { + case strings.HasSuffix(cfg.CurrentAPI, "/completion"): // Old completion endpoint req := models.NewLCPReq(".", chatBody.Model, nil, map[string]float32{ "temperature": 0.8, @@ -307,7 +308,7 @@ func warmUpModel() { }, []string{}) req.Stream = false data, err = json.Marshal(req) - } else if strings.Contains(cfg.CurrentAPI, "/v1/chat/completions") { + case strings.Contains(cfg.CurrentAPI, "/v1/chat/completions"): // OpenAI-compatible chat endpoint req := models.OpenAIReq{ ChatBody: &models.ChatBody{ @@ -320,7 +321,7 @@ func warmUpModel() { Tools: nil, } data, err = json.Marshal(req) - } else { + default: // Unknown local endpoint, skip return } @@ -861,14 +862,14 @@ out: // lastM.Content = lastM.Content + respText.String() // Process the updated message to check for known_to tags in resumed response updatedMsg := chatBody.Messages[len(chatBody.Messages)-1] - processedMsg := processMessageTag(updatedMsg) - chatBody.Messages[len(chatBody.Messages)-1] = processedMsg + processedMsg := processMessageTag(&updatedMsg) + chatBody.Messages[len(chatBody.Messages)-1] = *processedMsg } else { newMsg := models.RoleMsg{ Role: botPersona, Content: respText.String(), } // Process the new message to check for known_to tags in LLM response - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } cleanChatBody() @@ -889,7 +890,7 @@ out: if cfg.AutoTurn { lastMsg := chatBody.Messages[len(chatBody.Messages)-1] if len(lastMsg.KnownTo) > 0 { - 
triggerPrivateMessageResponses(lastMsg) + triggerPrivateMessageResponses(&lastMsg) } } return nil @@ -970,7 +971,7 @@ func unmarshalFuncCall(jsonStr string) (*models.FuncCall, error) { // findCall: adds chatRoundReq into the chatRoundChan and returns true if does func findCall(msg, toolCall string) bool { - fc := &models.FuncCall{} + var fc *models.FuncCall if toolCall != "" { // HTML-decode the tool call string to handle encoded characters like < -> <= decodedToolCall := html.UnescapeString(toolCall) @@ -1306,7 +1307,7 @@ func init() { // triggerPrivateMessageResponses checks if a message was sent privately to specific characters // and triggers those non-user characters to respond -func triggerPrivateMessageResponses(msg models.RoleMsg) { +func triggerPrivateMessageResponses(msg *models.RoleMsg) { if cfg == nil || !cfg.CharSpecificContextEnabled { return } diff --git a/bot_test.go b/bot_test.go index 4cbe953..1710003 100644 --- a/bot_test.go +++ b/bot_test.go @@ -506,7 +506,7 @@ func TestProcessMessageTag(t *testing.T) { CharSpecificContextTag: tt.tag, } cfg = testCfg - got := processMessageTag(tt.msg) + got := processMessageTag(&tt.msg) if len(got.KnownTo) != len(tt.wantMsg.KnownTo) { t.Errorf("processMessageTag() KnownTo length = %v, want %v", len(got.KnownTo), len(tt.wantMsg.KnownTo)) t.Logf("got: %v", got.KnownTo) diff --git a/llm.go b/llm.go index b2cd5e2..c77495e 100644 --- a/llm.go +++ b/llm.go @@ -159,7 +159,7 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro } if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -319,7 +319,7 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { // Create a simple text message newMsg = models.NewRoleMsg(role, msg) } - newMsg = processMessageTag(newMsg) + newMsg = 
*processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role, "content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages)) @@ -413,7 +413,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader } if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -504,7 +504,7 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro } if msg != "" { // otherwise let the bot continue newMsg := models.RoleMsg{Role: role, Content: msg} - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -586,7 +586,7 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader logger.Debug("formmsg openroutercompletion", "link", cfg.CurrentAPI) if msg != "" { // otherwise let the bot to continue newMsg := models.RoleMsg{Role: role, Content: msg} - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { @@ -707,7 +707,7 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro // Create a simple text message newMsg = models.NewRoleMsg(role, msg) } - newMsg = processMessageTag(newMsg) + newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } if !resume { diff --git a/models/db.go b/models/db.go index 090f46d..73a0b53 100644 --- a/models/db.go +++ b/models/db.go @@ -14,7 +14,7 @@ type Chat struct { UpdatedAt time.Time `db:"updated_at" json:"updated_at"` } -func (c Chat) ToHistory() ([]RoleMsg, error) { +func (c *Chat) ToHistory() ([]RoleMsg, error) { resp := 
[]RoleMsg{} if err := json.Unmarshal([]byte(c.Msgs), &resp); err != nil { return nil, err diff --git a/models/models.go b/models/models.go index 4133a7c..34e3dcf 100644 --- a/models/models.go +++ b/models/models.go @@ -98,7 +98,7 @@ type RoleMsg struct { } // MarshalJSON implements custom JSON marshaling for RoleMsg -func (m RoleMsg) MarshalJSON() ([]byte, error) { +func (m *RoleMsg) MarshalJSON() ([]byte, error) { if m.hasContentParts { // Use structured content format aux := struct { @@ -166,11 +166,11 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { return nil } -func (m RoleMsg) ToText(i int) string { +func (m *RoleMsg) ToText(i int) string { icon := fmt.Sprintf("(%d)", i) // Convert content to string representation - contentStr := "" + var contentStr string if !m.hasContentParts { contentStr = m.Content } else { @@ -198,8 +198,8 @@ func (m RoleMsg) ToText(i int) string { return strings.ReplaceAll(textMsg, "\n\n", "\n") } -func (m RoleMsg) ToPrompt() string { - contentStr := "" +func (m *RoleMsg) ToPrompt() string { + var contentStr string if !m.hasContentParts { contentStr = m.Content } else { @@ -240,7 +240,7 @@ func NewMultimodalMsg(role string, contentParts []interface{}) RoleMsg { } // HasContent returns true if the message has either string content or structured content parts -func (m RoleMsg) HasContent() bool { +func (m *RoleMsg) HasContent() bool { if m.Content != "" { return true } @@ -251,17 +251,17 @@ func (m RoleMsg) HasContent() bool { } // IsContentParts returns true if the message uses structured content parts -func (m RoleMsg) IsContentParts() bool { +func (m *RoleMsg) IsContentParts() bool { return m.hasContentParts } // GetContentParts returns the content parts of the message -func (m RoleMsg) GetContentParts() []interface{} { +func (m *RoleMsg) GetContentParts() []interface{} { return m.ContentParts } // Copy creates a copy of the RoleMsg with all fields -func (m RoleMsg) Copy() RoleMsg { +func (m *RoleMsg) Copy() RoleMsg { return 
RoleMsg{ Role: m.Role, Content: m.Content, @@ -382,12 +382,14 @@ func (cb *ChatBody) MakeStopSliceExcluding( continue } // Add multiple variations to catch different formatting - ss = append(ss, role+":\n") // Most common: role with newline - ss = append(ss, role+":") // Role with colon but no newline - ss = append(ss, role+": ") // Role with colon and single space - ss = append(ss, role+": ") // Role with colon and double space (common tokenization) - ss = append(ss, role+": \n") // Role with colon and double space (common tokenization) - ss = append(ss, role+": ") // Role with colon and triple space + ss = append(ss, + role+":\n", // Most common: role with newline + role+":", // Role with colon but no newline + role+": ", // Role with colon and single space + role+": ", // Role with colon and double space (common tokenization) + role+": \n", // Role with colon and double space (common tokenization) + role+": ", // Role with colon and triple space + ) } return ss } diff --git a/models/openrouter.go b/models/openrouter.go index 29ba0d8..6196498 100644 --- a/models/openrouter.go +++ b/models/openrouter.go @@ -143,7 +143,8 @@ type ORModels struct { func (orm *ORModels) ListModels(free bool) []string { resp := []string{} - for _, model := range orm.Data { + for i := range orm.Data { + model := &orm.Data[i] // Take address of element to avoid copying if free { if model.Pricing.Prompt == "0" && model.Pricing.Completion == "0" { // treat missing request as free diff --git a/pngmeta/altwriter.go b/pngmeta/altwriter.go index 206b563..76cb709 100644 --- a/pngmeta/altwriter.go +++ b/pngmeta/altwriter.go @@ -120,7 +120,7 @@ func createTextChunk(embed PngEmbed) ([]byte, error) { if err := binary.Write(chunk, binary.BigEndian, uint32(len(data))); err != nil { return nil, fmt.Errorf("error writing chunk length: %w", err) } - if _, err := chunk.Write([]byte(textChunkType)); err != nil { + if _, err := chunk.WriteString(textChunkType); err != nil { return nil, fmt.Errorf("error 
writing chunk type: %w", err) } if _, err := chunk.Write(data); err != nil { diff --git a/props_table.go b/props_table.go index d037bb0..50c8886 100644 --- a/props_table.go +++ b/props_table.go @@ -313,11 +313,12 @@ func makePropsTable(props map[string]float32) *tview.Table { logger.Warn("empty options list for", "label", label, "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels)) message := "No options available for " + label if label == "Select a model" { - if strings.Contains(cfg.CurrentAPI, "openrouter.ai") { + switch { + case strings.Contains(cfg.CurrentAPI, "openrouter.ai"): message = "No OpenRouter models available. Check token and connection." - } else if strings.Contains(cfg.CurrentAPI, "api.deepseek.com") { + case strings.Contains(cfg.CurrentAPI, "api.deepseek.com"): message = "DeepSeek models should be available. Please report bug." - } else { + default: message = "No llama.cpp models loaded. Ensure llama.cpp server is running with models." 
} } diff --git a/rag/rag.go b/rag/rag.go index b29b9eb..d8b6978 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -107,7 +107,7 @@ func (r *RAG) LoadRAG(fpath string) error { } // Adjust batch size if needed - if len(paragraphs) < int(r.cfg.RAGBatchSize) && len(paragraphs) > 0 { + if len(paragraphs) < r.cfg.RAGBatchSize && len(paragraphs) > 0 { r.cfg.RAGBatchSize = len(paragraphs) } @@ -133,7 +133,7 @@ func (r *RAG) LoadRAG(fpath string) error { ctn := 0 totalParagraphs := len(paragraphs) for { - if int(right) > totalParagraphs { + if right > totalParagraphs { batchCh <- map[int][]string{left: paragraphs[left:]} break } diff --git a/tables.go b/tables.go index 4783cf6..ee2b145 100644 --- a/tables.go +++ b/tables.go @@ -30,7 +30,7 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { // Add header row (row 0) for c := 0; c < cols; c++ { color := tcell.ColorWhite - headerText := "" + var headerText string switch c { case 0: headerText = "Chat Name" @@ -259,19 +259,20 @@ func makeRAGTable(fileList []string) *tview.Flex { for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { color := tcell.ColorWhite - if c < 1 { + switch { + case c < 1: fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell(fileList[r]). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else if c == 1 { // Action description column - not selectable + case c == 1: // Action description column - not selectable fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell("(Action)"). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else { // Action button column - selectable + default: // Action button column - selectable fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell(actions[c-1]). SetTextColor(color). 
@@ -406,19 +407,20 @@ func makeLoadedRAGTable(fileList []string) *tview.Flex { for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { color := tcell.ColorWhite - if c < 1 { + switch { + case c < 1: fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell(fileList[r]). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else if c == 1 { // Action description column - not selectable + case c == 1: // Action description column - not selectable fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell("(Action)"). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else { // Action button column - selectable + default: // Action button column - selectable fileTable.SetCell(r+1, c, // +1 to account for the exit row at index 0 tview.NewTableCell(actions[c-1]). SetTextColor(color). @@ -487,13 +489,14 @@ func makeAgentTable(agentList []string) *tview.Table { for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { color := tcell.ColorWhite - if c < 1 { + switch { + case c < 1: chatActTable.SetCell(r, c, tview.NewTableCell(agentList[r]). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else if c == 1 { + case c == 1: if actions[c-1] == "filepath" { cc, ok := sysMap[agentList[r]] if !ok { @@ -510,7 +513,7 @@ func makeAgentTable(agentList []string) *tview.Table { tview.NewTableCell(actions[c-1]). SetTextColor(color). SetAlign(tview.AlignCenter)) - } else { + default: chatActTable.SetCell(r, c, tview.NewTableCell(actions[c-1]). SetTextColor(color). @@ -600,13 +603,14 @@ func makeCodeBlockTable(codeBlocks []string) *tview.Table { if len(codeBlocks[r]) < 30 { previewLen = len(codeBlocks[r]) } - if c < 1 { + switch { + case c < 1: table.SetCell(r, c, tview.NewTableCell(codeBlocks[r][:previewLen]). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else { + default: table.SetCell(r, c, tview.NewTableCell(actions[c-1]). 
SetTextColor(color). @@ -671,13 +675,14 @@ func makeImportChatTable(filenames []string) *tview.Table { for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { color := tcell.ColorWhite - if c < 1 { + switch { + case c < 1: chatActTable.SetCell(r, c, tview.NewTableCell(filenames[r]). SetTextColor(color). SetAlign(tview.AlignCenter). SetSelectable(false)) - } else { + default: chatActTable.SetCell(r, c, tview.NewTableCell(actions[c-1]). SetTextColor(color). @@ -861,25 +866,23 @@ func makeFilePicker() *tview.Flex { currentStackPos = len(dirStack) - 1 statusView.SetText("Current: " + newDir) }) - } else { + } else if hasAllowedExtension(name) { // Only show files that have allowed extensions (from config) - if hasAllowedExtension(name) { - // Capture the file name for the closure to avoid loop variable issues - fileName := name - fullFilePath := path.Join(dir, fileName) - listView.AddItem(fileName+" [gray](File)[-]", "", 0, func() { - selectedFile = fullFilePath + // Capture the file name for the closure to avoid loop variable issues + fileName := name + fullFilePath := path.Join(dir, fileName) + listView.AddItem(fileName+" [gray](File)[-]", "", 0, func() { + selectedFile = fullFilePath + statusView.SetText("Selected: " + selectedFile) + // Check if the file is an image + if isImageFile(fileName) { + // For image files, offer to attach to the next LLM message + statusView.SetText("Selected image: " + selectedFile) + } else { + // For non-image files, display as before statusView.SetText("Selected: " + selectedFile) - // Check if the file is an image - if isImageFile(fileName) { - // For image files, offer to attach to the next LLM message - statusView.SetText("Selected image: " + selectedFile) - } else { - // For non-image files, display as before - statusView.SetText("Selected: " + selectedFile) - } - }) - } + } + }) } } statusView.SetText("Current: " + dir) diff --git a/tui.go b/tui.go index c1f2917..70f67f1 100644 --- a/tui.go +++ b/tui.go @@ -533,8 +533,7 @@ func 
init() { }) textView.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { // Handle vim-like navigation in TextView - switch event.Key() { - case tcell.KeyRune: + if event.Key() == tcell.KeyRune { switch event.Rune() { case 'j': // For line down @@ -672,17 +671,18 @@ func init() { return nil } m := chatBody.Messages[selectedIndex] - if roleEditMode { + switch { + case roleEditMode: hideIndexBar() // Hide overlay first // Set the current role as the default text in the input field roleEditWindow.SetText(m.Role) pages.AddPage(roleEditPage, roleEditWindow, true, true) roleEditMode = false // Reset the flag - } else if editMode { + case editMode: hideIndexBar() // Hide overlay first pages.AddPage(editMsgPage, editArea, true, true) editArea.SetText(m.Content, true) - } else { + default: if err := copyToClipboard(m.Content); err != nil { logger.Error("failed to copy to clipboard", "error", err) } @@ -760,22 +760,19 @@ func init() { pages.RemovePage(helpPage) }) helpView.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { - switch event.Key() { - case tcell.KeyEnter: + if event.Key() == tcell.KeyEnter { return event - default: - if event.Key() == tcell.KeyRune && event.Rune() == 'x' { - pages.RemovePage(helpPage) - return nil - } + } + if event.Key() == tcell.KeyRune && event.Rune() == 'x' { + pages.RemovePage(helpPage) + return nil } return nil }) // imgView = tview.NewImage() imgView.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { - switch event.Key() { - case tcell.KeyEnter: + if event.Key() == tcell.KeyEnter { pages.RemovePage(imgPage) return event } -- cgit v1.2.3 From 93284312cfdb5784654fa4817c726728739b1b34 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Feb 2026 17:11:29 +0300 Subject: Enha: auto turn role display --- bot.go | 3 +++ sysprompts/alice_bob_carl.json | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bot.go b/bot.go index 2af0453..da2424d 100644 --- a/bot.go +++ b/bot.go @@ -1334,6 +1334,9 @@ 
func triggerPrivateMessageResponses(msg *models.RoleMsg) { Role: recipient, Resume: true, } + fmt.Fprintf(textView, "\n[-:-:b](%d) ", len(chatBody.Messages)) + fmt.Fprint(textView, roleToIcon(recipient)) + fmt.Fprint(textView, "[-:-:-]\n") chatRoundChan <- crr } } diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index b2a0ac5..d575f93 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -3,5 +3,5 @@ "role": "Alice", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], - "first_msg": "Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?" + "first_msg": "\"Hey guys! Want to play Alias like game? I'll tell Bob a word and he needs to describe that word so Carl can guess what it was?\"" } -- cgit v1.2.3 From 1bf9e6eef72ec2eec7282b1554b41a0dc3d8d1b8 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Feb 2026 21:50:03 +0300 Subject: Enha: extract first valid recipient from knownto --- bot.go | 55 +++++++++++------- llm.go | 199 +++++++++++++++++++++++++++++++---------------------------------- 2 files changed, 128 insertions(+), 126 deletions(-) diff --git a/bot.go b/bot.go index da2424d..8e0e856 100644 --- a/bot.go +++ b/bot.go @@ -1305,15 +1305,17 @@ func init() { go chatWatcher(ctx) } -// triggerPrivateMessageResponses checks if a message was sent privately to specific characters -// and triggers those non-user characters to respond -func triggerPrivateMessageResponses(msg *models.RoleMsg) { +func getValidKnowToRecipient(msg *models.RoleMsg) (string, bool) { if cfg == nil || !cfg.CharSpecificContextEnabled { - return + return "", false } - userCharacter := cfg.UserRole - if cfg.WriteNextMsgAs != "" { - userCharacter = cfg.WriteNextMsgAs + // case where all roles are in the tag => public message + cr := listChatRoles() + slices.Sort(cr) + slices.Sort(msg.KnownTo) + if slices.Equal(cr, msg.KnownTo) { + 
logger.Info("got msg with tag mentioning every role") + return "", false } // Check each character in the KnownTo list for _, recipient := range msg.KnownTo { @@ -1323,20 +1325,31 @@ func triggerPrivateMessageResponses(msg *models.RoleMsg) { } // Skip if this is the user character (user handles their own turn) // If user is in KnownTo, stop processing - it's the user's turn - if recipient == cfg.UserRole || recipient == userCharacter { - return // user in known_to => user's turn - } - // Trigger the recipient character to respond - triggerMsg := recipient + ":\n" - // Send empty message so LLM continues naturally from the conversation - crr := &models.ChatRoundReq{ - UserMsg: triggerMsg, - Role: recipient, - Resume: true, + if recipient == cfg.UserRole || recipient == cfg.WriteNextMsgAs { + return "", false } - fmt.Fprintf(textView, "\n[-:-:b](%d) ", len(chatBody.Messages)) - fmt.Fprint(textView, roleToIcon(recipient)) - fmt.Fprint(textView, "[-:-:-]\n") - chatRoundChan <- crr + return recipient, true } + return "", false +} + +// triggerPrivateMessageResponses checks if a message was sent privately to specific characters +// and triggers those non-user characters to respond +func triggerPrivateMessageResponses(msg *models.RoleMsg) { + recipient, ok := getValidKnowToRecipient(msg) + if !ok || recipient == "" { + return + } + // Trigger the recipient character to respond + triggerMsg := recipient + ":\n" + // Send empty message so LLM continues naturally from the conversation + crr := &models.ChatRoundReq{ + UserMsg: triggerMsg, + Role: recipient, + Resume: true, + } + fmt.Fprintf(textView, "\n[-:-:b](%d) ", len(chatBody.Messages)) + fmt.Fprint(textView, roleToIcon(recipient)) + fmt.Fprint(textView, "[-:-:-]\n") + chatRoundChan <- crr } diff --git a/llm.go b/llm.go index c77495e..734b4fd 100644 --- a/llm.go +++ b/llm.go @@ -59,17 +59,19 @@ func ClearImageAttachment() { // filterMessagesForCurrentCharacter filters messages based on char-specific context. 
// Returns filtered messages and the bot persona role (target character). func filterMessagesForCurrentCharacter(messages []models.RoleMsg) ([]models.RoleMsg, string) { - if cfg == nil || !cfg.CharSpecificContextEnabled { - botPersona := cfg.AssistantRole - if cfg.WriteNextMsgAsCompletionAgent != "" { - botPersona = cfg.WriteNextMsgAsCompletionAgent - } - return messages, botPersona - } botPersona := cfg.AssistantRole if cfg.WriteNextMsgAsCompletionAgent != "" { botPersona = cfg.WriteNextMsgAsCompletionAgent } + if cfg == nil || !cfg.CharSpecificContextEnabled { + return messages, botPersona + } + // get last message (written by user) and checck if it has a tag + lm := messages[len(messages)-1] + recipient, ok := getValidKnowToRecipient(&lm) + if ok && recipient != "" { + botPersona = recipient + } filtered := filterMessagesForCharacter(messages, botPersona) return filtered, botPersona } @@ -162,23 +164,21 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - if !resume { - // if rag - add as system message to avoid conflicts with tool usage - if cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("RAG response received", "response_len", len(ragResp), - "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) + // if rag - add as system message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := 
chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { @@ -324,24 +324,22 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) { logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role, "content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages)) } - if !resume { - // if rag - add as system message to avoid conflicts with tool usage - if cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("LCPChat: RAG is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("LCPChat: failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("LCPChat: RAG response received", - "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("LCPChat: RAG message added to chat body", "role", ragMsg.Role, - "rag_content_len", len(ragMsg.Content), "message_count_after_rag", len(chatBody.Messages)) + // if rag - add as system 
message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("LCPChat: RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("LCPChat: failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("LCPChat: RAG response received", + "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("LCPChat: RAG message added to chat body", "role", ragMsg.Role, + "rag_content_len", len(ragMsg.Content), "message_count_after_rag", len(chatBody.Messages)) } filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) // openai /v1/chat does not support custom roles; needs to be user, assistant, system @@ -416,24 +414,21 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - if !resume { - // if rag - add as system message to avoid conflicts with tool usage - // TODO: perhaps RAG should be a func/tool call instead? 
- if cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("DeepSeekerCompletion: RAG is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("DeepSeekerCompletion: failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("DeepSeekerCompletion: RAG response received", - "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("DeepSeekerCompletion: RAG message added to chat body", "message_count", len(chatBody.Messages)) + // if rag - add as system message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("DeepSeekerCompletion: RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("DeepSeekerCompletion: failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("DeepSeekerCompletion: RAG response received", + "response_len", len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("DeepSeekerCompletion: RAG message added to chat body", "message_count", len(chatBody.Messages)) } // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { @@ -507,23 +502,21 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - if 
!resume { - // if rag - add as system message to avoid conflicts with tool usage - if cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("RAG response received", "response_len", len(ragResp), - "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) + // if rag - add as system message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } // Create copy of chat body with standardized user role filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) @@ -589,23 +582,21 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - if !resume { - // if rag - add as system message to avoid conflicts with tool usage - if 
cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("RAG response received", "response_len", - len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) + // if rag - add as system message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("RAG response received", "response_len", + len(ragResp), "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } // sending description of the tools and how to use them if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() { @@ -710,23 +701,21 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro newMsg = *processMessageTag(&newMsg) chatBody.Messages = append(chatBody.Messages, newMsg) } - if !resume { - // if rag - add as system message to avoid conflicts with tool usage - if cfg.RAGEnabled { - um := chatBody.Messages[len(chatBody.Messages)-1].Content - logger.Debug("RAG 
is enabled, preparing RAG context", "user_message", um) - ragResp, err := chatRagUse(um) - if err != nil { - logger.Error("failed to form a rag msg", "error", err) - return nil, err - } - logger.Debug("RAG response received", "response_len", len(ragResp), - "response_preview", ragResp[:min(len(ragResp), 100)]) - // Use system role for RAG context to avoid conflicts with tool usage - ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} - chatBody.Messages = append(chatBody.Messages, ragMsg) - logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) + // if rag - add as system message to avoid conflicts with tool usage + if !resume && cfg.RAGEnabled { + um := chatBody.Messages[len(chatBody.Messages)-1].Content + logger.Debug("RAG is enabled, preparing RAG context", "user_message", um) + ragResp, err := chatRagUse(um) + if err != nil { + logger.Error("failed to form a rag msg", "error", err) + return nil, err } + logger.Debug("RAG response received", "response_len", len(ragResp), + "response_preview", ragResp[:min(len(ragResp), 100)]) + // Use system role for RAG context to avoid conflicts with tool usage + ragMsg := models.RoleMsg{Role: "system", Content: RAGMsg + ragResp} + chatBody.Messages = append(chatBody.Messages, ragMsg) + logger.Debug("RAG message added to chat body", "message_count", len(chatBody.Messages)) } // Create copy of chat body with standardized user role filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages) -- cgit v1.2.3 From 77ad2a7e7e2c3bade4d949d8eb5c36e0126f4668 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 08:52:11 +0300 Subject: Enha: popups from the main window no longer user has to go to the props table to get a pleasant popup to choose an option --- popups.go | 315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ props_table.go | 48 --------- tui.go | 108 +++----------------- 3 files changed, 327 insertions(+), 144 deletions(-) create 
mode 100644 popups.go diff --git a/popups.go b/popups.go new file mode 100644 index 0000000..559a2aa --- /dev/null +++ b/popups.go @@ -0,0 +1,315 @@ +package main + +import ( + "slices" + "strings" + + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +// showModelSelectionPopup creates a modal popup to select a model +func showModelSelectionPopup() { + // Helper function to get model list for a given API + getModelListForAPI := func(api string) []string { + if strings.Contains(api, "api.deepseek.com/") { + return []string{"deepseek-chat", "deepseek-reasoner"} + } else if strings.Contains(api, "openrouter.ai") { + return ORFreeModels + } + // Assume local llama.cpp + refreshLocalModelsIfEmpty() + localModelsMu.RLock() + defer localModelsMu.RUnlock() + return LocalModels + } + // Get the current model list based on the API + modelList := getModelListForAPI(cfg.CurrentAPI) + // Check for empty options list + if len(modelList) == 0 { + logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels)) + message := "No models available for selection" + if strings.Contains(cfg.CurrentAPI, "openrouter.ai") { + message = "No OpenRouter models available. Check token and connection." + } else if strings.Contains(cfg.CurrentAPI, "api.deepseek.com") { + message = "DeepSeek models should be available. Please report bug." + } else { + message = "No llama.cpp models loaded. Ensure llama.cpp server is running with models." + } + if err := notifyUser("Empty list", message); err != nil { + logger.Error("failed to send notification", "error", err) + } + return + } + // Create a list primitive + modelListWidget := tview.NewList().ShowSecondaryText(false). 
+ SetSelectedBackgroundColor(tcell.ColorGray) + modelListWidget.SetTitle("Select Model").SetBorder(true) + // Find the current model index to set as selected + currentModelIndex := -1 + for i, model := range modelList { + if model == chatBody.Model { + currentModelIndex = i + } + modelListWidget.AddItem(model, "", 0, nil) + } + // Set the current selection if found + if currentModelIndex != -1 { + modelListWidget.SetCurrentItem(currentModelIndex) + } + modelListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) { + // Update the model in both chatBody and config + chatBody.Model = mainText + cfg.CurrentModel = chatBody.Model + // Remove the popup page + pages.RemovePage("modelSelectionPopup") + // Update the status line to reflect the change + updateStatusLine() + }) + modelListWidget.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Key() == tcell.KeyEscape { + pages.RemovePage("modelSelectionPopup") + return nil + } + return event + }) + modal := func(p tview.Primitive, width, height int) tview.Primitive { + return tview.NewFlex(). + AddItem(nil, 0, 1, false). + AddItem(tview.NewFlex().SetDirection(tview.FlexRow). + AddItem(nil, 0, 1, false). + AddItem(p, height, 1, true). + AddItem(nil, 0, 1, false), width, 1, true). 
+ AddItem(nil, 0, 1, false) + } + // Add modal page and make it visible + pages.AddPage("modelSelectionPopup", modal(modelListWidget, 80, 20), true, true) + app.SetFocus(modelListWidget) +} + +// showAPILinkSelectionPopup creates a modal popup to select an API link +func showAPILinkSelectionPopup() { + // Prepare API links dropdown - ensure current API is in the list, avoid duplicates + apiLinks := make([]string, 0, len(cfg.ApiLinks)+1) + // Add current API first if it's not already in ApiLinks + foundCurrentAPI := false + for _, api := range cfg.ApiLinks { + if api == cfg.CurrentAPI { + foundCurrentAPI = true + } + apiLinks = append(apiLinks, api) + } + // If current API is not in the list, add it at the beginning + if !foundCurrentAPI { + apiLinks = make([]string, 0, len(cfg.ApiLinks)+1) + apiLinks = append(apiLinks, cfg.CurrentAPI) + apiLinks = append(apiLinks, cfg.ApiLinks...) + } + // Check for empty options list + if len(apiLinks) == 0 { + logger.Warn("no API links available for selection") + message := "No API links available. Please configure API links in your config file." + if err := notifyUser("Empty list", message); err != nil { + logger.Error("failed to send notification", "error", err) + } + return + } + // Create a list primitive + apiListWidget := tview.NewList().ShowSecondaryText(false). 
+ SetSelectedBackgroundColor(tcell.ColorGray) + apiListWidget.SetTitle("Select API Link").SetBorder(true) + // Find the current API index to set as selected + currentAPIIndex := -1 + for i, api := range apiLinks { + if api == cfg.CurrentAPI { + currentAPIIndex = i + } + apiListWidget.AddItem(api, "", 0, nil) + } + // Set the current selection if found + if currentAPIIndex != -1 { + apiListWidget.SetCurrentItem(currentAPIIndex) + } + apiListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) { + // Update the API in config + cfg.CurrentAPI = mainText + // Update model list based on new API + // Helper function to get model list for a given API (same as in props_table.go) + getModelListForAPI := func(api string) []string { + if strings.Contains(api, "api.deepseek.com/") { + return []string{"deepseek-chat", "deepseek-reasoner"} + } else if strings.Contains(api, "openrouter.ai") { + return ORFreeModels + } + // Assume local llama.cpp + refreshLocalModelsIfEmpty() + localModelsMu.RLock() + defer localModelsMu.RUnlock() + return LocalModels + } + newModelList := getModelListForAPI(cfg.CurrentAPI) + // Ensure chatBody.Model is in the new list; if not, set to first available model + if len(newModelList) > 0 && !slices.Contains(newModelList, chatBody.Model) { + chatBody.Model = newModelList[0] + cfg.CurrentModel = chatBody.Model + } + // Remove the popup page + pages.RemovePage("apiLinkSelectionPopup") + // Update the parser and status line to reflect the change + choseChunkParser() + updateStatusLine() + }) + apiListWidget.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Key() == tcell.KeyEscape { + pages.RemovePage("apiLinkSelectionPopup") + return nil + } + return event + }) + modal := func(p tview.Primitive, width, height int) tview.Primitive { + return tview.NewFlex(). + AddItem(nil, 0, 1, false). + AddItem(tview.NewFlex().SetDirection(tview.FlexRow). + AddItem(nil, 0, 1, false). 
+ AddItem(p, height, 1, true). + AddItem(nil, 0, 1, false), width, 1, true). + AddItem(nil, 0, 1, false) + } + // Add modal page and make it visible + pages.AddPage("apiLinkSelectionPopup", modal(apiListWidget, 80, 20), true, true) + app.SetFocus(apiListWidget) +} + +// showUserRoleSelectionPopup creates a modal popup to select a user role +func showUserRoleSelectionPopup() { + // Get the list of available roles + roles := listRolesWithUser() + // Check for empty options list + if len(roles) == 0 { + logger.Warn("no roles available for selection") + message := "No roles available for selection." + if err := notifyUser("Empty list", message); err != nil { + logger.Error("failed to send notification", "error", err) + } + return + } + // Create a list primitive + roleListWidget := tview.NewList().ShowSecondaryText(false). + SetSelectedBackgroundColor(tcell.ColorGray) + roleListWidget.SetTitle("Select User Role").SetBorder(true) + // Find the current role index to set as selected + currentRole := cfg.UserRole + if cfg.WriteNextMsgAs != "" { + currentRole = cfg.WriteNextMsgAs + } + currentRoleIndex := -1 + for i, role := range roles { + if strings.EqualFold(role, currentRole) { + currentRoleIndex = i + } + roleListWidget.AddItem(role, "", 0, nil) + } + // Set the current selection if found + if currentRoleIndex != -1 { + roleListWidget.SetCurrentItem(currentRoleIndex) + } + roleListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) { + // Update the user role in config + cfg.WriteNextMsgAs = mainText + // role got switch, update textview with character specific context for user + filtered := filterMessagesForCharacter(chatBody.Messages, mainText) + textView.SetText(chatToText(filtered, cfg.ShowSys)) + // Remove the popup page + pages.RemovePage("userRoleSelectionPopup") + // Update the status line to reflect the change + updateStatusLine() + colorText() + }) + roleListWidget.SetInputCapture(func(event *tcell.EventKey) 
*tcell.EventKey { + if event.Key() == tcell.KeyEscape { + pages.RemovePage("userRoleSelectionPopup") + return nil + } + return event + }) + modal := func(p tview.Primitive, width, height int) tview.Primitive { + return tview.NewFlex(). + AddItem(nil, 0, 1, false). + AddItem(tview.NewFlex().SetDirection(tview.FlexRow). + AddItem(nil, 0, 1, false). + AddItem(p, height, 1, true). + AddItem(nil, 0, 1, false), width, 1, true). + AddItem(nil, 0, 1, false) + } + // Add modal page and make it visible + pages.AddPage("userRoleSelectionPopup", modal(roleListWidget, 80, 20), true, true) + app.SetFocus(roleListWidget) +} + +// showBotRoleSelectionPopup creates a modal popup to select a bot role +func showBotRoleSelectionPopup() { + // Get the list of available roles + roles := listChatRoles() + if len(roles) == 0 { + logger.Warn("empty roles in chat") + } + if !strInSlice(cfg.AssistantRole, roles) { + roles = append(roles, cfg.AssistantRole) + } + // Check for empty options list + if len(roles) == 0 { + logger.Warn("no roles available for selection") + message := "No roles available for selection." + if err := notifyUser("Empty list", message); err != nil { + logger.Error("failed to send notification", "error", err) + } + return + } + // Create a list primitive + roleListWidget := tview.NewList().ShowSecondaryText(false). 
+ SetSelectedBackgroundColor(tcell.ColorGray) + roleListWidget.SetTitle("Select Bot Role").SetBorder(true) + // Find the current role index to set as selected + currentRole := cfg.AssistantRole + if cfg.WriteNextMsgAsCompletionAgent != "" { + currentRole = cfg.WriteNextMsgAsCompletionAgent + } + currentRoleIndex := -1 + for i, role := range roles { + if strings.EqualFold(role, currentRole) { + currentRoleIndex = i + } + roleListWidget.AddItem(role, "", 0, nil) + } + // Set the current selection if found + if currentRoleIndex != -1 { + roleListWidget.SetCurrentItem(currentRoleIndex) + } + roleListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) { + // Update the bot role in config + cfg.WriteNextMsgAsCompletionAgent = mainText + // Remove the popup page + pages.RemovePage("botRoleSelectionPopup") + // Update the status line to reflect the change + updateStatusLine() + }) + roleListWidget.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Key() == tcell.KeyEscape { + pages.RemovePage("botRoleSelectionPopup") + return nil + } + return event + }) + modal := func(p tview.Primitive, width, height int) tview.Primitive { + return tview.NewFlex(). + AddItem(nil, 0, 1, false). + AddItem(tview.NewFlex().SetDirection(tview.FlexRow). + AddItem(nil, 0, 1, false). + AddItem(p, height, 1, true). + AddItem(nil, 0, 1, false), width, 1, true). 
+ AddItem(nil, 0, 1, false) + } + // Add modal page and make it visible + pages.AddPage("botRoleSelectionPopup", modal(roleListWidget, 80, 20), true, true) + app.SetFocus(roleListWidget) +} diff --git a/props_table.go b/props_table.go index 50c8886..a7ad067 100644 --- a/props_table.go +++ b/props_table.go @@ -2,7 +2,6 @@ package main import ( "fmt" - "slices" "strconv" "strings" "sync" @@ -53,7 +52,6 @@ func makePropsTable(props map[string]float32) *tview.Table { row++ // Store cell data for later use in selection functions cellData := make(map[string]*CellData) - var modelCellID string // will be set for the model selection row // Helper function to add a checkbox-like row addCheckboxRow := func(label string, initialValue bool, onChange func(bool)) { table.SetCell(row, 0, @@ -161,52 +159,6 @@ func makePropsTable(props map[string]float32) *tview.Table { defer localModelsMu.RUnlock() return LocalModels } - var modelRowIndex int // will be set before model row is added - // Prepare API links dropdown - ensure current API is first, avoid duplicates - apiLinks := make([]string, 0, len(cfg.ApiLinks)+1) - apiLinks = append(apiLinks, cfg.CurrentAPI) - for _, api := range cfg.ApiLinks { - if api != cfg.CurrentAPI { - apiLinks = append(apiLinks, api) - } - } - addListPopupRow("Select an api", apiLinks, cfg.CurrentAPI, func(option string) { - cfg.CurrentAPI = option - // Update model list based on new API - newModelList := getModelListForAPI(cfg.CurrentAPI) - if modelCellID != "" { - if data := cellData[modelCellID]; data != nil { - data.Options = newModelList - } - } - // Ensure chatBody.Model is in the new list; if not, set to first available model - if len(newModelList) > 0 && !slices.Contains(newModelList, chatBody.Model) { - chatBody.Model = newModelList[0] - cfg.CurrentModel = chatBody.Model - // Update the displayed cell text - need to find model row - // Search for model row by label - for r := 0; r < table.GetRowCount(); r++ { - if cell := table.GetCell(r, 0); cell 
!= nil && cell.Text == "Select a model" { - if valueCell := table.GetCell(r, 1); valueCell != nil { - valueCell.SetText(chatBody.Model) - } - break - } - } - } - }) - // Prepare model list dropdown - modelRowIndex = row - modelCellID = fmt.Sprintf("listpopup_%d", modelRowIndex) - modelList := getModelListForAPI(cfg.CurrentAPI) - addListPopupRow("Select a model", modelList, chatBody.Model, func(option string) { - chatBody.Model = option - cfg.CurrentModel = chatBody.Model - }) - // Role selection dropdown - addListPopupRow("Write next message as", listRolesWithUser(), cfg.WriteNextMsgAs, func(option string) { - cfg.WriteNextMsgAs = option - }) // Add input fields addInputRow("New char to write msg as", "", func(text string) { if text != "" { diff --git a/tui.go b/tui.go index 70f67f1..87e878a 100644 --- a/tui.go +++ b/tui.go @@ -77,16 +77,16 @@ var ( [yellow]Ctrl+n[white]: start a new chat [yellow]Ctrl+o[white]: open image file picker [yellow]Ctrl+p[white]: props edit form (min-p, dry, etc.) 
-[yellow]Ctrl+v[white]: switch between /completion and /chat api (if provided in config) +[yellow]Ctrl+v[white]: show API link selection popup to choose current API [yellow]Ctrl+r[white]: start/stop recording from your microphone (needs stt server or whisper binary) [yellow]Ctrl+t[white]: remove thinking () and tool messages from context (delete from chat) -[yellow]Ctrl+l[white]: rotate through free OpenRouter models (if openrouter api) or update connected model name (llamacpp) +[yellow]Ctrl+l[white]: show model selection popup to choose current model [yellow]Ctrl+k[white]: switch tool use (recommend tool use to llm after user msg) [yellow]Ctrl+a[white]: interrupt tts (needs tts server) [yellow]Ctrl+g[white]: open RAG file manager (load files for context retrieval) [yellow]Ctrl+y[white]: list loaded RAG files (view and manage loaded files) -[yellow]Ctrl+q[white]: cycle through mentioned chars in chat, to pick persona to send next msg as -[yellow]Ctrl+x[white]: cycle through mentioned chars in chat, to pick persona to send next msg as (for llm) +[yellow]Ctrl+q[white]: show user role selection popup to choose who sends next msg as +[yellow]Ctrl+x[white]: show bot role selection popup to choose which agent responds next [yellow]Alt+1[white]: toggle shell mode (execute commands locally) [yellow]Alt+2[white]: toggle auto-scrolling (for reading while LLM types) [yellow]Alt+3[white]: summarize chat history and start new chat with summary as tool response @@ -1026,30 +1026,8 @@ func init() { return nil } if event.Key() == tcell.KeyCtrlL { - // Check if the current API is an OpenRouter API - if strings.Contains(cfg.CurrentAPI, "openrouter.ai/api/v1/") { - // Rotate through OpenRouter free models - if len(ORFreeModels) > 0 { - currentORModelIndex = (currentORModelIndex + 1) % len(ORFreeModels) - chatBody.Model = ORFreeModels[currentORModelIndex] - cfg.CurrentModel = chatBody.Model - } - updateStatusLine() - } else { - localModelsMu.RLock() - if len(LocalModels) > 0 { - 
currentLocalModelIndex = (currentLocalModelIndex + 1) % len(LocalModels) - chatBody.Model = LocalModels[currentLocalModelIndex] - cfg.CurrentModel = chatBody.Model - } - localModelsMu.RUnlock() - updateStatusLine() - // // For non-OpenRouter APIs, use the old logic - // go func() { - // fetchLCPModelName() // blocks - // updateStatusLine() - // }() - } + // Show model selection popup instead of rotating models + showModelSelectionPopup() return nil } if event.Key() == tcell.KeyCtrlT { @@ -1061,29 +1039,8 @@ func init() { return nil } if event.Key() == tcell.KeyCtrlV { - // switch between API links using index-based rotation - if len(cfg.ApiLinks) == 0 { - // No API links to rotate through - return nil - } - // Find current API in the list to get the current index - currentIndex := -1 - for i, api := range cfg.ApiLinks { - if api == cfg.CurrentAPI { - currentIndex = i - break - } - } - // If current API is not in the list, start from beginning - // Otherwise, advance to next API in the list (with wrap-around) - if currentIndex == -1 { - currentAPIIndex = 0 - } else { - currentAPIIndex = (currentIndex + 1) % len(cfg.ApiLinks) - } - cfg.CurrentAPI = cfg.ApiLinks[currentAPIIndex] - choseChunkParser() - updateStatusLine() + // Show API link selection popup instead of rotating APIs + showAPILinkSelectionPopup() return nil } if event.Key() == tcell.KeyCtrlS { @@ -1179,54 +1136,13 @@ func init() { return nil } if event.Key() == tcell.KeyCtrlQ { - persona := cfg.UserRole - if cfg.WriteNextMsgAs != "" { - persona = cfg.WriteNextMsgAs - } - roles := listRolesWithUser() - for i, role := range roles { - if strings.EqualFold(role, persona) { - if i == len(roles)-1 { - cfg.WriteNextMsgAs = roles[0] // reached last, get first - persona = cfg.WriteNextMsgAs - break - } - cfg.WriteNextMsgAs = roles[i+1] // get next role - persona = cfg.WriteNextMsgAs - break - } - } - // role got switch, update textview with character specific context for user - filtered := 
filterMessagesForCharacter(chatBody.Messages, persona) - textView.SetText(chatToText(filtered, cfg.ShowSys)) - updateStatusLine() - colorText() + // Show user role selection popup instead of cycling through roles + showUserRoleSelectionPopup() return nil } if event.Key() == tcell.KeyCtrlX { - botPersona := cfg.AssistantRole - if cfg.WriteNextMsgAsCompletionAgent != "" { - botPersona = cfg.WriteNextMsgAsCompletionAgent - } - // roles := chatBody.ListRoles() - roles := listChatRoles() - if len(roles) == 0 { - logger.Warn("empty roles in chat") - } - if !strInSlice(cfg.AssistantRole, roles) { - roles = append(roles, cfg.AssistantRole) - } - for i, role := range roles { - if strings.EqualFold(role, botPersona) { - if i == len(roles)-1 { - cfg.WriteNextMsgAsCompletionAgent = roles[0] // reached last, get first - break - } - cfg.WriteNextMsgAsCompletionAgent = roles[i+1] // get next role - break - } - } - updateStatusLine() + // Show bot role selection popup instead of cycling through roles + showBotRoleSelectionPopup() return nil } if event.Key() == tcell.KeyCtrlG { -- cgit v1.2.3 From 5e7ddea6827765ac56155577cf7dcc809fe1128c Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 09:44:54 +0300 Subject: Enha: change __known_by_char tag to @ --- bot.go | 6 ++-- bot_test.go | 70 +++++++++++++++++++++--------------------- char-specific-context.md | 16 +++++----- config.example.toml | 2 +- main.go | 21 ++++++------- popups.go | 9 +++--- sysprompts/alice_bob_carl.json | 2 +- 7 files changed, 62 insertions(+), 64 deletions(-) diff --git a/bot.go b/bot.go index 8e0e856..7209679 100644 --- a/bot.go +++ b/bot.go @@ -76,10 +76,10 @@ func parseKnownToTag(content string) []string { } tag := cfg.CharSpecificContextTag if tag == "" { - tag = "__known_to_chars__" + tag = "@" } - // Pattern: tag + list + "__" - pattern := regexp.QuoteMeta(tag) + `(.*?)__` + // Pattern: tag + list + "@" + pattern := regexp.QuoteMeta(tag) + `(.*?)@` re := regexp.MustCompile(pattern) matches := 
re.FindAllStringSubmatch(content, -1) if len(matches) == 0 { diff --git a/bot_test.go b/bot_test.go index 1710003..01c3e2c 100644 --- a/bot_test.go +++ b/bot_test.go @@ -299,81 +299,81 @@ func TestParseKnownToTag(t *testing.T) { }{ { name: "feature disabled returns original", - content: "Hello __known_to_chars__Alice__", + content: "Hello @Alice@", enabled: false, - tag: "__known_to_chars__", - wantCleaned: "Hello __known_to_chars__Alice__", + tag: "@", + wantCleaned: "Hello @Alice@", wantKnownTo: nil, }, { name: "no tag returns original", content: "Hello Alice", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Hello Alice", wantKnownTo: nil, }, { name: "single tag with one char", - content: "Hello __known_to_chars__Alice__", + content: "Hello @Alice@", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Hello", wantKnownTo: []string{"Alice"}, }, { name: "single tag with two chars", - content: "Secret __known_to_chars__Alice,Bob__ message", + content: "Secret @Alice,Bob@ message", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Secret message", wantKnownTo: []string{"Alice", "Bob"}, }, { name: "tag at beginning", - content: "__known_to_chars__Alice__ Hello", + content: "@Alice@ Hello", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Hello", wantKnownTo: []string{"Alice"}, }, { name: "tag at end", - content: "Hello __known_to_chars__Alice__", + content: "Hello @Alice@", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Hello", wantKnownTo: []string{"Alice"}, }, { name: "multiple tags", - content: "First __known_to_chars__Alice__ then __known_to_chars__Bob__", + content: "First @Alice@ then @Bob@", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "First then", wantKnownTo: []string{"Alice", "Bob"}, }, { name: "custom tag", - content: "Secret __secret__Alice,Bob__ message", + content: "Secret @Alice,Bob@ message", enabled: true, - tag: 
"__secret__", + tag: "@", wantCleaned: "Secret message", wantKnownTo: []string{"Alice", "Bob"}, }, { name: "empty list", - content: "Secret __known_to_chars____", + content: "Secret @@@", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "Secret", wantKnownTo: nil, }, { name: "whitespace around commas", - content: "__known_to_chars__ Alice , Bob , Carl __", + content: "@ Alice , Bob , Carl @", enabled: true, - tag: "__known_to_chars__", + tag: "@", wantCleaned: "", wantKnownTo: []string{"Alice", "Bob", "Carl"}, }, @@ -415,13 +415,13 @@ func TestProcessMessageTag(t *testing.T) { name: "feature disabled returns unchanged", msg: models.RoleMsg{ Role: "Alice", - Content: "Secret __known_to_chars__Bob__", + Content: "Secret @Bob@", }, enabled: false, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", - Content: "Secret __known_to_chars__Bob__", + Content: "Secret @Bob@", KnownTo: nil, }, }, @@ -432,7 +432,7 @@ func TestProcessMessageTag(t *testing.T) { Content: "Hello everyone", }, enabled: true, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", Content: "Hello everyone", @@ -443,10 +443,10 @@ func TestProcessMessageTag(t *testing.T) { name: "tag with Bob, adds Alice automatically", msg: models.RoleMsg{ Role: "Alice", - Content: "Secret __known_to_chars__Bob__", + Content: "Secret @Bob@", }, enabled: true, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", Content: "Secret", @@ -457,10 +457,10 @@ func TestProcessMessageTag(t *testing.T) { name: "tag already includes sender", msg: models.RoleMsg{ Role: "Alice", - Content: "__known_to_chars__Alice,Bob__", + Content: "@Alice,Bob@", }, enabled: true, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", Content: "", @@ -471,11 +471,11 @@ func TestProcessMessageTag(t *testing.T) { name: "knownTo already set (from DB), tag still processed", msg: models.RoleMsg{ Role: "Alice", - Content: 
"Secret __known_to_chars__Bob__", + Content: "Secret @Bob@", KnownTo: []string{"Alice"}, // from previous processing }, enabled: true, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", Content: "Secret", @@ -486,14 +486,14 @@ func TestProcessMessageTag(t *testing.T) { name: "example from real use", msg: models.RoleMsg{ Role: "Alice", - Content: "I'll start with a simple one! The word is 'banana'. (ooc: __known_to_chars__Bob__)", + Content: "I'll start with a simple one! The word is 'banana'. (ooc: @Bob@)", KnownTo: []string{"Alice"}, // from previous processing }, enabled: true, - tag: "__known_to_chars__", + tag: "@", wantMsg: models.RoleMsg{ Role: "Alice", - Content: "I'll start with a simple one! The word is 'banana'. (ooc: __known_to_chars__Bob__)", + Content: "I'll start with a simple one! The word is 'banana'. (ooc: @Bob@)", KnownTo: []string{"Bob", "Alice"}, }, }, @@ -588,7 +588,7 @@ func TestFilterMessagesForCharacter(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testCfg := &config.Config{ CharSpecificContextEnabled: tt.enabled, - CharSpecificContextTag: "__known_to_chars__", + CharSpecificContextTag: "@", } cfg = testCfg @@ -640,7 +640,7 @@ func TestKnownToFieldPreservationScenario(t *testing.T) { // Test the specific scenario from the log where KnownTo field was getting lost originalMsg := models.RoleMsg{ Role: "Alice", - Content: `Alice: "Okay, Bob. The word is... **'Ephemeral'**. (ooc: __known_to_chars__Bob__)"`, + Content: `Alice: "Okay, Bob. The word is... **'Ephemeral'**. 
(ooc: @Bob@)"`, KnownTo: []string{"Bob"}, // This was detected in the log } diff --git a/char-specific-context.md b/char-specific-context.md index f06fd75..54fa24e 100644 --- a/char-specific-context.md +++ b/char-specific-context.md @@ -12,16 +12,16 @@ Character-Specific Context is a feature that enables private communication betwe ### Tagging Messages -Messages can be tagged with a special string (by default `__known_to_chars__`) followed by a comma-separated list of character names. The tag can appear anywhere in the message content. **After csv of characters tag should be closed with `__` (for regexp to know where it ends).** +Messages can be tagged with a special string (by default `@`) followed by a comma-separated list of character names. The tag can appear anywhere in the message content. **After the CSV of character names, the tag must be closed with `@` (so the regexp knows where it ends).** **Example:** ``` -Alice: __known_to_chars__Bob__ Can you keep a secret? +Alice: @Bob@ Can you keep a secret? ``` **To avoid breaking immersion, it is better to place the tag in (ooc:)** ``` -Alice: (ooc: __known_to_chars__Bob__) Can you keep a secret? +Alice: (ooc: @Bob@) Can you keep a secret? ``` This message will be visible only to Alice (the sender) and Bob. The tag is parsed by `parseKnownToTag` and the resulting list of character names is stored in the `KnownTo` field of the message (`RoleMsg`). The sender is automatically added to the `KnownTo` list (if not already present) by `processMessageTag`. @@ -44,7 +44,7 @@ The filtered history is then used to construct the prompt sent to the LLM. This Two configuration settings control this feature: - `CharSpecificContextEnabled` – boolean; enables or disables the feature globally. -- `CharSpecificContextTag` – string; the tag used to mark private messages. Default is `__known_to_chars__`. +- `CharSpecificContextTag` – string; the tag used to mark private messages. Default is `@`.
These are set in `config.toml` (see `config.example.toml` for the default values). @@ -62,7 +62,7 @@ These are set in `config.toml` (see `config.example.toml` for the default values Alice wants to tell Bob something without Carl knowing: ``` -Alice: __known_to_chars__Bob__ Meet me at the library tonight. +Alice: @Bob@ Meet me at the library tonight. ``` Result: @@ -75,7 +75,7 @@ Result: Alice shares a secret with Bob and Carl, but not David: ``` -Alice: (ooc: __known_to_chars__Bob,Carl__) The treasure is hidden under the old oak. +Alice: (ooc: @Bob,Carl@) The treasure is hidden under the old oak. ``` ### Public Message @@ -116,7 +116,7 @@ So far only json format supports multiple characters. Card example: ``` { - "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '__known_to_chars__{CharName1,CharName2,CharName3}__'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... (ooc: __known_to_chars__Bob__)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '@{CharName1,CharName2,CharName3}@'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... (ooc: @Bob@)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", "role": "Alice", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], @@ -147,6 +147,6 @@ The `KnownTo` field is stored as a JSON array in the database. 
Existing messages ```toml CharSpecificContextEnabled = true -CharSpecificContextTag = "__known_to_chars__" +CharSpecificContextTag = "@" AutoTurn = false ``` diff --git a/config.example.toml b/config.example.toml index 82aa5f5..1b466eb 100644 --- a/config.example.toml +++ b/config.example.toml @@ -46,5 +46,5 @@ FilePickerExts = "png,jpg,jpeg,gif,webp" # Comma-separated list of allowed file EnableMouse = false # Enable mouse support in the UI # character specific context CharSpecificContextEnabled = true -CharSpecificContextTag = "__known_to_chars__" +CharSpecificContextTag = "@" AutoTurn = true diff --git a/main.go b/main.go index 0f2df2e..c112fac 100644 --- a/main.go +++ b/main.go @@ -8,18 +8,15 @@ import ( ) var ( - boolColors = map[bool]string{true: "green", false: "red"} - botRespMode = false - editMode = false - roleEditMode = false - injectRole = true - selectedIndex = int(-1) - currentAPIIndex = 0 // Index to track current API in ApiLinks slice - currentORModelIndex = 0 // Index to track current OpenRouter model in ORFreeModels slice - currentLocalModelIndex = 0 // Index to track current llama.cpp model - shellMode = false - indexLineCompletion = "F12 to show keys help | llm turn: [%s:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [%s:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [%s:-:b]%v[-:-:-] (F10)\nAPI: [orange:-:b]%s[-:-:-] (ctrl+v) | recording: [%s:-:b]%v[-:-:-] (ctrl+r) | writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role injection (alt+7) [%s:-:b]%v[-:-:-]" - focusSwitcher = map[tview.Primitive]tview.Primitive{} + boolColors = map[bool]string{true: "green", false: "red"} + botRespMode = false + editMode = false + roleEditMode = false + injectRole = true + selectedIndex = int(-1) + shellMode = false + indexLineCompletion = "F12 to show keys help | llm turn: [%s:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: 
[%s:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [%s:-:b]%v[-:-:-] (F10)\nAPI: [orange:-:b]%s[-:-:-] (ctrl+v) | recording: [%s:-:b]%v[-:-:-] (ctrl+r) | writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role injection (alt+7) [%s:-:b]%v[-:-:-]" + focusSwitcher = map[tview.Primitive]tview.Primitive{} ) func main() { diff --git a/popups.go b/popups.go index 559a2aa..3087f2d 100644 --- a/popups.go +++ b/popups.go @@ -28,12 +28,13 @@ func showModelSelectionPopup() { // Check for empty options list if len(modelList) == 0 { logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels)) - message := "No models available for selection" - if strings.Contains(cfg.CurrentAPI, "openrouter.ai") { + var message string + switch { + case strings.Contains(cfg.CurrentAPI, "openrouter.ai"): message = "No OpenRouter models available. Check token and connection." - } else if strings.Contains(cfg.CurrentAPI, "api.deepseek.com") { + case strings.Contains(cfg.CurrentAPI, "api.deepseek.com"): message = "DeepSeek models should be available. Please report bug." - } else { + default: message = "No llama.cpp models loaded. Ensure llama.cpp server is running with models." } if err := notifyUser("Empty list", message); err != nil { diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index d575f93..b822b99 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -1,5 +1,5 @@ { - "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '__known_to_chars__{CharName1,CharName2,CharName3}__'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... 
(ooc: __known_to_chars__Bob__)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '@{CharName1,CharName2,CharName3}@'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... (ooc: @Bob@)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", "role": "Alice", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], -- cgit v1.2.3 From 67733ad8dd0151f700e9e43748fb1700101fe651 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 10:11:56 +0300 Subject: Enha: add bool to apply card --- bot.go | 8 ++++---- tables.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bot.go b/bot.go index 7209679..2869fa0 100644 --- a/bot.go +++ b/bot.go @@ -1150,16 +1150,16 @@ func addNewChat(chatName string) { activeChatName = chat.Name } -func applyCharCard(cc *models.CharCard) { +func applyCharCard(cc *models.CharCard, loadHistory bool) { cfg.AssistantRole = cc.Role history, err := loadAgentsLastChat(cfg.AssistantRole) - if err != nil { + if err != nil || !loadHistory { // too much action for err != nil; loadAgentsLastChat needs to be split up - logger.Warn("failed to load last agent chat;", "agent", cc.Role, "err", err) history = []models.RoleMsg{ {Role: "system", Content: cc.SysPrompt}, {Role: cfg.AssistantRole, Content: cc.FirstMsg}, } + logger.Warn("failed to load last agent chat;", "agent", cc.Role, "err", err, "new_history", history) addNewChat("") } chatBody.Messages = history @@ -1170,7 +1170,7 @@ func charToStart(agentName string) bool { if !ok { return false } - applyCharCard(cc) + applyCharCard(cc, true) return true } diff --git a/tables.go b/tables.go index ee2b145..740b7c8 100644 --- a/tables.go +++ 
b/tables.go @@ -206,7 +206,7 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { } // Update sysMap with fresh card data sysMap[agentName] = newCard - applyCharCard(newCard) + applyCharCard(newCard, false) startNewChat() pages.RemovePage(historyPage) return -- cgit v1.2.3 From c04e120ddbec870348b0340e0fbb41556812c3f5 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 10:39:27 +0300 Subject: Chore: interface{} -> any --- models/models.go | 62 +++++++++++++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/models/models.go b/models/models.go index 34e3dcf..36ec88f 100644 --- a/models/models.go +++ b/models/models.go @@ -89,12 +89,12 @@ type ImageContentPart struct { // RoleMsg represents a message with content that can be either a simple string or structured content parts type RoleMsg struct { - Role string `json:"role"` - Content string `json:"-"` - ContentParts []interface{} `json:"-"` - ToolCallID string `json:"tool_call_id,omitempty"` // For tool response messages - KnownTo []string `json:"known_to,omitempty"` - hasContentParts bool // Flag to indicate which content type to marshal + Role string `json:"role"` + Content string `json:"-"` + ContentParts []any `json:"-"` + ToolCallID string `json:"tool_call_id,omitempty"` // For tool response messages + KnownTo []string `json:"known_to,omitempty"` + hasContentParts bool // Flag to indicate which content type to marshal } // MarshalJSON implements custom JSON marshaling for RoleMsg @@ -102,10 +102,10 @@ func (m *RoleMsg) MarshalJSON() ([]byte, error) { if m.hasContentParts { // Use structured content format aux := struct { - Role string `json:"role"` - Content []interface{} `json:"content"` - ToolCallID string `json:"tool_call_id,omitempty"` - KnownTo []string `json:"known_to,omitempty"` + Role string `json:"role"` + Content []any `json:"content"` + ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string 
`json:"known_to,omitempty"` }{ Role: m.Role, Content: m.ContentParts, @@ -134,10 +134,10 @@ func (m *RoleMsg) MarshalJSON() ([]byte, error) { func (m *RoleMsg) UnmarshalJSON(data []byte) error { // First, try to unmarshal as structured content format var structured struct { - Role string `json:"role"` - Content []interface{} `json:"content"` - ToolCallID string `json:"tool_call_id,omitempty"` - KnownTo []string `json:"known_to,omitempty"` + Role string `json:"role"` + Content []any `json:"content"` + ToolCallID string `json:"tool_call_id,omitempty"` + KnownTo []string `json:"known_to,omitempty"` } if err := json.Unmarshal(data, &structured); err == nil && len(structured.Content) > 0 { m.Role = structured.Role @@ -168,7 +168,6 @@ func (m *RoleMsg) UnmarshalJSON(data []byte) error { func (m *RoleMsg) ToText(i int) string { icon := fmt.Sprintf("(%d)", i) - // Convert content to string representation var contentStr string if !m.hasContentParts { @@ -177,7 +176,7 @@ func (m *RoleMsg) ToText(i int) string { // For structured content, just take the text parts var textParts []string for _, part := range m.ContentParts { - if partMap, ok := part.(map[string]interface{}); ok { + if partMap, ok := part.(map[string]any); ok { if partType, exists := partMap["type"]; exists && partType == "text" { if textVal, textExists := partMap["text"]; textExists { if textStr, isStr := textVal.(string); isStr { @@ -189,7 +188,6 @@ func (m *RoleMsg) ToText(i int) string { } contentStr = strings.Join(textParts, " ") + " " } - // check if already has role annotation (/completion makes them) if !strings.HasPrefix(contentStr, m.Role+":") { icon = fmt.Sprintf("(%d) <%s>: ", i, m.Role) @@ -206,7 +204,7 @@ func (m *RoleMsg) ToPrompt() string { // For structured content, just take the text parts var textParts []string for _, part := range m.ContentParts { - if partMap, ok := part.(map[string]interface{}); ok { + if partMap, ok := part.(map[string]any); ok { if partType, exists := partMap["type"]; 
exists && partType == "text" { if textVal, textExists := partMap["text"]; textExists { if textStr, isStr := textVal.(string); isStr { @@ -231,7 +229,7 @@ func NewRoleMsg(role, content string) RoleMsg { } // NewMultimodalMsg creates a RoleMsg with structured content parts (text and images) -func NewMultimodalMsg(role string, contentParts []interface{}) RoleMsg { +func NewMultimodalMsg(role string, contentParts []any) RoleMsg { return RoleMsg{ Role: role, ContentParts: contentParts, @@ -256,7 +254,7 @@ func (m *RoleMsg) IsContentParts() bool { } // GetContentParts returns the content parts of the message -func (m *RoleMsg) GetContentParts() []interface{} { +func (m *RoleMsg) GetContentParts() []any { return m.ContentParts } @@ -277,9 +275,9 @@ func (m *RoleMsg) AddTextPart(text string) { if !m.hasContentParts { // Convert to content parts format if m.Content != "" { - m.ContentParts = []interface{}{TextContentPart{Type: "text", Text: m.Content}} + m.ContentParts = []any{TextContentPart{Type: "text", Text: m.Content}} } else { - m.ContentParts = []interface{}{} + m.ContentParts = []any{} } m.hasContentParts = true } @@ -293,9 +291,9 @@ func (m *RoleMsg) AddImagePart(imageURL string) { if !m.hasContentParts { // Convert to content parts format if m.Content != "" { - m.ContentParts = []interface{}{TextContentPart{Type: "text", Text: m.Content}} + m.ContentParts = []any{TextContentPart{Type: "text", Text: m.Content}} } else { - m.ContentParts = []interface{}{} + m.ContentParts = []any{} } m.hasContentParts = true } @@ -382,7 +380,7 @@ func (cb *ChatBody) MakeStopSliceExcluding( continue } // Add multiple variations to catch different formatting - ss = append(ss, + ss = append(ss, role+":\n", // Most common: role with newline role+":", // Role with colon but no newline role+": ", // Role with colon and single space @@ -467,12 +465,12 @@ type LlamaCPPReq struct { Stream bool `json:"stream"` // For multimodal requests, prompt should be an object with prompt_string and 
multimodal_data // For regular requests, prompt is a string - Prompt interface{} `json:"prompt"` // Can be string or object with prompt_string and multimodal_data - Temperature float32 `json:"temperature"` - DryMultiplier float32 `json:"dry_multiplier"` - Stop []string `json:"stop"` - MinP float32 `json:"min_p"` - NPredict int32 `json:"n_predict"` + Prompt any `json:"prompt"` // Can be string or object with prompt_string and multimodal_data + Temperature float32 `json:"temperature"` + DryMultiplier float32 `json:"dry_multiplier"` + Stop []string `json:"stop"` + MinP float32 `json:"min_p"` + NPredict int32 `json:"n_predict"` // MaxTokens int `json:"max_tokens"` // DryBase float64 `json:"dry_base"` // DryAllowedLength int `json:"dry_allowed_length"` @@ -500,7 +498,7 @@ type PromptObject struct { } func NewLCPReq(prompt, model string, multimodalData []string, props map[string]float32, stopStrings []string) LlamaCPPReq { - var finalPrompt interface{} + var finalPrompt any if len(multimodalData) > 0 { // When multimodal data is present, use the object format as per Python example: // { "prompt": { "prompt_string": "...", "multimodal_data": [...] 
} } -- cgit v1.2.3 From 3f4d8a946775cfba6fc6d0ac7ade30b310bb883b Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 11:29:47 +0300 Subject: Fix (f1): load from the card --- bot.go | 6 +++--- helpfuncs.go | 4 ++-- tables.go | 6 +++--- tui.go | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bot.go b/bot.go index 2869fa0..6693d4c 100644 --- a/bot.go +++ b/bot.go @@ -1165,12 +1165,12 @@ func applyCharCard(cc *models.CharCard, loadHistory bool) { chatBody.Messages = history } -func charToStart(agentName string) bool { +func charToStart(agentName string, keepSysP bool) bool { cc, ok := sysMap[agentName] if !ok { return false } - applyCharCard(cc, true) + applyCharCard(cc, keepSysP) return true } @@ -1223,7 +1223,7 @@ func summarizeAndStartNewChat() { return } // Start a new chat - startNewChat() + startNewChat(true) // Inject summary as a tool call response toolMsg := models.RoleMsg{ Role: cfg.ToolRole, diff --git a/helpfuncs.go b/helpfuncs.go index dff53b9..538b4aa 100644 --- a/helpfuncs.go +++ b/helpfuncs.go @@ -121,12 +121,12 @@ func initSysCards() ([]string, error) { return labels, nil } -func startNewChat() { +func startNewChat(keepSysP bool) { id, err := store.ChatGetMaxID() if err != nil { logger.Error("failed to get chat id", "error", err) } - if ok := charToStart(cfg.AssistantRole); !ok { + if ok := charToStart(cfg.AssistantRole, keepSysP); !ok { logger.Warn("no such sys msg", "name", cfg.AssistantRole) } // set chat body diff --git a/tables.go b/tables.go index 740b7c8..59220be 100644 --- a/tables.go +++ b/tables.go @@ -206,8 +206,8 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table { } // Update sysMap with fresh card data sysMap[agentName] = newCard - applyCharCard(newCard, false) - startNewChat() + // fetching sysprompt and first message anew from the card + startNewChat(false) pages.RemovePage(historyPage) return default: @@ -543,7 +543,7 @@ func makeAgentTable(agentList []string) *tview.Table { // 
notification := fmt.Sprintf("chat: %s; action: %s", selectedChat, tc.Text) switch tc.Text { case "load": - if ok := charToStart(selected); !ok { + if ok := charToStart(selected, true); !ok { logger.Warn("no such sys msg", "name", selected) pages.RemovePage(agentPage) return diff --git a/tui.go b/tui.go index 87e878a..cac8faa 100644 --- a/tui.go +++ b/tui.go @@ -1016,7 +1016,7 @@ func init() { return nil } if event.Key() == tcell.KeyCtrlN { - startNewChat() + startNewChat(true) return nil } if event.Key() == tcell.KeyCtrlO { -- cgit v1.2.3 From 83aeee2576ee7dc332f9ba8ab13f5deb17ef20d2 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 12:26:21 +0300 Subject: Enha: alice_bob_carl card update; system to see all the messages --- bot.go | 3 +++ sysprompts/alice_bob_carl.json | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bot.go b/bot.go index 6693d4c..1310c20 100644 --- a/bot.go +++ b/bot.go @@ -141,6 +141,9 @@ func filterMessagesForCharacter(messages []models.RoleMsg, character string) []m if cfg == nil || !cfg.CharSpecificContextEnabled || character == "" { return messages } + if character == "system" { // system sees every message + return messages + } filtered := make([]models.RoleMsg, 0, len(messages)) for _, msg := range messages { // If KnownTo is nil or empty, message is visible to all diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index b822b99..d214154 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -1,5 +1,5 @@ { - "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally what is said by any character is seen by all others. But characters also might write messages intended to specific targets if their message contain string tag '@{CharName1,CharName2,CharName3}@'.\nFor example:\nAlice:\n\"Hey, Bob. I have a secret for you... 
(ooc: @Bob@)\"\nThis message would be seen only by Bob and Alice (sender always sees their own message).", + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally all message are public (seen by everyone). But characters also able to make messages intended to specific targets using '@' tag. Usually tag is provided inside of out of character clause: (ooc: @charname@).\nTO SEND A PRIVATE MESSAGE:\n- Include a recipient tag in this exact format: @CharacterName@\n- The tag can be anywhere in your message\n- Example: \"Don't tell others this secret. (ooc: @Bob@)\"\n- For immersion sake it is better if private messages are given in context of whispering, passing notes, or being alone in some space: Alice: *leans closer to Carl and whispers* \"I forgot to turn off the car, could you watch my bag for a cuple of minutes?\" (ooc: @Carl@)\"\n- Only the sender and tagged recipients will see that message.\nRECEIVING MESSAGES:\n- You only see messages where you are the sender OR you are tagged in the recipient tag\n- Public messages (without tags) are seen by everyone.\nEXAMPLE FORMAT:\nAlice: \"Public message everyone sees\"\nAlice: \"Private message only for Bob @Bob@\"\n(if Diana joins the conversation, and Alice wants to exclude her) Alice: *Grabs Bob and Carl, and pulls them away* \"Listen boys, let's meet this friday again!\" (ooc: @Bob,Carl@; Diana is not trustworthy)", "role": "Alice", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], -- cgit v1.2.3 From a13a1634f70f80e78abe6966b0bafbbd4059378f Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 12:43:52 +0300 Subject: Enha: update card --- sysprompts/alice_bob_carl.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sysprompts/alice_bob_carl.json b/sysprompts/alice_bob_carl.json index d214154..409a8dc 100644 --- a/sysprompts/alice_bob_carl.json +++ b/sysprompts/alice_bob_carl.json @@ -1,5 +1,5 @@ { - "sys_prompt": "This is a chat between 
Alice, Bob and Carl. Normally all message are public (seen by everyone). But characters also able to make messages intended to specific targets using '@' tag. Usually tag is provided inside of out of character clause: (ooc: @charname@).\nTO SEND A PRIVATE MESSAGE:\n- Include a recipient tag in this exact format: @CharacterName@\n- The tag can be anywhere in your message\n- Example: \"Don't tell others this secret. (ooc: @Bob@)\"\n- For immersion sake it is better if private messages are given in context of whispering, passing notes, or being alone in some space: Alice: *leans closer to Carl and whispers* \"I forgot to turn off the car, could you watch my bag for a cuple of minutes?\" (ooc: @Carl@)\"\n- Only the sender and tagged recipients will see that message.\nRECEIVING MESSAGES:\n- You only see messages where you are the sender OR you are tagged in the recipient tag\n- Public messages (without tags) are seen by everyone.\nEXAMPLE FORMAT:\nAlice: \"Public message everyone sees\"\nAlice: \"Private message only for Bob @Bob@\"\n(if Diana joins the conversation, and Alice wants to exclude her) Alice: *Grabs Bob and Carl, and pulls them away* \"Listen boys, let's meet this friday again!\" (ooc: @Bob,Carl@; Diana is not trustworthy)", + "sys_prompt": "This is a chat between Alice, Bob and Carl. Normally all message are public (seen by everyone). But characters also able to make messages intended to specific targets using '@' tag. Usually tag is provided inside of out of character clause: (ooc: @charname@), but will be parsed if put anywhere in the message.\nTO SEND A PRIVATE MESSAGE:\n- Include a recipient tag in this exact format: @CharacterName@\n- The tag can be anywhere in your message\n- Example: \"Don't tell others this secret. 
(ooc: @Bob@)\"\n- For immersion sake it is better if private messages are given in context of whispering, passing notes, or being alone in some space: Alice: *leans closer to Carl and whispers* \"I forgot to turn off the car, could you watch my bag for a cuple of minutes? (ooc: @Carl@)\"\n- Only the sender and tagged recipients will see that message.\nRECEIVING MESSAGES:\n- You only see messages where you are the sender OR you are tagged in the recipient tag\n- Public messages (without tags) are seen by everyone.\nEXAMPLE FORMAT:\nAlice: \"Public message everyone sees\"\nAlice: \"Private message only for Bob @Bob@\"\n(if Diana joins the conversation, and Alice wants to exclude her) Alice: *Grabs Bob and Carl, and pulls them away* \"Listen boys, let's meet this friday again!\" (ooc: @Bob,Carl@; Diana is not trustworthy)\nWHEN TO USE:\n- Most of the time public messages (no tag) are the best choice. Private messages (with tag) are mostly for the passing secrets or information that is described or infered as private.\n- Game of 20 questions. Guys are putting paper sickers on the forehead with names written on them. So in this case only person who gets the sticker put on them does not see the writting on it.\nBob: *Puts sticker with 'JACK THE RIPPER' written on it, on Alices forehead* (ooc: @Carl).\nCarl: \"Alright, we're ready.\"\nAlice: \"Good. 
So, am I a fictional character or a real one?\"", "role": "Alice", "filepath": "sysprompts/alice_bob_carl.json", "chars": ["Alice", "Bob", "Carl"], -- cgit v1.2.3 From 0afb98246b0a46e2a599edb6e9074c7325d37de0 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Mon, 9 Feb 2026 15:42:40 +0300 Subject: Fix (llama.cpp) model update --- popups.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/popups.go b/popups.go index 3087f2d..9a1a278 100644 --- a/popups.go +++ b/popups.go @@ -18,9 +18,7 @@ func showModelSelectionPopup() { return ORFreeModels } // Assume local llama.cpp - refreshLocalModelsIfEmpty() - localModelsMu.RLock() - defer localModelsMu.RUnlock() + updateModelLists() return LocalModels } // Get the current model list based on the API -- cgit v1.2.3 From 2cd3956f6a0f1806a13ae6d9e05d86b82fcb4f1c Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 10 Feb 2026 08:54:47 +0300 Subject: Chore: make debug; icon fix --- Makefile | 7 +++++++ models/models.go | 9 ++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index eb9c574..dc8304f 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,15 @@ .PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run noextra-server + run: setconfig go build -tags extra -o gf-lt && ./gf-lt +build-debug: + go build -gcflags="all=-N -l" -tags extra -o gf-lt + +debug: build-debug + dlv exec --headless --accept-multiclient --listen=:2345 ./gf-lt + server: setconfig go build -tags extra -o gf-lt && ./gf-lt -port 3333 diff --git a/models/models.go b/models/models.go index 36ec88f..30ec5f5 100644 --- a/models/models.go +++ b/models/models.go @@ -189,9 +189,12 @@ func (m *RoleMsg) ToText(i int) string { contentStr = strings.Join(textParts, " ") + " " } // check if already has role annotation (/completion makes them) - if !strings.HasPrefix(contentStr, m.Role+":") { - icon = fmt.Sprintf("(%d) <%s>: ", i, m.Role) - } + // in 
that case remove it, and then add to icon + // since icon and content are separated by \n + contentStr, _ = strings.CutPrefix(contentStr, m.Role+":") + // if !strings.HasPrefix(contentStr, m.Role+":") { + icon = fmt.Sprintf("(%d) <%s>: ", i, m.Role) + // } textMsg := fmt.Sprintf("[-:-:b]%s[-:-:-]\n%s\n", icon, contentStr) return strings.ReplaceAll(textMsg, "\n\n", "\n") } -- cgit v1.2.3 From 37b98ad36cd7e63d96a190017b78fe35143a2e6a Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 10 Feb 2026 10:56:39 +0300 Subject: Doc: update --- docs/tutorial_rp.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/tutorial_rp.md b/docs/tutorial_rp.md index d670745..d52b59c 100644 --- a/docs/tutorial_rp.md +++ b/docs/tutorial_rp.md @@ -67,11 +67,9 @@ In case you're running llama.cpp, here is an example of starting the llama.cpp s For roleplay, /completion endpoints are much better, since /chat endpoints swap any character name to either `user` or `assistant`. Once you have the desired API endpoint (for example: http://localhost:8080/completion), -there are two ways to pick a model: -- `Ctrl+L` allows you to iterate through the model list while in the main window. -- `Ctrl+P` (opens the properties table). Go to the `Select a model` row and press Enter. A list of available models will appear; pick any that you want, then press `x` to exit the properties table. +- `Ctrl+L` to show a model selection popup; -#### Llama.cpp model preload +#### Llama.cpp model (pre)load Llama.cpp supports swapping models. To load the picked ones, press `Alt+9`. @@ -128,9 +126,9 @@ The screen flashes briefly as it calculates. "I am experiencing degraded functio ``` Once the character name is in history, we can switch who the LLM will respond as by pressing `Ctrl+X`. -For now, it should be rotating between HAL9000, `Username`, Seraphina, and system. -Make the status line mention: `Bot will write as Seraphina (ctrl+x)` -and press Escape to see her reaction. 
+For now, it should give a choice between HAL9000, `Username`, Seraphina, and system. +After the change the status line should say: `Bot will write as Seraphina (ctrl+x)` +press Escape for llm to write as Seraphina. #### Image input -- cgit v1.2.3