summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGrail Finder <wohilas@gmail.com>2026-02-04 12:47:54 +0300
committerGrail Finder <wohilas@gmail.com>2026-02-04 12:47:54 +0300
commit7187df509fe9cc506695a1036b840e03eeb25cff (patch)
tree394b40b5375909293ead99e12c733fc1f91370f5
parent79861e7c2bc6f2ed95309ca6e83577ddc4e2c63a (diff)
Enha: stricter stop string
-rw-r--r--bot.go20
-rw-r--r--llm.go6
-rw-r--r--models/models.go9
3 files changed, 9 insertions, 26 deletions
diff --git a/bot.go b/bot.go
index d195431..c396d07 100644
--- a/bot.go
+++ b/bot.go
@@ -861,18 +861,7 @@ out:
newMsg = processMessageTag(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
- logger.Debug("chatRound: before cleanChatBody", "messages_before_clean", len(chatBody.Messages))
- for i, msg := range chatBody.Messages {
- logger.Debug("chatRound: before cleaning", "index", i,
- "role", msg.Role, "content_len", len(msg.Content),
- "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID)
- }
- // // Clean null/empty messages to prevent API issues with endpoints like llama.cpp jinja template
cleanChatBody()
- logger.Debug("chatRound: after cleanChatBody", "messages_after_clean", len(chatBody.Messages))
- for i, msg := range chatBody.Messages {
- logger.Debug("chatRound: after cleaning", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID)
- }
refreshChatDisplay()
updateStatusLine()
// bot msg is done;
@@ -901,19 +890,10 @@ func cleanChatBody() {
if chatBody == nil || chatBody.Messages == nil {
return
}
- originalLen := len(chatBody.Messages)
- logger.Debug("cleanChatBody: before cleaning", "message_count", originalLen)
- for i, msg := range chatBody.Messages {
- logger.Debug("cleanChatBody: before clean", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID)
- }
// Tool request cleaning is now configurable via AutoCleanToolCallsFromCtx (default false)
// /completion msg where part meant for user and other part tool call
chatBody.Messages = cleanToolCalls(chatBody.Messages)
chatBody.Messages = consolidateAssistantMessages(chatBody.Messages)
- logger.Debug("cleanChatBody: after cleaning", "original_len", originalLen, "new_len", len(chatBody.Messages))
- for i, msg := range chatBody.Messages {
- logger.Debug("cleanChatBody: after clean", "index", i, "role", msg.Role, "content_len", len(msg.Content), "has_content", msg.HasContent(), "tool_call_id", msg.ToolCallID)
- }
}
// convertJSONToMapStringString unmarshals JSON into map[string]interface{} and converts all values to strings.
diff --git a/llm.go b/llm.go
index 30fc0ec..95de1d8 100644
--- a/llm.go
+++ b/llm.go
@@ -207,7 +207,7 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData))
payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData,
- defaultLCPProps, chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()))
+ defaultLCPProps, chatBody.MakeStopSliceExcluding("", listChatRoles()))
data, err := json.Marshal(payload)
if err != nil {
logger.Error("failed to form a msg", "error", err)
@@ -444,7 +444,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader
"msg", msg, "resume", resume, "prompt", prompt)
payload := models.NewDSCompletionReq(prompt, chatBody.Model,
defaultLCPProps["temp"],
- chatBody.MakeStopSliceExcluding(botPersona, listChatRoles()))
+ chatBody.MakeStopSliceExcluding("", listChatRoles()))
data, err := json.Marshal(payload)
if err != nil {
logger.Error("failed to form a msg", "error", err)
@@ -604,7 +604,7 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader
if cfg.ThinkUse && !cfg.ToolUse {
prompt += "<think>"
}
- stopSlice := chatBody.MakeStopSliceExcluding(botPersona, listChatRoles())
+ stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles())
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice)
payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt,
diff --git a/models/models.go b/models/models.go
index 340cb42..e99832a 100644
--- a/models/models.go
+++ b/models/models.go
@@ -382,9 +382,12 @@ func (cb *ChatBody) MakeStopSliceExcluding(
continue
}
// Add multiple variations to catch different formatting
- ss = append(ss, role+":\n") // Most common: role with newline
- ss = append(ss, role+":") // Role with colon but no newline
- ss = append(ss, role+": ") // Role with colon and space
+ ss = append(ss, role+":\n") // Most common: role with newline
+ ss = append(ss, role+":") // Role with colon but no newline
+ ss = append(ss, role+": ") // Role with colon and single space
+ ss = append(ss, role+": ") // Role with colon and double space (common tokenization)
+ ss = append(ss, role+": \n") // Role with colon, double space, and trailing newline
+ ss = append(ss, role+": ") // Role with colon and triple space
}
return ss
}