From 7b2fa043911a8749e7c7b8aa36e32e3d0e79a6f8 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 17 Feb 2026 08:23:08 +0300 Subject: Fix (img prompt): place botname: after <__media__> for /completion --- llm.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/llm.go b/llm.go index 734b4fd..1b5e3fb 100644 --- a/llm.go +++ b/llm.go @@ -190,14 +190,6 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro messages[i] = m.ToPrompt() } prompt := strings.Join(messages, "\n") - // strings builder? - if !resume { - botMsgStart := "\n" + botPersona + ":\n" - prompt += botMsgStart - } - if cfg.ThinkUse && !cfg.ToolUse { - prompt += "" - } // Add multimodal media markers to the prompt text when multimodal data is present // This is required by llama.cpp multimodal models so they know where to insert media if len(multimodalData) > 0 { @@ -209,6 +201,14 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro } prompt = sb.String() } + // needs to be after <__media__> if there are images + if !resume { + botMsgStart := "\n" + botPersona + ":\n" + prompt += botMsgStart + } + if cfg.ThinkUse && !cfg.ToolUse { + prompt += "" + } logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, -- cgit v1.2.3