diff options
| author | Grail Finder <wohilas@gmail.com> | 2026-02-17 08:23:08 +0300 |
|---|---|---|
| committer | Grail Finder <wohilas@gmail.com> | 2026-02-17 08:23:08 +0300 |
| commit | 7b2fa043911a8749e7c7b8aa36e32e3d0e79a6f8 (patch) | |
| tree | 858015c7f106a4bed60c94b824b6d317832521a4 | |
| parent | 43b0fe3739b27deae8193daf1145f42fd616b1f1 (diff) | |
Fix (img prompt): botname: after <__media__> for /completion
| -rw-r--r-- | llm.go | 16 |
1 file changed, 8 insertions, 8 deletions
@@ -190,14 +190,6 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
 		messages[i] = m.ToPrompt()
 	}
 	prompt := strings.Join(messages, "\n")
-	// strings builder?
-	if !resume {
-		botMsgStart := "\n" + botPersona + ":\n"
-		prompt += botMsgStart
-	}
-	if cfg.ThinkUse && !cfg.ToolUse {
-		prompt += "<think>"
-	}
 	// Add multimodal media markers to the prompt text when multimodal data is present
 	// This is required by llama.cpp multimodal models so they know where to insert media
 	if len(multimodalData) > 0 {
@@ -209,6 +201,14 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
 		}
 		prompt = sb.String()
 	}
+	// needs to be after <__media__> if there are images
+	if !resume {
+		botMsgStart := "\n" + botPersona + ":\n"
+		prompt += botMsgStart
+	}
+	if cfg.ThinkUse && !cfg.ToolUse {
+		prompt += "<think>"
+	}
 	logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
 		"msg", msg, "resume", resume, "prompt", prompt,
 		"multimodal_data_count", len(multimodalData))
 	payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData,
