From 7ac85a6d7eba37c7f04d359bf64aef0f1f6bece0 Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Mon, 22 Dec 2025 12:33:41 +0300
Subject: Enha: statusline cleanup

---
 helpfuncs.go | 4 ++--
 main.go      | 2 +-
 tui.go       | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/helpfuncs.go b/helpfuncs.go
index 30d9967..ff4d806 100644
--- a/helpfuncs.go
+++ b/helpfuncs.go
@@ -236,8 +236,8 @@ func makeStatusLine() string {
 		shellModeInfo = ""
 	}
 
-	statusLine := fmt.Sprintf(indexLineCompletion, botRespMode, cfg.AssistantRole, activeChatName,
-		cfg.ToolUse, chatBody.Model, cfg.SkipLLMResp, cfg.CurrentAPI, cfg.ThinkUse, logLevel.Level(),
+	statusLine := fmt.Sprintf(indexLineCompletion, botRespMode, activeChatName,
+		cfg.ToolUse, chatBody.Model, cfg.SkipLLMResp, cfg.CurrentAPI, cfg.ThinkUse,
 		isRecording, persona, botPersona, injectRole)
 	return statusLine + imageInfo + shellModeInfo
 }
diff --git a/main.go b/main.go
index ec175bf..ac8f205 100644
--- a/main.go
+++ b/main.go
@@ -18,7 +18,7 @@ var (
 	currentLocalModelIndex = 0 // Index to track current llama.cpp model
 	shellMode              = false
 	// indexLine = "F12 to show keys help | bot resp mode: [orange:-:b]%v[-:-:-] (F6) | card's char: [orange:-:b]%s[-:-:-] (ctrl+s) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI_URL: [orange:-:b]%s[-:-:-] (ctrl+v) | ThinkUse: [orange:-:b]%v[-:-:-] (ctrl+p) | Log Level: [orange:-:b]%v[-:-:-] (ctrl+p) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q)"
-	indexLineCompletion = "F12 to show keys help | bot resp mode: [orange:-:b]%v[-:-:-] (F6) | card's char: [orange:-:b]%s[-:-:-] (ctrl+s) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI_URL: [orange:-:b]%s[-:-:-] (ctrl+v) | Insert : [orange:-:b]%v[-:-:-] (ctrl+p) | Log Level: [orange:-:b]%v[-:-:-] (ctrl+p) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | Bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role_inject [orange:-:b]%v[-:-:-]"
+	indexLineCompletion = "F12 to show keys help | bot responding: [orange:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI_URL: [orange:-:b]%s[-:-:-] (ctrl+v) | Insert : [orange:-:b]%v[-:-:-] (ctrl+p) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | Bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role_inject [orange:-:b]%v[-:-:-]"
 	focusSwitcher = map[tview.Primitive]tview.Primitive{}
 )
 
diff --git a/tui.go b/tui.go
index 581ae4f..0b7488e 100644
--- a/tui.go
+++ b/tui.go
@@ -82,7 +82,6 @@ var (
 [yellow]Ctrl+t[white]: remove thinking () and tool messages from context (delete from chat)
 [yellow]Ctrl+l[white]: rotate through free OpenRouter models (if openrouter api) or update connected model name (llamacpp)
 [yellow]Ctrl+k[white]: switch tool use (recommend tool use to llm after user msg)
-[yellow]Alt+8[white]: if chat agent is char.png will show the image; then any key to return
 [yellow]Ctrl+a[white]: interrupt tts (needs tts server)
 [yellow]Ctrl+g[white]: open RAG file manager (load files for context retrieval)
 [yellow]Ctrl+y[white]: list loaded RAG files (view and manage loaded files)
@@ -92,6 +91,7 @@ var (
 [yellow]Alt+4[white]: edit msg role
 [yellow]Alt+5[white]: toggle system and tool messages display
 [yellow]Alt+6[white]: toggle status line visibility
+[yellow]Alt+8[white]: show char img or last picked img
 [yellow]Alt+9[white]: warm up (load) selected llama.cpp model
 
 === scrolling chat window (some keys similar to vim) ===
-- 
cgit v1.2.3
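
For reference: after this change, indexLineCompletion carries eleven format verbs and makeStatusLine passes eleven arguments (cfg.AssistantRole and logLevel.Level() were dropped along with the "card's char" and "Log Level" fields). The sketch below is not part of the commit; it is a minimal standalone check of that verb/argument alignment. The constant is copied from the new main.go hunk, and every argument value is an invented placeholder standing in for the project's globals and config fields.

// Standalone sketch (assumption-heavy): verifies that the trimmed
// indexLineCompletion still matches the fmt.Sprintf argument list
// used in makeStatusLine after this commit.
package main

import "fmt"

// Copied from the new main.go hunk.
const indexLineCompletion = "F12 to show keys help | bot responding: [orange:-:b]%v[-:-:-] (F6) | chat: [orange:-:b]%s[-:-:-] (F1) | toolUseAdviced: [orange:-:b]%v[-:-:-] (ctrl+k) | model: [orange:-:b]%s[-:-:-] (ctrl+l) | skip LLM resp: [orange:-:b]%v[-:-:-] (F10)\nAPI_URL: [orange:-:b]%s[-:-:-] (ctrl+v) | Insert : [orange:-:b]%v[-:-:-] (ctrl+p) | Recording: [orange:-:b]%v[-:-:-] (ctrl+r) | Writing as: [orange:-:b]%s[-:-:-] (ctrl+q) | Bot will write as [orange:-:b]%s[-:-:-] (ctrl+x) | role_inject [orange:-:b]%v[-:-:-]"

func main() {
	// Argument order mirrors the new fmt.Sprintf call in helpfuncs.go;
	// the concrete values are placeholders for the example only.
	statusLine := fmt.Sprintf(indexLineCompletion,
		false,          // botRespMode
		"default_chat", // activeChatName
		true,           // cfg.ToolUse
		"llama3",       // chatBody.Model
		false,          // cfg.SkipLLMResp
		"http://localhost:8080/v1", // cfg.CurrentAPI
		false,       // cfg.ThinkUse
		false,       // isRecording
		"user",      // persona
		"assistant", // botPersona
		true,        // injectRole
	)
	fmt.Println(statusLine)
}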