| field | value | date |
|---|---|---|
| author | Grail Finder <wohilas@gmail.com> | 2025-12-20 14:21:40 +0300 |
| committer | Grail Finder <wohilas@gmail.com> | 2025-12-20 14:21:40 +0300 |
| commit | ba3330ee54bcab5cfde470f8e465fc9ed1c6cb2c (patch) | |
| tree | 486b73c1df8bd50a0e7e81d7200294ff07d7e2e2 | |
| parent | 0ca709b7c679c641724a3a8c2fc1425286b4955a (diff) | |
Fix: model load when llama.cpp is started after gf-lt
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | bot.go | 22 |
| -rw-r--r-- | props_table.go | 7 |
| -rw-r--r-- | tui.go | 5 |
3 files changed, 34 insertions, 0 deletions
```diff
diff --git a/bot.go b/bot.go
--- a/bot.go
+++ b/bot.go
@@ -21,6 +21,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/neurosnap/sentences/english"
@@ -52,6 +53,7 @@ var (
 	//nolint:unused // TTS_ENABLED conditionally uses this
 	orator extra.Orator
 	asr    extra.STT
+	localModelsMu sync.RWMutex
 	defaultLCPProps = map[string]float32{
 		"temperature":    0.8,
 		"dry_multiplier": 0.0,
@@ -1002,12 +1004,32 @@ func updateModelLists() {
 		}
 	}
 	// if llama.cpp started after gf-lt?
+	localModelsMu.Lock()
 	LocalModels, err = fetchLCPModels()
+	localModelsMu.Unlock()
 	if err != nil {
 		logger.Warn("failed to fetch llama.cpp models", "error", err)
 	}
 }
 
+func refreshLocalModelsIfEmpty() {
+	localModelsMu.RLock()
+	if len(LocalModels) > 0 {
+		localModelsMu.RUnlock()
+		return
+	}
+	localModelsMu.RUnlock()
+	// try to fetch
+	models, err := fetchLCPModels()
+	if err != nil {
+		logger.Warn("failed to fetch llama.cpp models", "error", err)
+		return
+	}
+	localModelsMu.Lock()
+	LocalModels = models
+	localModelsMu.Unlock()
+}
+
 func init() {
 	var err error
 	cfg, err = config.LoadConfig("config.toml")
diff --git a/props_table.go b/props_table.go
index ae225d8..0c49056 100644
--- a/props_table.go
+++ b/props_table.go
@@ -5,11 +5,14 @@ import (
 	"slices"
 	"strconv"
 	"strings"
+	"sync"
 
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
 
+var _ = sync.RWMutex{}
+
 // Define constants for cell types
 const (
 	CellTypeCheckbox = "checkbox"
@@ -138,6 +141,10 @@ func makePropsTable(props map[string]float32) *tview.Table {
 		} else if strings.Contains(api, "openrouter.ai") {
 			return ORFreeModels
 		}
+		// Assume local llama.cpp
+		refreshLocalModelsIfEmpty()
+		localModelsMu.RLock()
+		defer localModelsMu.RUnlock()
 		return LocalModels
 	}
 	var modelRowIndex int // will be set before model row is added
diff --git a/tui.go b/tui.go
--- a/tui.go
+++ b/tui.go
@@ -12,11 +12,14 @@ import (
 	"path"
 	"strconv"
 	"strings"
+	"sync"
 
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
 
+var _ = sync.RWMutex{}
+
var (
 	app   *tview.Application
 	pages *tview.Pages
@@ -988,11 +991,13 @@ func init() {
 			}
 			updateStatusLine()
 		} else {
+			localModelsMu.RLock()
 			if len(LocalModels) > 0 {
 				currentLocalModelIndex = (currentLocalModelIndex + 1) % len(LocalModels)
 				chatBody.Model = LocalModels[currentLocalModelIndex]
 				cfg.CurrentModel = chatBody.Model
 			}
+			localModelsMu.RUnlock()
 			updateStatusLine()
 			// // For non-OpenRouter APIs, use the old logic
 			// go func() {
```
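To make the locking discipline easier to see in isolation, here is a minimal, self-contained sketch of the double-checked read-then-write pattern that the new `refreshLocalModelsIfEmpty` helper follows. The names mirror the diff (`localModelsMu`, `LocalModels`, `fetchLCPModels`), but `fetchLCPModels` is stubbed out here (in gf-lt it queries the running llama.cpp server) and the `main` harness exists only for demonstration:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	localModelsMu sync.RWMutex
	LocalModels   []string
)

// Stub standing in for the real fetchLCPModels, which in gf-lt asks the
// llama.cpp server for its loaded models.
func fetchLCPModels() ([]string, error) {
	return []string{"llama-3.1-8b-instruct"}, nil
}

// refreshLocalModelsIfEmpty re-fetches the model list only when it is still
// empty, e.g. because llama.cpp was not yet running when gf-lt populated the
// list at startup.
func refreshLocalModelsIfEmpty() {
	// First check under a cheap read lock: the common case is that the
	// list is already populated and no write is needed.
	localModelsMu.RLock()
	populated := len(LocalModels) > 0
	localModelsMu.RUnlock()
	if populated {
		return
	}
	// Fetch outside any lock so a slow server does not block readers.
	models, err := fetchLCPModels()
	if err != nil {
		fmt.Println("failed to fetch llama.cpp models:", err)
		return
	}
	localModelsMu.Lock()
	// A concurrent caller may have filled the list while we fetched;
	// overwriting it with a fresh result is harmless here.
	LocalModels = models
	localModelsMu.Unlock()
}

func main() {
	// Hammer the helper from several goroutines; run with `go run -race`
	// to confirm the accesses are properly synchronized.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			refreshLocalModelsIfEmpty()
		}()
	}
	wg.Wait()

	localModelsMu.RLock()
	defer localModelsMu.RUnlock()
	fmt.Println("models:", LocalModels)
}
```

Note that the fetch deliberately happens outside any lock, so an unreachable llama.cpp server never blocks readers of `LocalModels`; the trade-off is that two concurrent callers may both fetch, with the last write winning, which is benign for a model list. The `var _ = sync.RWMutex{}` lines added to props_table.go and tui.go appear to exist only to satisfy the otherwise-unused "sync" import in those files, since `localModelsMu` itself is declared in bot.go and shared across the package.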
