From ba3330ee54bcab5cfde470f8e465fc9ed1c6cb2c Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Sat, 20 Dec 2025 14:21:40 +0300
Subject: Fix: model load if llama.cpp started after gf-lt

---
 bot.go | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/bot.go b/bot.go
index d84cfad..7c8ea75 100644
--- a/bot.go
+++ b/bot.go
@@ -21,6 +21,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/neurosnap/sentences/english"
@@ -52,6 +53,7 @@ var (
 	//nolint:unused // TTS_ENABLED conditionally uses this
 	orator extra.Orator
 	asr extra.STT
+	localModelsMu sync.RWMutex
 	defaultLCPProps = map[string]float32{
 		"temperature": 0.8,
 		"dry_multiplier": 0.0,
@@ -1002,12 +1004,32 @@ func updateModelLists() {
 		}
 	}
 	// if llama.cpp started after gf-lt?
+	localModelsMu.Lock()
 	LocalModels, err = fetchLCPModels()
+	localModelsMu.Unlock()
 	if err != nil {
 		logger.Warn("failed to fetch llama.cpp models", "error", err)
 	}
 }
 
+func refreshLocalModelsIfEmpty() {
+	localModelsMu.RLock()
+	if len(LocalModels) > 0 {
+		localModelsMu.RUnlock()
+		return
+	}
+	localModelsMu.RUnlock()
+	// try to fetch
+	models, err := fetchLCPModels()
+	if err != nil {
+		logger.Warn("failed to fetch llama.cpp models", "error", err)
+		return
+	}
+	localModelsMu.Lock()
+	LocalModels = models
+	localModelsMu.Unlock()
+}
+
 func init() {
 	var err error
 	cfg, err = config.LoadConfig("config.toml")
-- 
cgit v1.2.3
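
The new refreshLocalModelsIfEmpty pairs a read-locked fast path with a fetch performed outside any lock and a write-locked store. Below is a minimal, self-contained sketch of that pattern, assuming a stubbed fetchLCPModels and a plain string slice for the model list; the stub body, the model name, and the main caller are illustrative, not code from this repository.

package main

import (
	"fmt"
	"sync"
)

// Shared state mirroring bot.go: a model list guarded by an RWMutex
// so concurrent readers never block each other.
var (
	localModelsMu sync.RWMutex
	localModels   []string
)

// fetchLCPModels stands in for the real HTTP call that bot.go makes to
// the llama.cpp server; this stub and its return value are illustrative.
func fetchLCPModels() ([]string, error) {
	return []string{"example-model"}, nil
}

// refreshLocalModelsIfEmpty re-fetches the model list only when it is
// empty, covering the case where llama.cpp was started after this program.
func refreshLocalModelsIfEmpty() {
	// Fast path: a cheap read lock, then bail out if already populated.
	localModelsMu.RLock()
	if len(localModels) > 0 {
		localModelsMu.RUnlock()
		return
	}
	localModelsMu.RUnlock()

	// Fetch outside any lock so a slow network call never blocks readers.
	models, err := fetchLCPModels()
	if err != nil {
		fmt.Println("failed to fetch llama.cpp models:", err)
		return
	}

	// Store under the write lock; a concurrent duplicate fetch is
	// harmless because each writer installs a complete list.
	localModelsMu.Lock()
	localModels = models
	localModelsMu.Unlock()
}

func main() {
	refreshLocalModelsIfEmpty() // first use: list is empty, so it fetches
	localModelsMu.RLock()
	fmt.Println("local models:", localModels)
	localModelsMu.RUnlock()
}

Doing the fetch between the RUnlock and the Lock means two goroutines can both see an empty list and fetch twice, but neither ever holds the mutex across network I/O; since each writer stores a complete, freshly fetched list, the last write wins and the race is benign.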