| author | Grail Finder <wohilas@gmail.com> | 2025-12-20 14:21:40 +0300 |
|---|---|---|
| committer | Grail Finder <wohilas@gmail.com> | 2025-12-20 14:21:40 +0300 |
| commit | ba3330ee54bcab5cfde470f8e465fc9ed1c6cb2c (patch) | |
| tree | 486b73c1df8bd50a0e7e81d7200294ff07d7e2e2 /bot.go | |
| parent | 0ca709b7c679c641724a3a8c2fc1425286b4955a (diff) | |
Fix: model load if llama.cpp started after gf-lt
Diffstat (limited to 'bot.go')
| -rw-r--r-- | bot.go | 22 |
1 file changed, 22 insertions, 0 deletions
```diff
@@ -21,6 +21,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/neurosnap/sentences/english"
@@ -52,6 +53,7 @@ var (
 	//nolint:unused // TTS_ENABLED conditionally uses this
 	orator          extra.Orator
 	asr             extra.STT
+	localModelsMu   sync.RWMutex
 	defaultLCPProps = map[string]float32{
 		"temperature":    0.8,
 		"dry_multiplier": 0.0,
@@ -1002,12 +1004,32 @@ func updateModelLists() {
 		}
 	}
 	// if llama.cpp started after gf-lt?
+	localModelsMu.Lock()
 	LocalModels, err = fetchLCPModels()
+	localModelsMu.Unlock()
 	if err != nil {
 		logger.Warn("failed to fetch llama.cpp models", "error", err)
 	}
 }
 
+func refreshLocalModelsIfEmpty() {
+	localModelsMu.RLock()
+	if len(LocalModels) > 0 {
+		localModelsMu.RUnlock()
+		return
+	}
+	localModelsMu.RUnlock()
+	// try to fetch
+	models, err := fetchLCPModels()
+	if err != nil {
+		logger.Warn("failed to fetch llama.cpp models", "error", err)
+		return
+	}
+	localModelsMu.Lock()
+	LocalModels = models
+	localModelsMu.Unlock()
+}
+
 func init() {
 	var err error
 	cfg, err = config.LoadConfig("config.toml")
```
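For context, the new `refreshLocalModelsIfEmpty` helper follows a read-mostly guard pattern with `sync.RWMutex`: a cheap read-locked emptiness check first, the network fetch outside any lock, and a write lock only to publish the result. Below is a minimal, self-contained sketch of that pattern; `fetchModels` and the model name in it are hypothetical stand-ins so the example runs without a llama.cpp server, not gf-lt's actual `fetchLCPModels`.

```go
// Sketch of the lazy-refresh pattern from the diff above.
package main

import (
	"fmt"
	"sync"
)

var (
	localModelsMu sync.RWMutex
	localModels   []string
)

// fetchModels is a hypothetical stand-in for fetchLCPModels; the real
// function queries the llama.cpp server for its loaded model list.
func fetchModels() ([]string, error) {
	return []string{"example-model"}, nil
}

// refreshIfEmpty mirrors refreshLocalModelsIfEmpty: take the read lock for
// the cheap emptiness check, drop it before the fetch, and take the write
// lock only to publish the fetched list.
func refreshIfEmpty() {
	localModelsMu.RLock()
	empty := len(localModels) == 0
	localModelsMu.RUnlock()
	if !empty {
		return
	}
	models, err := fetchModels()
	if err != nil {
		fmt.Println("failed to fetch models:", err)
		return
	}
	localModelsMu.Lock()
	localModels = models
	localModelsMu.Unlock()
}

func main() {
	refreshIfEmpty() // first call populates the cache
	refreshIfEmpty() // later calls return early after the read-locked check
	localModelsMu.RLock()
	fmt.Println(localModels)
	localModelsMu.RUnlock()
}
```

One trade-off worth noting: because the fetch happens outside the lock, two goroutines that both observe an empty list may both fetch; the last write wins. That is harmless here since both would fetch the same list, and it avoids holding the write lock across a network call.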
