author	Grail Finder <wohilas@gmail.com>	2026-03-07 18:42:12 +0300
committer	Grail Finder <wohilas@gmail.com>	2026-03-07 18:42:12 +0300
commit	bf655a10875630a6fe5f283340b6d390a1920b58 (patch)
tree	56b363f3638bbe0183bab60423f78843200b8951
parent	c8f00198d6f0ad66269753252f56485ee346d413 (diff)
Enha: llama.cpp on non-localhost
-rw-r--r--	bot.go	37
-rw-r--r--	helpfuncs.go	7
-rw-r--r--	llm.go	9
3 files changed, 25 insertions, 28 deletions
diff --git a/bot.go b/bot.go
index ad52059..663dd0b 100644
--- a/bot.go
+++ b/bot.go
@@ -16,7 +16,6 @@ import (
 	"log/slog"
 	"net"
 	"net/http"
-	"net/url"
 	"os"
 	"regexp"
 	"slices"
@@ -253,12 +252,7 @@ func createClient(connectTimeout time.Duration) *http.Client {
 }
 
 func warmUpModel() {
-	u, err := url.Parse(cfg.CurrentAPI)
-	if err != nil {
-		return
-	}
-	host := u.Hostname()
-	if host != "localhost" && host != "127.0.0.1" && host != "::1" {
+	if !isLocalLlamacpp() {
 		return
 	}
 	// Check if model is already loaded
@@ -1404,20 +1398,21 @@ func updateModelLists() {
 		time.Sleep(time.Millisecond * 100)
 	}
 	// set already loaded model in llama.cpp
-	if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
-		localModelsMu.Lock()
-		defer localModelsMu.Unlock()
-		for i := range LocalModels {
-			if strings.Contains(LocalModels[i], models.LoadedMark) {
-				m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
-				cfg.CurrentModel = m
-				chatBody.Model = m
-				cachedModelColor = "green"
-				updateStatusLine()
-				updateToolCapabilities()
-				app.Draw()
-				return
-			}
+	if !isLocalLlamacpp() {
+		return
+	}
+	localModelsMu.Lock()
+	defer localModelsMu.Unlock()
+	for i := range LocalModels {
+		if strings.Contains(LocalModels[i], models.LoadedMark) {
+			m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
+			cfg.CurrentModel = m
+			chatBody.Model = m
+			cachedModelColor = "green"
+			updateStatusLine()
+			updateToolCapabilities()
+			app.Draw()
+			return
+		}
 		}
 	}
 }
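
The bot.go changes replace two inline hostname checks with the shared isLocalLlamacpp helper, and they also change behavior: the old guard matched only literal localhost addresses, so a llama.cpp server running on another machine was never warmed up or scanned for its loaded model. A minimal sketch contrasting the two guards on a LAN address (the URL and the standalone function names here are illustrative, not part of the commit):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// oldGuard mirrors the logic removed by this commit: only literal
// localhost hosts counted as llama.cpp.
func oldGuard(api string) bool {
	u, err := url.Parse(api)
	if err != nil {
		return false
	}
	host := u.Hostname()
	return host == "localhost" || host == "127.0.0.1" || host == "::1"
}

// newGuard mirrors the logic added by this commit: anything that is not
// a known hosted provider is assumed to be llama.cpp.
func newGuard(api string) bool {
	return !strings.Contains(api, "openrouter") && !strings.Contains(api, "deepseek")
}

func main() {
	api := "http://192.168.0.42:8080/completion" // hypothetical LAN llama.cpp server
	fmt.Println(oldGuard(api))                   // false: warm-up was skipped before this commit
	fmt.Println(newGuard(api))                   // true: warm-up now runs for LAN hosts too
}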
diff --git a/helpfuncs.go b/helpfuncs.go
index b94e672..370f4de 100644
--- a/helpfuncs.go
+++ b/helpfuncs.go
@@ -5,7 +5,6 @@ import (
 	"gf-lt/models"
 	"gf-lt/pngmeta"
 	"image"
-	"net/url"
 	"os"
 	"os/exec"
 	"path"
@@ -323,12 +322,10 @@ func strInSlice(s string, sl []string) bool {
 
 // isLocalLlamacpp checks if the current API is a local llama.cpp instance.
 func isLocalLlamacpp() bool {
-	u, err := url.Parse(cfg.CurrentAPI)
-	if err != nil {
+	if strings.Contains(cfg.CurrentAPI, "openrouter") || strings.Contains(cfg.CurrentAPI, "deepseek") {
 		return false
 	}
-	host := u.Hostname()
-	return host == "localhost" || host == "127.0.0.1" || host == "::1"
+	return true
 }
 
 // getModelColor returns the cached color tag for the model name.
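
After this commit the helper decides by provider substring rather than by hostname: any API that is not openrouter or deepseek is treated as llama.cpp, which is what lets non-localhost instances through (note the doc comment's "local" is now broader than the behavior). A table-driven sketch of the new semantics, assuming a parameterized copy of the function; the real one reads cfg.CurrentAPI, and the endpoint URLs are examples:

package main

import (
	"strings"
	"testing"
)

// isLocalLlamacpp, parameterized here for testability; mirrors the
// commit's substring-based check.
func isLocalLlamacpp(api string) bool {
	if strings.Contains(api, "openrouter") || strings.Contains(api, "deepseek") {
		return false
	}
	return true
}

func TestIsLocalLlamacpp(t *testing.T) {
	cases := []struct {
		api  string
		want bool
	}{
		{"http://localhost:8080/completion", true},
		{"http://192.168.0.42:8080/v1/chat/completions", true}, // LAN host now passes
		{"https://openrouter.ai/api/v1/chat/completions", false},
		{"https://api.deepseek.com/chat/completions", false},
	}
	for _, c := range cases {
		if got := isLocalLlamacpp(c.api); got != c.want {
			t.Errorf("isLocalLlamacpp(%q) = %v, want %v", c.api, got, c.want)
		}
	}
}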
diff --git a/llm.go b/llm.go
index eaa0df8..0e77bc9 100644
--- a/llm.go
+++ b/llm.go
@@ -62,11 +62,11 @@ type ChunkParser interface {
 func choseChunkParser() {
 	chunkParser = LCPCompletion{}
 	switch cfg.CurrentAPI {
-	case "http://localhost:8080/completion":
+	case "http://localhost:8080/completion", "http://127.0.0.1:8080/completion":
 		chunkParser = LCPCompletion{}
 		logger.Debug("chosen lcpcompletion", "link", cfg.CurrentAPI)
 		return
-	case "http://localhost:8080/v1/chat/completions":
+	case "http://localhost:8080/v1/chat/completions", "http://127.0.0.1:8080/v1/chat/completions":
 		chunkParser = LCPChat{}
 		logger.Debug("chosen lcpchat", "link", cfg.CurrentAPI)
 		return
@@ -87,6 +87,11 @@ func choseChunkParser() {
 		logger.Debug("chosen openrouterchat", "link", cfg.CurrentAPI)
 		return
 	default:
+		logger.Warn("unexpected case, assuming llama.cpp on non default address", "link", cfg.CurrentAPI)
+		if strings.Contains(cfg.CurrentAPI, "chat") {
+			chunkParser = LCPChat{}
+			return
+		}
 		chunkParser = LCPCompletion{}
 	}
 }
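
The new default branch is what completes the feature: an unrecognized URL is assumed to be llama.cpp on a non-default address, and the presence of "chat" in the URL picks the chat parser over plain completion. A condensed sketch of that selection as a pure function (hypothetical shape; the real choseChunkParser sets the package-level chunkParser instead of returning a name):

package main

import (
	"fmt"
	"strings"
)

// parserFor condenses the post-commit selection logic in choseChunkParser.
func parserFor(api string) string {
	switch api {
	case "http://localhost:8080/completion", "http://127.0.0.1:8080/completion":
		return "LCPCompletion"
	case "http://localhost:8080/v1/chat/completions", "http://127.0.0.1:8080/v1/chat/completions":
		return "LCPChat"
	default:
		// Unknown address: assume llama.cpp on a non-default host and pick
		// the chat parser when the URL looks like a chat endpoint.
		if strings.Contains(api, "chat") {
			return "LCPChat"
		}
		return "LCPCompletion"
	}
}

func main() {
	fmt.Println(parserFor("http://192.168.0.42:8080/v1/chat/completions")) // LCPChat
	fmt.Println(parserFor("http://192.168.0.42:8080/completion"))          // LCPCompletion
}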