| | | |
|---|---|---|
| author | Grail Finder <wohilas@gmail.com> | 2026-02-12 10:26:30 +0300 |
| committer | Grail Finder <wohilas@gmail.com> | 2026-02-12 10:26:30 +0300 |
| commit | 8c3c2b9b23a5b41e207d5771bb365e2c391ed0ea (patch) | |
| tree | 54bda61b46c4291054c46a08b2fd7f87d0ca5714 | |
| parent | e42eb9637190a2e89cf7e37cb10ca986835d9d7a (diff) | |
Chore: server should live in a separate branch
until a use case for it is found
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | Makefile | 9 |
| -rw-r--r-- | bot.go | 27 |
| -rw-r--r-- | main.go | 10 |
| -rw-r--r-- | server.go | 74 |
4 files changed, 1 insertion, 119 deletions
```diff
diff --git a/Makefile b/Makefile
@@ -1,5 +1,4 @@
-.PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run noextra-server
-
+.PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run
 run: setconfig
 	go build -tags extra -o gf-lt && ./gf-lt
 
@@ -10,15 +9,9 @@ build-debug:
 debug: build-debug
 	dlv exec --headless --accept-multiclient --listen=:2345 ./gf-lt
 
-server: setconfig
-	go build -tags extra -o gf-lt && ./gf-lt -port 3333
-
 noextra-run: setconfig
 	go build -tags '!extra' -o gf-lt && ./gf-lt
 
-noextra-server: setconfig
-	go build -tags '!extra' -o gf-lt && ./gf-lt -port 3333
-
 setconfig:
 	find config.toml &>/dev/null || cp config.example.toml config.toml
diff --git a/bot.go b/bot.go
@@ -17,7 +17,6 @@ import (
 	"net/http"
 	"net/url"
 	"os"
-	"path"
 	"regexp"
 	"slices"
 	"strconv"
@@ -343,32 +342,6 @@ func warmUpModel() {
 	}()
 }
 
-func fetchLCPModelName() *models.LCPModels {
-	//nolint
-	resp, err := httpClient.Get(cfg.FetchModelNameAPI)
-	if err != nil {
-		chatBody.Model = "disconnected"
-		logger.Warn("failed to get model", "link", cfg.FetchModelNameAPI, "error", err)
-		if err := notifyUser("error", "request failed "+cfg.FetchModelNameAPI); err != nil {
-			logger.Debug("failed to notify user", "error", err, "fn", "fetchLCPModelName")
-		}
-		return nil
-	}
-	defer resp.Body.Close()
-	llmModel := models.LCPModels{}
-	if err := json.NewDecoder(resp.Body).Decode(&llmModel); err != nil {
-		logger.Warn("failed to decode resp", "link", cfg.FetchModelNameAPI, "error", err)
-		return nil
-	}
-	if resp.StatusCode != 200 {
-		chatBody.Model = "disconnected"
-		return nil
-	}
-	chatBody.Model = path.Base(llmModel.Data[0].ID)
-	cfg.CurrentModel = chatBody.Model
-	return &llmModel
-}
-
 // nolint
 func fetchDSBalance() *models.DSBalance {
 	url := "https://api.deepseek.com/user/balance"
diff --git a/main.go b/main.go
@@ -1,9 +1,6 @@
 package main
 
 import (
-	"flag"
-	"strconv"
-
 	"github.com/rivo/tview"
 )
 
@@ -20,13 +17,6 @@ var (
 )
 
 func main() {
-	apiPort := flag.Int("port", 0, "port to host api")
-	flag.Parse()
-	if apiPort != nil && *apiPort > 3000 {
-		srv := Server{}
-		srv.ListenToRequests(strconv.Itoa(*apiPort))
-		return
-	}
 	pages.AddPage("main", flex, true, true)
 	if err := app.SetRoot(pages, true).EnableMouse(cfg.EnableMouse).EnablePaste(true).Run(); err != nil {
diff --git a/server.go b/server.go
deleted file mode 100644
index 2f5638c..0000000
--- a/server.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"gf-lt/config"
-	"net/http"
-	"time"
-)
-
-type Server struct {
-	// nolint
-	config config.Config
-}
-
-func (srv *Server) ListenToRequests(port string) {
-	// h := srv.actions
-	mux := http.NewServeMux()
-	server := &http.Server{
-		Addr:         "localhost:" + port,
-		Handler:      mux,
-		ReadTimeout:  time.Second * 5,
-		WriteTimeout: time.Second * 5,
-	}
-	mux.HandleFunc("GET /ping", pingHandler)
-	mux.HandleFunc("GET /model", modelHandler)
-	mux.HandleFunc("POST /completion", completionHandler)
-	fmt.Println("Listening", "addr", server.Addr)
-	if err := server.ListenAndServe(); err != nil {
-		panic(err)
-	}
-}
-
-// create server
-// listen to the completion endpoint handler
-func pingHandler(w http.ResponseWriter, req *http.Request) {
-	if _, err := w.Write([]byte("pong")); err != nil {
-		logger.Error("server ping", "error", err)
-	}
-}
-
-func completionHandler(w http.ResponseWriter, req *http.Request) {
-	// post request
-	body := req.Body
-	// get body as io.reader
-	// pass it to the /completion
-	go sendMsgToLLM(body)
-out:
-	for {
-		select {
-		case chunk := <-chunkChan:
-			fmt.Print(chunk)
-			if _, err := w.Write([]byte(chunk)); err != nil {
-				logger.Warn("failed to write chunk", "value", chunk)
-				continue
-			}
-		case <-streamDone:
-			break out
-		}
-	}
-}
-
-func modelHandler(w http.ResponseWriter, req *http.Request) {
-	llmModel := fetchLCPModelName()
-	payload, err := json.Marshal(llmModel)
-	if err != nil {
-		logger.Error("model handler", "error", err)
-		// return err
-		return
-	}
-	if _, err := w.Write(payload); err != nil {
-		logger.Error("model handler", "error", err)
-	}
-}
```
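
For readers skimming the removed `server.go`: the interesting piece is the streaming bridge in `completionHandler`, which forwards LLM output chunk by chunk over HTTP by selecting on a chunk channel and a done channel. Below is a minimal, self-contained sketch of that pattern. The channel names mirror the removed code; `fakeLLM`, the port, and the explicit flushing are illustrative assumptions, not the project's actual wiring.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

var (
	chunkChan  = make(chan string) // streamed pieces of the reply
	streamDone = make(chan bool)   // signalled when the reply is complete
)

// fakeLLM stands in for a real token producer (sendMsgToLLM in the removed code).
func fakeLLM() {
	for _, word := range []string{"hello", " ", "world", "\n"} {
		chunkChan <- word
		time.Sleep(100 * time.Millisecond)
	}
	streamDone <- true
}

// completionHandler writes chunks to the client until the producer signals done.
func completionHandler(w http.ResponseWriter, req *http.Request) {
	flusher, canFlush := w.(http.Flusher)
	go fakeLLM()
out:
	for {
		select {
		case chunk := <-chunkChan:
			if _, err := w.Write([]byte(chunk)); err != nil {
				continue
			}
			if canFlush {
				flusher.Flush() // push each chunk immediately instead of buffering
			}
		case <-streamDone:
			break out
		}
	}
}

func main() {
	mux := http.NewServeMux()
	// method-in-pattern routing needs Go 1.22+, which the removed code already relied on
	mux.HandleFunc("POST /completion", completionHandler)
	fmt.Println("listening on localhost:3333")
	if err := http.ListenAndServe("localhost:3333", mux); err != nil {
		panic(err)
	}
}
```

With the sketch running, something like `curl -N -X POST localhost:3333/completion` should print the chunks as they arrive (`-N` disables curl's output buffering).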
