From 884004a855980444319769d9b10f9cf6e3ba33cd Mon Sep 17 00:00:00 2001
From: Grail Finder
Date: Thu, 6 Feb 2025 16:57:37 +0300
Subject: Enha: server update

---
 Makefile  |  3 +++
 bot.go    | 12 ++++++------
 main.go   |  7 ++-----
 server.go | 38 +++++++++++++++++++++++++++++++++++++-
 4 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/Makefile b/Makefile
index ec1cfa7..4e96ed5 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,9 @@
 run: setconfig
 	go build -o elefant && ./elefant
 
+server: setconfig
+	go build -o elefant && ./elefant -port 3333
+
 setconfig:
 	find config.toml &>/dev/null || cp config.example.toml config.toml
 
diff --git a/bot.go b/bot.go
index c3108a3..a61128f 100644
--- a/bot.go
+++ b/bot.go
@@ -46,25 +46,25 @@ var (
 	}
 )
 
-func fetchModelName() {
+func fetchModelName() *models.LLMModels {
 	api := "http://localhost:8080/v1/models"
 	resp, err := httpClient.Get(api)
 	if err != nil {
 		logger.Warn("failed to get model", "link", api, "error", err)
-		return
+		return nil
 	}
 	defer resp.Body.Close()
 	llmModel := models.LLMModels{}
 	if err := json.NewDecoder(resp.Body).Decode(&llmModel); err != nil {
 		logger.Warn("failed to decode resp", "link", api, "error", err)
-		return
+		return nil
 	}
 	if resp.StatusCode != 200 {
 		currentModel = "none"
-		return
+		return nil
 	}
 	currentModel = path.Base(llmModel.Data[0].ID)
-	updateStatusLine()
+	return &llmModel
 }
 
 // func fetchProps() {
@@ -88,7 +88,7 @@ func fetchModelName() {
 // 	updateStatusLine()
 // }
 
-// func sendMsgToLLM(body io.Reader) (*models.LLMRespChunk, error) {
+// TODO: should be a part of server?
 func sendMsgToLLM(body io.Reader) {
 	// nolint
 	resp, err := httpClient.Post(cfg.CurrentAPI, "application/json", body)
diff --git a/main.go b/main.go
index 7766a60..a95db19 100644
--- a/main.go
+++ b/main.go
@@ -3,7 +3,6 @@ package main
 import (
 	"flag"
 	"fmt"
-	"net/http"
 	"unicode"
 
 	"github.com/rivo/tview"
@@ -30,10 +29,8 @@ func main() {
 	apiPort := flag.Int("port", 0, "port to host api")
 	flag.Parse()
 	if apiPort != nil && *apiPort > 3000 {
-		// start api server
-		http.HandleFunc("POST /completion", completion)
-		http.ListenAndServe(fmt.Sprintf(":%d", *apiPort), nil)
-		// no tui
+		srv := Server{}
+		srv.ListenToRequests(fmt.Sprintf("%d", *apiPort))
 		return
 	}
 	pages.AddPage("main", flex, true, true)
diff --git a/server.go b/server.go
index 2e25559..79aeb2f 100644
--- a/server.go
+++ b/server.go
@@ -1,14 +1,40 @@
 package main
 
 import (
+	"elefant/config"
+	"encoding/json"
 	"fmt"
 	"net/http"
+	"time"
 )
 
+type Server struct {
+	config config.Config
+}
+
+func (srv *Server) ListenToRequests(port string) {
+	// h := srv.actions
+	mux := http.NewServeMux()
+	server := &http.Server{
+		Addr:         "localhost:" + port,
+		Handler:      mux,
+		ReadTimeout:  time.Second * 5,
+		WriteTimeout: time.Second * 5,
+	}
+	mux.HandleFunc("GET /ping", pingHandler)
+	mux.HandleFunc("GET /model", modelHandler)
+	mux.HandleFunc("POST /completion", completionHandler)
+	fmt.Println("Listening", "addr", server.Addr)
+	server.ListenAndServe()
+}
+
 // create server
 // listen to the completion endpoint handler
+func pingHandler(w http.ResponseWriter, req *http.Request) {
+	w.Write([]byte("pong"))
+}
 
-func completion(w http.ResponseWriter, req *http.Request) {
+func completionHandler(w http.ResponseWriter, req *http.Request) {
 	// post request
 	body := req.Body
 	// get body as io.reader
@@ -19,9 +45,19 @@ out:
 		select {
 		case chunk := <-chunkChan:
 			fmt.Println(chunk)
+			w.Write([]byte(chunk))
 		case <-streamDone:
 			break out
 		}
 	}
 	return
 }
+
+func modelHandler(w http.ResponseWriter, req *http.Request) {
+	llmModel := fetchModelName()
+	payload, err := json.Marshal(llmModel)
+	if err != nil {
+		// return err
+	}
+	w.Write(payload)
+}
--
cgit v1.2.3
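
Not part of the commit: a minimal sketch of exercising the new endpoints, assuming the server was started with the new `make server` target (which passes -port 3333) and binds to localhost as in ListenToRequests. The request body expected by POST /completion is not defined in this patch, so only the GET endpoints are shown; the file name smoke.go is hypothetical.

// smoke.go (hypothetical): quick check of the new GET endpoints,
// assuming the server is listening on localhost:3333.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	for _, path := range []string{"/ping", "/model"} {
		// /ping should answer "pong"; /model returns the JSON-marshaled
		// result of fetchModelName()
		resp, err := http.Get("http://localhost:3333" + path)
		if err != nil {
			fmt.Println(path, "error:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(path, "->", string(body))
	}
}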