path: root/bot.go
author     Grail Finder <wohilas@gmail.com>  2024-11-30 08:05:03 +0300
committer  Grail Finder <wohilas@gmail.com>  2024-11-30 08:05:03 +0300
commit     8d3997baff25f28c23441699be7692f853cf1f0f (patch)
tree       0f3d394953d46f287f06b0960ba77b94a85549e4 /bot.go
parent     34d415c9308f2d089520c2a193cdf8393dcc6b5d (diff)
Feat (config): chunk limit
Diffstat (limited to 'bot.go')
-rw-r--r--  bot.go  19
1 file changed, 7 insertions(+), 12 deletions(-)
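
The change replaces the hardcoded chunkLimit variable with a value read from the config package (cfg.ChunkLimit). A minimal sketch of what the corresponding field might look like; the field name and its use come from the diff below, while the struct layout, default value, and env-var loader are assumptions, not taken from the repo:

package config

import (
	"os"
	"strconv"
)

type Config struct {
	// ChunkLimit caps how many stream chunks sendMsgToLLM reads per
	// response; 0 disables the cap. uint32 is inferred from the
	// comparison against the uint32 counter in bot.go.
	ChunkLimit uint32
}

// LoadOrDefault is a hypothetical loader: it keeps the old hardcoded
// value (1000) as the default and lets an env var override it.
func LoadOrDefault() *Config {
	c := &Config{ChunkLimit: 1000}
	if v := os.Getenv("CHUNK_LIMIT"); v != "" {
		if n, err := strconv.ParseUint(v, 10, 32); err == nil {
			c.ChunkLimit = uint32(n)
		}
	}
	return c
}
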
diff --git a/bot.go b/bot.go
index fcad46f..cbdb9f3 100644
--- a/bot.go
+++ b/bot.go
@@ -13,19 +13,15 @@ import (
"net/http"
"os"
"strings"
- "time"
"github.com/rivo/tview"
)
-var httpClient = http.Client{
- Timeout: time.Second * 20,
-}
+var httpClient = http.Client{}
var (
cfg *config.Config
logger *slog.Logger
- chunkLimit = 1000
activeChatName string
chunkChan = make(chan string, 10)
streamDone = make(chan bool, 1)
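
Dropping the client-wide Timeout fits a streaming endpoint: http.Client.Timeout bounds the entire exchange, including reading the response body, so a 20-second cap would cut off long generations. If some bound is still wanted, per-phase transport timeouts leave the body read open; a sketch under that assumption (package name, values, and structure are illustrative, not from the repo):

package bot

import (
	"net"
	"net/http"
	"time"
)

// Per-phase timeouts: dialing, the TLS handshake, and waiting for
// response headers are bounded, but reading the streamed body is not,
// unlike the removed client-wide 20s Timeout.
var httpClient = http.Client{
	Transport: &http.Transport{
		DialContext:           (&net.Dialer{Timeout: 5 * time.Second}).DialContext,
		TLSHandshakeTimeout:   5 * time.Second,
		ResponseHeaderTimeout: 30 * time.Second,
	},
}
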
@@ -63,25 +59,25 @@ func sendMsgToLLM(body io.Reader) {
defer resp.Body.Close()
// llmResp := []models.LLMRespChunk{}
reader := bufio.NewReader(resp.Body)
- counter := 0
+ counter := uint32(0)
for {
+ counter++
if interruptResp {
interruptResp = false
logger.Info("interrupted bot response")
break
}
- llmchunk := models.LLMRespChunk{}
- if counter > chunkLimit {
- logger.Warn("response hit chunk limit", "limit", chunkLimit)
+ if cfg.ChunkLimit > 0 && counter > cfg.ChunkLimit {
+ logger.Warn("response hit chunk limit", "limit", cfg.ChunkLimit)
streamDone <- true
break
}
+ llmchunk := models.LLMRespChunk{}
line, err := reader.ReadBytes('\n')
if err != nil {
- streamDone <- true
logger.Error("error reading response body", "error", err)
+ continue
}
- // logger.Info("linecheck", "line", string(line), "len", len(line), "counter", counter)
if len(line) <= 1 {
continue // skip \n
}
@@ -100,7 +96,6 @@ func sendMsgToLLM(body io.Reader) {
// last chunk
break
}
- counter++
// bot sends way too many \n
answerText := strings.ReplaceAll(llmchunk.Choices[0].Delta.Content, "\n\n", "\n")
chunkChan <- answerText
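
For context on the channels touched above, a rough sketch of the consuming side of chunkChan and streamDone; the two channels and their buffer sizes are from the diff, while the drain loop and everything else is an illustration, not the repo's actual UI code:

package main

import (
	"fmt"
	"strings"
)

var (
	chunkChan  = make(chan string, 10)
	streamDone = make(chan bool, 1)
)

// drainStream concatenates streamed chunks until streamDone fires,
// then empties anything still buffered (chunkChan is buffered, so the
// done signal can be ready while chunks are still queued).
func drainStream() string {
	var sb strings.Builder
	for {
		select {
		case chunk := <-chunkChan:
			sb.WriteString(chunk)
		case <-streamDone:
			for {
				select {
				case chunk := <-chunkChan:
					sb.WriteString(chunk)
				default:
					return sb.String()
				}
			}
		}
	}
}

func main() {
	go func() {
		chunkChan <- "hello "
		chunkChan <- "world"
		streamDone <- true
	}()
	fmt.Println(drainStream())
}
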