author    Grail Finder <wohilas@gmail.com>  2025-10-09 10:36:55 +0300
committer Grail Finder <wohilas@gmail.com>  2025-10-09 12:23:48 +0300
commit    314c07835e62f575ab8cd6f4c3c92bab8dd4f856 (patch)
tree      63e536f45639c061d7acb58373ff2df84f788f52
parent    dc183e3692711e566226bbe2e462498ca9762687 (diff)
Feat: add external tool web search
-rw-r--r--  bot.go                  8
-rw-r--r--  config/config.go        3
-rw-r--r--  llm.go                  6
-rw-r--r--  models/deepseek.go    144
-rw-r--r--  models/models.go      296
-rw-r--r--  models/openrouter.go  154
-rw-r--r--  tools.go               68
7 files changed, 373 insertions(+), 306 deletions(-)
diff --git a/bot.go b/bot.go
index 10b39f7..f8170e1 100644
--- a/bot.go
+++ b/bot.go
@@ -151,13 +151,8 @@ func fetchORModels(free bool) ([]string, error) {
func sendMsgToLLM(body io.Reader) {
choseChunkParser()
- bodyBytes, _ := io.ReadAll(body)
- ok := json.Valid(bodyBytes)
- if !ok {
- panic("invalid json")
- }
// nolint
- req, err := http.NewRequest("POST", cfg.CurrentAPI, bytes.NewReader(bodyBytes))
+ req, err := http.NewRequest("POST", cfg.CurrentAPI, body)
if err != nil {
logger.Error("newreq error", "error", err)
if err := notifyUser("error", "apicall failed:"+err.Error()); err != nil {
@@ -172,7 +167,6 @@ func sendMsgToLLM(body io.Reader) {
// req.Header.Set("Content-Length", strconv.Itoa(len(bodyBytes)))
req.Header.Set("Accept-Encoding", "gzip")
// nolint
- // resp, err := httpClient.Post(cfg.CurrentAPI, "application/json", body)
resp, err := httpClient.Do(req)
if err != nil {
logger.Error("llamacpp api", "error", err)
diff --git a/config/config.go b/config/config.go
index b3eaace..d73bf28 100644
--- a/config/config.go
+++ b/config/config.go
@@ -15,6 +15,9 @@ type Config struct {
CurrentProvider string
APIMap map[string]string
FetchModelNameAPI string `toml:"FetchModelNameAPI"`
+ // ToolsAPI list?
+ SearchAPI string `toml:"SearchAPI"`
+ SearchDescribe string `toml:"SearchDescribe"`
//
ShowSys bool `toml:"ShowSys"`
LogFile string `toml:"LogFile"`
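For reference, a minimal sketch of the matching config.toml entries; both URLs are illustrative, and populateTools (added in tools.go below) skips tool registration when either field is empty:

SearchAPI = "http://localhost:8080/search"        # endpoint the web_search tool POSTs its arguments to
SearchDescribe = "http://localhost:8080/describe" # returns the tool's JSON schema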
diff --git a/llm.go b/llm.go
index dd8229d..fb93615 100644
--- a/llm.go
+++ b/llm.go
@@ -320,7 +320,7 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro
bodyCopy.Messages[i] = msg
}
}
- dsBody := models.NewDSCharReq(*bodyCopy)
+ dsBody := models.NewDSChatReq(*bodyCopy)
data, err := json.Marshal(dsBody)
if err != nil {
logger.Error("failed to form a msg", "error", err)
@@ -462,8 +462,8 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro
bodyCopy.Messages[i] = msg
}
}
- dsBody := models.NewDSCharReq(*bodyCopy)
- data, err := json.Marshal(dsBody)
+ orBody := models.NewOpenRouterChatReq(*bodyCopy, defaultLCPProps)
+ data, err := json.Marshal(orBody)
if err != nil {
logger.Error("failed to form a msg", "error", err)
return nil, err
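With the fix above, OpenRouterChat.FormMsg builds an OpenRouter request instead of mistakenly reusing the DeepSeek constructor. Going by the struct tags on OpenRouterChatReq (models/openrouter.go below), the marshaled body takes roughly this shape; the values are illustrative, and the "role"/"content" keys assume RoleMsg's conventional JSON tags:

{
  "messages": [{"role": "user", "content": "hello"}],
  "model": "deepseek/deepseek-chat-v3-0324:free",
  "stream": true,
  "temperature": 0.8,
  "min_p": 0.05,
  "max_tokens": 1024
}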
diff --git a/models/deepseek.go b/models/deepseek.go
new file mode 100644
index 0000000..8f9868d
--- /dev/null
+++ b/models/deepseek.go
@@ -0,0 +1,144 @@
+package models
+
+type DSChatReq struct {
+ Messages []RoleMsg `json:"messages"`
+ Model string `json:"model"`
+ Stream bool `json:"stream"`
+ FrequencyPenalty int `json:"frequency_penalty"`
+ MaxTokens int `json:"max_tokens"`
+ PresencePenalty int `json:"presence_penalty"`
+ Temperature float32 `json:"temperature"`
+ TopP float32 `json:"top_p"`
+ // ResponseFormat struct {
+ // Type string `json:"type"`
+ // } `json:"response_format"`
+ // Stop any `json:"stop"`
+ // StreamOptions any `json:"stream_options"`
+ // Tools any `json:"tools"`
+ // ToolChoice string `json:"tool_choice"`
+ // Logprobs bool `json:"logprobs"`
+ // TopLogprobs any `json:"top_logprobs"`
+}
+
+func NewDSChatReq(cb ChatBody) DSChatReq {
+ return DSChatReq{
+ Messages: cb.Messages,
+ Model: cb.Model,
+ Stream: cb.Stream,
+ MaxTokens: 2048,
+ PresencePenalty: 0,
+ FrequencyPenalty: 0,
+ Temperature: 1.0,
+ TopP: 1.0,
+ }
+}
+
+type DSCompletionReq struct {
+ Model string `json:"model"`
+ Prompt string `json:"prompt"`
+ Echo bool `json:"echo"`
+ FrequencyPenalty int `json:"frequency_penalty"`
+ // Logprobs int `json:"logprobs"`
+ MaxTokens int `json:"max_tokens"`
+ PresencePenalty int `json:"presence_penalty"`
+ Stop any `json:"stop"`
+ Stream bool `json:"stream"`
+ StreamOptions any `json:"stream_options"`
+ Suffix any `json:"suffix"`
+ Temperature float32 `json:"temperature"`
+ TopP float32 `json:"top_p"`
+}
+
+func NewDSCompletionReq(prompt, model string, temp float32, stopSlice []string) DSCompletionReq {
+ return DSCompletionReq{
+ Model: model,
+ Prompt: prompt,
+ Temperature: temp,
+ Stream: true,
+ Echo: false,
+ MaxTokens: 2048,
+ PresencePenalty: 0,
+ FrequencyPenalty: 0,
+ TopP: 1.0,
+ Stop: stopSlice,
+ }
+}
+
+type DSCompletionResp struct {
+ ID string `json:"id"`
+ Choices []struct {
+ FinishReason string `json:"finish_reason"`
+ Index int `json:"index"`
+ Logprobs struct {
+ TextOffset []int `json:"text_offset"`
+ TokenLogprobs []int `json:"token_logprobs"`
+ Tokens []string `json:"tokens"`
+ TopLogprobs []struct {
+ } `json:"top_logprobs"`
+ } `json:"logprobs"`
+ Text string `json:"text"`
+ } `json:"choices"`
+ Created int `json:"created"`
+ Model string `json:"model"`
+ SystemFingerprint string `json:"system_fingerprint"`
+ Object string `json:"object"`
+ Usage struct {
+ CompletionTokens int `json:"completion_tokens"`
+ PromptTokens int `json:"prompt_tokens"`
+ PromptCacheHitTokens int `json:"prompt_cache_hit_tokens"`
+ PromptCacheMissTokens int `json:"prompt_cache_miss_tokens"`
+ TotalTokens int `json:"total_tokens"`
+ CompletionTokensDetails struct {
+ ReasoningTokens int `json:"reasoning_tokens"`
+ } `json:"completion_tokens_details"`
+ } `json:"usage"`
+}
+
+type DSChatResp struct {
+ Choices []struct {
+ Delta struct {
+ Content string `json:"content"`
+ Role any `json:"role"`
+ } `json:"delta"`
+ FinishReason string `json:"finish_reason"`
+ Index int `json:"index"`
+ Logprobs any `json:"logprobs"`
+ } `json:"choices"`
+ Created int `json:"created"`
+ ID string `json:"id"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ SystemFingerprint string `json:"system_fingerprint"`
+ Usage struct {
+ CompletionTokens int `json:"completion_tokens"`
+ PromptTokens int `json:"prompt_tokens"`
+ TotalTokens int `json:"total_tokens"`
+ } `json:"usage"`
+}
+
+type DSChatStreamResp struct {
+ ID string `json:"id"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Model string `json:"model"`
+ SystemFingerprint string `json:"system_fingerprint"`
+ Choices []struct {
+ Index int `json:"index"`
+ Delta struct {
+ Content string `json:"content"`
+ ReasoningContent string `json:"reasoning_content"`
+ } `json:"delta"`
+ Logprobs any `json:"logprobs"`
+ FinishReason string `json:"finish_reason"`
+ } `json:"choices"`
+}
+
+type DSBalance struct {
+ IsAvailable bool `json:"is_available"`
+ BalanceInfos []struct {
+ Currency string `json:"currency"`
+ TotalBalance string `json:"total_balance"`
+ GrantedBalance string `json:"granted_balance"`
+ ToppedUpBalance string `json:"topped_up_balance"`
+ } `json:"balance_infos"`
+}
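A short usage sketch for the constructors this file introduces; "deepseek-chat" is DeepSeek's documented chat model name, while the prompt, temperature, and stop strings here are placeholders:

// build a streaming completion request and marshal it for the API call
// (requires the encoding/json import)
req := models.NewDSCompletionReq("Once upon a time", "deepseek-chat", 0.7, []string{"\n\n"})
data, err := json.Marshal(req)
if err != nil {
	logger.Error("failed to marshal completion request", "error", err)
	return
}
_ = data // ready to POST to the DeepSeek completions endpoint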
diff --git a/models/models.go b/models/models.go
index f0f4130..0a10da1 100644
--- a/models/models.go
+++ b/models/models.go
@@ -127,139 +127,6 @@ func (cb *ChatBody) MakeStopSlice() []string {
return ss
}
-type DSChatReq struct {
- Messages []RoleMsg `json:"messages"`
- Model string `json:"model"`
- Stream bool `json:"stream"`
- FrequencyPenalty int `json:"frequency_penalty"`
- MaxTokens int `json:"max_tokens"`
- PresencePenalty int `json:"presence_penalty"`
- Temperature float32 `json:"temperature"`
- TopP float32 `json:"top_p"`
- // ResponseFormat struct {
- // Type string `json:"type"`
- // } `json:"response_format"`
- // Stop any `json:"stop"`
- // StreamOptions any `json:"stream_options"`
- // Tools any `json:"tools"`
- // ToolChoice string `json:"tool_choice"`
- // Logprobs bool `json:"logprobs"`
- // TopLogprobs any `json:"top_logprobs"`
-}
-
-func NewDSCharReq(cb ChatBody) DSChatReq {
- return DSChatReq{
- Messages: cb.Messages,
- Model: cb.Model,
- Stream: cb.Stream,
- MaxTokens: 2048,
- PresencePenalty: 0,
- FrequencyPenalty: 0,
- Temperature: 1.0,
- TopP: 1.0,
- }
-}
-
-type DSCompletionReq struct {
- Model string `json:"model"`
- Prompt string `json:"prompt"`
- Echo bool `json:"echo"`
- FrequencyPenalty int `json:"frequency_penalty"`
- // Logprobs int `json:"logprobs"`
- MaxTokens int `json:"max_tokens"`
- PresencePenalty int `json:"presence_penalty"`
- Stop any `json:"stop"`
- Stream bool `json:"stream"`
- StreamOptions any `json:"stream_options"`
- Suffix any `json:"suffix"`
- Temperature float32 `json:"temperature"`
- TopP float32 `json:"top_p"`
-}
-
-func NewDSCompletionReq(prompt, model string, temp float32, stopSlice []string) DSCompletionReq {
- return DSCompletionReq{
- Model: model,
- Prompt: prompt,
- Temperature: temp,
- Stream: true,
- Echo: false,
- MaxTokens: 2048,
- PresencePenalty: 0,
- FrequencyPenalty: 0,
- TopP: 1.0,
- Stop: stopSlice,
- }
-}
-
-type DSCompletionResp struct {
- ID string `json:"id"`
- Choices []struct {
- FinishReason string `json:"finish_reason"`
- Index int `json:"index"`
- Logprobs struct {
- TextOffset []int `json:"text_offset"`
- TokenLogprobs []int `json:"token_logprobs"`
- Tokens []string `json:"tokens"`
- TopLogprobs []struct {
- } `json:"top_logprobs"`
- } `json:"logprobs"`
- Text string `json:"text"`
- } `json:"choices"`
- Created int `json:"created"`
- Model string `json:"model"`
- SystemFingerprint string `json:"system_fingerprint"`
- Object string `json:"object"`
- Usage struct {
- CompletionTokens int `json:"completion_tokens"`
- PromptTokens int `json:"prompt_tokens"`
- PromptCacheHitTokens int `json:"prompt_cache_hit_tokens"`
- PromptCacheMissTokens int `json:"prompt_cache_miss_tokens"`
- TotalTokens int `json:"total_tokens"`
- CompletionTokensDetails struct {
- ReasoningTokens int `json:"reasoning_tokens"`
- } `json:"completion_tokens_details"`
- } `json:"usage"`
-}
-
-type DSChatResp struct {
- Choices []struct {
- Delta struct {
- Content string `json:"content"`
- Role any `json:"role"`
- } `json:"delta"`
- FinishReason string `json:"finish_reason"`
- Index int `json:"index"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
- Created int `json:"created"`
- ID string `json:"id"`
- Model string `json:"model"`
- Object string `json:"object"`
- SystemFingerprint string `json:"system_fingerprint"`
- Usage struct {
- CompletionTokens int `json:"completion_tokens"`
- PromptTokens int `json:"prompt_tokens"`
- TotalTokens int `json:"total_tokens"`
- } `json:"usage"`
-}
-
-type DSChatStreamResp struct {
- ID string `json:"id"`
- Object string `json:"object"`
- Created int `json:"created"`
- Model string `json:"model"`
- SystemFingerprint string `json:"system_fingerprint"`
- Choices []struct {
- Index int `json:"index"`
- Delta struct {
- Content string `json:"content"`
- ReasoningContent string `json:"reasoning_content"`
- } `json:"delta"`
- Logprobs any `json:"logprobs"`
- FinishReason string `json:"finish_reason"`
- } `json:"choices"`
-}
-
type EmbeddingResp struct {
Embedding []float32 `json:"embedding"`
Index uint32 `json:"index"`
@@ -374,166 +241,3 @@ type LlamaCPPResp struct {
Content string `json:"content"`
Stop bool `json:"stop"`
}
-
-type DSBalance struct {
- IsAvailable bool `json:"is_available"`
- BalanceInfos []struct {
- Currency string `json:"currency"`
- TotalBalance string `json:"total_balance"`
- GrantedBalance string `json:"granted_balance"`
- ToppedUpBalance string `json:"topped_up_balance"`
- } `json:"balance_infos"`
-}
-
-// openrouter
-// https://openrouter.ai/docs/api-reference/completion
-type OpenRouterCompletionReq struct {
- Model string `json:"model"`
- Prompt string `json:"prompt"`
- Stream bool `json:"stream"`
- Temperature float32 `json:"temperature"`
- Stop []string `json:"stop"` // not present in docs
- MinP float32 `json:"min_p"`
- NPredict int32 `json:"max_tokens"`
-}
-
-func NewOpenRouterCompletionReq(model, prompt string, props map[string]float32, stopStrings []string) OpenRouterCompletionReq {
- return OpenRouterCompletionReq{
- Stream: true,
- Prompt: prompt,
- Temperature: props["temperature"],
- MinP: props["min_p"],
- NPredict: int32(props["n_predict"]),
- Stop: stopStrings,
- Model: model,
- }
-}
-
-type OpenRouterChatReq struct {
- Messages []RoleMsg `json:"messages"`
- Model string `json:"model"`
- Stream bool `json:"stream"`
- Temperature float32 `json:"temperature"`
- MinP float32 `json:"min_p"`
- NPredict int32 `json:"max_tokens"`
-}
-
-func NewOpenRouterChatReq(cb ChatBody, props map[string]float32) OpenRouterChatReq {
- return OpenRouterChatReq{
- Messages: cb.Messages,
- Model: cb.Model,
- Stream: cb.Stream,
- Temperature: props["temperature"],
- MinP: props["min_p"],
- NPredict: int32(props["n_predict"]),
- }
-}
-
-type OpenRouterChatRespNonStream struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Logprobs any `json:"logprobs"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Index int `json:"index"`
- Message struct {
- Role string `json:"role"`
- Content string `json:"content"`
- Refusal any `json:"refusal"`
- Reasoning any `json:"reasoning"`
- } `json:"message"`
- } `json:"choices"`
- Usage struct {
- PromptTokens int `json:"prompt_tokens"`
- CompletionTokens int `json:"completion_tokens"`
- TotalTokens int `json:"total_tokens"`
- } `json:"usage"`
-}
-
-type OpenRouterChatResp struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Index int `json:"index"`
- Delta struct {
- Role string `json:"role"`
- Content string `json:"content"`
- } `json:"delta"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
-}
-
-type OpenRouterCompletionResp struct {
- ID string `json:"id"`
- Provider string `json:"provider"`
- Model string `json:"model"`
- Object string `json:"object"`
- Created int `json:"created"`
- Choices []struct {
- Text string `json:"text"`
- FinishReason string `json:"finish_reason"`
- NativeFinishReason string `json:"native_finish_reason"`
- Logprobs any `json:"logprobs"`
- } `json:"choices"`
-}
-
-type ORModel struct {
- ID string `json:"id"`
- CanonicalSlug string `json:"canonical_slug"`
- HuggingFaceID string `json:"hugging_face_id"`
- Name string `json:"name"`
- Created int `json:"created"`
- Description string `json:"description"`
- ContextLength int `json:"context_length"`
- Architecture struct {
- Modality string `json:"modality"`
- InputModalities []string `json:"input_modalities"`
- OutputModalities []string `json:"output_modalities"`
- Tokenizer string `json:"tokenizer"`
- InstructType any `json:"instruct_type"`
- } `json:"architecture"`
- Pricing struct {
- Prompt string `json:"prompt"`
- Completion string `json:"completion"`
- Request string `json:"request"`
- Image string `json:"image"`
- Audio string `json:"audio"`
- WebSearch string `json:"web_search"`
- InternalReasoning string `json:"internal_reasoning"`
- } `json:"pricing,omitempty"`
- TopProvider struct {
- ContextLength int `json:"context_length"`
- MaxCompletionTokens int `json:"max_completion_tokens"`
- IsModerated bool `json:"is_moderated"`
- } `json:"top_provider"`
- PerRequestLimits any `json:"per_request_limits"`
- SupportedParameters []string `json:"supported_parameters"`
-}
-
-type ORModels struct {
- Data []ORModel `json:"data"`
-}
-
-func (orm *ORModels) ListModels(free bool) []string {
- resp := []string{}
- for _, model := range orm.Data {
- if free {
- if model.Pricing.Prompt == "0" && model.Pricing.Request == "0" &&
- model.Pricing.Completion == "0" {
- resp = append(resp, model.ID)
- }
- } else {
- resp = append(resp, model.ID)
- }
- }
- return resp
-}
diff --git a/models/openrouter.go b/models/openrouter.go
new file mode 100644
index 0000000..933598e
--- /dev/null
+++ b/models/openrouter.go
@@ -0,0 +1,154 @@
+package models
+
+// openrouter
+// https://openrouter.ai/docs/api-reference/completion
+type OpenRouterCompletionReq struct {
+ Model string `json:"model"`
+ Prompt string `json:"prompt"`
+ Stream bool `json:"stream"`
+ Temperature float32 `json:"temperature"`
+ Stop []string `json:"stop"` // not present in docs
+ MinP float32 `json:"min_p"`
+ NPredict int32 `json:"max_tokens"`
+}
+
+func NewOpenRouterCompletionReq(model, prompt string, props map[string]float32, stopStrings []string) OpenRouterCompletionReq {
+ return OpenRouterCompletionReq{
+ Stream: true,
+ Prompt: prompt,
+ Temperature: props["temperature"],
+ MinP: props["min_p"],
+ NPredict: int32(props["n_predict"]),
+ Stop: stopStrings,
+ Model: model,
+ }
+}
+
+type OpenRouterChatReq struct {
+ Messages []RoleMsg `json:"messages"`
+ Model string `json:"model"`
+ Stream bool `json:"stream"`
+ Temperature float32 `json:"temperature"`
+ MinP float32 `json:"min_p"`
+ NPredict int32 `json:"max_tokens"`
+}
+
+func NewOpenRouterChatReq(cb ChatBody, props map[string]float32) OpenRouterChatReq {
+ return OpenRouterChatReq{
+ Messages: cb.Messages,
+ Model: cb.Model,
+ Stream: cb.Stream,
+ Temperature: props["temperature"],
+ MinP: props["min_p"],
+ NPredict: int32(props["n_predict"]),
+ }
+}
+
+type OpenRouterChatRespNonStream struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Logprobs any `json:"logprobs"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Index int `json:"index"`
+ Message struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ Refusal any `json:"refusal"`
+ Reasoning any `json:"reasoning"`
+ } `json:"message"`
+ } `json:"choices"`
+ Usage struct {
+ PromptTokens int `json:"prompt_tokens"`
+ CompletionTokens int `json:"completion_tokens"`
+ TotalTokens int `json:"total_tokens"`
+ } `json:"usage"`
+}
+
+type OpenRouterChatResp struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Index int `json:"index"`
+ Delta struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ } `json:"delta"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Logprobs any `json:"logprobs"`
+ } `json:"choices"`
+}
+
+type OpenRouterCompletionResp struct {
+ ID string `json:"id"`
+ Provider string `json:"provider"`
+ Model string `json:"model"`
+ Object string `json:"object"`
+ Created int `json:"created"`
+ Choices []struct {
+ Text string `json:"text"`
+ FinishReason string `json:"finish_reason"`
+ NativeFinishReason string `json:"native_finish_reason"`
+ Logprobs any `json:"logprobs"`
+ } `json:"choices"`
+}
+
+type ORModel struct {
+ ID string `json:"id"`
+ CanonicalSlug string `json:"canonical_slug"`
+ HuggingFaceID string `json:"hugging_face_id"`
+ Name string `json:"name"`
+ Created int `json:"created"`
+ Description string `json:"description"`
+ ContextLength int `json:"context_length"`
+ Architecture struct {
+ Modality string `json:"modality"`
+ InputModalities []string `json:"input_modalities"`
+ OutputModalities []string `json:"output_modalities"`
+ Tokenizer string `json:"tokenizer"`
+ InstructType any `json:"instruct_type"`
+ } `json:"architecture"`
+ Pricing struct {
+ Prompt string `json:"prompt"`
+ Completion string `json:"completion"`
+ Request string `json:"request"`
+ Image string `json:"image"`
+ Audio string `json:"audio"`
+ WebSearch string `json:"web_search"`
+ InternalReasoning string `json:"internal_reasoning"`
+ } `json:"pricing,omitempty"`
+ TopProvider struct {
+ ContextLength int `json:"context_length"`
+ MaxCompletionTokens int `json:"max_completion_tokens"`
+ IsModerated bool `json:"is_moderated"`
+ } `json:"top_provider"`
+ PerRequestLimits any `json:"per_request_limits"`
+ SupportedParameters []string `json:"supported_parameters"`
+}
+
+type ORModels struct {
+ Data []ORModel `json:"data"`
+}
+
+func (orm *ORModels) ListModels(free bool) []string {
+ resp := []string{}
+ for _, model := range orm.Data {
+ if free {
+ if model.Pricing.Prompt == "0" && model.Pricing.Request == "0" &&
+ model.Pricing.Completion == "0" {
+ resp = append(resp, model.ID)
+ }
+ } else {
+ resp = append(resp, model.ID)
+ }
+ }
+ return resp
+}
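A hedged sketch of driving ListModels end to end; the URL is OpenRouter's documented public model index, and the assumption here is that listing models needs no API key:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"gf-lt/models"
)

func main() {
	resp, err := http.Get("https://openrouter.ai/api/v1/models")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var orm models.ORModels
	if err := json.NewDecoder(resp.Body).Decode(&orm); err != nil {
		panic(err)
	}
	// free=true keeps only models whose prompt, request and
	// completion prices are all "0"
	for _, id := range orm.ListModels(true) {
		fmt.Println(id)
	}
}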
diff --git a/tools.go b/tools.go
index acc4697..c1b84ab 100644
--- a/tools.go
+++ b/tools.go
@@ -1,8 +1,13 @@
package main
import (
+ "bytes"
+ "encoding/json"
"fmt"
+ "gf-lt/config"
"gf-lt/models"
+ "io"
+ "net/http"
"regexp"
"strings"
"time"
@@ -76,6 +81,69 @@ After that you are free to respond to the user.
sysLabels = []string{"basic_sys", "tool_sys"}
)
+func populateTools(cfg config.Config) {
+ // if we have access to a server exposing functions, we can populate the tool set (tools|toolbelt?) from it
+ // there must be a better way
+ if cfg.SearchAPI == "" || cfg.SearchDescribe == "" {
+ return
+ }
+ resp, err := httpClient.Get(cfg.SearchDescribe)
+ if err != nil {
+ logger.Error("failed to get websearch tool description",
+ "link", cfg.SearchDescribe, "error", err)
+ return
+ }
+ descResp := models.Tool{}
+ if err := json.NewDecoder(resp.Body).Decode(&descResp); err != nil {
+ logger.Error("failed to unmarshal websearch tool description",
+ "link", cfg.SearchDescribe, "error", err)
+ return
+ }
+ fnMap["web_search"] = websearch
+ baseTools = append(baseTools, descResp)
+ logger.Info("added web_search tool", "tool", descResp)
+ return
+}
+
+// {"type":"function","function":{"name":"web_search","description":"Perform a web search to find information on varioust topics","parameters":{"type":"object","properties":{"num_results":{"type":"integer","description":"Maximum number of results to return (default: 10)"},"query":{"type":"string","description":"The search query to find information about"},"search_type":{"type":"string","description":"Type of search to perform: 'api' for SearXNG API search or 'scraper' for web scraping (default: 'scraper')"}},"required":["query"]}}}
+
+// web search (depends on extra server)
+func websearch(args map[string]string) []byte {
+ // make http request return bytes
+ query, ok := args["query"]
+ if !ok || query == "" {
+ msg := "query not provided to web_search tool"
+ logger.Error(msg)
+ return []byte(msg)
+ }
+ payload, err := json.Marshal(args)
+ if err != nil {
+ logger.Error("failed to marshal web_search arguments", "error", err)
+ msg := fmt.Sprintf("failed to marshal web_search arguments; error: %s\n", err)
+ return []byte(msg)
+ }
+ req, err := http.NewRequest("POST", cfg.SearchAPI, bytes.NewReader(payload))
+ if err != nil {
+ logger.Error("failed to build an http request", "error", err)
+ msg := fmt.Sprintf("failed to build an http request; error: %s\n", err)
+ return []byte(msg)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logger.Error("failed to execute http request", "error", err)
+ msg := fmt.Sprintf("failed to execute http request; error: %s\n", err)
+ return []byte(msg)
+ }
+ defer resp.Body.Close()
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logger.Error("failed to read response body", "error", err)
+ msg := fmt.Sprintf("failed to read response body; error: %s\n", err)
+ return []byte(msg)
+ }
+ return data
+}
+
/*
consider cases:
- append mode (treat it like a journal appendix)
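Putting the pieces together, a hedged sketch of one web_search round trip as a tool-calling model would drive it; the query is illustrative, and note that because args is typed map[string]string, num_results travels as a JSON string:

// the argument map a model might supply for the tool call
args := map[string]string{
	"query":       "golang io.Reader streaming",
	"num_results": "5",
}
// POSTs {"num_results":"5","query":"golang io.Reader streaming"} to
// cfg.SearchAPI; on any failure websearch returns the error text as
// bytes, so the result can be fed back to the model either way
result := websearch(args)
fmt.Println(string(result))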