package agent

import (
	"bytes"
	"encoding/json"
	"fmt"
	"gf-lt/config"
	"gf-lt/models"
	"io"
	"log/slog"
	"net/http"
	"strings"
)
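
// httpClient is the shared HTTP client used for all agent LLM requests.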
var httpClient = &http.Client{}
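
// defaultProps holds the default sampling parameters sent with completion requests.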
var defaultProps = map[string]float32{
"temperature": 0.8,
"dry_multiplier": 0.0,
"min_p": 0.05,
"n_predict": -1.0,
}
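
// detectAPI classifies an endpoint URL so the request payload can be shaped
// for the matching provider.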
func detectAPI(api string) (isCompletion, isChat, isDeepSeek, isOpenRouter bool) {
	isCompletion = strings.Contains(api, "/completion") && !strings.Contains(api, "/chat/completions")
	isChat = strings.Contains(api, "/chat/completions")
	isDeepSeek = strings.Contains(api, "deepseek.com")
	isOpenRouter = strings.Contains(api, "openrouter.ai")
	return
}
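
// AgentClient bundles the configuration, auth token source, and logger needed
// to issue non-streaming LLM requests on behalf of agents.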
type AgentClient struct {
	cfg      *config.Config
	getToken func() string
	log      slog.Logger
}
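
// NewAgentClient builds an AgentClient; gt supplies the bearer token for each request.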
func NewAgentClient(cfg *config.Config, log slog.Logger, gt func() string) *AgentClient {
	return &AgentClient{
		cfg:      cfg,
		getToken: gt,
		log:      log,
	}
}
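
// Log exposes the client's logger.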
func (ag *AgentClient) Log() *slog.Logger {
	return &ag.log
}
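
// FormMsg builds the request body for the current API and returns it as a reader.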
func (ag *AgentClient) FormMsg(sysprompt, msg string) (io.Reader, error) {
	b, err := ag.buildRequest(sysprompt, msg)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(b), nil
}

// buildRequest creates the appropriate LLM request based on the current API endpoint.
func (ag *AgentClient) buildRequest(sysprompt, msg string) ([]byte, error) {
	api := ag.cfg.CurrentAPI
	model := ag.cfg.CurrentModel
	messages := []models.RoleMsg{
		{Role: "system", Content: sysprompt},
		{Role: "user", Content: msg},
	}
	// Determine API type
	isCompletion, isChat, isDeepSeek, isOpenRouter := detectAPI(api)
	ag.log.Debug("agent building request", "api", api, "isCompletion", isCompletion, "isChat", isChat, "isDeepSeek", isDeepSeek, "isOpenRouter", isOpenRouter)
	// Build prompt for completion endpoints
	if isCompletion {
		var sb strings.Builder
		for _, m := range messages {
			sb.WriteString(m.ToPrompt())
			sb.WriteString("\n")
		}
		prompt := strings.TrimSpace(sb.String())
		if isDeepSeek {
			// DeepSeek completion
			req := models.NewDSCompletionReq(prompt, model, defaultProps["temperature"], []string{})
			req.Stream = false // Agents don't need streaming
			return json.Marshal(req)
		} else if isOpenRouter {
			// OpenRouter completion
			req := models.NewOpenRouterCompletionReq(model, prompt, defaultProps, []string{})
			req.Stream = false // Agents don't need streaming
			return json.Marshal(req)
		} else {
			// Assume llama.cpp completion
			req := models.NewLCPReq(prompt, model, nil, defaultProps, []string{})
			req.Stream = false // Agents don't need streaming
			return json.Marshal(req)
		}
	}
	// Chat completions endpoints. At this point isCompletion is false (the
	// branch above returned), so this branch handles every remaining API.
	if isChat || !isCompletion {
		chatBody := &models.ChatBody{
			Model:    model,
			Stream:   false, // Agents don't need streaming
			Messages: messages,
		}
		if isDeepSeek {
			// DeepSeek chat
			req := models.NewDSChatReq(*chatBody)
			return json.Marshal(req)
		} else if isOpenRouter {
			// OpenRouter chat
			req := models.NewOpenRouterChatReq(*chatBody, defaultProps)
			return json.Marshal(req)
		} else {
			// Assume llama.cpp chat (OpenAI format)
			req := models.OpenAIReq{
				ChatBody: chatBody,
				Tools:    nil,
			}
			return json.Marshal(req)
		}
	}
	// Fallback. Unreachable in practice, since the branch above covers every
	// non-completion API, but kept as a safe default.
	ag.log.Warn("unknown API, using default chat completions format", "api", api)
	chatBody := &models.ChatBody{
		Model:    model,
		Stream:   false, // Agents don't need streaming
		Messages: messages,
	}
	return json.Marshal(chatBody)
}
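
// LLMRequest POSTs the prepared body to the configured API endpoint and
// returns the extracted text content of the model's response.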
func (ag *AgentClient) LLMRequest(body io.Reader) ([]byte, error) {
	// Read the body up front so it can be logged and then recreated for the request.
	bodyBytes, err := io.ReadAll(body)
	if err != nil {
		ag.log.Error("failed to read request body", "error", err)
		return nil, err
	}
	req, err := http.NewRequest("POST", ag.cfg.CurrentAPI, bytes.NewReader(bodyBytes))
	if err != nil {
		ag.log.Error("failed to create request", "error", err)
		return nil, err
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Bearer "+ag.getToken())
	// Accept-Encoding is deliberately left unset: when set manually, Go's
	// transport no longer decompresses gzip responses transparently, and the
	// JSON parsing below would receive compressed bytes.
	ag.log.Debug("agent LLM request", "url", ag.cfg.CurrentAPI, "body_preview", string(bodyBytes[:min(len(bodyBytes), 500)]))
	resp, err := httpClient.Do(req)
	if err != nil {
		ag.log.Error("agent LLM API request failed", "error", err, "url", ag.cfg.CurrentAPI)
		return nil, err
	}
	defer resp.Body.Close()
	responseBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		ag.log.Error("failed to read response", "error", err)
		return nil, err
	}
	if resp.StatusCode >= 400 {
		ag.log.Error("agent LLM request failed", "status", resp.StatusCode, "response", string(responseBytes[:min(len(responseBytes), 1000)]))
		return responseBytes, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(responseBytes[:min(len(responseBytes), 200)]))
	}
	// Parse response and extract text content
	text, err := extractTextFromResponse(responseBytes)
	if err != nil {
		ag.log.Error("failed to extract text from response", "error", err, "response_preview", string(responseBytes[:min(len(responseBytes), 500)]))
		// Return raw response as fallback
		return responseBytes, nil
	}
	return []byte(text), nil
}

// extractTextFromResponse parses common LLM response formats and extracts the text content.
func extractTextFromResponse(data []byte) (string, error) {
	// Try to parse as generic JSON first
	var genericResp map[string]interface{}
	if err := json.Unmarshal(data, &genericResp); err != nil {
		// Not JSON, return as string
		return string(data), nil
	}
	// Check for OpenAI chat completion format
	if choices, ok := genericResp["choices"].([]interface{}); ok && len(choices) > 0 {
		if firstChoice, ok := choices[0].(map[string]interface{}); ok {
			// Chat completion: choices[0].message.content
			if message, ok := firstChoice["message"].(map[string]interface{}); ok {
				if content, ok := message["content"].(string); ok {
					return content, nil
				}
			}
			// Completion: choices[0].text
			if text, ok := firstChoice["text"].(string); ok {
				return text, nil
			}
			// Delta format for streaming (should not happen with stream: false)
			if delta, ok := firstChoice["delta"].(map[string]interface{}); ok {
				if content, ok := delta["content"].(string); ok {
					return content, nil
				}
			}
		}
	}
	// Check for llama.cpp completion format
	if content, ok := genericResp["content"].(string); ok {
		return content, nil
	}
	// Unknown format, return pretty-printed JSON
	prettyJSON, err := json.MarshalIndent(genericResp, "", " ")
	if err != nil {
		return string(data), nil
	}
	return string(prettyJSON), nil
}
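
// min returns the smaller of two ints (provided for Go versions before 1.21,
// which lack the built-in min).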
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
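
// Usage sketch (illustrative only; "cfg" and "apiKey" are assumed to come
// from the caller and are not defined in this package):
//
//	ag := NewAgentClient(cfg, *slog.Default(), func() string { return apiKey })
//	body, err := ag.FormMsg("You are a helpful agent.", "Summarize this repository.")
//	if err != nil {
//		return err
//	}
//	answer, err := ag.LLMRequest(body)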