summaryrefslogtreecommitdiff
path: root/rag/main.go
blob: 5f2aa000c470a6f9d84cf404c36f35b50f20478c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
package rag

import (
	"bytes"
	"elefant/config"
	"elefant/models"
	"elefant/storage"
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"net/http"
	"os"
	"path"
	"strings"
	"sync"

	"github.com/neurosnap/sentences/english"
)

var (
	// LongJobStatusCh delivers human-readable progress updates from
	// long-running RAG jobs; buffered with 1 slot so a single pending
	// message does not block the sender.
	LongJobStatusCh = make(chan string, 1)
	// Status messages pushed into LongJobStatusCh.
	FinishedRAGStatus   = "finished loading RAG file; press Enter"
	LoadedFileRAGStatus = "loaded file"
	ErrRAGStatus        = "some error occurred; failed to transfer data to vector db"
)

// RAG provides retrieval-augmented-generation support: loading a text file,
// converting its contents to embedding vectors via an external HF inference
// endpoint, storing those vectors, and querying the closest stored vectors.
type RAG struct {
	logger *slog.Logger     // structured logger for progress and errors
	store  storage.FullRepo // vector persistence: write/search/list/remove
	cfg    *config.Config   // supplies EmbedURL and HFToken for embedding requests
}

// New returns a RAG instance wired with the given logger, storage backend
// and application config.
func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
	r := new(RAG)
	r.logger = l
	r.store = s
	r.cfg = cfg
	return r
}

// wordCounter returns the number of whitespace-separated words in sentence.
// It uses strings.Fields so runs of spaces, tabs and newlines do not inflate
// the count; the previous strings.Split(sentence, " ") reported 1 word for
// an empty string and over-counted on consecutive spaces.
func wordCounter(sentence string) int {
	return len(strings.Fields(sentence))
}

// LoadRAG reads the file at fpath, splits it into sentences, groups them
// into paragraphs of roughly wordLimit words, embeds the paragraphs in
// parallel via the HF endpoint, and writes the resulting vectors to the db.
// Progress messages are pushed to LongJobStatusCh along the way.
func (r *RAG) LoadRAG(fpath string) error {
	data, err := os.ReadFile(fpath)
	if err != nil {
		return err
	}
	r.logger.Debug("rag: loaded file", "fp", fpath)
	LongJobStatusCh <- LoadedFileRAGStatus
	fileText := string(data)
	tokenizer, err := english.NewSentenceTokenizer(nil)
	if err != nil {
		return err
	}
	sentences := tokenizer.Tokenize(fileText)
	sents := make([]string, len(sentences))
	for i, s := range sentences {
		sents[i] = s.Text
	}
	// TODO: maybe better to decide batch size based on sentences len
	var (
		// TODO: to config
		workers   = 5
		batchSize = 100
		maxChSize = 1000
		//
		wordLimit = 80
		left      = 0
		right     = batchSize
		batchCh   = make(chan map[int][]string, maxChSize)
		vectorCh  = make(chan []models.VectorRow, maxChSize)
		errCh     = make(chan error, 1)
		doneCh    = make(chan bool, 1)
		lock      = new(sync.Mutex)
	)
	defer close(doneCh)
	defer close(errCh)
	defer close(batchCh)
	// group sentences into paragraphs of just over wordLimit words each
	paragraphs := []string{}
	par := strings.Builder{}
	for i := 0; i < len(sents); i++ {
		par.WriteString(sents[i])
		if wordCounter(par.String()) > wordLimit {
			paragraphs = append(paragraphs, par.String())
			par.Reset()
		}
	}
	// flush the tail: without this, a trailing group of <= wordLimit words
	// (including the entire text of a short file) was silently dropped
	if par.Len() > 0 {
		paragraphs = append(paragraphs, par.String())
	}
	if len(paragraphs) < batchSize {
		batchSize = len(paragraphs)
	}
	// fill the input channel; each batch map is keyed by the index of its
	// first paragraph so workers can build stable slugs
	ctn := 0
	for {
		if right > len(paragraphs) {
			batchCh <- map[int][]string{left: paragraphs[left:]}
			break
		}
		batchCh <- map[int][]string{left: paragraphs[left:right]}
		left, right = right, right+batchSize
		ctn++
	}
	finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", len(batchCh), len(paragraphs), len(sents))
	r.logger.Debug(finishedBatchesMsg)
	LongJobStatusCh <- finishedBatchesMsg
	for w := 0; w < workers; w++ {
		go r.batchToVectorHFAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
	}
	// wait for the first worker to observe an empty input channel.
	// NOTE(review): other workers may still be in flight at this point and
	// could send on errCh after the deferred close — verify shutdown ordering.
	<-doneCh
	// write to db
	return r.writeVectors(vectorCh)
}

// writeVectors drains vectorCh and persists every vector to the store.
// Individual write failures (e.g. duplicates) are logged and skipped rather
// than aborting the whole load. It returns once the channel is observed
// empty after a batch has been written.
// NOTE(review): len(vectorCh) == 0 is used as the "done" signal; a producer
// still in flight at that instant would be cut off — verify against the
// worker shutdown in LoadRAG.
func (r *RAG) writeVectors(vectorCh chan []models.VectorRow) error {
	for batch := range vectorCh {
		for _, vector := range batch {
			if err := r.store.WriteVector(&vector); err != nil {
				r.logger.Error("failed to write vector", "error", err, "slug", vector.Slug)
				LongJobStatusCh <- ErrRAGStatus
				continue // a duplicate is not critical
			}
		}
		r.logger.Debug("wrote batch to db", "size", len(batch), "vector_chan_len", len(vectorCh))
		if len(vectorCh) == 0 {
			r.logger.Debug("finished writing vectors")
			LongJobStatusCh <- FinishedRAGStatus
			close(vectorCh)
			return nil
		}
	}
	// vectorCh was closed externally before the empty-check fired. The
	// original wrapped the range in an outer `for {}`, which busy-spun
	// forever re-ranging the closed channel; return cleanly instead.
	return nil
}

// batchToVectorHFAsync is a worker goroutine: it pulls paragraph batches
// from inputCh, embeds them via fecthEmbHF and pushes the resulting vector
// rows to vectorCh. It exits when inputCh is drained (signalling doneCh at
// most once across all workers) or when an error appears on errCh.
// NOTE(review): lock serializes the emptiness check with batch processing,
// and is held across the network call inside fecthEmbHF — so workers
// effectively process one batch at a time; confirm that is intended.
func (r *RAG) batchToVectorHFAsync(lock *sync.Mutex, id int, inputCh <-chan map[int][]string,
	vectorCh chan<- []models.VectorRow, errCh chan error, doneCh chan bool, filename string) {
	for {
		lock.Lock()
		// len(inputCh) == 0 is treated as "all batches consumed"; this is
		// only sound because LoadRAG fills inputCh completely before
		// starting the workers
		if len(inputCh) == 0 {
			// only the first worker to get here signals completion
			if len(doneCh) == 0 {
				doneCh <- true
			}
			lock.Unlock()
			return
		}
		select {
		case linesMap := <-inputCh:
			// each map holds one entry: first-paragraph index -> paragraphs;
			// the index makes the slug unique per batch
			for leftI, v := range linesMap {
				r.fecthEmbHF(v, errCh, vectorCh, fmt.Sprintf("%s_%d", filename, leftI), filename)
			}
			lock.Unlock()
		case err := <-errCh:
			// an error reported by any worker stops this worker
			r.logger.Error("got an error", "error", err)
			lock.Unlock()
			return
		}
		r.logger.Debug("to vector batches", "batches#", len(inputCh), "worker#", id)
		LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
	}
}

// fecthEmbHF embeds lines via the configured HF inference endpoint and
// sends the resulting vector rows to vectorCh. Every failure is reported on
// errCh so the calling worker can shut down. slug is the per-batch prefix
// for vector slugs; filename tags each row with its source file.
func (r *RAG) fecthEmbHF(lines []string, errCh chan error, vectorCh chan<- []models.VectorRow, slug, filename string) {
	payload, err := json.Marshal(
		map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
	)
	if err != nil {
		r.logger.Error("failed to marshal payload", "err:", err.Error())
		errCh <- err
		return
	}
	// nolint
	req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
	if err != nil {
		r.logger.Error("failed to create new req", "err:", err.Error())
		errCh <- err
		return
	}
	req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		r.logger.Error("failed to embedd line", "err:", err.Error())
		errCh <- err
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// previously this returned silently, dropping the batch without
		// notifying the worker; report it like every other failure path
		err = fmt.Errorf("non 200 response; code: %d", resp.StatusCode)
		r.logger.Error("non 200 resp", "code", resp.StatusCode)
		errCh <- err
		return
	}
	emb := [][]float32{}
	if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
		r.logger.Error("failed to embedd line", "err:", err.Error())
		errCh <- err
		return
	}
	if len(emb) == 0 {
		r.logger.Error("empty emb")
		err = errors.New("empty emb")
		errCh <- err
		return
	}
	// guard against a response with fewer embeddings than inputs, which
	// would otherwise panic on lines[i] below
	if len(emb) != len(lines) {
		err = fmt.Errorf("embedding count mismatch: got %d, want %d", len(emb), len(lines))
		r.logger.Error(err.Error())
		errCh <- err
		return
	}
	vectors := make([]models.VectorRow, len(emb))
	for i, e := range emb {
		vectors[i] = models.VectorRow{
			Embeddings: e,
			RawText:    lines[i],
			Slug:       fmt.Sprintf("%s_%d", slug, i),
			FileName:   filename,
		}
	}
	vectorCh <- vectors
}

// LineToVector embeds a single line of text via the configured HF inference
// endpoint and returns its embedding vector. Errors are both logged and
// returned to the caller.
func (r *RAG) LineToVector(line string) ([]float32, error) {
	lines := []string{line}
	payload, err := json.Marshal(
		map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
	)
	if err != nil {
		r.logger.Error("failed to marshal payload", "err:", err.Error())
		return nil, err
	}
	// nolint
	req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
	if err != nil {
		r.logger.Error("failed to create new req", "err:", err.Error())
		return nil, err
	}
	req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		r.logger.Error("failed to embedd line", "err:", err.Error())
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// error strings must not end with a newline (go vet / ST1005);
		// use %d for the integer status code
		err = fmt.Errorf("non 200 resp; code: %d", resp.StatusCode)
		r.logger.Error(err.Error())
		return nil, err
	}
	emb := [][]float32{}
	if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
		r.logger.Error("failed to embedd line", "err:", err.Error())
		return nil, err
	}
	if len(emb) == 0 || len(emb[0]) == 0 {
		r.logger.Error("empty emb")
		err = errors.New("empty emb")
		return nil, err
	}
	return emb[0], nil
}

// SearchEmb looks up the stored vector rows closest to the supplied
// embedding.
func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) {
	closest, err := r.store.SearchClosest(emb.Embedding)
	return closest, err
}

// ListLoaded reports the names of all files whose vectors are currently in
// the store.
func (r *RAG) ListLoaded() ([]string, error) {
	files, err := r.store.ListFiles()
	return files, err
}

// RemoveFile deletes every stored embedding that originated from the given
// file.
func (r *RAG) RemoveFile(filename string) error {
	if err := r.store.RemoveEmbByFileName(filename); err != nil {
		return err
	}
	return nil
}