author    Grail Finder <wohilas@gmail.com>    2025-10-19 13:14:56 +0300
committer Grail Finder <wohilas@gmail.com>    2025-10-19 13:14:56 +0300
commit    60ccaed2009c535c9c92c163995577fcde7aadb6 (patch)
tree      4621fdbcd4b86cc32c7c05ff13b907136424f765
parent    dfa164e871a62f814aeeb9ced6350e74a52f65b3 (diff)
Chore: remove old rag
-rw-r--r--   bot.go                                               10
-rw-r--r--   rag/embedder.go (renamed from rag_new/embedder.go)   17
-rw-r--r--   rag/main.go                                         265
-rw-r--r--   rag/rag.go (renamed from rag_new/rag.go)             69
-rw-r--r--   rag/storage.go (renamed from rag_new/storage.go)     55
-rw-r--r--   storage/migrate.go                                    6
-rw-r--r--   storage/storage.go                                   10
-rw-r--r--   storage/vector.go                                    62
8 files changed, 101 insertions, 393 deletions
diff --git a/bot.go b/bot.go
index a5d16e1..537df0c 100644
--- a/bot.go
+++ b/bot.go
@@ -9,7 +9,7 @@ import (
"gf-lt/config"
"gf-lt/extra"
"gf-lt/models"
- "gf-lt/rag_new"
+ "gf-lt/rag"
"gf-lt/storage"
"io"
"log/slog"
@@ -41,7 +41,7 @@ var (
defaultStarter = []models.RoleMsg{}
defaultStarterBytes = []byte{}
interruptResp = false
- ragger *rag_new.RAG
+ ragger *rag.RAG
chunkParser ChunkParser
lastToolCall *models.FuncCall
//nolint:unused // TTS_ENABLED conditionally uses this
@@ -277,13 +277,13 @@ func chatRagUse(qText string) (string, error) {
logger.Error("failed to get embs", "error", err, "index", i, "question", q)
continue
}
-
+
// Create EmbeddingResp struct for the search
embeddingResp := &models.EmbeddingResp{
Embedding: emb,
Index: 0, // Not used in search but required for the struct
}
-
+
vecs, err := ragger.SearchEmb(embeddingResp)
if err != nil {
logger.Error("failed to query embs", "error", err, "index", i, "question", q)
@@ -571,7 +571,7 @@ func init() {
if store == nil {
os.Exit(1)
}
- ragger = rag_new.New(logger, store, cfg)
+ ragger = rag.New(logger, store, cfg)
// https://github.com/coreydaley/ggerganov-llama.cpp/blob/master/examples/server/README.md
// load all chats in memory
if _, err := loadHistoryChats(); err != nil {
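Note on the chatRagUse hunk above: the raw embedding is wrapped in a models.EmbeddingResp before the search call. A minimal standalone sketch of that flow, with hypothetical local stand-ins for the gf-lt/models types and for ragger.SearchEmb (the real definitions live in the repo):

package main

import "fmt"

// Hypothetical stand-ins for the gf-lt/models types used in chatRagUse;
// the field names mirror the hunk above, everything else is assumed.
type EmbeddingResp struct {
	Embedding []float32
	Index     int // not used by the search itself, but required by the struct
}

type VectorRow struct {
	Slug, RawText string
}

// searchEmb stands in for ragger.SearchEmb from the rag package.
func searchEmb(resp *EmbeddingResp) ([]VectorRow, error) {
	if len(resp.Embedding) == 0 {
		return nil, fmt.Errorf("empty embedding")
	}
	return []VectorRow{{Slug: "file_0", RawText: "closest chunk"}}, nil
}

func main() {
	emb := []float32{0.1, 0.2, 0.3} // would come from ragger.LineToVector
	resp := &EmbeddingResp{Embedding: emb, Index: 0}
	vecs, err := searchEmb(resp)
	if err != nil {
		fmt.Println("search failed:", err)
		return
	}
	for _, v := range vecs {
		fmt.Println(v.Slug, v.RawText)
	}
}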
diff --git a/rag_new/embedder.go b/rag/embedder.go
index 27b975a..1804019 100644
--- a/rag_new/embedder.go
+++ b/rag/embedder.go
@@ -1,10 +1,10 @@
-package rag_new
+package rag
import (
"bytes"
- "gf-lt/config"
"encoding/json"
"fmt"
+ "gf-lt/config"
"log/slog"
"net/http"
)
@@ -17,9 +17,9 @@ type Embedder interface {
// APIEmbedder implements embedder using an API (like Hugging Face, OpenAI, etc.)
type APIEmbedder struct {
- logger *slog.Logger
- client *http.Client
- cfg *config.Config
+ logger *slog.Logger
+ client *http.Client
+ cfg *config.Config
}
func NewAPIEmbedder(l *slog.Logger, cfg *config.Config) *APIEmbedder {
@@ -44,11 +44,11 @@ func (a *APIEmbedder) Embed(text []string) ([][]float32, error) {
a.logger.Error("failed to create new req", "err", err.Error())
return nil, err
}
-
+
if a.cfg.HFToken != "" {
req.Header.Add("Authorization", "Bearer "+a.cfg.HFToken)
}
-
+
resp, err := a.client.Do(req)
if err != nil {
a.logger.Error("failed to embed text", "err", err.Error())
@@ -95,4 +95,5 @@ func (a *APIEmbedder) EmbedSingle(text string) ([]float32, error) {
// 3. Converting text to embeddings without external API calls
//
// For now, we'll focus on the API implementation which is already working in the current system,
-// and can be extended later when we have ONNX runtime integration
\ No newline at end of file
+// and can be extended later when we have ONNX runtime integration
+
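The APIEmbedder shown here posts an HF-style payload and decodes a [][]float32 response, the same contract the deleted rag/main.go used inline. A standalone sketch of that call, assuming the same JSON shape; embedURL and token are placeholders for cfg.EmbedURL and cfg.HFToken:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// embed posts texts to an HF-style embedding endpoint and decodes the
// [][]float32 response, mirroring the payload contract used by APIEmbedder.
// embedURL and token are placeholders, not values from this repo.
func embed(embedURL, token string, texts []string) ([][]float32, error) {
	payload, err := json.Marshal(map[string]any{
		"inputs":  texts,
		"options": map[string]bool{"wait_for_model": true},
	})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", embedURL, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	if token != "" {
		req.Header.Add("Authorization", "Bearer "+token)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("non-200 response: %d", resp.StatusCode)
	}
	var emb [][]float32
	if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
		return nil, err
	}
	return emb, nil
}

func main() {
	vecs, err := embed("http://localhost:8080/embed", "", []string{"hello world"})
	if err != nil {
		fmt.Println("embed failed:", err)
		return
	}
	fmt.Println("got", len(vecs), "embeddings")
}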
diff --git a/rag/main.go b/rag/main.go
deleted file mode 100644
index b7e0c00..0000000
--- a/rag/main.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package rag
-
-import (
- "bytes"
- "gf-lt/config"
- "gf-lt/models"
- "gf-lt/storage"
- "encoding/json"
- "errors"
- "fmt"
- "log/slog"
- "net/http"
- "os"
- "path"
- "strings"
- "sync"
-
- "github.com/neurosnap/sentences/english"
-)
-
-var (
- LongJobStatusCh = make(chan string, 1)
- // messages
- FinishedRAGStatus = "finished loading RAG file; press Enter"
- LoadedFileRAGStatus = "loaded file"
- ErrRAGStatus = "some error occured; failed to transfer data to vector db"
-)
-
-type RAG struct {
- logger *slog.Logger
- store storage.FullRepo
- cfg *config.Config
-}
-
-func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
- return &RAG{
- logger: l,
- store: s,
- cfg: cfg,
- }
-}
-
-func wordCounter(sentence string) int {
- return len(strings.Split(sentence, " "))
-}
-
-func (r *RAG) LoadRAG(fpath string) error {
- data, err := os.ReadFile(fpath)
- if err != nil {
- return err
- }
- r.logger.Debug("rag: loaded file", "fp", fpath)
- LongJobStatusCh <- LoadedFileRAGStatus
- fileText := string(data)
- tokenizer, err := english.NewSentenceTokenizer(nil)
- if err != nil {
- return err
- }
- sentences := tokenizer.Tokenize(fileText)
- sents := make([]string, len(sentences))
- for i, s := range sentences {
- sents[i] = s.Text
- }
- var (
- maxChSize = 1000
- left = 0
- right = r.cfg.RAGBatchSize
- batchCh = make(chan map[int][]string, maxChSize)
- vectorCh = make(chan []models.VectorRow, maxChSize)
- errCh = make(chan error, 1)
- doneCh = make(chan bool, 1)
- lock = new(sync.Mutex)
- )
- defer close(doneCh)
- defer close(errCh)
- defer close(batchCh)
- // group sentences
- paragraphs := []string{}
- par := strings.Builder{}
- for i := 0; i < len(sents); i++ {
- par.WriteString(sents[i])
- if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) {
- paragraphs = append(paragraphs, par.String())
- par.Reset()
- }
- }
- if len(paragraphs) < int(r.cfg.RAGBatchSize) {
- r.cfg.RAGBatchSize = len(paragraphs)
- }
- // fill input channel
- ctn := 0
- for {
- if int(right) > len(paragraphs) {
- batchCh <- map[int][]string{left: paragraphs[left:]}
- break
- }
- batchCh <- map[int][]string{left: paragraphs[left:right]}
- left, right = right, right+r.cfg.RAGBatchSize
- ctn++
- }
- finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", len(batchCh), len(paragraphs), len(sents))
- r.logger.Debug(finishedBatchesMsg)
- LongJobStatusCh <- finishedBatchesMsg
- for w := 0; w < int(r.cfg.RAGWorkers); w++ {
- go r.batchToVectorHFAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
- }
- // wait for emb to be done
- <-doneCh
- // write to db
- return r.writeVectors(vectorCh)
-}
-
-func (r *RAG) writeVectors(vectorCh chan []models.VectorRow) error {
- for {
- for batch := range vectorCh {
- for _, vector := range batch {
- if err := r.store.WriteVector(&vector); err != nil {
- r.logger.Error("failed to write vector", "error", err, "slug", vector.Slug)
- LongJobStatusCh <- ErrRAGStatus
- continue // a duplicate is not critical
- // return err
- }
- }
- r.logger.Debug("wrote batch to db", "size", len(batch), "vector_chan_len", len(vectorCh))
- if len(vectorCh) == 0 {
- r.logger.Debug("finished writing vectors")
- LongJobStatusCh <- FinishedRAGStatus
- defer close(vectorCh)
- return nil
- }
- }
- }
-}
-
-func (r *RAG) batchToVectorHFAsync(lock *sync.Mutex, id int, inputCh <-chan map[int][]string,
- vectorCh chan<- []models.VectorRow, errCh chan error, doneCh chan bool, filename string) {
- for {
- lock.Lock()
- if len(inputCh) == 0 {
- if len(doneCh) == 0 {
- doneCh <- true
- }
- lock.Unlock()
- return
- }
- select {
- case linesMap := <-inputCh:
- for leftI, v := range linesMap {
- r.fecthEmbHF(v, errCh, vectorCh, fmt.Sprintf("%s_%d", filename, leftI), filename)
- }
- lock.Unlock()
- case err := <-errCh:
- r.logger.Error("got an error", "error", err)
- lock.Unlock()
- return
- }
- r.logger.Debug("to vector batches", "batches#", len(inputCh), "worker#", id)
- LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
- }
-}
-
-func (r *RAG) fecthEmbHF(lines []string, errCh chan error, vectorCh chan<- []models.VectorRow, slug, filename string) {
- payload, err := json.Marshal(
- map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
- )
- if err != nil {
- r.logger.Error("failed to marshal payload", "err:", err.Error())
- errCh <- err
- return
- }
- // nolint
- req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
- if err != nil {
- r.logger.Error("failed to create new req", "err:", err.Error())
- errCh <- err
- return
- }
- req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- r.logger.Error("failed to embedd line", "err:", err.Error())
- errCh <- err
- return
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- r.logger.Error("non 200 resp", "code", resp.StatusCode)
- return
- }
- emb := [][]float32{}
- if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
- r.logger.Error("failed to embedd line", "err:", err.Error())
- errCh <- err
- return
- }
- if len(emb) == 0 {
- r.logger.Error("empty emb")
- err = errors.New("empty emb")
- errCh <- err
- return
- }
- vectors := make([]models.VectorRow, len(emb))
- for i, e := range emb {
- vector := models.VectorRow{
- Embeddings: e,
- RawText: lines[i],
- Slug: fmt.Sprintf("%s_%d", slug, i),
- FileName: filename,
- }
- vectors[i] = vector
- }
- vectorCh <- vectors
-}
-
-func (r *RAG) LineToVector(line string) ([]float32, error) {
- lines := []string{line}
- payload, err := json.Marshal(
- map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
- )
- if err != nil {
- r.logger.Error("failed to marshal payload", "err:", err.Error())
- return nil, err
- }
- // nolint
- req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
- if err != nil {
- r.logger.Error("failed to create new req", "err:", err.Error())
- return nil, err
- }
- req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- r.logger.Error("failed to embedd line", "err:", err.Error())
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- err = fmt.Errorf("non 200 resp; code: %v", resp.StatusCode)
- r.logger.Error(err.Error())
- return nil, err
- }
- emb := [][]float32{}
- if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
- r.logger.Error("failed to embedd line", "err:", err.Error())
- return nil, err
- }
- if len(emb) == 0 || len(emb[0]) == 0 {
- r.logger.Error("empty emb")
- err = errors.New("empty emb")
- return nil, err
- }
- return emb[0], nil
-}
-
-func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) {
- return r.store.SearchClosest(emb.Embedding)
-}
-
-func (r *RAG) ListLoaded() ([]string, error) {
- return r.store.ListFiles()
-}
-
-func (r *RAG) RemoveFile(filename string) error {
- return r.store.RemoveEmbByFileName(filename)
-}
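The deleted LoadRAG fed workers batches shaped as map[startIndex][]string so each batch carried its own offset; the surviving rag/rag.go keeps that shape. A standalone sketch of the batching step, with batchSize standing in for cfg.RAGBatchSize:

package main

import "fmt"

// batch splits paragraphs into maps of {startIndex: slice}, the same shape
// the deleted LoadRAG pushed onto batchCh.
func batch(paragraphs []string, batchSize int) []map[int][]string {
	var out []map[int][]string
	for left := 0; left < len(paragraphs); left += batchSize {
		right := left + batchSize
		if right > len(paragraphs) {
			right = len(paragraphs)
		}
		out = append(out, map[int][]string{left: paragraphs[left:right]})
	}
	return out
}

func main() {
	paras := []string{"a", "b", "c", "d", "e"}
	for _, b := range batch(paras, 2) {
		fmt.Println(b)
	}
}

Keying each batch by its start index lets workers build stable slugs like filename_leftIndex even when batches finish out of order.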
diff --git a/rag_new/rag.go b/rag/rag.go
index d012087..c05d38a 100644
--- a/rag_new/rag.go
+++ b/rag/rag.go
@@ -1,10 +1,10 @@
-package rag_new
+package rag
import (
+ "fmt"
"gf-lt/config"
"gf-lt/models"
"gf-lt/storage"
- "fmt"
"log/slog"
"os"
"path"
@@ -16,37 +16,37 @@ import (
var (
// Status messages for TUI integration
- LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking
+ LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking
FinishedRAGStatus = "finished loading RAG file; press Enter"
LoadedFileRAGStatus = "loaded file"
ErrRAGStatus = "some error occurred; failed to transfer data to vector db"
)
type RAG struct {
- logger *slog.Logger
- store storage.FullRepo
- cfg *config.Config
+ logger *slog.Logger
+ store storage.FullRepo
+ cfg *config.Config
embedder Embedder
- storage *VectorStorage
+ storage *VectorStorage
}
func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
// Initialize with API embedder by default, could be configurable later
embedder := NewAPIEmbedder(l, cfg)
-
+
rag := &RAG{
- logger: l,
- store: s,
- cfg: cfg,
+ logger: l,
+ store: s,
+ cfg: cfg,
embedder: embedder,
- storage: NewVectorStorage(l, s),
+ storage: NewVectorStorage(l, s),
}
-
+
// Create the necessary tables
if err := rag.storage.CreateTables(); err != nil {
l.Error("failed to create vector tables", "error", err)
}
-
+
return rag
}
@@ -61,7 +61,7 @@ func (r *RAG) LoadRAG(fpath string) error {
}
r.logger.Debug("rag: loaded file", "fp", fpath)
LongJobStatusCh <- LoadedFileRAGStatus
-
+
fileText := string(data)
tokenizer, err := english.NewSentenceTokenizer(nil)
if err != nil {
@@ -72,7 +72,7 @@ func (r *RAG) LoadRAG(fpath string) error {
for i, s := range sentences {
sents[i] = s.Text
}
-
+
// Group sentences into paragraphs based on word limit
paragraphs := []string{}
par := strings.Builder{}
@@ -84,7 +84,7 @@ func (r *RAG) LoadRAG(fpath string) error {
}
par.WriteString(sents[i])
}
-
+
if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) {
paragraph := strings.TrimSpace(par.String())
if paragraph != "" {
@@ -93,7 +93,7 @@ func (r *RAG) LoadRAG(fpath string) error {
par.Reset()
}
}
-
+
// Handle any remaining content in the paragraph buffer
if par.Len() > 0 {
paragraph := strings.TrimSpace(par.String())
@@ -101,16 +101,16 @@ func (r *RAG) LoadRAG(fpath string) error {
paragraphs = append(paragraphs, paragraph)
}
}
-
+
// Adjust batch size if needed
if len(paragraphs) < int(r.cfg.RAGBatchSize) && len(paragraphs) > 0 {
r.cfg.RAGBatchSize = len(paragraphs)
}
-
+
if len(paragraphs) == 0 {
return fmt.Errorf("no valid paragraphs found in file")
}
-
+
var (
maxChSize = 100
left = 0
@@ -121,11 +121,11 @@ func (r *RAG) LoadRAG(fpath string) error {
doneCh = make(chan bool, 1)
lock = new(sync.Mutex)
)
-
+
defer close(doneCh)
defer close(errCh)
defer close(batchCh)
-
+
// Fill input channel with batches
ctn := 0
totalParagraphs := len(paragraphs)
@@ -138,19 +138,19 @@ func (r *RAG) LoadRAG(fpath string) error {
left, right = right, right+r.cfg.RAGBatchSize
ctn++
}
-
+
finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", ctn+1, len(paragraphs), len(sents))
r.logger.Debug(finishedBatchesMsg)
LongJobStatusCh <- finishedBatchesMsg
-
+
// Start worker goroutines
for w := 0; w < int(r.cfg.RAGWorkers); w++ {
go r.batchToVectorAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
}
-
+
// Wait for embedding to be done
<-doneCh
-
+
// Write vectors to storage
return r.writeVectors(vectorCh)
}
@@ -182,14 +182,14 @@ func (r *RAG) batchToVectorAsync(lock *sync.Mutex, id int, inputCh <-chan map[in
doneCh <- true
}
}()
-
+
for {
lock.Lock()
if len(inputCh) == 0 {
lock.Unlock()
return
}
-
+
select {
case linesMap := <-inputCh:
for leftI, lines := range linesMap {
@@ -207,7 +207,7 @@ func (r *RAG) batchToVectorAsync(lock *sync.Mutex, id int, inputCh <-chan map[in
default:
lock.Unlock()
}
-
+
r.logger.Debug("processed batch", "batches#", len(inputCh), "worker#", id)
LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
}
@@ -220,14 +220,14 @@ func (r *RAG) fetchEmb(lines []string, errCh chan error, vectorCh chan<- []model
errCh <- err
return err
}
-
+
if len(embeddings) == 0 {
err := fmt.Errorf("no embeddings returned")
r.logger.Error("empty embeddings")
errCh <- err
return err
}
-
+
vectors := make([]models.VectorRow, len(embeddings))
for i, emb := range embeddings {
vector := models.VectorRow{
@@ -238,7 +238,7 @@ func (r *RAG) fetchEmb(lines []string, errCh chan error, vectorCh chan<- []model
}
vectors[i] = vector
}
-
+
vectorCh <- vectors
return nil
}
@@ -257,4 +257,5 @@ func (r *RAG) ListLoaded() ([]string, error) {
func (r *RAG) RemoveFile(filename string) error {
return r.storage.RemoveEmbByFileName(filename)
-}
\ No newline at end of file
+}
+
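The rewritten grouping loop above fixes two gaps in the old version: it trims and skips empty paragraphs, and it flushes whatever remains in the builder after the loop. A standalone sketch of that logic; the word count here uses strings.Fields rather than the repo's strings.Split-based wordCounter:

package main

import (
	"fmt"
	"strings"
)

// groupSentences joins sentences into paragraphs once a word limit is
// crossed, mirroring the grouping loop in rag.go above.
func groupSentences(sents []string, wordLimit int) []string {
	var paragraphs []string
	var par strings.Builder
	for _, s := range sents {
		par.WriteString(s)
		if len(strings.Fields(par.String())) > wordLimit {
			if p := strings.TrimSpace(par.String()); p != "" {
				paragraphs = append(paragraphs, p)
			}
			par.Reset()
		}
	}
	// flush any remainder, as the hunk above does after its loop
	if p := strings.TrimSpace(par.String()); p != "" {
		paragraphs = append(paragraphs, p)
	}
	return paragraphs
}

func main() {
	sents := []string{"One two three. ", "Four five. ", "Six seven eight nine. "}
	for i, p := range groupSentences(sents, 4) {
		fmt.Printf("paragraph %d: %q\n", i, p)
	}
}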
diff --git a/rag_new/storage.go b/rag/storage.go
index 2ab56fb..26ca0e3 100644
--- a/rag_new/storage.go
+++ b/rag/storage.go
@@ -1,10 +1,10 @@
-package rag_new
+package rag
import (
- "gf-lt/models"
- "gf-lt/storage"
"encoding/binary"
"fmt"
+ "gf-lt/models"
+ "gf-lt/storage"
"log/slog"
"sort"
"strings"
@@ -23,7 +23,7 @@ type VectorStorage struct {
func NewVectorStorage(logger *slog.Logger, store storage.FullRepo) *VectorStorage {
return &VectorStorage{
logger: logger,
- sqlxDB: store.DB(), // Use the new DB() method
+ sqlxDB: store.DB(), // Use the new DB() method
store: store,
}
}
@@ -53,7 +53,7 @@ func (vs *VectorStorage) CreateTables() error {
`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_filename ON embeddings_5120(filename)`,
`CREATE INDEX IF NOT EXISTS idx_embeddings_384_slug ON embeddings_384(slug)`,
`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_slug ON embeddings_5120(slug)`,
-
+
// Additional indexes that may help with searches
`CREATE INDEX IF NOT EXISTS idx_embeddings_384_created_at ON embeddings_384(created_at)`,
`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_created_at ON embeddings_5120(created_at)`,
@@ -140,7 +140,7 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
// For better performance, instead of loading all vectors at once,
// we'll implement batching and potentially add L2 distance-based pre-filtering
// since cosine similarity is related to L2 distance for normalized vectors
-
+
querySQL := fmt.Sprintf("SELECT embeddings, slug, raw_text, filename FROM %s", tableName)
rows, err := vs.sqlxDB.Query(querySQL)
if err != nil {
@@ -153,27 +153,27 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
vector models.VectorRow
distance float32
}
-
+
var topResults []SearchResult
-
+
// Process vectors one by one to avoid loading everything into memory
for rows.Next() {
var (
- embeddingsBlob []byte
+ embeddingsBlob []byte
slug, rawText, fileName string
)
-
+
if err := rows.Scan(&embeddingsBlob, &slug, &rawText, &fileName); err != nil {
vs.logger.Error("failed to scan row", "error", err)
continue
}
-
+
storedEmbeddings := DeserializeVector(embeddingsBlob)
-
+
// Calculate cosine similarity (returns value between -1 and 1, where 1 is most similar)
similarity := cosineSimilarity(query, storedEmbeddings)
distance := 1 - similarity // Convert to distance where 0 is most similar
-
+
result := SearchResult{
vector: models.VectorRow{
Embeddings: storedEmbeddings,
@@ -183,34 +183,34 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
},
distance: distance,
}
-
+
// Add to top results and maintain only top 3
topResults = append(topResults, result)
-
+
// Sort and keep only top 3
sort.Slice(topResults, func(i, j int) bool {
return topResults[i].distance < topResults[j].distance
})
-
+
if len(topResults) > 3 {
topResults = topResults[:3] // Keep only closest 3
}
}
-
+
// Convert back to VectorRow slice
var results []models.VectorRow
for _, result := range topResults {
result.vector.Distance = result.distance
results = append(results, result.vector)
}
-
+
return results, nil
}
// ListFiles returns a list of all loaded files
func (vs *VectorStorage) ListFiles() ([]string, error) {
var fileLists [][]string
-
+
// Query both tables and combine results
for _, table := range []string{"embeddings_384", "embeddings_5120"} {
query := fmt.Sprintf("SELECT DISTINCT filename FROM %s", table)
@@ -219,7 +219,7 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
// Continue if one table doesn't exist
continue
}
-
+
var files []string
for rows.Next() {
var filename string
@@ -229,10 +229,10 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
files = append(files, filename)
}
rows.Close()
-
+
fileLists = append(fileLists, files)
}
-
+
// Combine and deduplicate
fileSet := make(map[string]bool)
var allFiles []string
@@ -244,25 +244,25 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
}
}
}
-
+
return allFiles, nil
}
// RemoveEmbByFileName removes all embeddings associated with a specific filename
func (vs *VectorStorage) RemoveEmbByFileName(filename string) error {
var errors []string
-
+
for _, table := range []string{"embeddings_384", "embeddings_5120"} {
query := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", table)
if _, err := vs.sqlxDB.Exec(query, filename); err != nil {
errors = append(errors, err.Error())
}
}
-
+
if len(errors) > 0 {
return fmt.Errorf("errors occurred: %s", strings.Join(errors, "; "))
}
-
+
return nil
}
@@ -297,4 +297,5 @@ func sqrt(f float32) float32 {
guess = (guess + f/guess) / 2
}
return guess
-}
\ No newline at end of file
+}
+
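SearchClosest now does a brute-force scan: cosine distance against every stored vector, keeping the closest three. A standalone sketch of the same math, using math.Sqrt in place of the file's hand-rolled Newton-iteration sqrt for float32, and sorting once at the end instead of per row:

package main

import (
	"fmt"
	"math"
	"sort"
)

// cosineSimilarity mirrors the search math in storage.go above:
// dot product over the product of magnitudes.
func cosineSimilarity(a, b []float32) float32 {
	if len(a) != len(b) || len(a) == 0 {
		return 0
	}
	var dot, na, nb float64
	for i := range a {
		dot += float64(a[i]) * float64(b[i])
		na += float64(a[i]) * float64(a[i])
		nb += float64(b[i]) * float64(b[i])
	}
	if na == 0 || nb == 0 {
		return 0
	}
	return float32(dot / (math.Sqrt(na) * math.Sqrt(nb)))
}

// topK keeps the k vectors closest to query by cosine distance
// (1 - similarity), the same scan-and-truncate approach SearchClosest
// uses with k=3.
func topK(query []float32, vectors [][]float32, k int) []int {
	type scored struct {
		idx  int
		dist float32
	}
	res := make([]scored, 0, len(vectors))
	for i, v := range vectors {
		res = append(res, scored{i, 1 - cosineSimilarity(query, v)})
	}
	sort.Slice(res, func(i, j int) bool { return res[i].dist < res[j].dist })
	if len(res) > k {
		res = res[:k]
	}
	idxs := make([]int, len(res))
	for i, r := range res {
		idxs[i] = r.idx
	}
	return idxs
}

func main() {
	query := []float32{1, 0}
	vecs := [][]float32{{1, 0}, {0, 1}, {0.9, 0.1}}
	fmt.Println(topK(query, vecs, 3)) // closest first: [0 2 1]
}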
diff --git a/storage/migrate.go b/storage/migrate.go
index b05dddc..decfe9c 100644
--- a/storage/migrate.go
+++ b/storage/migrate.go
@@ -5,8 +5,6 @@ import (
"fmt"
"io/fs"
"strings"
-
- _ "github.com/asg017/sqlite-vec-go-bindings/ncruces"
)
//go:embed migrations/*
@@ -53,8 +51,8 @@ func (p *ProviderSQL) executeMigration(migrationsDir fs.FS, fileName string) err
}
func (p *ProviderSQL) executeSQL(sqlContent []byte) error {
- // Connect to the database (example using a simple connection)
- err := p.s3Conn.Exec(string(sqlContent))
+ // Execute the migration content using standard database connection
+ _, err := p.db.Exec(string(sqlContent))
if err != nil {
return fmt.Errorf("failed to execute SQL: %w", err)
}
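With the ncruces connection gone, migrations run through the one remaining database handle. A standalone sketch of the new executeSQL approach, using the glebarez/go-sqlite driver the repo already imports; the table in the example migration is illustrative, not one of the repo's migrations:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/glebarez/go-sqlite" // pure-Go driver, registered as "sqlite"
)

// executeSQL runs migration content through the single database/sql
// connection, the same approach the new executeSQL takes via p.db.Exec.
func executeSQL(db *sql.DB, sqlContent []byte) error {
	if _, err := db.Exec(string(sqlContent)); err != nil {
		return fmt.Errorf("failed to execute SQL: %w", err)
	}
	return nil
}

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	migration := []byte(`CREATE TABLE IF NOT EXISTS chats (id INTEGER PRIMARY KEY, name TEXT);`)
	if err := executeSQL(db, migration); err != nil {
		panic(err)
	}
	fmt.Println("migration applied")
}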
diff --git a/storage/storage.go b/storage/storage.go
index 0416884..a092f8d 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -6,7 +6,6 @@ import (
_ "github.com/glebarez/go-sqlite"
"github.com/jmoiron/sqlx"
- "github.com/ncruces/go-sqlite3"
)
type FullRepo interface {
@@ -28,7 +27,6 @@ type ChatHistory interface {
type ProviderSQL struct {
db *sqlx.DB
- s3Conn *sqlite3.Conn
logger *slog.Logger
}
@@ -97,7 +95,7 @@ func (p ProviderSQL) ChatGetMaxID() (uint32, error) {
return id, err
}
-// opens two connections
+// opens database connection
func NewProviderSQL(dbPath string, logger *slog.Logger) FullRepo {
db, err := sqlx.Open("sqlite", dbPath)
if err != nil {
@@ -105,11 +103,7 @@ func NewProviderSQL(dbPath string, logger *slog.Logger) FullRepo {
return nil
}
p := ProviderSQL{db: db, logger: logger}
- p.s3Conn, err = sqlite3.Open(dbPath)
- if err != nil {
- logger.Error("failed to open vecdb connection", "error", err)
- return nil
- }
+
p.Migrate()
return p
}
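The constructor change above is the core of the commit: one sqlx connection instead of a second sqlite3.Conn opened on the same file. A simplified sketch of the single-connection provider; the Ping check is an addition for illustration, not present in the repo:

package main

import (
	"fmt"
	"log/slog"
	"os"

	_ "github.com/glebarez/go-sqlite"
	"github.com/jmoiron/sqlx"
)

// Provider mirrors the slimmed-down ProviderSQL above: one sqlx handle,
// no parallel ncruces connection. The field set is a simplification.
type Provider struct {
	db     *sqlx.DB
	logger *slog.Logger
}

// newProvider opens the single connection, as NewProviderSQL now does.
func newProvider(dbPath string, logger *slog.Logger) (*Provider, error) {
	db, err := sqlx.Open("sqlite", dbPath)
	if err != nil {
		return nil, err
	}
	if err := db.Ping(); err != nil { // verify the database actually opens
		return nil, err
	}
	return &Provider{db: db, logger: logger}, nil
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	p, err := newProvider(":memory:", logger)
	if err != nil {
		panic(err)
	}
	defer p.db.Close()
	fmt.Println("single-connection provider ready")
}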
diff --git a/storage/vector.go b/storage/vector.go
index b3e5654..6958634 100644
--- a/storage/vector.go
+++ b/storage/vector.go
@@ -66,35 +66,13 @@ func (p ProviderSQL) WriteVector(row *models.VectorRow) error {
if err != nil {
return err
}
- stmt, _, err := p.s3Conn.Prepare(
- fmt.Sprintf("INSERT INTO %s(embedding, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName))
- if err != nil {
- p.logger.Error("failed to prep a stmt", "error", err)
- return err
- }
- defer stmt.Close()
+
serializedEmbeddings := SerializeVector(row.Embeddings)
- if err := stmt.BindBlob(1, serializedEmbeddings); err != nil {
- p.logger.Error("failed to bind", "error", err)
- return err
- }
- if err := stmt.BindText(2, row.Slug); err != nil {
- p.logger.Error("failed to bind", "error", err)
- return err
- }
- if err := stmt.BindText(3, row.RawText); err != nil {
- p.logger.Error("failed to bind", "error", err)
- return err
- }
- if err := stmt.BindText(4, row.FileName); err != nil {
- p.logger.Error("failed to bind", "error", err)
- return err
- }
- err = stmt.Exec()
- if err != nil {
- return err
- }
- return nil
+
+ query := fmt.Sprintf("INSERT INTO %s(embedding, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName)
+ _, err = p.db.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName)
+
+ return err
}
func decodeUnsafe(bs []byte) []float32 {
@@ -110,30 +88,30 @@ func (p ProviderSQL) SearchClosest(q []float32) ([]models.VectorRow, error) {
func (p ProviderSQL) ListFiles() ([]string, error) {
q := fmt.Sprintf("SELECT filename FROM %s GROUP BY filename", vecTableName384)
- stmt, _, err := p.s3Conn.Prepare(q)
+ rows, err := p.db.Query(q)
if err != nil {
return nil, err
}
- defer stmt.Close()
+ defer rows.Close()
+
resp := []string{}
- for stmt.Step() {
- resp = append(resp, stmt.ColumnText(0))
+ for rows.Next() {
+ var filename string
+ if err := rows.Scan(&filename); err != nil {
+ return nil, err
+ }
+ resp = append(resp, filename)
}
- if err := stmt.Err(); err != nil {
+
+ if err := rows.Err(); err != nil {
return nil, err
}
+
return resp, nil
}
func (p ProviderSQL) RemoveEmbByFileName(filename string) error {
q := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", vecTableName384)
- stmt, _, err := p.s3Conn.Prepare(q)
- if err != nil {
- return err
- }
- defer stmt.Close()
- if err := stmt.BindText(1, filename); err != nil {
- return err
- }
- return stmt.Exec()
+ _, err := p.db.Exec(q, filename)
+ return err
}
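The rewritten WriteVector reduces the insert to one parameterized Exec over a serialized BLOB. SerializeVector itself is not shown in this diff, so the encoding below (one little-endian uint32 bit pattern per float32) is an assumption about the wire format, not the repo's confirmed layout:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// serializeVector packs a []float32 into a BLOB, four bytes per element.
// The byte order is assumed; the repo's SerializeVector body is not in
// this diff.
func serializeVector(v []float32) []byte {
	buf := make([]byte, 4*len(v))
	for i, f := range v {
		binary.LittleEndian.PutUint32(buf[i*4:], math.Float32bits(f))
	}
	return buf
}

// deserializeVector reverses serializeVector.
func deserializeVector(b []byte) []float32 {
	v := make([]float32, len(b)/4)
	for i := range v {
		v[i] = math.Float32frombits(binary.LittleEndian.Uint32(b[i*4:]))
	}
	return v
}

func main() {
	in := []float32{0.25, -1.5, 3.0}
	out := deserializeVector(serializeVector(in))
	fmt.Println(out) // [0.25 -1.5 3]
}

Once the vector is a flat []byte, standard database/sql placeholder binding handles it like any other column, which is what makes the ncruces Prepare/BindBlob path removable.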