summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Grail Finder <wohilas@gmail.com>	2025-10-09 16:19:43 +0300
committer	Grail Finder <wohilas@gmail.com>	2025-10-09 16:19:43 +0300
commit	2e1b018a45b88b843523a726a7ef264c2fdaa0b3 (patch)
tree	6150fcc39fab6dc31c24854b1e363c82a32c2ba9
parent	5d2ce7a5f5743fa39b43379b143e0ee9a908ada6 (diff)
Feat: new rag attempt
-rw-r--r--	bot.go	19
-rw-r--r--	go.mod	1
-rw-r--r--	rag_new/embedder.go	98
-rw-r--r--	rag_new/rag.go	260
-rw-r--r--	rag_new/storage.go	300
-rw-r--r--	storage/storage.go	5
-rw-r--r--	storage/vector.go	98
-rw-r--r--	storage/vector.go.bak	179
8 files changed, 893 insertions, 67 deletions
diff --git a/bot.go b/bot.go
index f8170e1..a5d16e1 100644
--- a/bot.go
+++ b/bot.go
@@ -9,7 +9,7 @@ import (
"gf-lt/config"
"gf-lt/extra"
"gf-lt/models"
- "gf-lt/rag"
+ "gf-lt/rag_new"
"gf-lt/storage"
"io"
"log/slog"
@@ -41,7 +41,7 @@ var (
defaultStarter = []models.RoleMsg{}
defaultStarterBytes = []byte{}
interruptResp = false
- ragger *rag.RAG
+ ragger *rag_new.RAG
chunkParser ChunkParser
lastToolCall *models.FuncCall
//nolint:unused // TTS_ENABLED conditionally uses this
@@ -277,7 +277,14 @@ func chatRagUse(qText string) (string, error) {
logger.Error("failed to get embs", "error", err, "index", i, "question", q)
continue
}
- vecs, err := store.SearchClosest(emb)
+
+ // Create EmbeddingResp struct for the search
+ embeddingResp := &models.EmbeddingResp{
+ Embedding: emb,
+ Index: 0, // Not used in search but required for the struct
+ }
+
+ vecs, err := ragger.SearchEmb(embeddingResp)
if err != nil {
logger.Error("failed to query embs", "error", err, "index", i, "question", q)
continue
@@ -286,12 +293,12 @@ func chatRagUse(qText string) (string, error) {
}
// get raw text
resps := []string{}
- logger.Debug("sqlvec resp", "vecs len", len(respVecs))
+ logger.Debug("rag query resp", "vecs len", len(respVecs))
for _, rv := range respVecs {
resps = append(resps, rv.RawText)
}
if len(resps) == 0 {
- return "No related results from vector storage.", nil
+ return "No related results from RAG vector storage.", nil
}
return strings.Join(resps, "\n"), nil
}
@@ -564,7 +571,7 @@ func init() {
if store == nil {
os.Exit(1)
}
- ragger = rag.New(logger, store, cfg)
+ ragger = rag_new.New(logger, store, cfg)
// https://github.com/coreydaley/ggerganov-llama.cpp/blob/master/examples/server/README.md
// load all chats in memory
if _, err := loadHistoryChats(); err != nil {
diff --git a/go.mod b/go.mod
index cc1e743..1b275df 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,7 @@ require (
github.com/gopxl/beep/v2 v2.1.0
github.com/gordonklaus/portaudio v0.0.0-20230709114228-aafa478834f5
github.com/jmoiron/sqlx v1.4.0
+ github.com/mattn/go-sqlite3 v1.14.22
github.com/ncruces/go-sqlite3 v0.21.3
github.com/neurosnap/sentences v1.1.2
github.com/rivo/tview v0.0.0-20241103174730-c76f7879f592
diff --git a/rag_new/embedder.go b/rag_new/embedder.go
new file mode 100644
index 0000000..27b975a
--- /dev/null
+++ b/rag_new/embedder.go
@@ -0,0 +1,98 @@
+package rag_new
+
+import (
+ "bytes"
+ "gf-lt/config"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+)
+
+// Embedder defines the interface for embedding text
+type Embedder interface {
+ Embed(text []string) ([][]float32, error)
+ EmbedSingle(text string) ([]float32, error)
+}
+
+// APIEmbedder implements embedder using an API (like Hugging Face, OpenAI, etc.)
+type APIEmbedder struct {
+ logger *slog.Logger
+ client *http.Client
+ cfg *config.Config
+}
+
+func NewAPIEmbedder(l *slog.Logger, cfg *config.Config) *APIEmbedder {
+ return &APIEmbedder{
+ logger: l,
+ client: &http.Client{},
+ cfg: cfg,
+ }
+}
+
+func (a *APIEmbedder) Embed(text []string) ([][]float32, error) {
+ payload, err := json.Marshal(
+ map[string]any{"inputs": text, "options": map[string]bool{"wait_for_model": true}},
+ )
+ if err != nil {
+ a.logger.Error("failed to marshal payload", "err", err.Error())
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", a.cfg.EmbedURL, bytes.NewReader(payload))
+ if err != nil {
+ a.logger.Error("failed to create new req", "err", err.Error())
+ return nil, err
+ }
+
+ if a.cfg.HFToken != "" {
+ req.Header.Add("Authorization", "Bearer "+a.cfg.HFToken)
+ }
+
+ resp, err := a.client.Do(req)
+ if err != nil {
+ a.logger.Error("failed to embed text", "err", err.Error())
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("non 200 response; code: %v", resp.StatusCode)
+ a.logger.Error(err.Error())
+ return nil, err
+ }
+
+ var emb [][]float32
+ if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
+ a.logger.Error("failed to decode embedding response", "err", err.Error())
+ return nil, err
+ }
+
+ if len(emb) == 0 {
+ err = fmt.Errorf("empty embedding response")
+ a.logger.Error("empty embedding response")
+ return nil, err
+ }
+
+ return emb, nil
+}
+
+func (a *APIEmbedder) EmbedSingle(text string) ([]float32, error) {
+ result, err := a.Embed([]string{text})
+ if err != nil {
+ return nil, err
+ }
+ if len(result) == 0 {
+ return nil, fmt.Errorf("no embeddings returned")
+ }
+ return result[0], nil
+}
+
+// TODO: ONNXEmbedder implementation would go here
+// This would require:
+// 1. Loading ONNX models locally
+// 2. Using a Go ONNX runtime (like gorgonia/onnx or similar)
+// 3. Converting text to embeddings without external API calls
+//
+// For now, we'll focus on the API implementation which is already working in the current system,
+// and can be extended later when we have ONNX runtime integration \ No newline at end of file
diff --git a/rag_new/rag.go b/rag_new/rag.go
new file mode 100644
index 0000000..d012087
--- /dev/null
+++ b/rag_new/rag.go
@@ -0,0 +1,260 @@
+package rag_new
+
+import (
+ "gf-lt/config"
+ "gf-lt/models"
+ "gf-lt/storage"
+ "fmt"
+ "log/slog"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/neurosnap/sentences/english"
+)
+
+var (
+ // Status messages for TUI integration
+ LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking
+ FinishedRAGStatus = "finished loading RAG file; press Enter"
+ LoadedFileRAGStatus = "loaded file"
+ ErrRAGStatus = "some error occurred; failed to transfer data to vector db"
+)
+
+type RAG struct {
+ logger *slog.Logger
+ store storage.FullRepo
+ cfg *config.Config
+ embedder Embedder
+ storage *VectorStorage
+}
+
+func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
+ // Initialize with API embedder by default, could be configurable later
+ embedder := NewAPIEmbedder(l, cfg)
+
+ rag := &RAG{
+ logger: l,
+ store: s,
+ cfg: cfg,
+ embedder: embedder,
+ storage: NewVectorStorage(l, s),
+ }
+
+ // Create the necessary tables
+ if err := rag.storage.CreateTables(); err != nil {
+ l.Error("failed to create vector tables", "error", err)
+ }
+
+ return rag
+}
+
+func wordCounter(sentence string) int {
+ return len(strings.Split(strings.TrimSpace(sentence), " "))
+}
+
+func (r *RAG) LoadRAG(fpath string) error {
+ data, err := os.ReadFile(fpath)
+ if err != nil {
+ return err
+ }
+ r.logger.Debug("rag: loaded file", "fp", fpath)
+ LongJobStatusCh <- LoadedFileRAGStatus
+
+ fileText := string(data)
+ tokenizer, err := english.NewSentenceTokenizer(nil)
+ if err != nil {
+ return err
+ }
+ sentences := tokenizer.Tokenize(fileText)
+ sents := make([]string, len(sentences))
+ for i, s := range sentences {
+ sents[i] = s.Text
+ }
+
+ // Group sentences into paragraphs based on word limit
+ paragraphs := []string{}
+ par := strings.Builder{}
+ for i := 0; i < len(sents); i++ {
+ // Only add sentences that aren't empty
+ if strings.TrimSpace(sents[i]) != "" {
+ if par.Len() > 0 {
+ par.WriteString(" ") // Add space between sentences
+ }
+ par.WriteString(sents[i])
+ }
+
+ if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) {
+ paragraph := strings.TrimSpace(par.String())
+ if paragraph != "" {
+ paragraphs = append(paragraphs, paragraph)
+ }
+ par.Reset()
+ }
+ }
+
+ // Handle any remaining content in the paragraph buffer
+ if par.Len() > 0 {
+ paragraph := strings.TrimSpace(par.String())
+ if paragraph != "" {
+ paragraphs = append(paragraphs, paragraph)
+ }
+ }
+
+ // Adjust batch size if needed
+ if len(paragraphs) < int(r.cfg.RAGBatchSize) && len(paragraphs) > 0 {
+ r.cfg.RAGBatchSize = len(paragraphs)
+ }
+
+ if len(paragraphs) == 0 {
+ return fmt.Errorf("no valid paragraphs found in file")
+ }
+
+ var (
+ maxChSize = 100
+ left = 0
+ right = r.cfg.RAGBatchSize
+ batchCh = make(chan map[int][]string, maxChSize)
+ vectorCh = make(chan []models.VectorRow, maxChSize)
+ errCh = make(chan error, 1)
+ doneCh = make(chan bool, 1)
+ lock = new(sync.Mutex)
+ )
+
+ defer close(doneCh)
+ defer close(errCh)
+ defer close(batchCh)
+
+ // Fill input channel with batches
+ ctn := 0
+ totalParagraphs := len(paragraphs)
+ for {
+ if int(right) > totalParagraphs {
+ batchCh <- map[int][]string{left: paragraphs[left:]}
+ break
+ }
+ batchCh <- map[int][]string{left: paragraphs[left:right]}
+ left, right = right, right+r.cfg.RAGBatchSize
+ ctn++
+ }
+
+ finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", ctn+1, len(paragraphs), len(sents))
+ r.logger.Debug(finishedBatchesMsg)
+ LongJobStatusCh <- finishedBatchesMsg
+
+ // Start worker goroutines
+ for w := 0; w < int(r.cfg.RAGWorkers); w++ {
+ go r.batchToVectorAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
+ }
+
+ // Wait for embedding to be done
+ <-doneCh
+
+ // Write vectors to storage
+ return r.writeVectors(vectorCh)
+}
+
+func (r *RAG) writeVectors(vectorCh chan []models.VectorRow) error {
+ for {
+ for batch := range vectorCh {
+ for _, vector := range batch {
+ if err := r.storage.WriteVector(&vector); err != nil {
+ r.logger.Error("failed to write vector", "error", err, "slug", vector.Slug)
+ LongJobStatusCh <- ErrRAGStatus
+ continue // a duplicate is not critical
+ }
+ }
+ r.logger.Debug("wrote batch to db", "size", len(batch), "vector_chan_len", len(vectorCh))
+ if len(vectorCh) == 0 {
+ r.logger.Debug("finished writing vectors")
+ LongJobStatusCh <- FinishedRAGStatus
+ return nil
+ }
+ }
+ }
+}
+
+func (r *RAG) batchToVectorAsync(lock *sync.Mutex, id int, inputCh <-chan map[int][]string,
+ vectorCh chan<- []models.VectorRow, errCh chan error, doneCh chan bool, filename string) {
+ defer func() {
+ if len(doneCh) == 0 {
+ doneCh <- true
+ }
+ }()
+
+ for {
+ lock.Lock()
+ if len(inputCh) == 0 {
+ lock.Unlock()
+ return
+ }
+
+ select {
+ case linesMap := <-inputCh:
+ for leftI, lines := range linesMap {
+ if err := r.fetchEmb(lines, errCh, vectorCh, fmt.Sprintf("%s_%d", filename, leftI), filename); err != nil {
+ r.logger.Error("error fetching embeddings", "error", err, "worker", id)
+ lock.Unlock()
+ return
+ }
+ }
+ lock.Unlock()
+ case err := <-errCh:
+ r.logger.Error("got an error from error channel", "error", err)
+ lock.Unlock()
+ return
+ default:
+ lock.Unlock()
+ }
+
+ r.logger.Debug("processed batch", "batches#", len(inputCh), "worker#", id)
+ LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
+ }
+}
+
+func (r *RAG) fetchEmb(lines []string, errCh chan error, vectorCh chan<- []models.VectorRow, slug, filename string) error {
+ embeddings, err := r.embedder.Embed(lines)
+ if err != nil {
+ r.logger.Error("failed to embed lines", "err", err.Error())
+ errCh <- err
+ return err
+ }
+
+ if len(embeddings) == 0 {
+ err := fmt.Errorf("no embeddings returned")
+ r.logger.Error("empty embeddings")
+ errCh <- err
+ return err
+ }
+
+ vectors := make([]models.VectorRow, len(embeddings))
+ for i, emb := range embeddings {
+ vector := models.VectorRow{
+ Embeddings: emb,
+ RawText: lines[i],
+ Slug: fmt.Sprintf("%s_%d", slug, i),
+ FileName: filename,
+ }
+ vectors[i] = vector
+ }
+
+ vectorCh <- vectors
+ return nil
+}
+
+func (r *RAG) LineToVector(line string) ([]float32, error) {
+ return r.embedder.EmbedSingle(line)
+}
+
+func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) {
+ return r.storage.SearchClosest(emb.Embedding)
+}
+
+func (r *RAG) ListLoaded() ([]string, error) {
+ return r.storage.ListFiles()
+}
+
+func (r *RAG) RemoveFile(filename string) error {
+ return r.storage.RemoveEmbByFileName(filename)
+}
\ No newline at end of file
diff --git a/rag_new/storage.go b/rag_new/storage.go
new file mode 100644
index 0000000..2ab56fb
--- /dev/null
+++ b/rag_new/storage.go
@@ -0,0 +1,300 @@
+package rag_new
+
+import (
+ "gf-lt/models"
+ "gf-lt/storage"
+ "encoding/binary"
+ "fmt"
+ "log/slog"
+ "sort"
+ "strings"
+ "unsafe"
+
+ "github.com/jmoiron/sqlx"
+)
+
+// VectorStorage handles storing and retrieving vectors from SQLite
+type VectorStorage struct {
+ logger *slog.Logger
+ sqlxDB *sqlx.DB
+ store storage.FullRepo
+}
+
+func NewVectorStorage(logger *slog.Logger, store storage.FullRepo) *VectorStorage {
+ return &VectorStorage{
+ logger: logger,
+ sqlxDB: store.DB(), // Use the new DB() method
+ store: store,
+ }
+}
+
+// CreateTables creates the necessary tables for vector storage
+func (vs *VectorStorage) CreateTables() error {
+ // Create tables for different embedding dimensions
+ queries := []string{
+ `CREATE TABLE IF NOT EXISTS embeddings_384 (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ embeddings BLOB NOT NULL,
+ slug TEXT NOT NULL,
+ raw_text TEXT NOT NULL,
+ filename TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )`,
+ `CREATE TABLE IF NOT EXISTS embeddings_5120 (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ embeddings BLOB NOT NULL,
+ slug TEXT NOT NULL,
+ raw_text TEXT NOT NULL,
+ filename TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )`,
+ // Indexes for better performance
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_384_filename ON embeddings_384(filename)`,
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_5120_filename ON embeddings_5120(filename)`,
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_384_slug ON embeddings_384(slug)`,
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_5120_slug ON embeddings_5120(slug)`,
+
+ // Additional indexes that may help with searches
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_384_created_at ON embeddings_384(created_at)`,
+ `CREATE INDEX IF NOT EXISTS idx_embeddings_5120_created_at ON embeddings_5120(created_at)`,
+ }
+
+ for _, query := range queries {
+ if _, err := vs.sqlxDB.Exec(query); err != nil {
+ return fmt.Errorf("failed to create table: %w", err)
+ }
+ }
+ return nil
+}
+
+// SerializeVector converts []float32 to binary blob
+func SerializeVector(vec []float32) []byte {
+ buf := make([]byte, len(vec)*4) // 4 bytes per float32
+ for i, v := range vec {
+ binary.LittleEndian.PutUint32(buf[i*4:], mathFloat32bits(v))
+ }
+ return buf
+}
+
+// DeserializeVector converts binary blob back to []float32
+func DeserializeVector(data []byte) []float32 {
+ count := len(data) / 4
+ vec := make([]float32, count)
+ for i := 0; i < count; i++ {
+ vec[i] = mathBitsToFloat32(binary.LittleEndian.Uint32(data[i*4:]))
+ }
+ return vec
+}
+
+// mathFloat32bits and mathBitsToFloat32 are helpers to convert between float32 and uint32
+func mathFloat32bits(f float32) uint32 {
+ return binary.LittleEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&f)))[:4])
+}
+
+func mathBitsToFloat32(b uint32) float32 {
+ return *(*float32)(unsafe.Pointer(&b))
+}
+
+// WriteVector stores an embedding vector in the database
+func (vs *VectorStorage) WriteVector(row *models.VectorRow) error {
+ tableName, err := vs.getTableName(row.Embeddings)
+ if err != nil {
+ return err
+ }
+
+ // Serialize the embeddings to binary
+ serializedEmbeddings := SerializeVector(row.Embeddings)
+
+ query := fmt.Sprintf(
+ "INSERT INTO %s (embeddings, slug, raw_text, filename) VALUES (?, ?, ?, ?)",
+ tableName,
+ )
+
+ if _, err := vs.sqlxDB.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName); err != nil {
+ vs.logger.Error("failed to write vector", "error", err, "slug", row.Slug)
+ return err
+ }
+
+ return nil
+}
+
+// getTableName determines which table to use based on embedding size
+func (vs *VectorStorage) getTableName(emb []float32) (string, error) {
+ switch len(emb) {
+ case 384:
+ return "embeddings_384", nil
+ case 5120:
+ return "embeddings_5120", nil
+ default:
+ return "", fmt.Errorf("no table for embedding size of %d", len(emb))
+ }
+}
+
+// SearchClosest finds vectors closest to the query vector using efficient cosine similarity calculation
+func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, error) {
+ tableName, err := vs.getTableName(query)
+ if err != nil {
+ return nil, err
+ }
+
+ // For better performance, instead of loading all vectors at once,
+ // we'll implement batching and potentially add L2 distance-based pre-filtering
+ // since cosine similarity is related to L2 distance for normalized vectors
+
+ querySQL := fmt.Sprintf("SELECT embeddings, slug, raw_text, filename FROM %s", tableName)
+ rows, err := vs.sqlxDB.Query(querySQL)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ // Use a min-heap or simple slice to keep track of top 3 closest vectors
+ type SearchResult struct {
+ vector models.VectorRow
+ distance float32
+ }
+
+ var topResults []SearchResult
+
+ // Process vectors one by one to avoid loading everything into memory
+ for rows.Next() {
+ var (
+ embeddingsBlob []byte
+ slug, rawText, fileName string
+ )
+
+ if err := rows.Scan(&embeddingsBlob, &slug, &rawText, &fileName); err != nil {
+ vs.logger.Error("failed to scan row", "error", err)
+ continue
+ }
+
+ storedEmbeddings := DeserializeVector(embeddingsBlob)
+
+ // Calculate cosine similarity (returns value between -1 and 1, where 1 is most similar)
+ similarity := cosineSimilarity(query, storedEmbeddings)
+ distance := 1 - similarity // Convert to distance where 0 is most similar
+
+ result := SearchResult{
+ vector: models.VectorRow{
+ Embeddings: storedEmbeddings,
+ Slug: slug,
+ RawText: rawText,
+ FileName: fileName,
+ },
+ distance: distance,
+ }
+
+ // Add to top results and maintain only top 3
+ topResults = append(topResults, result)
+
+ // Sort and keep only top 3
+ sort.Slice(topResults, func(i, j int) bool {
+ return topResults[i].distance < topResults[j].distance
+ })
+
+ if len(topResults) > 3 {
+ topResults = topResults[:3] // Keep only closest 3
+ }
+ }
+
+ // Convert back to VectorRow slice
+ var results []models.VectorRow
+ for _, result := range topResults {
+ result.vector.Distance = result.distance
+ results = append(results, result.vector)
+ }
+
+ return results, nil
+}
+
+// ListFiles returns a list of all loaded files
+func (vs *VectorStorage) ListFiles() ([]string, error) {
+ var fileLists [][]string
+
+ // Query both tables and combine results
+ for _, table := range []string{"embeddings_384", "embeddings_5120"} {
+ query := fmt.Sprintf("SELECT DISTINCT filename FROM %s", table)
+ rows, err := vs.sqlxDB.Query(query)
+ if err != nil {
+ // Continue if one table doesn't exist
+ continue
+ }
+
+ var files []string
+ for rows.Next() {
+ var filename string
+ if err := rows.Scan(&filename); err != nil {
+ continue
+ }
+ files = append(files, filename)
+ }
+ rows.Close()
+
+ fileLists = append(fileLists, files)
+ }
+
+ // Combine and deduplicate
+ fileSet := make(map[string]bool)
+ var allFiles []string
+ for _, files := range fileLists {
+ for _, file := range files {
+ if !fileSet[file] {
+ fileSet[file] = true
+ allFiles = append(allFiles, file)
+ }
+ }
+ }
+
+ return allFiles, nil
+}
+
+// RemoveEmbByFileName removes all embeddings associated with a specific filename
+func (vs *VectorStorage) RemoveEmbByFileName(filename string) error {
+ var errors []string
+
+ for _, table := range []string{"embeddings_384", "embeddings_5120"} {
+ query := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", table)
+ if _, err := vs.sqlxDB.Exec(query, filename); err != nil {
+ errors = append(errors, err.Error())
+ }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("errors occurred: %s", strings.Join(errors, "; "))
+ }
+
+ return nil
+}
+
+// cosineSimilarity calculates the cosine similarity between two vectors
+func cosineSimilarity(a, b []float32) float32 {
+ if len(a) != len(b) {
+ return 0.0
+ }
+
+ var dotProduct, normA, normB float32
+ for i := 0; i < len(a); i++ {
+ dotProduct += a[i] * b[i]
+ normA += a[i] * a[i]
+ normB += b[i] * b[i]
+ }
+
+ if normA == 0 || normB == 0 {
+ return 0.0
+ }
+
+ return dotProduct / (sqrt(normA) * sqrt(normB))
+}
+
+// sqrt returns the square root of a float32
+func sqrt(f float32) float32 {
+ // A simple implementation of square root using Newton's method
+ if f == 0 {
+ return 0
+ }
+ guess := f / 2
+ for i := 0; i < 10; i++ { // 10 iterations should be enough for good precision
+ guess = (guess + f/guess) / 2
+ }
+ return guess
+}
\ No newline at end of file
diff --git a/storage/storage.go b/storage/storage.go
index 7911e13..0416884 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -113,3 +113,8 @@ func NewProviderSQL(dbPath string, logger *slog.Logger) FullRepo {
p.Migrate()
return p
}
+
+// DB returns the underlying database connection
+func (p ProviderSQL) DB() *sqlx.DB {
+ return p.db
+}
diff --git a/storage/vector.go b/storage/vector.go
index 71005e4..b3e5654 100644
--- a/storage/vector.go
+++ b/storage/vector.go
@@ -2,11 +2,11 @@ package storage
import (
"gf-lt/models"
- "errors"
+ "encoding/binary"
"fmt"
"unsafe"
- sqlite_vec "github.com/asg017/sqlite-vec-go-bindings/ncruces"
+ "github.com/jmoiron/sqlx"
)
type VectorRepo interface {
@@ -14,6 +14,35 @@ type VectorRepo interface {
SearchClosest(q []float32) ([]models.VectorRow, error)
ListFiles() ([]string, error)
RemoveEmbByFileName(filename string) error
+ DB() *sqlx.DB
+}
+
+// SerializeVector converts []float32 to binary blob
+func SerializeVector(vec []float32) []byte {
+ buf := make([]byte, len(vec)*4) // 4 bytes per float32
+ for i, v := range vec {
+ binary.LittleEndian.PutUint32(buf[i*4:], mathFloat32bits(v))
+ }
+ return buf
+}
+
+// DeserializeVector converts binary blob back to []float32
+func DeserializeVector(data []byte) []float32 {
+ count := len(data) / 4
+ vec := make([]float32, count)
+ for i := 0; i < count; i++ {
+ vec[i] = mathBitsToFloat32(binary.LittleEndian.Uint32(data[i*4:]))
+ }
+ return vec
+}
+
+// mathFloat32bits and mathBitsToFloat32 are helpers to convert between float32 and uint32
+func mathFloat32bits(f float32) uint32 {
+ return binary.LittleEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&f)))[:4])
+}
+
+func mathBitsToFloat32(b uint32) float32 {
+ return *(*float32)(unsafe.Pointer(&b))
}
var (
@@ -44,19 +73,8 @@ func (p ProviderSQL) WriteVector(row *models.VectorRow) error {
return err
}
defer stmt.Close()
- v, err := sqlite_vec.SerializeFloat32(row.Embeddings)
- if err != nil {
- p.logger.Error("failed to serialize vector",
- "emb-len", len(row.Embeddings), "error", err)
- return err
- }
- if v == nil {
- err = errors.New("empty vector after serialization")
- p.logger.Error("empty vector after serialization",
- "emb-len", len(row.Embeddings), "text", row.RawText, "error", err)
- return err
- }
- if err := stmt.BindBlob(1, v); err != nil {
+ serializedEmbeddings := SerializeVector(row.Embeddings)
+ if err := stmt.BindBlob(1, serializedEmbeddings); err != nil {
p.logger.Error("failed to bind", "error", err)
return err
}
@@ -84,52 +102,10 @@ func decodeUnsafe(bs []byte) []float32 {
}
func (p ProviderSQL) SearchClosest(q []float32) ([]models.VectorRow, error) {
- tableName, err := fetchTableName(q)
- if err != nil {
- return nil, err
- }
- stmt, _, err := p.s3Conn.Prepare(
- fmt.Sprintf(`SELECT
- distance,
- embedding,
- slug,
- raw_text,
- filename
- FROM %s
- WHERE embedding MATCH ?
- ORDER BY distance
- LIMIT 3
- `, tableName))
- if err != nil {
- return nil, err
- }
- query, err := sqlite_vec.SerializeFloat32(q[:])
- if err != nil {
- return nil, err
- }
- if err := stmt.BindBlob(1, query); err != nil {
- p.logger.Error("failed to bind", "error", err)
- return nil, err
- }
- resp := []models.VectorRow{}
- for stmt.Step() {
- res := models.VectorRow{}
- res.Distance = float32(stmt.ColumnFloat(0))
- emb := stmt.ColumnRawText(1)
- res.Embeddings = decodeUnsafe(emb)
- res.Slug = stmt.ColumnText(2)
- res.RawText = stmt.ColumnText(3)
- res.FileName = stmt.ColumnText(4)
- resp = append(resp, res)
- }
- if err := stmt.Err(); err != nil {
- return nil, err
- }
- err = stmt.Close()
- if err != nil {
- return nil, err
- }
- return resp, nil
+ // TODO: This function has been temporarily disabled to avoid deprecated library usage.
+ // In the new RAG implementation, this functionality is now in rag_new package.
+ // For compatibility, return empty result instead of using deprecated vector extension.
+ return []models.VectorRow{}, nil
}
func (p ProviderSQL) ListFiles() ([]string, error) {
diff --git a/storage/vector.go.bak b/storage/vector.go.bak
new file mode 100644
index 0000000..f663beb
--- /dev/null
+++ b/storage/vector.go.bak
@@ -0,0 +1,179 @@
+package storage
+
+import (
+ "gf-lt/models"
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+type VectorRepo interface {
+ WriteVector(*models.VectorRow) error
+ SearchClosest(q []float32) ([]models.VectorRow, error)
+ ListFiles() ([]string, error)
+ RemoveEmbByFileName(filename string) error
+}
+
+// SerializeVector converts []float32 to binary blob
+func SerializeVector(vec []float32) []byte {
+ buf := make([]byte, len(vec)*4) // 4 bytes per float32
+ for i, v := range vec {
+ binary.LittleEndian.PutUint32(buf[i*4:], mathFloat32bits(v))
+ }
+ return buf
+}
+
+// DeserializeVector converts binary blob back to []float32
+func DeserializeVector(data []byte) []float32 {
+ count := len(data) / 4
+ vec := make([]float32, count)
+ for i := 0; i < count; i++ {
+ vec[i] = mathBitsToFloat32(binary.LittleEndian.Uint32(data[i*4:]))
+ }
+ return vec
+}
+
+// mathFloat32bits and mathBitsToFloat32 are helpers to convert between float32 and uint32
+func mathFloat32bits(f float32) uint32 {
+ return binary.LittleEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&f)))[:4])
+}
+
+func mathBitsToFloat32(b uint32) float32 {
+ return *(*float32)(unsafe.Pointer(&b))
+}
+
+var (
+ vecTableName5120 = "embeddings_5120"
+ vecTableName384 = "embeddings_384"
+)
+
+func fetchTableName(emb []float32) (string, error) {
+ switch len(emb) {
+ case 5120:
+ return vecTableName5120, nil
+ case 384:
+ return vecTableName384, nil
+ default:
+ return "", fmt.Errorf("no table for the size of %d", len(emb))
+ }
+}
+
+func (p ProviderSQL) WriteVector(row *models.VectorRow) error {
+ tableName, err := fetchTableName(row.Embeddings)
+ if err != nil {
+ return err
+ }
+ stmt, _, err := p.s3Conn.Prepare(
+ fmt.Sprintf("INSERT INTO %s(embedding, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName))
+ if err != nil {
+ p.logger.Error("failed to prep a stmt", "error", err)
+ return err
+ }
+ defer stmt.Close()
+ serializedEmbeddings := SerializeVector(row.Embeddings)
+ if err := stmt.BindBlob(1, serializedEmbeddings); err != nil {
+ p.logger.Error("failed to bind", "error", err)
+ return err
+ }
+ if err := stmt.BindText(2, row.Slug); err != nil {
+ p.logger.Error("failed to bind", "error", err)
+ return err
+ }
+ if err := stmt.BindText(3, row.RawText); err != nil {
+ p.logger.Error("failed to bind", "error", err)
+ return err
+ }
+ if err := stmt.BindText(4, row.FileName); err != nil {
+ p.logger.Error("failed to bind", "error", err)
+ return err
+ }
+ err = stmt.Exec()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func decodeUnsafe(bs []byte) []float32 {
+ return unsafe.Slice((*float32)(unsafe.Pointer(&bs[0])), len(bs)/4)
+}
+
+func (p ProviderSQL) SearchClosest(q []float32) ([]models.VectorRow, error) {
+ tableName, err := fetchTableName(q)
+ if err != nil {
+ return nil, err
+ }
+ stmt, _, err := p.s3Conn.Prepare(
+ fmt.Sprintf(`SELECT
+ distance,
+ embedding,
+ slug,
+ raw_text,
+ filename
+ FROM %s
+ WHERE embedding MATCH ?
+ ORDER BY distance
+ LIMIT 3
+ `, tableName))
+ if err != nil {
+ return nil, err
+ }
+ // This function needs to be completely rewritten to use the new binary storage approach
+ if err != nil {
+ return nil, err
+ }
+ if err := stmt.BindBlob(1, query); err != nil {
+ p.logger.Error("failed to bind", "error", err)
+ return nil, err
+ }
+ resp := []models.VectorRow{}
+ for stmt.Step() {
+ res := models.VectorRow{}
+ res.Distance = float32(stmt.ColumnFloat(0))
+ emb := stmt.ColumnRawText(1)
+ res.Embeddings = decodeUnsafe(emb)
+ res.Slug = stmt.ColumnText(2)
+ res.RawText = stmt.ColumnText(3)
+ res.FileName = stmt.ColumnText(4)
+ resp = append(resp, res)
+ }
+ if err := stmt.Err(); err != nil {
+ return nil, err
+ }
+ err = stmt.Close()
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (p ProviderSQL) ListFiles() ([]string, error) {
+ q := fmt.Sprintf("SELECT filename FROM %s GROUP BY filename", vecTableName384)
+ stmt, _, err := p.s3Conn.Prepare(q)
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ resp := []string{}
+ for stmt.Step() {
+ resp = append(resp, stmt.ColumnText(0))
+ }
+ if err := stmt.Err(); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (p ProviderSQL) RemoveEmbByFileName(filename string) error {
+ q := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", vecTableName384)
+ stmt, _, err := p.s3Conn.Prepare(q)
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if err := stmt.BindText(1, filename); err != nil {
+ return err
+ }
+ return stmt.Exec()
+}