summaryrefslogtreecommitdiff
path: root/rag
diff options
context:
space:
mode:
Diffstat (limited to 'rag')
-rw-r--r--rag/embedder.go444
-rw-r--r--rag/extractors.go181
-rw-r--r--rag/rag.go1197
-rw-r--r--rag/rag_integration_test.go409
-rw-r--r--rag/rag_real_test.go131
-rw-r--r--rag/rag_test.go155
-rw-r--r--rag/storage.go446
7 files changed, 2963 insertions, 0 deletions
diff --git a/rag/embedder.go b/rag/embedder.go
new file mode 100644
index 0000000..5a4aae0
--- /dev/null
+++ b/rag/embedder.go
@@ -0,0 +1,444 @@
+package rag
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gf-lt/config"
+ "gf-lt/models"
+ "log/slog"
+ "net/http"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/sugarme/tokenizer"
+ "github.com/sugarme/tokenizer/pretrained"
+ "github.com/yalue/onnxruntime_go"
+)
+
+// Embedder defines the interface for embedding text
+type Embedder interface {
+ Embed(text string) ([]float32, error)
+ EmbedSlice(lines []string) ([][]float32, error)
+}
+
+// APIEmbedder implements embedder using an API (like Hugging Face, OpenAI, etc.)
+type APIEmbedder struct {
+ logger *slog.Logger
+ client *http.Client
+ cfg *config.Config
+}
+
+func NewAPIEmbedder(l *slog.Logger, cfg *config.Config) *APIEmbedder {
+ return &APIEmbedder{
+ logger: l,
+ client: &http.Client{
+ Timeout: 30 * time.Second,
+ },
+ cfg: cfg,
+ }
+}
+
+func (a *APIEmbedder) Embed(text string) ([]float32, error) {
+ payload, err := json.Marshal(
+ map[string]any{"input": text, "encoding_format": "float"},
+ )
+ if err != nil {
+ a.logger.Error("failed to marshal payload", "err", err.Error())
+ return nil, err
+ }
+ req, err := http.NewRequest("POST", a.cfg.EmbedURL, bytes.NewReader(payload))
+ if err != nil {
+ a.logger.Error("failed to create new req", "err", err.Error())
+ return nil, err
+ }
+ if a.cfg.HFToken != "" {
+ req.Header.Add("Authorization", "Bearer "+a.cfg.HFToken)
+ }
+ resp, err := a.client.Do(req)
+ if err != nil {
+ a.logger.Error("failed to embed text", "err", err.Error())
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("non 200 response; code: %v", resp.StatusCode)
+ a.logger.Error(err.Error())
+ return nil, err
+ }
+ embResp := &models.LCPEmbedResp{}
+ if err := json.NewDecoder(resp.Body).Decode(&embResp); err != nil {
+ a.logger.Error("failed to decode embedding response", "err", err.Error())
+ return nil, err
+ }
+ if len(embResp.Data) == 0 || len(embResp.Data[0].Embedding) == 0 {
+ err = errors.New("empty embedding response")
+ a.logger.Error("empty embedding response")
+ return nil, err
+ }
+ return embResp.Data[0].Embedding, nil
+}
+
+func (a *APIEmbedder) EmbedSlice(lines []string) ([][]float32, error) {
+ payload, err := json.Marshal(
+ map[string]any{"input": lines, "encoding_format": "float"},
+ )
+ if err != nil {
+ a.logger.Error("failed to marshal payload", "err", err.Error())
+ return nil, err
+ }
+ req, err := http.NewRequest("POST", a.cfg.EmbedURL, bytes.NewReader(payload))
+ if err != nil {
+ a.logger.Error("failed to create new req", "err", err.Error())
+ return nil, err
+ }
+ if a.cfg.HFToken != "" {
+ req.Header.Add("Authorization", "Bearer "+a.cfg.HFToken)
+ }
+ resp, err := a.client.Do(req)
+ if err != nil {
+ a.logger.Error("failed to embed text", "err", err.Error())
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("non 200 response; code: %v", resp.StatusCode)
+ a.logger.Error(err.Error())
+ return nil, err
+ }
+ embResp := &models.LCPEmbedResp{}
+ if err := json.NewDecoder(resp.Body).Decode(&embResp); err != nil {
+ a.logger.Error("failed to decode embedding response", "err", err.Error())
+ return nil, err
+ }
+ if len(embResp.Data) == 0 {
+ err = errors.New("empty embedding response")
+ a.logger.Error("empty embedding response")
+ return nil, err
+ }
+
+ // Collect all embeddings from the response
+ embeddings := make([][]float32, len(embResp.Data))
+ for i := range embResp.Data {
+ if len(embResp.Data[i].Embedding) == 0 {
+ err = fmt.Errorf("empty embedding at index %d", i)
+ a.logger.Error("empty embedding", "index", i)
+ return nil, err
+ }
+ embeddings[i] = embResp.Data[i].Embedding
+ }
+
+ // Sort embeddings by index to match the order of input lines
+ // API responses may not be in order
+ for _, data := range embResp.Data {
+ if data.Index >= len(embeddings) || data.Index < 0 {
+ err = fmt.Errorf("invalid embedding index %d", data.Index)
+ a.logger.Error("invalid embedding index", "index", data.Index)
+ return nil, err
+ }
+ embeddings[data.Index] = data.Embedding
+ }
+ return embeddings, nil
+}
+
+// 1. Loading ONNX models locally
+// 2. Using a Go ONNX runtime (like gorgonia/onnx or similar)
+// 3. Converting text to embeddings without external API calls
+type ONNXEmbedder struct {
+ session *onnxruntime_go.DynamicAdvancedSession
+ tokenizer *tokenizer.Tokenizer
+ tokenizerPath string
+ dims int
+ logger *slog.Logger
+ mu sync.Mutex
+ modelPath string
+}
+
+var onnxInitOnce sync.Once
+var onnxReady bool
+var onnxLibPath string
+var cudaLibPath string
+
+var onnxLibPaths = []string{
+ "/usr/lib/libonnxruntime.so",
+ "/usr/lib/libonnxruntime.so.1.24.2",
+ "/usr/local/lib/libonnxruntime.so",
+ "/usr/lib/x86_64-linux-gnu/libonnxruntime.so",
+ "/opt/onnxruntime/lib/libonnxruntime.so",
+}
+
+var cudaLibPaths = []string{
+ "/usr/lib/libonnxruntime_providers_cuda.so",
+ "/usr/local/lib/libonnxruntime_providers_cuda.so",
+ "/opt/onnxruntime/lib/libonnxruntime_providers_cuda.so",
+}
+
+func findONNXLibrary() string {
+ for _, path := range onnxLibPaths {
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ }
+ return ""
+}
+
+func findCUDALibrary() string {
+ for _, path := range cudaLibPaths {
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ }
+ return ""
+}
+
+func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) {
+ // Check if model and tokenizer files exist
+ if _, err := os.Stat(modelPath); err != nil {
+ return nil, fmt.Errorf("ONNX model not found: %w", err)
+ }
+ if _, err := os.Stat(tokenizerPath); err != nil {
+ return nil, fmt.Errorf("tokenizer not found: %w", err)
+ }
+
+ // Find ONNX library
+ onnxLibPath = findONNXLibrary()
+ if onnxLibPath == "" {
+ return nil, errors.New("ONNX runtime library not found in standard locations")
+ }
+
+ // Find CUDA provider library (optional)
+ cudaLibPath = findCUDALibrary()
+ if cudaLibPath == "" {
+ fmt.Println("WARNING: CUDA provider library not found, will use CPU")
+ }
+ emb := &ONNXEmbedder{
+ tokenizerPath: tokenizerPath,
+ dims: dims,
+ logger: logger,
+ modelPath: modelPath,
+ }
+ return emb, nil
+}
+
+func (e *ONNXEmbedder) ensureInitialized() error {
+ if e.session != nil {
+ return nil
+ }
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ if e.session != nil {
+ return nil
+ }
+ // Load tokenizer lazily
+ if e.tokenizer == nil {
+ tok, err := pretrained.FromFile(e.tokenizerPath)
+ if err != nil {
+ return fmt.Errorf("failed to load tokenizer: %w", err)
+ }
+ e.tokenizer = tok
+ }
+ onnxInitOnce.Do(func() {
+ onnxruntime_go.SetSharedLibraryPath(onnxLibPath)
+ if err := onnxruntime_go.InitializeEnvironment(); err != nil {
+ e.logger.Error("failed to initialize ONNX runtime", "error", err)
+ onnxReady = false
+ return
+ }
+ // Register CUDA provider if available
+ if cudaLibPath != "" {
+ if err := onnxruntime_go.RegisterExecutionProviderLibrary("CUDA", cudaLibPath); err != nil {
+ e.logger.Warn("failed to register CUDA provider", "error", err)
+ }
+ }
+ onnxReady = true
+ })
+ if !onnxReady {
+ return errors.New("ONNX runtime not ready")
+ }
+ // Create session options
+ opts, err := onnxruntime_go.NewSessionOptions()
+ if err != nil {
+ return fmt.Errorf("failed to create session options: %w", err)
+ }
+ defer func() {
+ _ = opts.Destroy()
+ }()
+
+ // Try to add CUDA provider
+ useCUDA := cudaLibPath != ""
+ if useCUDA {
+ cudaOpts, err := onnxruntime_go.NewCUDAProviderOptions()
+ if err != nil {
+ e.logger.Warn("failed to create CUDA provider options, falling back to CPU", "error", err)
+ useCUDA = false
+ } else {
+ defer func() {
+ _ = cudaOpts.Destroy()
+ }()
+ if err := cudaOpts.Update(map[string]string{"device_id": "0"}); err != nil {
+ e.logger.Warn("failed to update CUDA options, falling back to CPU", "error", err)
+ useCUDA = false
+ } else if err := opts.AppendExecutionProviderCUDA(cudaOpts); err != nil {
+ e.logger.Warn("failed to append CUDA provider, falling back to CPU", "error", err)
+ useCUDA = false
+ }
+ }
+ }
+ if useCUDA {
+ e.logger.Info("Using CUDA for ONNX inference")
+ } else {
+ e.logger.Info("Using CPU for ONNX inference")
+ }
+
+ // Create session with options
+ session, err := onnxruntime_go.NewDynamicAdvancedSession(
+ e.getModelPath(),
+ []string{"input_ids", "attention_mask"},
+ []string{"sentence_embedding"},
+ opts,
+ )
+ if err != nil {
+ return fmt.Errorf("failed to create ONNX session: %w", err)
+ }
+ e.session = session
+ return nil
+}
+
+func (e *ONNXEmbedder) getModelPath() string {
+ return e.modelPath
+}
+
+func (e *ONNXEmbedder) Destroy() error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ if e.session != nil {
+ if err := e.session.Destroy(); err != nil {
+ return fmt.Errorf("failed to destroy ONNX session: %w", err)
+ }
+ e.session = nil
+ e.logger.Info("ONNX session destroyed, VRAM freed")
+ }
+ return nil
+}
+
+func (e *ONNXEmbedder) Embed(text string) ([]float32, error) {
+ if err := e.ensureInitialized(); err != nil {
+ return nil, err
+ }
+ // 1. Tokenize
+ encoding, err := e.tokenizer.EncodeSingle(text)
+ if err != nil {
+ return nil, fmt.Errorf("tokenization failed: %w", err)
+ }
+ // 2. Convert to int64 and create attention mask
+ ids := encoding.Ids
+ inputIDs := make([]int64, len(ids))
+ attentionMask := make([]int64, len(ids))
+ for i, id := range ids {
+ inputIDs[i] = int64(id)
+ attentionMask[i] = 1
+ }
+ // 3. Create input tensors (shape: [1, seq_len])
+ seqLen := int64(len(inputIDs))
+ inputIDsTensor, err := onnxruntime_go.NewTensor[int64](
+ onnxruntime_go.NewShape(1, seqLen),
+ inputIDs,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create input_ids tensor: %w", err)
+ }
+ defer func() { _ = inputIDsTensor.Destroy() }()
+ maskTensor, err := onnxruntime_go.NewTensor[int64](
+ onnxruntime_go.NewShape(1, seqLen),
+ attentionMask,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create attention_mask tensor: %w", err)
+ }
+ defer func() { _ = maskTensor.Destroy() }()
+ // 4. Create output tensor
+ outputTensor, err := onnxruntime_go.NewEmptyTensor[float32](
+ onnxruntime_go.NewShape(1, int64(e.dims)),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create output tensor: %w", err)
+ }
+ defer func() { _ = outputTensor.Destroy() }()
+ // 5. Run inference
+ err = e.session.Run(
+ []onnxruntime_go.Value{inputIDsTensor, maskTensor},
+ []onnxruntime_go.Value{outputTensor},
+ )
+ if err != nil {
+ return nil, fmt.Errorf("inference failed: %w", err)
+ }
+ // 6. Copy output data
+ outputData := outputTensor.GetData()
+ embedding := make([]float32, len(outputData))
+ copy(embedding, outputData)
+ return embedding, nil
+}
+
+func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) {
+ if err := e.ensureInitialized(); err != nil {
+ return nil, err
+ }
+ encodings := make([]*tokenizer.Encoding, len(texts))
+ maxLen := 0
+ for i, txt := range texts {
+ enc, err := e.tokenizer.EncodeSingle(txt)
+ if err != nil {
+ return nil, err
+ }
+ encodings[i] = enc
+ if l := len(enc.Ids); l > maxLen {
+ maxLen = l
+ }
+ }
+ batchSize := len(texts)
+ inputIDs := make([]int64, batchSize*maxLen)
+ attentionMask := make([]int64, batchSize*maxLen)
+ for i, enc := range encodings {
+ ids := enc.Ids
+ offset := i * maxLen
+ for j, id := range ids {
+ inputIDs[offset+j] = int64(id)
+ attentionMask[offset+j] = 1
+ }
+ // Remaining positions are already zero (padding)
+ }
+ // Create tensors with shape [batchSize, maxLen]
+ inputTensor, _ := onnxruntime_go.NewTensor[int64](
+ onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)),
+ inputIDs,
+ )
+ defer func() { _ = inputTensor.Destroy() }()
+ maskTensor, _ := onnxruntime_go.NewTensor[int64](
+ onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)),
+ attentionMask,
+ )
+ defer func() { _ = maskTensor.Destroy() }()
+ outputTensor, _ := onnxruntime_go.NewEmptyTensor[float32](
+ onnxruntime_go.NewShape(int64(batchSize), int64(e.dims)),
+ )
+ defer func() { _ = outputTensor.Destroy() }()
+ err := e.session.Run(
+ []onnxruntime_go.Value{inputTensor, maskTensor},
+ []onnxruntime_go.Value{outputTensor},
+ )
+ if err != nil {
+ return nil, err
+ }
+ // Extract embeddings per batch item
+ data := outputTensor.GetData()
+ embeddings := make([][]float32, batchSize)
+ for i := 0; i < batchSize; i++ {
+ start := i * e.dims
+ emb := make([]float32, e.dims)
+ copy(emb, data[start:start+e.dims])
+ embeddings[i] = emb
+ }
+ return embeddings, nil
+}
diff --git a/rag/extractors.go b/rag/extractors.go
new file mode 100644
index 0000000..0f9f3f4
--- /dev/null
+++ b/rag/extractors.go
@@ -0,0 +1,181 @@
+package rag
+
+import (
+ "archive/zip"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+
+ "github.com/PuerkitoBio/goquery"
+ "github.com/ledongthuc/pdf"
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/extension"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer/html"
+)
+
+func ExtractText(fpath string) (string, error) {
+ ext := strings.ToLower(path.Ext(fpath))
+ switch ext {
+ case ".txt":
+ return extractTextFromFile(fpath)
+ case ".md", ".markdown":
+ return extractTextFromMarkdown(fpath)
+ case ".html", ".htm":
+ return extractTextFromHtmlFile(fpath)
+ case ".epub":
+ return extractTextFromEpub(fpath)
+ case ".pdf":
+ return extractTextFromPdf(fpath)
+ default:
+ return "", fmt.Errorf("unsupported file format: %s", ext)
+ }
+}
+
+func extractTextFromFile(fpath string) (string, error) {
+ data, err := os.ReadFile(fpath)
+ if err != nil {
+ return "", err
+ }
+ return string(data), nil
+}
+
+func extractTextFromHtmlFile(fpath string) (string, error) {
+ data, err := os.ReadFile(fpath)
+ if err != nil {
+ return "", err
+ }
+ return extractTextFromHtmlContent(data)
+}
+
+// non utf-8 encoding?
+func extractTextFromHtmlContent(data []byte) (string, error) {
+ doc, err := goquery.NewDocumentFromReader(bytes.NewReader(data))
+ if err != nil {
+ return "", err
+ }
+ // Remove script and style tags
+ doc.Find("script, style, noscript").Each(func(i int, s *goquery.Selection) {
+ s.Remove()
+ })
+ // Get text and clean it
+ text := doc.Text()
+ // Collapse all whitespace (newlines, tabs, multiple spaces) into single spaces
+ cleaned := strings.Join(strings.Fields(text), " ")
+ return cleaned, nil
+}
+
+func extractTextFromMarkdown(fpath string) (string, error) {
+ data, err := os.ReadFile(fpath)
+ if err != nil {
+ return "", err
+ }
+ // Convert markdown to HTML
+ md := goldmark.New(
+ goldmark.WithExtensions(extension.GFM),
+ goldmark.WithParserOptions(parser.WithAutoHeadingID()),
+ goldmark.WithRendererOptions(html.WithUnsafe()), // allow raw HTML if needed
+ )
+ var buf bytes.Buffer
+ if err := md.Convert(data, &buf); err != nil {
+ return "", err
+ }
+ // Now extract text from the resulting HTML (using goquery or similar)
+ return extractTextFromHtmlContent(buf.Bytes())
+}
+
+func extractTextFromEpub(fpath string) (string, error) {
+ r, err := zip.OpenReader(fpath)
+ if err != nil {
+ return "", fmt.Errorf("failed to open epub: %w", err)
+ }
+ defer r.Close()
+ var sb strings.Builder
+ for _, f := range r.File {
+ ext := strings.ToLower(path.Ext(f.Name))
+ if ext != ".xhtml" && ext != ".html" && ext != ".htm" && ext != ".xml" {
+ continue
+ }
+
+ // Skip manifest, toc, ncx files - they don't contain book content
+ nameLower := strings.ToLower(f.Name)
+ if strings.Contains(nameLower, "toc") || strings.Contains(nameLower, "nav") ||
+ strings.Contains(nameLower, "manifest") || strings.Contains(nameLower, ".opf") ||
+ strings.HasSuffix(nameLower, ".ncx") {
+ continue
+ }
+
+ rc, err := f.Open()
+ if err != nil {
+ continue
+ }
+
+ if sb.Len() > 0 {
+ sb.WriteString("\n\n")
+ }
+ sb.WriteString(f.Name)
+ sb.WriteString("\n")
+
+ buf, readErr := io.ReadAll(rc)
+ rc.Close()
+ if readErr == nil {
+ sb.WriteString(stripHTML(string(buf)))
+ }
+ }
+ if sb.Len() == 0 {
+ return "", errors.New("no content extracted from epub")
+ }
+ return sb.String(), nil
+}
+
+func stripHTML(html string) string {
+ var sb strings.Builder
+ inTag := false
+ for _, r := range html {
+ switch r {
+ case '<':
+ inTag = true
+ case '>':
+ inTag = false
+ default:
+ if !inTag {
+ sb.WriteRune(r)
+ }
+ }
+ }
+ return sb.String()
+}
+
+func extractTextFromPdf(fpath string) (string, error) {
+ _, err := exec.LookPath("pdftotext")
+ if err == nil {
+ out, err := exec.Command("pdftotext", "-layout", fpath, "-").Output()
+ if err == nil && len(out) > 0 {
+ return string(out), nil
+ }
+ }
+ return extractTextFromPdfPureGo(fpath)
+}
+
+func extractTextFromPdfPureGo(fpath string) (string, error) {
+ df, r, err := pdf.Open(fpath)
+ if err != nil {
+ return "", fmt.Errorf("failed to open pdf: %w", err)
+ }
+ defer df.Close()
+ textReader, err := r.GetPlainText()
+ if err != nil {
+ return "", fmt.Errorf("failed to extract text from pdf: %w", err)
+ }
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, textReader)
+ if err != nil {
+ return "", fmt.Errorf("failed to read pdf text: %w", err)
+ }
+ return buf.String(), nil
+}
diff --git a/rag/rag.go b/rag/rag.go
new file mode 100644
index 0000000..3a771d4
--- /dev/null
+++ b/rag/rag.go
@@ -0,0 +1,1197 @@
+package rag
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "gf-lt/config"
+ "gf-lt/models"
+ "gf-lt/storage"
+ "log/slog"
+ "path"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/neurosnap/sentences/english"
+)
+
+const ()
+
+var (
+ // Status messages for TUI integration
+ LongJobStatusCh = make(chan string, 100) // Increased buffer size for parallel batch updates
+ FinishedRAGStatus = "finished loading RAG file; press x to exit"
+ LoadedFileRAGStatus = "loaded file"
+ ErrRAGStatus = "some error occurred; failed to transfer data to vector db"
+
+ // stopWords are common words that can be removed from queries when not part of phrases
+ stopWords = []string{"the", "a", "an", "and", "or", "but", "in", "on", "at", "to", "for", "of", "with", "by", "from", "up", "down", "left", "right", "about", "like", "such", "than", "then", "also", "too"}
+)
+
+// isStopWord checks if a word is in the stop words list
+func isStopWord(word string) bool {
+ for _, stop := range stopWords {
+ if strings.EqualFold(word, stop) {
+ return true
+ }
+ }
+ return false
+}
+
+// detectPhrases returns multi-word phrases from a query that should be treated as units
+func detectPhrases(query string) []string {
+ words := strings.Fields(strings.ToLower(query))
+ var phrases []string
+
+ for i := 0; i < len(words)-1; i++ {
+ word1 := strings.Trim(words[i], ".,!?;:'\"()[]{}")
+ word2 := strings.Trim(words[i+1], ".,!?;:'\"()[]{}")
+
+ // Skip if either word is a stop word or too short
+ if isStopWord(word1) || isStopWord(word2) || len(word1) < 2 || len(word2) < 2 {
+ continue
+ }
+
+ // Check if this pair appears to be a meaningful phrase
+ // Simple heuristic: consecutive non-stop words of reasonable length
+ phrase := word1 + " " + word2
+ phrases = append(phrases, phrase)
+
+ // Optionally check for 3-word phrases
+ if i < len(words)-2 {
+ word3 := strings.Trim(words[i+2], ".,!?;:'\"()[]{}")
+ if !isStopWord(word3) && len(word3) >= 2 {
+ phrases = append(phrases, word1+" "+word2+" "+word3)
+ }
+ }
+ }
+
+ return phrases
+}
+
+// countPhraseMatches returns the number of query phrases found in text
+func countPhraseMatches(text, query string) int {
+ phrases := detectPhrases(query)
+ if len(phrases) == 0 {
+ return 0
+ }
+ textLower := strings.ToLower(text)
+ count := 0
+ for _, phrase := range phrases {
+ if strings.Contains(textLower, phrase) {
+ count++
+ }
+ }
+ return count
+}
+
+// parseSlugIndices extracts batch and chunk indices from a slug
+// slug format: filename_batch_chunk (e.g., "kjv_bible.epub_1786_0")
+func parseSlugIndices(slug string) (batch, chunk int, ok bool) {
+ // Find the last two numbers separated by underscores
+ re := regexp.MustCompile(`_(\d+)_(\d+)$`)
+ matches := re.FindStringSubmatch(slug)
+ if matches == nil || len(matches) != 3 {
+ return 0, 0, false
+ }
+ batch, err1 := strconv.Atoi(matches[1])
+ chunk, err2 := strconv.Atoi(matches[2])
+ if err1 != nil || err2 != nil {
+ return 0, 0, false
+ }
+ return batch, chunk, true
+}
+
+// areSlugsAdjacent returns true if two slugs are from the same file and have sequential indices
+func areSlugsAdjacent(slug1, slug2 string) bool {
+ // Extract filename prefix (everything before the last underscore sequence)
+ parts1 := strings.Split(slug1, "_")
+ parts2 := strings.Split(slug2, "_")
+ if len(parts1) < 3 || len(parts2) < 3 {
+ return false
+ }
+
+ // Compare filename prefixes (all parts except last two)
+ prefix1 := strings.Join(parts1[:len(parts1)-2], "_")
+ prefix2 := strings.Join(parts2[:len(parts2)-2], "_")
+ if prefix1 != prefix2 {
+ return false
+ }
+
+ batch1, chunk1, ok1 := parseSlugIndices(slug1)
+ batch2, chunk2, ok2 := parseSlugIndices(slug2)
+ if !ok1 || !ok2 {
+ return false
+ }
+
+ // Check if they're in same batch and chunks are sequential
+ if batch1 == batch2 && (chunk1 == chunk2+1 || chunk2 == chunk1+1) {
+ return true
+ }
+
+ // Check if they're in sequential batches and chunk indices suggest continuity
+ // This is heuristic but useful for cross-batch adjacency
+ if (batch1 == batch2+1 && chunk1 == 0) || (batch2 == batch1+1 && chunk2 == 0) {
+ return true
+ }
+ return false
+}
+
+type RAG struct {
+ logger *slog.Logger
+ store storage.FullRepo
+ cfg *config.Config
+ embedder Embedder
+ storage *VectorStorage
+ mu sync.RWMutex
+ idleMu sync.Mutex
+ fallbackMsg string
+ idleTimer *time.Timer
+ idleTimeout time.Duration
+}
+
+// batchTask represents a single batch to be embedded
+type batchTask struct {
+ batchIndex int
+ paragraphs []string
+ filename string
+ totalBatches int
+}
+
+// batchResult represents the result of embedding a batch
+type batchResult struct {
+ batchIndex int
+ embeddings [][]float32
+ paragraphs []string
+ filename string
+}
+
+// sendStatusNonBlocking sends a status message without blocking
+func (r *RAG) sendStatusNonBlocking(status string) {
+ select {
+ case LongJobStatusCh <- status:
+ default:
+ r.logger.Warn("LongJobStatusCh channel is full or closed, dropping status message", "message", status)
+ }
+}
+
+func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) {
+ var embedder Embedder
+ var fallbackMsg string
+ if cfg.EmbedModelPath != "" && cfg.EmbedTokenizerPath != "" {
+ emb, err := NewONNXEmbedder(cfg.EmbedModelPath, cfg.EmbedTokenizerPath, cfg.EmbedDims, l)
+ if err != nil {
+ l.Error("failed to create ONNX embedder, falling back to API", "error", err)
+ fallbackMsg = err.Error()
+ embedder = NewAPIEmbedder(l, cfg)
+ } else {
+ embedder = emb
+ l.Info("using ONNX embedder", "model", cfg.EmbedModelPath, "dims", cfg.EmbedDims)
+ }
+ } else {
+ embedder = NewAPIEmbedder(l, cfg)
+ l.Info("using API embedder", "url", cfg.EmbedURL)
+ }
+ rag := &RAG{
+ logger: l,
+ store: s,
+ cfg: cfg,
+ embedder: embedder,
+ storage: NewVectorStorage(l, s),
+ fallbackMsg: fallbackMsg,
+ idleTimeout: 30 * time.Second,
+ }
+
+ // Note: Vector tables are created via database migrations, not at runtime
+
+ return rag, nil
+}
+
+func createChunks(sentences []string, wordLimit, overlapWords uint32) []string {
+ if len(sentences) == 0 {
+ return nil
+ }
+ if overlapWords >= wordLimit {
+ overlapWords = wordLimit / 2
+ }
+ var chunks []string
+ i := 0
+ for i < len(sentences) {
+ var chunkWords []string
+ wordCount := 0
+ j := i
+ for j < len(sentences) && wordCount <= int(wordLimit) {
+ sentence := sentences[j]
+ words := strings.Fields(sentence)
+ chunkWords = append(chunkWords, sentence)
+ wordCount += len(words)
+ j++
+ // If this sentence alone exceeds limit, still include it and stop
+ if wordCount > int(wordLimit) {
+ break
+ }
+ }
+ if len(chunkWords) == 0 {
+ break
+ }
+ chunk := strings.Join(chunkWords, " ")
+ chunks = append(chunks, chunk)
+ if j >= len(sentences) {
+ break
+ }
+ // Move i forward by skipping overlap
+ if overlapWords == 0 {
+ i = j
+ continue
+ }
+ // Calculate how many sentences to skip to achieve overlapWords
+ overlapRemaining := int(overlapWords)
+ newI := i
+ for newI < j && overlapRemaining > 0 {
+ words := len(strings.Fields(sentences[newI]))
+ overlapRemaining -= words
+ if overlapRemaining >= 0 {
+ newI++
+ }
+ }
+ if newI == i {
+ newI = j
+ }
+ i = newI
+ }
+ return chunks
+}
+
+func sanitizeFTSQuery(query string) string {
+ // Keep double quotes for FTS5 phrase matching
+ // Remove other problematic characters
+ query = strings.ReplaceAll(query, "'", " ")
+ query = strings.ReplaceAll(query, ";", " ")
+ query = strings.ReplaceAll(query, "\\", " ")
+ query = strings.TrimSpace(query)
+ if query == "" {
+ return "*" // match all
+ }
+ return query
+}
+
+func (r *RAG) LoadRAG(fpath string) error {
+ return r.LoadRAGWithContext(context.Background(), fpath)
+}
+
+func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ fileText, err := ExtractText(fpath)
+ if err != nil {
+ return err
+ }
+ r.logger.Debug("rag: loaded file", "fp", fpath)
+
+ // Send initial status (non-blocking with retry)
+ r.sendStatusNonBlocking(LoadedFileRAGStatus)
+ tokenizer, err := english.NewSentenceTokenizer(nil)
+ if err != nil {
+ return err
+ }
+ sentences := tokenizer.Tokenize(fileText)
+ sents := make([]string, len(sentences))
+ for i, s := range sentences {
+ sents[i] = s.Text
+ }
+
+ // Create chunks with overlap
+ paragraphs := createChunks(sents, r.cfg.RAGWordLimit, r.cfg.RAGOverlapWords)
+ // Adjust batch size if needed
+ if len(paragraphs) < r.cfg.RAGBatchSize && len(paragraphs) > 0 {
+ r.cfg.RAGBatchSize = len(paragraphs)
+ }
+ if len(paragraphs) == 0 {
+ return errors.New("no valid paragraphs found in file")
+ }
+ totalBatches := (len(paragraphs) + r.cfg.RAGBatchSize - 1) / r.cfg.RAGBatchSize
+ r.logger.Debug("starting parallel embedding", "total_batches", totalBatches, "batch_size", r.cfg.RAGBatchSize)
+
+ // Determine concurrency level
+ concurrency := runtime.NumCPU()
+ if concurrency > totalBatches {
+ concurrency = totalBatches
+ }
+ if concurrency < 1 {
+ concurrency = 1
+ }
+ // If using ONNX embedder, limit concurrency to 1 due to mutex serialization
+ var isONNX bool
+ if _, isONNX = r.embedder.(*ONNXEmbedder); isONNX {
+ concurrency = 1
+ }
+ embedderType := "API"
+ if isONNX {
+ embedderType = "ONNX"
+ }
+ r.logger.Debug("parallel embedding setup",
+ "total_batches", totalBatches,
+ "concurrency", concurrency,
+ "embedder", embedderType,
+ "batch_size", r.cfg.RAGBatchSize)
+
+ // Create context with timeout (30 minutes) and cancellation for error handling
+ ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
+ defer cancel()
+
+ // Channels for task distribution and results
+ taskCh := make(chan batchTask, totalBatches)
+ resultCh := make(chan batchResult, totalBatches)
+ errorCh := make(chan error, totalBatches)
+
+ // Start worker goroutines
+ var wg sync.WaitGroup
+ for w := 0; w < concurrency; w++ {
+ wg.Add(1)
+ go r.embeddingWorker(ctx, w, taskCh, resultCh, errorCh, &wg)
+ }
+
+ // Close task channel after all tasks are sent (by separate goroutine)
+ go func() {
+ // Ensure task channel is closed when this goroutine exits
+ defer close(taskCh)
+ r.logger.Debug("task distributor started", "total_batches", totalBatches)
+ for i := 0; i < totalBatches; i++ {
+ start := i * r.cfg.RAGBatchSize
+ end := start + r.cfg.RAGBatchSize
+ if end > len(paragraphs) {
+ end = len(paragraphs)
+ }
+ batch := paragraphs[start:end]
+
+ // Filter empty paragraphs
+ nonEmptyBatch := make([]string, 0, len(batch))
+ for _, p := range batch {
+ if strings.TrimSpace(p) != "" {
+ nonEmptyBatch = append(nonEmptyBatch, strings.TrimSpace(p))
+ }
+ }
+
+ task := batchTask{
+ batchIndex: i,
+ paragraphs: nonEmptyBatch,
+ filename: path.Base(fpath),
+ totalBatches: totalBatches,
+ }
+
+ select {
+ case taskCh <- task:
+ r.logger.Debug("task distributor sent batch", "batch", i, "paragraphs", len(nonEmptyBatch))
+ case <-ctx.Done():
+ r.logger.Debug("task distributor cancelled", "batches_sent", i+1, "total_batches", totalBatches)
+ return
+ }
+ }
+ r.logger.Debug("task distributor finished", "batches_sent", totalBatches)
+ }()
+
+ // Wait for workers to finish and close result channel
+ go func() {
+ wg.Wait()
+ close(resultCh)
+ }()
+
+ // Process results in order and write to database
+ nextExpectedBatch := 0
+ resultsBuffer := make(map[int]batchResult)
+ filename := path.Base(fpath)
+ batchesProcessed := 0
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+
+ case err := <-errorCh:
+ // First error from any worker, cancel everything
+ cancel()
+ r.logger.Error("embedding worker failed", "error", err)
+ r.sendStatusNonBlocking(ErrRAGStatus)
+ return fmt.Errorf("embedding failed: %w", err)
+
+ case result, ok := <-resultCh:
+ if !ok {
+ // All results processed
+ resultCh = nil
+ r.logger.Debug("result channel closed", "batches_processed", batchesProcessed, "total_batches", totalBatches)
+ continue
+ }
+
+ // Store result in buffer
+ resultsBuffer[result.batchIndex] = result
+
+ // Process buffered results in order
+ for {
+ if res, exists := resultsBuffer[nextExpectedBatch]; exists {
+ // Write this batch to database
+ if err := r.writeBatchToStorage(ctx, res, filename); err != nil {
+ cancel()
+ return err
+ }
+
+ batchesProcessed++
+ // Send progress update
+ statusMsg := fmt.Sprintf("processed batch %d/%d", batchesProcessed, totalBatches)
+ r.sendStatusNonBlocking(statusMsg)
+
+ delete(resultsBuffer, nextExpectedBatch)
+ nextExpectedBatch++
+ } else {
+ break
+ }
+ }
+
+ default:
+ // No channels ready, check for deadlock conditions
+ if resultCh == nil && nextExpectedBatch < totalBatches {
+ // Missing batch results after result channel closed
+ r.logger.Error("missing batch results",
+ "expected", totalBatches,
+ "received", nextExpectedBatch,
+ "missing", totalBatches-nextExpectedBatch)
+
+ // Wait a short time for any delayed errors, then cancel
+ select {
+ case <-time.After(5 * time.Second):
+ cancel()
+ return fmt.Errorf("missing batch results: expected %d, got %d", totalBatches, nextExpectedBatch)
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-errorCh:
+ cancel()
+ r.logger.Error("embedding worker failed after result channel closed", "error", err)
+ r.sendStatusNonBlocking(ErrRAGStatus)
+ return fmt.Errorf("embedding failed: %w", err)
+ }
+ }
+ // If we reach here, no deadlock yet, just busy loop prevention
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Check if we're done
+ if resultCh == nil && nextExpectedBatch >= totalBatches {
+ r.logger.Debug("all batches processed successfully", "total", totalBatches)
+ break
+ }
+ }
+ r.logger.Debug("finished writing vectors", "batches", batchesProcessed)
+ r.resetIdleTimer()
+ r.sendStatusNonBlocking(FinishedRAGStatus)
+ return nil
+}
+
+// embeddingWorker processes batch embedding tasks
+func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan batchTask, resultCh chan<- batchResult, errorCh chan<- error, wg *sync.WaitGroup) {
+ defer wg.Done()
+ r.logger.Debug("embedding worker started", "worker", workerID)
+
+ // Panic recovery to ensure worker doesn't crash silently
+ defer func() {
+ if rec := recover(); rec != nil {
+ r.logger.Error("embedding worker panicked", "worker", workerID, "panic", rec)
+ // Try to send error, but don't block if channel is full
+ select {
+ case errorCh <- fmt.Errorf("worker %d panicked: %v", workerID, rec):
+ default:
+ r.logger.Warn("error channel full, dropping panic error", "worker", workerID)
+ }
+ }
+ }()
+ for task := range taskCh {
+ select {
+ case <-ctx.Done():
+ r.logger.Debug("embedding worker cancelled", "worker", workerID)
+ return
+ default:
+ }
+ r.logger.Debug("worker processing batch", "worker", workerID, "batch", task.batchIndex, "paragraphs", len(task.paragraphs), "total_batches", task.totalBatches)
+
+ // Skip empty batches
+ if len(task.paragraphs) == 0 {
+ select {
+ case resultCh <- batchResult{
+ batchIndex: task.batchIndex,
+ embeddings: nil,
+ paragraphs: nil,
+ filename: task.filename,
+ }:
+ case <-ctx.Done():
+ r.logger.Debug("embedding worker cancelled while sending empty batch", "worker", workerID)
+ return
+ }
+ r.logger.Debug("worker sent empty batch", "worker", workerID, "batch", task.batchIndex)
+ continue
+ }
+ // Embed with retry for API embedder
+ embeddings, err := r.embedWithRetry(ctx, task.paragraphs, 3)
+ if err != nil {
+ // Try to send error, but don't block indefinitely
+ select {
+ case errorCh <- fmt.Errorf("worker %d batch %d: %w", workerID, task.batchIndex, err):
+ case <-ctx.Done():
+ r.logger.Debug("embedding worker cancelled while sending error", "worker", workerID)
+ }
+ return
+ }
+ // Send result with context awareness
+ select {
+ case resultCh <- batchResult{
+ batchIndex: task.batchIndex,
+ embeddings: embeddings,
+ paragraphs: task.paragraphs,
+ filename: task.filename,
+ }:
+ case <-ctx.Done():
+ r.logger.Debug("embedding worker cancelled while sending result", "worker", workerID)
+ return
+ }
+ r.logger.Debug("worker completed batch", "worker", workerID, "batch", task.batchIndex, "embeddings", len(embeddings))
+ }
+ r.logger.Debug("embedding worker finished", "worker", workerID)
+}
+
+// embedWithRetry attempts embedding with exponential backoff for API embedder
+func (r *RAG) embedWithRetry(ctx context.Context, paragraphs []string, maxRetries int) ([][]float32, error) {
+ var lastErr error
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ if attempt > 0 {
+ // Exponential backoff
+ backoff := time.Duration(attempt*attempt) * time.Second
+ if backoff > 10*time.Second {
+ backoff = 10 * time.Second
+ }
+ select {
+ case <-time.After(backoff):
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ r.logger.Debug("retrying embedding", "attempt", attempt, "max_retries", maxRetries)
+ }
+
+ embeddings, err := r.embedder.EmbedSlice(paragraphs)
+ if err == nil {
+ // Validate embedding count
+ if len(embeddings) != len(paragraphs) {
+ return nil, fmt.Errorf("embedding count mismatch: expected %d, got %d", len(paragraphs), len(embeddings))
+ }
+ return embeddings, nil
+ }
+
+ lastErr = err
+ // Only retry for API embedder errors (network/timeout)
+ // For ONNX embedder, fail fast
+ if _, isAPI := r.embedder.(*APIEmbedder); !isAPI {
+ break
+ }
+ }
+ return nil, fmt.Errorf("embedding failed after %d attempts: %w", maxRetries, lastErr)
+}
+
+// writeBatchToStorage writes a single batch of vectors to the database
+func (r *RAG) writeBatchToStorage(ctx context.Context, result batchResult, filename string) error {
+ if len(result.embeddings) == 0 {
+ // Empty batch, skip
+ return nil
+ }
+ // Check context before starting
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ // Build all vectors for batch write
+ vectors := make([]*models.VectorRow, 0, len(result.paragraphs))
+ for j, text := range result.paragraphs {
+ vectors = append(vectors, &models.VectorRow{
+ Embeddings: result.embeddings[j],
+ RawText: text,
+ Slug: fmt.Sprintf("%s_%d_%d", filename, result.batchIndex+1, j),
+ FileName: filename,
+ })
+ }
+
+ // Write all vectors in a single transaction
+ if err := r.storage.WriteVectors(vectors); err != nil {
+ r.logger.Error("failed to write vectors batch to DB", "error", err, "batch", result.batchIndex+1, "size", len(vectors))
+ r.sendStatusNonBlocking(ErrRAGStatus)
+ return fmt.Errorf("failed to write vectors batch: %w", err)
+ }
+ r.logger.Debug("wrote batch to db", "batch", result.batchIndex+1, "size", len(result.paragraphs))
+ return nil
+}
+
+func (r *RAG) LineToVector(line string) ([]float32, error) {
+ r.resetIdleTimer()
+ return r.embedder.Embed(line)
+}
+
+func (r *RAG) searchEmb(emb *models.EmbeddingResp, limit int) ([]models.VectorRow, error) {
+ r.resetIdleTimer()
+ return r.storage.SearchClosest(emb.Embedding, limit)
+}
+
+func (r *RAG) searchKeyword(query string, limit int) ([]models.VectorRow, error) {
+ r.resetIdleTimer()
+ sanitized := sanitizeFTSQuery(query)
+ return r.storage.SearchKeyword(sanitized, limit)
+}
+
+func (r *RAG) ListLoaded() ([]string, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.storage.ListFiles()
+}
+
+func (r *RAG) RemoveFile(filename string) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.resetIdleTimer()
+ return r.storage.RemoveEmbByFileName(filename)
+}
+
+var (
+ queryRefinementPattern = regexp.MustCompile(`(?i)(based on my (vector db|vector db|vector database|rags?|past (conversations?|chat|messages?))|from my (files?|documents?|data|information|memory)|search (in|my) (vector db|database|rags?)|rag search for)`)
+ importantKeywords = []string{"project", "architecture", "code", "file", "chat", "conversation", "topic", "summary", "details", "history", "previous", "my", "user", "me"}
+)
+
+func (r *RAG) RefineQuery(query string) string {
+ original := query
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ return original
+ }
+ if len(query) <= 3 {
+ return original
+ }
+ // If query already contains double quotes, assume it's a phrase query and skip refinement
+ if strings.Contains(query, "\"") {
+ return original
+ }
+ query = strings.ToLower(query)
+ words := strings.Fields(query)
+ if len(words) >= 3 {
+ // Detect phrases and protect words that are part of phrases
+ phrases := detectPhrases(query)
+ protectedWords := make(map[string]bool)
+ for _, phrase := range phrases {
+ for _, word := range strings.Fields(phrase) {
+ protectedWords[word] = true
+ }
+ }
+
+ // Remove stop words that are not protected
+ for _, stopWord := range stopWords {
+ if protectedWords[stopWord] {
+ continue
+ }
+ wordPattern := `\b` + stopWord + `\b`
+ re := regexp.MustCompile(wordPattern)
+ query = re.ReplaceAllString(query, "")
+ }
+ }
+ query = strings.TrimSpace(query)
+ if len(query) < 5 {
+ return original
+ }
+ if queryRefinementPattern.MatchString(original) {
+ cleaned := queryRefinementPattern.ReplaceAllString(original, "")
+ cleaned = strings.TrimSpace(cleaned)
+ if len(cleaned) >= 5 {
+ return cleaned
+ }
+ }
+ query = r.extractImportantPhrases(query)
+ if len(query) < 5 {
+ return original
+ }
+ return query
+}
+
+func (r *RAG) extractImportantPhrases(query string) string {
+ words := strings.Fields(query)
+ var important []string
+ for _, word := range words {
+ word = strings.Trim(word, ".,!?;:'\"()[]{}")
+ isImportant := false
+ for _, kw := range importantKeywords {
+ if strings.Contains(strings.ToLower(word), kw) {
+ isImportant = true
+ break
+ }
+ }
+ if isImportant || len(word) >= 3 {
+ important = append(important, word)
+ }
+ }
+ if len(important) == 0 {
+ return query
+ }
+ return strings.Join(important, " ")
+}
+
+func (r *RAG) GenerateQueryVariations(query string) []string {
+ variations := []string{query}
+ if len(query) < 5 {
+ return variations
+ }
+ parts := strings.Fields(query)
+ if len(parts) == 0 {
+ return variations
+ }
+ // Get loaded filenames to filter out filename terms
+ filenames, err := r.storage.ListFiles()
+ if err == nil && len(filenames) > 0 {
+ // Convert to lowercase for case-insensitive matching
+ lowerFilenames := make([]string, len(filenames))
+ for i, f := range filenames {
+ lowerFilenames[i] = strings.ToLower(f)
+ }
+ filteredParts := make([]string, 0, len(parts))
+ for _, part := range parts {
+ partLower := strings.ToLower(part)
+ skip := false
+ for _, fn := range lowerFilenames {
+ if strings.Contains(fn, partLower) || strings.Contains(partLower, fn) {
+ skip = true
+ break
+ }
+ }
+ if !skip {
+ filteredParts = append(filteredParts, part)
+ }
+ }
+ // If filteredParts not empty and different from original, add filtered query
+ if len(filteredParts) > 0 && len(filteredParts) != len(parts) {
+ filteredQuery := strings.Join(filteredParts, " ")
+ if len(filteredQuery) >= 5 {
+ variations = append(variations, filteredQuery)
+ }
+ }
+ }
+ if len(parts) >= 2 {
+ trimmed := strings.Join(parts[:len(parts)-1], " ")
+ if len(trimmed) >= 5 {
+ variations = append(variations, trimmed)
+ }
+ }
+ if len(parts) >= 2 {
+ trimmed := strings.Join(parts[1:], " ")
+ if len(trimmed) >= 5 {
+ variations = append(variations, trimmed)
+ }
+ }
+ if !strings.HasSuffix(query, " explanation") {
+ variations = append(variations, query+" explanation")
+ }
+ if !strings.HasPrefix(query, "what is ") {
+ variations = append(variations, "what is "+query)
+ }
+ if !strings.HasSuffix(query, " details") {
+ variations = append(variations, query+" details")
+ }
+ if !strings.HasSuffix(query, " summary") {
+ variations = append(variations, query+" summary")
+ }
+
+ // Add phrase-quoted variations for better FTS5 matching
+ phrases := detectPhrases(query)
+ if len(phrases) > 0 {
+ // Sort phrases by length descending to prioritize longer phrases
+ sort.Slice(phrases, func(i, j int) bool {
+ return len(phrases[i]) > len(phrases[j])
+ })
+
+ // Create a version with all phrases quoted
+ quotedQuery := query
+ for _, phrase := range phrases {
+ // Only quote if not already quoted
+ quotedPhrase := "\"" + phrase + "\""
+ if !strings.Contains(strings.ToLower(quotedQuery), strings.ToLower(quotedPhrase)) {
+ // Case-insensitive replacement of phrase with quoted version
+ re := regexp.MustCompile(`(?i)\b` + regexp.QuoteMeta(phrase) + `\b`)
+ quotedQuery = re.ReplaceAllString(quotedQuery, quotedPhrase)
+ }
+ }
+ // Disabled malformed quoted query for now
+ // if quotedQuery != query {
+ // variations = append(variations, quotedQuery)
+ // }
+
+ // Also add individual phrase variations for short queries
+ if len(phrases) <= 5 {
+ for _, phrase := range phrases {
+ // Create a focused query with just this phrase quoted
+ // Keep original context but emphasize this phrase
+ quotedPhrase := "\"" + phrase + "\""
+ re := regexp.MustCompile(`(?i)\b` + regexp.QuoteMeta(phrase) + `\b`)
+ focusedQuery := re.ReplaceAllString(query, quotedPhrase)
+ if focusedQuery != query && focusedQuery != quotedQuery {
+ variations = append(variations, focusedQuery)
+ }
+ // Add the phrase alone (quoted) as a separate variation
+ variations = append(variations, quotedPhrase)
+ }
+ }
+ }
+
+ return variations
+}
+
+func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.VectorRow {
+ phraseCount := len(detectPhrases(query))
+ type scoredResult struct {
+ row models.VectorRow
+ distance float32
+ phraseMatches int
+ }
+ scored := make([]scoredResult, 0, len(results))
+ for i := range results {
+ row := results[i]
+
+ score := float32(0)
+ rawTextLower := strings.ToLower(row.RawText)
+ queryLower := strings.ToLower(query)
+ if strings.Contains(rawTextLower, queryLower) {
+ score += 10
+ }
+ queryWords := strings.Fields(queryLower)
+ matchCount := 0
+ for _, word := range queryWords {
+ if len(word) > 2 && strings.Contains(rawTextLower, word) {
+ matchCount++
+ }
+ }
+ if len(queryWords) > 0 {
+ score += float32(matchCount) / float32(len(queryWords)) * 5
+ }
+ if row.FileName == "chat" || strings.Contains(strings.ToLower(row.FileName), "conversation") {
+ score += 3
+ }
+
+ // Phrase match bonus: extra points for containing detected phrases
+ phraseMatches := countPhraseMatches(row.RawText, query)
+ if phraseMatches > 0 {
+ // Significant bonus per phrase to prioritize exact phrase matches
+ r.logger.Debug("phrase match bonus", "slug", row.Slug, "phraseMatches", phraseMatches, "score", score)
+ score += float32(phraseMatches) * 100
+ }
+
+ // Cross-chunk adjacency bonus: if this chunk has adjacent siblings in results,
+ // boost score to promote narrative continuity
+ adjacentCount := 0
+ for _, other := range results {
+ if other.Slug == row.Slug {
+ continue
+ }
+ if areSlugsAdjacent(row.Slug, other.Slug) {
+ adjacentCount++
+ }
+ }
+ if adjacentCount > 0 {
+ // Bonus per adjacent chunk, but diminishing returns
+ score += float32(adjacentCount) * 4
+ }
+ distance := row.Distance - score/100
+ scored = append(scored, scoredResult{row: row, distance: distance, phraseMatches: phraseMatches})
+ }
+ sort.Slice(scored, func(i, j int) bool {
+ return scored[i].distance < scored[j].distance
+ })
+ unique := make([]models.VectorRow, 0)
+ seen := make(map[string]bool)
+ maxPerFile := 2
+ if phraseCount > 0 {
+ maxPerFile = 10
+ }
+ fileCounts := make(map[string]int)
+ for i := range scored {
+ if !seen[scored[i].row.Slug] {
+ // Allow phrase-matching chunks to bypass per-file limit (up to +5 extra)
+ allowed := fileCounts[scored[i].row.FileName] < maxPerFile
+ if !allowed && scored[i].phraseMatches > 0 {
+ // If chunk has phrase matches, allow extra slots (up to maxPerFile + 5)
+ allowed = fileCounts[scored[i].row.FileName] < maxPerFile+5
+ }
+ if !allowed {
+ continue
+ }
+ seen[scored[i].row.Slug] = true
+ fileCounts[scored[i].row.FileName]++
+ unique = append(unique, scored[i].row)
+ }
+ }
+ if len(unique) > 30 {
+ unique = unique[:30]
+ }
+ return unique
+}
+
+func (r *RAG) SynthesizeAnswer(results []models.VectorRow, query string) (string, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ r.resetIdleTimer()
+ if len(results) == 0 {
+ return "No relevant information found in the vector database.", nil
+ }
+ var contextBuilder strings.Builder
+ contextBuilder.WriteString("User Query: ")
+ contextBuilder.WriteString(query)
+ contextBuilder.WriteString("\n\nRetrieved Context:\n")
+ for i, row := range results {
+ fmt.Fprintf(&contextBuilder, "[Source %d: %s]\n", i+1, row.FileName)
+ contextBuilder.WriteString(row.RawText)
+ contextBuilder.WriteString("\n\n")
+ }
+ contextBuilder.WriteString("Instructions: ")
+ contextBuilder.WriteString("Based on the retrieved context above, provide a concise, coherent answer to the user's query. ")
+ contextBuilder.WriteString("Extract only the most relevant information. ")
+ contextBuilder.WriteString("If no relevant information is found, state that clearly. ")
+ contextBuilder.WriteString("Cite sources by filename when relevant. ")
+ contextBuilder.WriteString("Do not include unnecessary preamble or explanations.")
+ synthesisPrompt := contextBuilder.String()
+ emb, err := r.LineToVector(synthesisPrompt)
+ if err != nil {
+ r.logger.Error("failed to embed synthesis prompt", "error", err)
+ return "", err
+ }
+ embResp := &models.EmbeddingResp{
+ Embedding: emb,
+ Index: 0,
+ }
+ topResults, err := r.searchEmb(embResp, 1)
+ if err != nil {
+ r.logger.Error("failed to search for synthesis context", "error", err)
+ return "", err
+ }
+ if len(topResults) > 0 && topResults[0].RawText != synthesisPrompt {
+ return topResults[0].RawText, nil
+ }
+ var finalAnswer strings.Builder
+ finalAnswer.WriteString("Based on the retrieved context:\n\n")
+ for i, row := range results {
+ if i >= 5 {
+ break
+ }
+ fmt.Fprintf(&finalAnswer, "- From %s: %s\n", row.FileName, truncateString(row.RawText, 200))
+ }
+ return finalAnswer.String(), nil
+}
+
+func truncateString(s string, maxLen int) string {
+ if len(s) <= maxLen {
+ return s
+ }
+ return s[:maxLen] + "..."
+}
+
+func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ r.resetIdleTimer()
+ refined := r.RefineQuery(query)
+ variations := r.GenerateQueryVariations(refined)
+ r.logger.Debug("query variations", "original", query, "refined", refined, "variations", variations)
+
+ // Collect embedding search results from all variations
+ var embResults []models.VectorRow
+ seen := make(map[string]bool)
+ for _, q := range variations {
+ emb, err := r.LineToVector(q)
+ if err != nil {
+ r.logger.Error("failed to embed query variation", "error", err, "query", q)
+ continue
+ }
+ embResp := &models.EmbeddingResp{
+ Embedding: emb,
+ Index: 0,
+ }
+ results, err := r.searchEmb(embResp, limit*2) // Get more candidates
+ if err != nil {
+ r.logger.Error("failed to search embeddings", "error", err, "query", q)
+ continue
+ }
+ for _, row := range results {
+ if !seen[row.Slug] {
+ seen[row.Slug] = true
+ embResults = append(embResults, row)
+ }
+ }
+ }
+ // Sort embedding results by distance (lower is better)
+ sort.Slice(embResults, func(i, j int) bool {
+ return embResults[i].Distance < embResults[j].Distance
+ })
+
+ // Perform keyword search on all variations
+ var kwResults []models.VectorRow
+ seenKw := make(map[string]bool)
+ for _, q := range variations {
+ results, err := r.searchKeyword(q, limit)
+ if err != nil {
+ r.logger.Debug("keyword search failed for variation", "error", err, "query", q)
+ continue
+ }
+ for _, row := range results {
+ if !seenKw[row.Slug] {
+ seenKw[row.Slug] = true
+ kwResults = append(kwResults, row)
+ }
+ }
+ }
+ // Sort keyword results by distance (lower is better)
+ sort.Slice(kwResults, func(i, j int) bool {
+ return kwResults[i].Distance < kwResults[j].Distance
+ })
+
+ // Combine using Reciprocal Rank Fusion (RRF)
+ // Use smaller K for phrase-heavy queries to give more weight to top ranks
+ phraseCount := len(detectPhrases(query))
+ rrfK := 60.0
+ if phraseCount > 0 {
+ rrfK = 30.0
+ }
+ r.logger.Debug("RRF parameters", "phraseCount", phraseCount, "rrfK", rrfK, "query", query)
+ type scoredRow struct {
+ row models.VectorRow
+ score float64
+ }
+ scoreMap := make(map[string]float64)
+ // Add embedding results
+ for rank, row := range embResults {
+ score := 1.0 / (float64(rank) + rrfK)
+ scoreMap[row.Slug] += score
+ if row.Slug == "kjv_bible.epub_1786_0" {
+ r.logger.Debug("target chunk embedding rank", "rank", rank, "score", score)
+ }
+ }
+ // Add keyword results with weight boost when phrases are present
+ kwWeight := 1.0
+ if phraseCount > 0 {
+ kwWeight = 100.0
+ }
+ r.logger.Debug("keyword weight", "kwWeight", kwWeight, "phraseCount", phraseCount)
+ for rank, row := range kwResults {
+ score := kwWeight * (1.0 / (float64(rank) + rrfK))
+ scoreMap[row.Slug] += score
+ if row.Slug == "kjv_bible.epub_1786_0" {
+ r.logger.Debug("target chunk keyword rank", "rank", rank, "score", score, "kwWeight", kwWeight, "rrfK", rrfK)
+ }
+ // Ensure row exists in combined results
+ if _, exists := seen[row.Slug]; !exists {
+ embResults = append(embResults, row)
+ }
+ }
+ // Create slice of scored rows
+ scoredRows := make([]scoredRow, 0, len(embResults))
+ for _, row := range embResults {
+ score := scoreMap[row.Slug]
+ scoredRows = append(scoredRows, scoredRow{row: row, score: score})
+ }
+ // Debug: log scores for target chunk and top chunks
+ if strings.Contains(strings.ToLower(query), "bald") || strings.Contains(strings.ToLower(query), "she bears") {
+ for _, sr := range scoredRows {
+ if sr.row.Slug == "kjv_bible.epub_1786_0" {
+ r.logger.Debug("target chunk score", "slug", sr.row.Slug, "score", sr.score, "distance", sr.row.Distance)
+ }
+ }
+ // Log top 5 scores
+ for i := 0; i < len(scoredRows) && i < 5; i++ {
+ r.logger.Debug("top scored row", "rank", i+1, "slug", scoredRows[i].row.Slug, "score", scoredRows[i].score, "distance", scoredRows[i].row.Distance)
+ }
+ }
+ // Sort by descending RRF score
+ sort.Slice(scoredRows, func(i, j int) bool {
+ return scoredRows[i].score > scoredRows[j].score
+ })
+ // Take top limit
+ if len(scoredRows) > limit {
+ scoredRows = scoredRows[:limit]
+ }
+ // Convert back to VectorRow
+ finalResults := make([]models.VectorRow, len(scoredRows))
+ for i, sr := range scoredRows {
+ finalResults[i] = sr.row
+ }
+ // Apply reranking heuristics
+ reranked := r.RerankResults(finalResults, query)
+ return reranked, nil
+}
+
+var (
+ ragInstance *RAG
+ ragOnce sync.Once
+)
+
+func (r *RAG) FallbackMessage() string {
+ return r.fallbackMsg
+}
+
+func Init(c *config.Config, l *slog.Logger, s storage.FullRepo) error {
+ var err error
+ ragOnce.Do(func() {
+ if c == nil || l == nil || s == nil {
+ return
+ }
+ ragInstance, err = New(l, s, c)
+ })
+ return err
+}
+
+func GetInstance() *RAG {
+ return ragInstance
+}
+
+func (r *RAG) resetIdleTimer() {
+ r.idleMu.Lock()
+ defer r.idleMu.Unlock()
+ if r.idleTimer != nil {
+ r.idleTimer.Stop()
+ }
+ r.idleTimer = time.AfterFunc(r.idleTimeout, func() {
+ r.freeONNXMemory()
+ })
+}
+
+func (r *RAG) freeONNXMemory() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if onnx, ok := r.embedder.(*ONNXEmbedder); ok {
+ if err := onnx.Destroy(); err != nil {
+ r.logger.Error("failed to free ONNX memory", "error", err)
+ } else {
+ r.logger.Info("freed ONNX VRAM after idle timeout")
+ }
+ }
+}
+
+func (r *RAG) Destroy() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.idleTimer != nil {
+ r.idleTimer.Stop()
+ r.idleTimer = nil
+ }
+ if onnx, ok := r.embedder.(*ONNXEmbedder); ok {
+ if err := onnx.Destroy(); err != nil {
+ r.logger.Error("failed to destroy ONNX embedder", "error", err)
+ }
+ }
+}
+
+// SetEmbedderForTesting replaces the internal embedder with a mock.
+// This function is only available when compiling with the "test" build tag.
+func (r *RAG) SetEmbedderForTesting(e Embedder) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.embedder = e
+}
diff --git a/rag/rag_integration_test.go b/rag/rag_integration_test.go
new file mode 100644
index 0000000..f3405eb
--- /dev/null
+++ b/rag/rag_integration_test.go
@@ -0,0 +1,409 @@
+package rag
+
+import (
+ "fmt"
+ "gf-lt/config"
+ "gf-lt/models"
+ "gf-lt/storage"
+ "log/slog"
+ "testing"
+
+ _ "github.com/glebarez/go-sqlite"
+ "github.com/jmoiron/sqlx"
+)
+
+// mockEmbedder returns zero vectors of a fixed dimension.
+type mockEmbedder struct {
+ dim int
+}
+
+func (m *mockEmbedder) Embed(text string) ([]float32, error) {
+ vec := make([]float32, m.dim)
+ return vec, nil
+}
+
+func (m *mockEmbedder) EmbedSlice(texts []string) ([][]float32, error) {
+ vecs := make([][]float32, len(texts))
+ for i := range vecs {
+ vecs[i] = make([]float32, m.dim)
+ }
+ return vecs, nil
+}
+
+// dummyStore implements storage.FullRepo with a minimal set of methods.
+// Only DB() is used by VectorStorage; other methods return empty values.
+type dummyStore struct {
+ db *sqlx.DB
+}
+
+func (d dummyStore) DB() *sqlx.DB { return d.db }
+
+// ChatHistory methods
+func (d dummyStore) ListChats() ([]models.Chat, error) { return nil, nil }
+func (d dummyStore) GetChatByID(id uint32) (*models.Chat, error) { return nil, nil }
+func (d dummyStore) GetChatByChar(char string) ([]models.Chat, error) { return nil, nil }
+func (d dummyStore) GetLastChat() (*models.Chat, error) { return nil, nil }
+func (d dummyStore) GetLastChatByAgent(agent string) (*models.Chat, error) { return nil, nil }
+func (d dummyStore) UpsertChat(chat *models.Chat) (*models.Chat, error) { return chat, nil }
+func (d dummyStore) RemoveChat(id uint32) error { return nil }
+func (d dummyStore) ChatGetMaxID() (uint32, error) { return 0, nil }
+
+// Memories methods
+func (d dummyStore) Memorise(m *models.Memory) (*models.Memory, error) { return m, nil }
+func (d dummyStore) Recall(agent, topic string) (string, error) { return "", nil }
+func (d dummyStore) RecallTopics(agent string) ([]string, error) { return nil, nil }
+
+// VectorRepo methods (not used but required by interface)
+func (d dummyStore) WriteVector(row *models.VectorRow) error { return nil }
+func (d dummyStore) SearchClosest(q []float32, limit int) ([]models.VectorRow, error) {
+ return nil, nil
+}
+func (d dummyStore) ListFiles() ([]string, error) { return nil, nil }
+func (d dummyStore) RemoveEmbByFileName(filename string) error { return nil }
+
+var _ storage.FullRepo = dummyStore{}
+
+// setupTestRAG creates an in‑memory SQLite database, creates the necessary tables,
+// inserts the provided chunks, and returns a RAG instance with a mock embedder.
+func setupTestRAG(t *testing.T, chunks []*models.VectorRow) (*RAG, error) {
+ t.Helper()
+ db, err := sqlx.Open("sqlite", ":memory:")
+ if err != nil {
+ return nil, fmt.Errorf("open in‑memory db: %w", err)
+ }
+ // Create the required tables (embeddings_768 and fts_embeddings).
+ // Use the same schema as production.
+ _, err = db.Exec(`
+ CREATE TABLE embeddings_768 (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ embeddings BLOB NOT NULL,
+ slug TEXT NOT NULL,
+ raw_text TEXT NOT NULL,
+ filename TEXT NOT NULL DEFAULT ''
+ );
+ `)
+ if err != nil {
+ return nil, fmt.Errorf("create embeddings table: %w", err)
+ }
+ _, err = db.Exec(`
+ CREATE VIRTUAL TABLE fts_embeddings USING fts5(
+ slug UNINDEXED,
+ raw_text,
+ filename UNINDEXED,
+ embedding_size UNINDEXED,
+ tokenize='porter unicode61'
+ );
+ `)
+ if err != nil {
+ return nil, fmt.Errorf("create FTS table: %w", err)
+ }
+ // Create a logger that discards output.
+ logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError}))
+ store := dummyStore{db: db}
+ // Create config with embedding dimension 768.
+ cfg := &config.Config{
+ EmbedDims: 768,
+ RAGWordLimit: 250,
+ RAGOverlapWords: 25,
+ RAGBatchSize: 1,
+ }
+ // Create a RAG instance using New, which will create an embedder based on config.
+ // We'll override the embedder afterwards via reflection.
+ rag, err := New(logger, store, cfg)
+ if err != nil {
+ return nil, fmt.Errorf("create RAG: %w", err)
+ }
+ // Replace the embedder with our mock.
+ rag.SetEmbedderForTesting(&mockEmbedder{dim: cfg.EmbedDims})
+ // Insert the provided chunks using the storage directly.
+ if len(chunks) > 0 {
+ // Ensure each chunk has embeddings of correct dimension (zero vector).
+ for _, chunk := range chunks {
+ if len(chunk.Embeddings) != cfg.EmbedDims {
+ chunk.Embeddings = make([]float32, cfg.EmbedDims)
+ }
+ }
+ err = rag.storage.WriteVectors(chunks)
+ if err != nil {
+ return nil, fmt.Errorf("write test chunks: %w", err)
+ }
+ }
+ return rag, nil
+}
+
+// createTestChunks returns a slice of VectorRow representing the target chunk
+// (kjv_bible.epub_1786_0), several bald‑related noise chunks, and unrelated chunks.
+func createTestChunks() []*models.VectorRow {
+ // Target chunk: 2 Kings 2:23‑24 containing "bald head" and "two she bears".
+ targetRaw := `And he said, Ye shall not send.
+
+
+2:17 And when they urged him till he was ashamed, he said, Send. They sent
+therefore fifty men; and they sought three days, but found him not.
+
+
+2:18 And when they came again to him, (for he tarried at Jericho,) he said unto
+them, Did I not say unto you, Go not? 2:19 And the men of the city said unto
+Elisha, Behold, I pray thee, the situation of this city is pleasant, as my lord
+seeth: but the water is naught, and the ground barren.
+
+
+2:20 And he said, Bring me a new cruse, and put salt therein. And they brought
+it to him.
+
+
+2:21 And he went forth unto the spring of the waters, and cast the salt in
+there, and said, Thus saith the LORD, I have healed these waters; there shall
+not be from thence any more death or barren land.
+
+
+2:22 So the waters were healed unto this day, according to the saying of Elisha
+which he spake.
+
+
+2:23 And he went up from thence unto Bethel: and as he was going up by the way,
+there came forth little children out of the city, and mocked him, and said unto
+him, Go up, thou bald head; go up, thou bald head.
+
+
+2:24 And he turned back, and looked on them, and cursed them in the name of the
+LORD. And there came forth two she bears out of the wood, and tare forty and
+two children of them.`
+ // Noise chunk 1: Leviticus containing "bald locust"
+ noise1Raw := `11:12 Whatsoever hath no fins nor scales in the waters, that shall be an
+abomination unto you.
+
+
+11:13 And these are they which ye shall have in abomination among the fowls;
+they shall not be eaten, they are an abomination: the eagle, and the ossifrage,
+and the ospray, 11:14 And the vulture, and the kite after his kind; 11:15 Every
+raven after his kind; 11:16 And the owl, and the night hawk, and the cuckow,
+and the hawk after his kind, 11:17 And the little owl, and the cormorant, and
+the great owl, 11:18 And the swan, and the pelican, and the gier eagle, 11:19
+And the stork, the heron after her kind, and the lapwing, and the bat.
+
+
+11:20 All fowls that creep, going upon all four, shall be an abomination unto
+you.
+
+
+11:21 Yet these may ye eat of every flying creeping thing that goeth upon all
+four, which have legs above their feet, to leap withal upon the earth; 11:22
+Even these of them ye may eat; the locust after his kind, and the bald locust
+after his kind, and the beetle after his kind, and the grasshopper after his
+kind.
+
+
+11:23 But all other flying creeping things, which have four feet, shall be an
+abomination unto you.
+
+
+11:24 And for these ye shall be unclean: whosoever toucheth the carcase of them
+shall be unclean until the even.`
+ // Noise chunk 2: Leviticus containing "bald"
+ noise2Raw := `11:13 And these are they which ye shall have in abomination among the fowls;
+they shall not be eaten, they are an abomination: the eagle, and the ossifrage,
+and the ospray, 11:14 And the vulture, and the kite after his kind; 11:15 Every
+raven after his kind; 11:16 And the owl, and the night hawk, and the cuckow,
+and the hawk after his kind, 11:17 And the little owl, and the cormorant, and
+the great owl, 11:18 And the swan, and the pelican, and the gier eagle, 11:19
+And the stork, the heron after her kind, and the lapwing, and the bat.
+
+
+11:20 All fowls that creep, going upon all four, shall be an abomination unto
+you.
+
+
+11:21 Yet these may ye eat of every flying creeping thing that goeth upon all
+four, which have legs above their feet, to leap withal upon the earth; 11:22
+Even these of them ye may eat; the locust after his kind, and the bald locust
+after his kind, and the beetle after his kind, and the grasshopper after his
+kind.
+
+
+11:23 But all other flying creeping things, which have four feet, shall be an
+abomination unto you.
+
+
+11:24 And for these ye shall be unclean: whosoever toucheth the carcase of them
+shall be unclean until the even.`
+ // Additional Leviticus noise chunks (simulating 28 bald-related chunks)
+ // Using variations of the same text with different slugs
+ leviticusSlugs := []string{
+ "kjv_bible.epub_564_0",
+ "kjv_bible.epub_565_0",
+ "kjv_bible.epub_579_0",
+ "kjv_bible.epub_580_0",
+ "kjv_bible.epub_581_0",
+ "kjv_bible.epub_582_0",
+ "kjv_bible.epub_583_0",
+ "kjv_bible.epub_584_0",
+ "kjv_bible.epub_585_0",
+ "kjv_bible.epub_586_0",
+ "kjv_bible.epub_587_0",
+ "kjv_bible.epub_588_0",
+ "kjv_bible.epub_589_0",
+ "kjv_bible.epub_590_0",
+ }
+ leviticusTexts := []string{
+ noise1Raw,
+ noise2Raw,
+ `13:40 And the man whose hair is fallen off his head, he is bald; yet is he
+clean.
+
+
+13:41 And he that hath his hair fallen off from the part of his head toward his
+face, he is forehead bald; yet is he clean.`,
+ `13:42 And if there be in the bald head, or bald forehead, a white reddish sore;
+it is a leprosy sprung up in his bald head, or his bald forehead.`,
+ `13:43 Then the priest shall look upon it: and, behold, if the rising of the
+sore be white reddish in his bald head, or in his bald forehead, as the leprosy
+appearedh in the skin of the flesh;`,
+ `13:44 He is a leprous man, he is unclean: the priest shall pronounce him utterly
+unclean; his plague is in his head.`,
+ `13:45 And the leper in whom the plague is, his clothes shall be rent, and his
+head bare, and he shall put a covering upon his upper lip, and shall cry,
+Unclean, unclean.`,
+ `13:46 All the days wherein the plague shall be in him he shall be defiled; he
+is unclean: he shall dwell alone; without the camp shall his habitation be.`,
+ `13:47 The garment also that the plague of leprosy is in, whether it be a woollen
+garment, or a linen garment;`,
+ `13:48 Whether it be in the warp, or woof; of linen, or of woollen; whether in a
+skin, or in any thing made of skin;`,
+ `13:49 And if the plague be greenish or reddish in the garment, or in the skin,
+either in the warp, or in the woof, or in any thing of skin; it is a plague of
+leprosy, and shall be shewed unto the priest:`,
+ `13:50 And the priest shall look upon the plague, and shut up it that hath the
+plague seven days:`,
+ `13:51 And he shall look on the plague on the seventh day: if the plague be spread
+in the garment, either in the warp, or in the woof, or in a skin, or in any work
+that is made of skin; the plague is a fretting leprosy; it is unclean.`,
+ `13:52 He shall therefore burn that garment, whether warp or woof, in woollen or
+in linen, or any thing of skin, wherein the plague is: for it is a fretting
+leprosy; it shall be burnt in the fire.`,
+ }
+ // Unrelated chunk 1: ghost_7.txt_777_0
+ unrelated1Raw := `Doesn’t he have any pride as a hunter?!
+
+I didn’t see what other choice I had. I would just have to grovel and be ready to flee at any given moment.
+The Hidden Curse clan house was in the central region of the imperial capital. It was a high-class area with extraordinary property values that hosted the residences of people like Lord Gladis. This district was near the Imperial Castle, though “near” was a
+relative term as it was still a few kilometers away.
+
+The clan house was made of brick and conformed to an older style of architecture.`
+ // Unrelated chunk 2: ghost_7.txt_778_0
+ unrelated2Raw := `I would just have to grovel and be ready to flee at any given moment.
+The Hidden Curse clan house was in the central region of the imperial capital. It was a high-class area with extraordinary property values that hosted the residences of people like Lord Gladis. This district was near the Imperial Castle, though “near” was a
+relative term as it was still a few kilometers away.
+
+The clan house was made of brick and conformed to an older style of architecture. Nearly everyone knew about this mansion and its clock tower. It stood tall over the neighboring mansions and rumor had it that you could see the whole capital from the top. It
+spoke to this clan’s renown and history that they were able to get away with building something that dwarfed the mansions of the nobility.`
+
+ chunks := []*models.VectorRow{
+ {
+ Slug: "kjv_bible.epub_1786_0",
+ RawText: targetRaw,
+ FileName: "kjv_bible.epub",
+ Embeddings: nil, // will be filled with zero vector later
+ },
+ }
+ // Add Leviticus noise chunks
+ for i, slug := range leviticusSlugs {
+ text := leviticusTexts[i%len(leviticusTexts)]
+ chunks = append(chunks, &models.VectorRow{
+ Slug: slug,
+ RawText: text,
+ FileName: "kjv_bible.epub",
+ Embeddings: nil,
+ })
+ }
+ // Add unrelated chunks
+ chunks = append(chunks,
+ &models.VectorRow{
+ Slug: "ghost_7.txt_777_0",
+ RawText: unrelated1Raw,
+ FileName: "ghost_7.txt",
+ Embeddings: nil,
+ },
+ &models.VectorRow{
+ Slug: "ghost_7.txt_778_0",
+ RawText: unrelated2Raw,
+ FileName: "ghost_7.txt",
+ Embeddings: nil,
+ },
+ )
+ return chunks
+}
+func assertTargetInTopN(t *testing.T, results []models.VectorRow, topN int) bool {
+ t.Helper()
+ for i, row := range results {
+ if i >= topN {
+ break
+ }
+ if row.Slug == "kjv_bible.epub_1786_0" {
+ return true
+ }
+ }
+ return false
+}
+
+func TestBiblicalQuery(t *testing.T) {
+ chunks := createTestChunks()
+ rag, err := setupTestRAG(t, chunks)
+ if err != nil {
+ t.Fatalf("setup failed: %v", err)
+ }
+ query := "bald prophet and two she bears"
+ results, err := rag.Search(query, 10)
+ if err != nil {
+ t.Fatalf("search failed: %v", err)
+ }
+ // The target chunk should be in the top results.
+ if !assertTargetInTopN(t, results, 5) {
+ t.Errorf("target chunk not found in top 5 results for query %q", query)
+ t.Logf("results slugs: %v", func() []string {
+ slugs := make([]string, len(results))
+ for i, r := range results {
+ slugs[i] = r.Slug
+ }
+ return slugs
+ }())
+ }
+}
+
+func TestQueryVariations(t *testing.T) {
+ chunks := createTestChunks()
+ rag, err := setupTestRAG(t, chunks)
+ if err != nil {
+ t.Fatalf("setup failed: %v", err)
+ }
+ tests := []struct {
+ name string
+ query string
+ topN int
+ }{
+ {"she bears", "she bears", 5},
+ {"bald head", "bald head", 5},
+ {"two she bears out of the wood", "two she bears out of the wood", 5},
+ {"bald prophet", "bald prophet", 10},
+ {"go up thou bald head", "\"go up thou bald head\"", 5},
+ {"two she bears", "\"two she bears\"", 5},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ results, err := rag.Search(tt.query, 10)
+ if err != nil {
+ t.Fatalf("search failed: %v", err)
+ }
+ if !assertTargetInTopN(t, results, tt.topN) {
+ t.Errorf("target chunk not found in top %d results for query %q", tt.topN, tt.query)
+ t.Logf("results slugs: %v", func() []string {
+ slugs := make([]string, len(results))
+ for i, r := range results {
+ slugs[i] = r.Slug
+ }
+ return slugs
+ }())
+ }
+ })
+ }
+}
diff --git a/rag/rag_real_test.go b/rag/rag_real_test.go
new file mode 100644
index 0000000..87f6906
--- /dev/null
+++ b/rag/rag_real_test.go
@@ -0,0 +1,131 @@
+package rag
+
+import (
+ "gf-lt/config"
+ "gf-lt/storage"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestRealBiblicalQuery(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping real embedder test in short mode")
+ }
+ // Check if the embedder model exists
+ modelPath := filepath.Join("..", "onnx", "embedgemma", "model_q4.onnx")
+ if _, err := os.Stat(modelPath); os.IsNotExist(err) {
+ t.Skipf("embedder model not found at %s; skipping real embedder test", modelPath)
+ }
+ tokenizerPath := filepath.Join("..", "onnx", "embedgemma", "tokenizer.json")
+ dbPath := filepath.Join("..", "gflt.db")
+ if _, err := os.Stat(dbPath); os.IsNotExist(err) {
+ t.Skipf("database not found at %s; skipping real embedder test", dbPath)
+ }
+ cfg := &config.Config{
+ EmbedModelPath: modelPath,
+ EmbedTokenizerPath: tokenizerPath,
+ EmbedDims: 768,
+ RAGWordLimit: 250,
+ RAGOverlapWords: 25,
+ RAGBatchSize: 1,
+ }
+ logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError}))
+ store := storage.NewProviderSQL(dbPath, logger)
+ if store == nil {
+ t.Fatal("failed to create storage provider")
+ }
+ rag, err := New(logger, store, cfg)
+ if err != nil {
+ t.Fatalf("failed to create RAG instance: %v", err)
+ }
+ t.Cleanup(func() { rag.Destroy() })
+
+ query := "bald prophet and two she bears"
+ results, err := rag.Search(query, 30)
+ if err != nil {
+ t.Fatalf("search failed: %v", err)
+ }
+ found := false
+ for i, row := range results {
+ if row.Slug == "kjv_bible.epub_1786_0" {
+ found = true
+ t.Logf("target chunk found at rank %d", i+1)
+ break
+ }
+ }
+ if !found {
+ t.Errorf("target chunk not found in search results for query %q", query)
+ t.Logf("results slugs:")
+ for i, r := range results {
+ t.Logf("%d: %s", i+1, r.Slug)
+ }
+ }
+}
+
+func TestRealQueryVariations(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping real embedder test in short mode")
+ }
+ modelPath := filepath.Join("..", "onnx", "embedgemma", "model_q4.onnx")
+ if _, err := os.Stat(modelPath); os.IsNotExist(err) {
+ t.Skipf("embedder model not found at %s; skipping real embedder test", modelPath)
+ }
+ tokenizerPath := filepath.Join("..", "onnx", "embedgemma", "tokenizer.json")
+ dbPath := filepath.Join("..", "gflt.db")
+ if _, err := os.Stat(dbPath); os.IsNotExist(err) {
+ t.Skipf("database not found at %s; skipping real embedder test", dbPath)
+ }
+ cfg := &config.Config{
+ EmbedModelPath: modelPath,
+ EmbedTokenizerPath: tokenizerPath,
+ EmbedDims: 768,
+ RAGWordLimit: 250,
+ RAGOverlapWords: 25,
+ RAGBatchSize: 1,
+ }
+ logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError}))
+ store := storage.NewProviderSQL(dbPath, logger)
+ if store == nil {
+ t.Fatal("failed to create storage provider")
+ }
+ rag, err := New(logger, store, cfg)
+ if err != nil {
+ t.Fatalf("failed to create RAG instance: %v", err)
+ }
+ t.Cleanup(func() { rag.Destroy() })
+
+ tests := []struct {
+ name string
+ query string
+ }{
+ {"she bears", "she bears"},
+ {"bald head", "bald head"},
+ {"two she bears out of the wood", "two she bears out of the wood"},
+ {"bald prophet", "bald prophet"},
+ {"go up thou bald head", "\"go up thou bald head\""},
+ {"two she bears", "\"two she bears\""},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ results, err := rag.Search(tt.query, 10)
+ if err != nil {
+ t.Fatalf("search failed: %v", err)
+ }
+ found := false
+ for _, row := range results {
+ if row.Slug == "kjv_bible.epub_1786_0" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("target chunk not found for query %q", tt.query)
+ for i, r := range results {
+ t.Logf("%d: %s", i+1, r.Slug)
+ }
+ }
+ })
+ }
+}
diff --git a/rag/rag_test.go b/rag/rag_test.go
new file mode 100644
index 0000000..4944007
--- /dev/null
+++ b/rag/rag_test.go
@@ -0,0 +1,155 @@
+package rag
+
+import (
+ "testing"
+)
+
+func TestDetectPhrases(t *testing.T) {
+ tests := []struct {
+ query string
+ expect []string
+ }{
+ {
+ query: "bald prophet and two she bears",
+ expect: []string{"bald prophet", "two she", "two she bears", "she bears"},
+ },
+ {
+ query: "she bears",
+ expect: []string{"she bears"},
+ },
+ {
+ query: "the quick brown fox",
+ expect: []string{"quick brown", "quick brown fox", "brown fox"},
+ },
+ {
+ query: "in the house", // stop words
+ expect: []string{}, // "in" and "the" are stop words
+ },
+ {
+ query: "a", // short
+ expect: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ got := detectPhrases(tt.query)
+ if len(got) != len(tt.expect) {
+ t.Errorf("detectPhrases(%q) = %v, want %v", tt.query, got, tt.expect)
+ continue
+ }
+ for i := range got {
+ if got[i] != tt.expect[i] {
+ t.Errorf("detectPhrases(%q) = %v, want %v", tt.query, got, tt.expect)
+ break
+ }
+ }
+ }
+}
+
+func TestCountPhraseMatches(t *testing.T) {
+ tests := []struct {
+ text string
+ query string
+ expect int
+ }{
+ {
+ text: "two she bears came out of the wood",
+ query: "she bears",
+ expect: 1,
+ },
+ {
+ text: "bald head and she bears",
+ query: "bald prophet and two she bears",
+ expect: 1, // only "she bears" matches
+ },
+ {
+ text: "no match here",
+ query: "she bears",
+ expect: 0,
+ },
+ {
+ text: "she bears and bald prophet",
+ query: "bald prophet she bears",
+ expect: 2, // "she bears" and "bald prophet"
+ },
+ }
+
+ for _, tt := range tests {
+ got := countPhraseMatches(tt.text, tt.query)
+ if got != tt.expect {
+ t.Errorf("countPhraseMatches(%q, %q) = %d, want %d", tt.text, tt.query, got, tt.expect)
+ }
+ }
+}
+
+func TestAreSlugsAdjacent(t *testing.T) {
+ tests := []struct {
+ slug1 string
+ slug2 string
+ expect bool
+ }{
+ {
+ slug1: "kjv_bible.epub_1786_0",
+ slug2: "kjv_bible.epub_1787_0",
+ expect: true,
+ },
+ {
+ slug1: "kjv_bible.epub_1787_0",
+ slug2: "kjv_bible.epub_1786_0",
+ expect: true,
+ },
+ {
+ slug1: "kjv_bible.epub_1786_0",
+ slug2: "kjv_bible.epub_1788_0",
+ expect: false,
+ },
+ {
+ slug1: "otherfile.txt_1_0",
+ slug2: "kjv_bible.epub_1786_0",
+ expect: false,
+ },
+ {
+ slug1: "file_1_0",
+ slug2: "file_1_1",
+ expect: true,
+ },
+ {
+ slug1: "file_1_0",
+ slug2: "file_2_0", // different batch
+ expect: true, // sequential batches with same chunk index are adjacent
+ },
+ }
+
+ for _, tt := range tests {
+ got := areSlugsAdjacent(tt.slug1, tt.slug2)
+ if got != tt.expect {
+ t.Errorf("areSlugsAdjacent(%q, %q) = %v, want %v", tt.slug1, tt.slug2, got, tt.expect)
+ }
+ }
+}
+
+func TestParseSlugIndices(t *testing.T) {
+ tests := []struct {
+ slug string
+ wantBatch int
+ wantChunk int
+ wantOk bool
+ }{
+ {"kjv_bible.epub_1786_0", 1786, 0, true},
+ {"file_1_5", 1, 5, true},
+ {"no_underscore", 0, 0, false},
+ {"file_abc_def", 0, 0, false},
+ {"file_123_456_extra", 456, 0, false}, // regex matches last two numbers
+ }
+
+ for _, tt := range tests {
+ batch, chunk, ok := parseSlugIndices(tt.slug)
+ if ok != tt.wantOk {
+ t.Errorf("parseSlugIndices(%q) ok = %v, want %v", tt.slug, ok, tt.wantOk)
+ continue
+ }
+ if ok && (batch != tt.wantBatch || chunk != tt.wantChunk) {
+ t.Errorf("parseSlugIndices(%q) = (%d, %d), want (%d, %d)", tt.slug, batch, chunk, tt.wantBatch, tt.wantChunk)
+ }
+ }
+}
diff --git a/rag/storage.go b/rag/storage.go
new file mode 100644
index 0000000..a53f767
--- /dev/null
+++ b/rag/storage.go
@@ -0,0 +1,446 @@
+package rag
+
+import (
+ "database/sql"
+ "encoding/binary"
+ "fmt"
+ "gf-lt/models"
+ "gf-lt/storage"
+ "log/slog"
+ "sort"
+ "strings"
+ "unsafe"
+
+ "github.com/jmoiron/sqlx"
+)
+
+// VectorStorage handles storing and retrieving vectors from SQLite
+type VectorStorage struct {
+ logger *slog.Logger
+ sqlxDB *sqlx.DB
+ store storage.FullRepo
+}
+
+func NewVectorStorage(logger *slog.Logger, store storage.FullRepo) *VectorStorage {
+ return &VectorStorage{
+ logger: logger,
+ sqlxDB: store.DB(), // Use the new DB() method
+ store: store,
+ }
+}
+
+// SerializeVector converts []float32 to binary blob
+func SerializeVector(vec []float32) []byte {
+ buf := make([]byte, len(vec)*4) // 4 bytes per float32
+ for i, v := range vec {
+ binary.LittleEndian.PutUint32(buf[i*4:], mathFloat32bits(v))
+ }
+ return buf
+}
+
+// DeserializeVector converts binary blob back to []float32
+func DeserializeVector(data []byte) []float32 {
+ count := len(data) / 4
+ vec := make([]float32, count)
+ for i := 0; i < count; i++ {
+ vec[i] = mathBitsToFloat32(binary.LittleEndian.Uint32(data[i*4:]))
+ }
+ return vec
+}
+
+// mathFloat32bits and mathBitsToFloat32 are helpers to convert between float32 and uint32
+func mathFloat32bits(f float32) uint32 {
+ return binary.LittleEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&f)))[:4])
+}
+
+func mathBitsToFloat32(b uint32) float32 {
+ return *(*float32)(unsafe.Pointer(&b))
+}
+
+// WriteVector stores an embedding vector in the database
+func (vs *VectorStorage) WriteVector(row *models.VectorRow) error {
+ tableName, err := vs.getTableName(row.Embeddings)
+ if err != nil {
+ return err
+ }
+ embeddingSize := len(row.Embeddings)
+ // Start transaction
+ tx, err := vs.sqlxDB.Beginx()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ _ = tx.Rollback()
+ }
+ }()
+
+ // Serialize the embeddings to binary
+ serializedEmbeddings := SerializeVector(row.Embeddings)
+ query := fmt.Sprintf(
+ "INSERT INTO %s (embeddings, slug, raw_text, filename) VALUES (?, ?, ?, ?)",
+ tableName,
+ )
+ if _, err := tx.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName); err != nil {
+ vs.logger.Error("failed to write vector", "error", err, "slug", row.Slug)
+ return err
+ }
+ // Insert into FTS table
+ ftsQuery := `INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES (?, ?, ?, ?)`
+ if _, err := tx.Exec(ftsQuery, row.Slug, row.RawText, row.FileName, embeddingSize); err != nil {
+ vs.logger.Error("failed to write to FTS table", "error", err, "slug", row.Slug)
+ return err
+ }
+ err = tx.Commit()
+ if err != nil {
+ vs.logger.Error("failed to commit transaction", "error", err)
+ return err
+ }
+ return nil
+}
+
+// WriteVectors stores multiple embedding vectors in a single transaction
+func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error {
+ if len(rows) == 0 {
+ return nil
+ }
+ // SQLite has limit of 999 parameters per statement, each row uses 4 parameters
+ const maxBatchSize = 200 // 200 * 4 = 800 < 999
+ if len(rows) > maxBatchSize {
+ // Process in chunks
+ for i := 0; i < len(rows); i += maxBatchSize {
+ end := i + maxBatchSize
+ if end > len(rows) {
+ end = len(rows)
+ }
+ if err := vs.WriteVectors(rows[i:end]); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ // All rows should have same embedding size (same model)
+ firstSize := len(rows[0].Embeddings)
+ for i, row := range rows {
+ if len(row.Embeddings) != firstSize {
+ return fmt.Errorf("embedding size mismatch: row %d has size %d, expected %d", i, len(row.Embeddings), firstSize)
+ }
+ }
+ tableName, err := vs.getTableName(rows[0].Embeddings)
+ if err != nil {
+ return err
+ }
+ // Start transaction
+ tx, err := vs.sqlxDB.Beginx()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ _ = tx.Rollback()
+ }
+ }()
+
+ // Build batch insert for embeddings table
+ embeddingPlaceholders := make([]string, 0, len(rows))
+ embeddingArgs := make([]any, 0, len(rows)*4)
+ for _, row := range rows {
+ embeddingPlaceholders = append(embeddingPlaceholders, "(?, ?, ?, ?)")
+ embeddingArgs = append(embeddingArgs, SerializeVector(row.Embeddings), row.Slug, row.RawText, row.FileName)
+ }
+ embeddingQuery := fmt.Sprintf(
+ "INSERT INTO %s (embeddings, slug, raw_text, filename) VALUES %s",
+ tableName,
+ strings.Join(embeddingPlaceholders, ", "),
+ )
+ if _, err := tx.Exec(embeddingQuery, embeddingArgs...); err != nil {
+ vs.logger.Error("failed to write vectors batch", "error", err, "batch_size", len(rows))
+ return err
+ }
+ // Build batch insert for FTS table
+ ftsPlaceholders := make([]string, 0, len(rows))
+ ftsArgs := make([]any, 0, len(rows)*4)
+ embeddingSize := len(rows[0].Embeddings)
+ for _, row := range rows {
+ ftsPlaceholders = append(ftsPlaceholders, "(?, ?, ?, ?)")
+ ftsArgs = append(ftsArgs, row.Slug, row.RawText, row.FileName, embeddingSize)
+ }
+ ftsQuery := "INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES " +
+ strings.Join(ftsPlaceholders, ", ")
+ if _, err := tx.Exec(ftsQuery, ftsArgs...); err != nil {
+ vs.logger.Error("failed to write FTS batch", "error", err, "batch_size", len(rows))
+ return err
+ }
+ err = tx.Commit()
+ if err != nil {
+ vs.logger.Error("failed to commit transaction", "error", err)
+ return err
+ }
+ vs.logger.Debug("wrote vectors batch", "batch_size", len(rows))
+ return nil
+}
+
+// getTableName determines which table to use based on embedding size
+func (vs *VectorStorage) getTableName(emb []float32) (string, error) {
+ size := len(emb)
+
+ // Check if we support this embedding size
+ supportedSizes := map[int]bool{
+ 384: true,
+ 768: true,
+ 1024: true,
+ 1536: true,
+ 2048: true,
+ 3072: true,
+ 4096: true,
+ 5120: true,
+ }
+ if supportedSizes[size] {
+ return fmt.Sprintf("embeddings_%d", size), nil
+ }
+ return "", fmt.Errorf("no table for embedding size of %d", size)
+}
+
+// SearchClosest finds vectors closest to the query vector using efficient cosine similarity calculation
+func (vs *VectorStorage) SearchClosest(query []float32, limit int) ([]models.VectorRow, error) {
+ if limit <= 0 {
+ limit = 10
+ }
+ tableName, err := vs.getTableName(query)
+ if err != nil {
+ return nil, err
+ }
+ querySQL := "SELECT embeddings, slug, raw_text, filename FROM " + tableName
+ rows, err := vs.sqlxDB.Query(querySQL)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ type SearchResult struct {
+ vector models.VectorRow
+ distance float32
+ }
+ var topResults []SearchResult
+ for rows.Next() {
+ var (
+ embeddingsBlob []byte
+ slug, rawText, fileName string
+ )
+
+ if err := rows.Scan(&embeddingsBlob, &slug, &rawText, &fileName); err != nil {
+ vs.logger.Error("failed to scan row", "error", err)
+ continue
+ }
+ storedEmbeddings := DeserializeVector(embeddingsBlob)
+ similarity := cosineSimilarity(query, storedEmbeddings)
+ distance := 1 - similarity
+
+ result := SearchResult{
+ vector: models.VectorRow{
+ Embeddings: storedEmbeddings,
+ Slug: slug,
+ RawText: rawText,
+ FileName: fileName,
+ },
+ distance: distance,
+ }
+
+ topResults = append(topResults, result)
+ sort.Slice(topResults, func(i, j int) bool {
+ return topResults[i].distance < topResults[j].distance
+ })
+ if len(topResults) > limit {
+ topResults = topResults[:limit]
+ }
+ }
+ results := make([]models.VectorRow, 0, len(topResults))
+ for _, result := range topResults {
+ result.vector.Distance = result.distance
+ results = append(results, result.vector)
+ }
+ return results, nil
+}
+
+// GetVectorBySlug retrieves a vector row by its slug
+func (vs *VectorStorage) GetVectorBySlug(slug string) (*models.VectorRow, error) {
+ embeddingSizes := []int{384, 768, 1024, 1536, 2048, 3072, 4096, 5120}
+ for _, size := range embeddingSizes {
+ table := fmt.Sprintf("embeddings_%d", size)
+ query := fmt.Sprintf("SELECT embeddings, slug, raw_text, filename FROM %s WHERE slug = ?", table)
+ row := vs.sqlxDB.QueryRow(query, slug)
+ var (
+ embeddingsBlob []byte
+ retrievedSlug, rawText, fileName string
+ )
+ if err := row.Scan(&embeddingsBlob, &retrievedSlug, &rawText, &fileName); err != nil {
+ // No row in this table, continue to next size
+ continue
+ }
+ storedEmbeddings := DeserializeVector(embeddingsBlob)
+ return &models.VectorRow{
+ Embeddings: storedEmbeddings,
+ Slug: retrievedSlug,
+ RawText: rawText,
+ FileName: fileName,
+ }, nil
+ }
+ return nil, fmt.Errorf("vector with slug %s not found", slug)
+}
+
+// SearchKeyword performs full-text search using FTS5
+func (vs *VectorStorage) SearchKeyword(query string, limit int) ([]models.VectorRow, error) {
+ // Use FTS5 bm25 ranking. bm25 returns negative values where more negative is better.
+ // We'll order by bm25 (ascending) and limit.
+ ftsQuery := `SELECT slug, raw_text, filename, bm25(fts_embeddings) as score
+ FROM fts_embeddings
+ WHERE fts_embeddings MATCH ?
+ ORDER BY score
+ LIMIT ?`
+
+ // Try original query first
+ rows, err := vs.sqlxDB.Query(ftsQuery, query, limit)
+ if err != nil {
+ return nil, fmt.Errorf("FTS search failed: %w", err)
+ }
+ results, err := vs.scanRows(rows)
+ rows.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ // If no results and query contains multiple terms, try OR fallback
+ if len(results) == 0 && strings.Contains(query, " ") && !strings.Contains(strings.ToUpper(query), " OR ") {
+ // Build OR query: term1 OR term2 OR term3
+ terms := strings.Fields(query)
+ if len(terms) > 1 {
+ orQuery := strings.Join(terms, " OR ")
+ rows, err := vs.sqlxDB.Query(ftsQuery, orQuery, limit)
+ if err != nil {
+ // Return original empty results rather than error
+ return results, nil
+ }
+ orResults, err := vs.scanRows(rows)
+ rows.Close()
+ if err == nil {
+ results = orResults
+ }
+ }
+ }
+ return results, nil
+}
+
+// scanRows converts SQL rows to VectorRow slice
+func (vs *VectorStorage) scanRows(rows *sql.Rows) ([]models.VectorRow, error) {
+ var results []models.VectorRow
+ for rows.Next() {
+ var slug, rawText, fileName string
+ var score float64
+ if err := rows.Scan(&slug, &rawText, &fileName, &score); err != nil {
+ vs.logger.Error("failed to scan FTS row", "error", err)
+ continue
+ }
+ // Convert BM25 score to distance-like metric (lower is better)
+ // BM25 is negative, more negative is better. Keep as negative.
+ distance := float32(score) // Keep negative, more negative is better
+ // No clamping needed; negative distances are fine
+ results = append(results, models.VectorRow{
+ Slug: slug,
+ RawText: rawText,
+ FileName: fileName,
+ Distance: distance,
+ })
+ }
+ return results, nil
+}
+
+// ListFiles returns a list of all loaded files
+func (vs *VectorStorage) ListFiles() ([]string, error) {
+ fileLists := make([][]string, 0)
+ // Query all supported tables and combine results
+ embeddingSizes := []int{384, 768, 1024, 1536, 2048, 3072, 4096, 5120}
+ for _, size := range embeddingSizes {
+ table := fmt.Sprintf("embeddings_%d", size)
+ query := "SELECT DISTINCT filename FROM " + table
+ rows, err := vs.sqlxDB.Query(query)
+ if err != nil {
+ // Continue if one table doesn't exist
+ continue
+ }
+
+ var files []string
+ for rows.Next() {
+ var filename string
+ if err := rows.Scan(&filename); err != nil {
+ continue
+ }
+ files = append(files, filename)
+ }
+ rows.Close()
+
+ fileLists = append(fileLists, files)
+ }
+
+ // Combine and deduplicate
+ fileSet := make(map[string]bool)
+ var allFiles []string
+ for _, files := range fileLists {
+ for _, file := range files {
+ if !fileSet[file] {
+ fileSet[file] = true
+ allFiles = append(allFiles, file)
+ }
+ }
+ }
+ return allFiles, nil
+}
+
+// RemoveEmbByFileName removes all embeddings associated with a specific filename
+func (vs *VectorStorage) RemoveEmbByFileName(filename string) error {
+ var errors []string
+ // Delete from FTS table first
+ if _, err := vs.sqlxDB.Exec("DELETE FROM fts_embeddings WHERE filename = ?", filename); err != nil {
+ errors = append(errors, err.Error())
+ }
+ embeddingSizes := []int{384, 768, 1024, 1536, 2048, 3072, 4096, 5120}
+ for _, size := range embeddingSizes {
+ table := fmt.Sprintf("embeddings_%d", size)
+ query := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", table)
+ if _, err := vs.sqlxDB.Exec(query, filename); err != nil {
+ errors = append(errors, err.Error())
+ }
+ }
+ if len(errors) > 0 {
+ return fmt.Errorf("errors occurred: %s", strings.Join(errors, "; "))
+ }
+ return nil
+}
+
+// cosineSimilarity calculates the cosine similarity between two vectors
+func cosineSimilarity(a, b []float32) float32 {
+ if len(a) != len(b) {
+ return 0.0
+ }
+ var dotProduct, normA, normB float32
+ for i := 0; i < len(a); i++ {
+ dotProduct += a[i] * b[i]
+ normA += a[i] * a[i]
+ normB += b[i] * b[i]
+ }
+ if normA == 0 || normB == 0 {
+ return 0.0
+ }
+ return dotProduct / (sqrt(normA) * sqrt(normB))
+}
+
+// sqrt returns the square root of a float32
+func sqrt(f float32) float32 {
+ // A simple implementation of square root using Newton's method
+ if f == 0 {
+ return 0
+ }
+ guess := f / 2
+ for i := 0; i < 10; i++ { // 10 iterations should be enough for good precision
+ guess = (guess + f/guess) / 2
+ }
+ return guess
+}