From fbc955ca37836553ef4b7c365b84e3dfa859c501 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 14:13:58 +0300 Subject: Enha: local onnx --- rag/embedder.go | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- rag/rag.go | 2 +- 2 files changed, 61 insertions(+), 6 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 1d29877..386d508 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -9,6 +9,10 @@ import ( "gf-lt/models" "log/slog" "net/http" + + "github.com/takara-ai/go-tokenizers/tokenizers" + + "github.com/yalue/onnxruntime_go" ) // Embedder defines the interface for embedding text @@ -134,11 +138,62 @@ func (a *APIEmbedder) EmbedSlice(lines []string) ([][]float32, error) { return embeddings, nil } -// TODO: ONNXEmbedder implementation would go here -// This would require: // 1. Loading ONNX models locally // 2. Using a Go ONNX runtime (like gorgonia/onnx or similar) // 3. Converting text to embeddings without external API calls -// -// For now, we'll focus on the API implementation which is already working in the current system, -// and can be extended later when we have ONNX runtime integration + +type ONNXEmbedder struct { + session *onnxruntime_go.DynamicAdvancedSession + tokenizer *tokenizers.Tokenizer + dims int // 768, 512, 256, or 128 for Matryoshka +} + +func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { + // Batch processing + inputs := e.prepareBatch(texts) + outputs := make([][]float32, len(texts)) + + // Run batch inference (much faster) + err := e.session.Run(inputs, outputs) + return outputs, err +} + +func NewONNXEmbedder(modelPath string) (*ONNXEmbedder, error) { + // Load ONNX model + session, err := onnxruntime_go.NewDynamicAdvancedSession( + modelPath, // onnx/embedgemma/model_q4.onnx + []string{"input_ids", "attention_mask"}, + []string{"sentence_embedding"}, + nil, + ) + if err != nil { + return nil, err + } + // Load tokenizer (from Hugging Face) + tokenizer, err := tokenizers.FromFile("./tokenizer.json") + return &ONNXEmbedder{ + session: session, + tokenizer: tokenizer, + }, nil +} + +func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { + // Tokenize + tokens := e.tokenizer.Encode(text, true) + // Prepare inputs + inputIDs := []int64{tokens.GetIds()} + attentionMask := []int64{tokens.GetAttentionMask()} + // Run inference + output := onnxruntime_go.NewEmptyTensor[float32]( + onnxruntime_go.NewShape(1, 768), + ) + err := e.session.Run( + map[string]any{ + "input_ids": inputIDs, + "attention_mask": attentionMask, + }, + []string{"sentence_embedding"}, + []any{&output}, + ) + return output.GetData(), nil +} diff --git a/rag/rag.go b/rag/rag.go index b63cb08..3d0f38f 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -246,7 +246,7 @@ func (r *RAG) extractImportantPhrases(query string) string { break } } - if isImportant || len(word) > 3 { + if isImportant || len(word) >= 3 { important = append(important, word) } } -- cgit v1.2.3 From 7c56e27dbe904b3c08b3eee375542011458e297c Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 14:27:19 +0300 Subject: Dep: trying sugarme tokenizer --- rag/embedder.go | 181 +++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 145 insertions(+), 36 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 386d508..396f04b 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -10,8 +10,8 @@ import ( "log/slog" "net/http" - "github.com/takara-ai/go-tokenizers/tokenizers" - + 
"github.com/sugarme/tokenizer" + "github.com/sugarme/tokenizer/pretrained" "github.com/yalue/onnxruntime_go" ) @@ -141,59 +141,168 @@ func (a *APIEmbedder) EmbedSlice(lines []string) ([][]float32, error) { // 1. Loading ONNX models locally // 2. Using a Go ONNX runtime (like gorgonia/onnx or similar) // 3. Converting text to embeddings without external API calls - type ONNXEmbedder struct { session *onnxruntime_go.DynamicAdvancedSession - tokenizer *tokenizers.Tokenizer - dims int // 768, 512, 256, or 128 for Matryoshka + tokenizer *tokenizer.Tokenizer + dims int // embedding dimension (e.g., 768) + logger *slog.Logger } -func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { - // Batch processing - inputs := e.prepareBatch(texts) - outputs := make([][]float32, len(texts)) - - // Run batch inference (much faster) - err := e.session.Run(inputs, outputs) - return outputs, err -} - -func NewONNXEmbedder(modelPath string) (*ONNXEmbedder, error) { - // Load ONNX model +func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) { + // Load tokenizer using sugarme/tokenizer + tok, err := pretrained.FromFile(tokenizerPath) + if err != nil { + return nil, fmt.Errorf("failed to load tokenizer: %w", err) + } + // Create ONNX session session, err := onnxruntime_go.NewDynamicAdvancedSession( modelPath, // onnx/embedgemma/model_q4.onnx []string{"input_ids", "attention_mask"}, []string{"sentence_embedding"}, - nil, + nil, // optional options ) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create ONNX session: %w", err) } - // Load tokenizer (from Hugging Face) - tokenizer, err := tokenizers.FromFile("./tokenizer.json") return &ONNXEmbedder{ session: session, - tokenizer: tokenizer, + tokenizer: tok, + dims: dims, + logger: logger, }, nil } func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { - // Tokenize - tokens := e.tokenizer.Encode(text, true) - // Prepare inputs - inputIDs := []int64{tokens.GetIds()} - attentionMask := []int64{tokens.GetAttentionMask()} - // Run inference - output := onnxruntime_go.NewEmptyTensor[float32]( - onnxruntime_go.NewShape(1, 768), + // 1. Tokenize + encoding, err := e.tokenizer.Encode(text, true) // true = add special tokens + if err != nil { + return nil, fmt.Errorf("tokenization failed: %w", err) + } + // Convert []int32 to []int64 for ONNX + inputIDs := make([]int64, len(encoding.GetIDs())) + for i, id := range encoding.GetIDs() { + inputIDs[i] = int64(id) + } + attentionMask := make([]int64, len(encoding.GetAttentionMask())) + for i, m := range encoding.GetAttentionMask() { + attentionMask[i] = int64(m) + } + // 2. Create input tensors (shape: [1, seq_len]) + seqLen := int64(len(inputIDs)) + inputIDsTensor, err := onnxruntime_go.NewTensor(onnxruntime_go.NewShape(1, seqLen), inputIDs) + if err != nil { + return nil, fmt.Errorf("failed to create input_ids tensor: %w", err) + } + defer inputIDsTensor.Destroy() + maskTensor, err := onnxruntime_go.NewTensor(onnxruntime_go.NewShape(1, seqLen), attentionMask) + if err != nil { + return nil, fmt.Errorf("failed to create attention_mask tensor: %w", err) + } + defer maskTensor.Destroy() + // 3. Create output tensor (shape: [1, dims]) + outputTensor, err := onnxruntime_go.NewEmptyTensor[float32](onnxruntime_go.NewShape(1, int64(e.dims))) + if err != nil { + return nil, fmt.Errorf("failed to create output tensor: %w", err) + } + defer outputTensor.Destroy() + // 4. 
Run inference + err = e.session.Run( + map[string]*onnxruntime_go.Tensor{ + "input_ids": inputIDsTensor, + "attention_mask": maskTensor, + }, + []string{"sentence_embedding"}, + []*onnxruntime_go.Tensor{outputTensor}, ) - err := e.session.Run( - map[string]any{ - "input_ids": inputIDs, - "attention_mask": attentionMask, + if err != nil { + return nil, fmt.Errorf("inference failed: %w", err) + } + // 5. Extract data + outputData := outputTensor.GetData() + // outputTensor is owned by us, but GetData returns a slice that remains valid until Destroy. + // We need to copy if we want to keep it after Destroy (we defer Destroy, so copy now). + embedding := make([]float32, len(outputData)) + copy(embedding, outputData) + return embedding, nil +} + +// EmbedSlice (batch) – to be implemented properly +func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { + if len(texts) == 0 { + return nil, nil + } + // 1. Tokenize all texts and find max length for padding + encodings := make([]*tokenizer.Encoding, len(texts)) + maxLen := 0 + for i, txt := range texts { + enc, err := e.tokenizer.Encode(txt, true) + if err != nil { + return nil, fmt.Errorf("tokenization failed at index %d: %w", i, err) + } + encodings[i] = enc + if l := len(enc.GetIDs()); l > maxLen { + maxLen = l + } + } + // 2. Build padded input_ids and attention_mask (shape: [batch, maxLen]) + batchSize := len(texts) + inputIDs := make([]int64, batchSize*maxLen) + attentionMask := make([]int64, batchSize*maxLen) + for i, enc := range encodings { + ids := enc.GetIDs() + mask := enc.GetAttentionMask() + offset := i * maxLen + // copy actual tokens + for j := 0; j < len(ids); j++ { + inputIDs[offset+j] = int64(ids[j]) + attentionMask[offset+j] = int64(mask[j]) + } + // remaining positions (padding) are already zero-initialized + } + // 3. Create tensors + inputIDsTensor, err := onnxruntime_go.NewTensor( + onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), + inputIDs, + ) + if err != nil { + return nil, err + } + defer inputIDsTensor.Destroy() + maskTensor, err := onnxruntime_go.NewTensor( + onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), + attentionMask, + ) + if err != nil { + return nil, err + } + defer maskTensor.Destroy() + outputTensor, err := onnxruntime_go.NewEmptyTensor[float32]( + onnxruntime_go.NewShape(int64(batchSize), int64(e.dims)), + ) + if err != nil { + return nil, err + } + defer outputTensor.Destroy() + // 4. Run + err = e.session.Run( + map[string]*onnxruntime_go.Tensor{ + "input_ids": inputIDsTensor, + "attention_mask": maskTensor, }, []string{"sentence_embedding"}, - []any{&output}, + []*onnxruntime_go.Tensor{outputTensor}, ) - return output.GetData(), nil + if err != nil { + return nil, err + } + // 5. 
Extract batch results + outputData := outputTensor.GetData() + embeddings := make([][]float32, batchSize) + for i := 0; i < batchSize; i++ { + start := i * e.dims + emb := make([]float32, e.dims) + copy(emb, outputData[start:start+e.dims]) + embeddings[i] = emb + } + return embeddings, nil } -- cgit v1.2.3 From 4bd6883966824cff81b86e8bf79e278165d7d24a Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 14:38:26 +0300 Subject: WIP --- rag/embedder.go | 105 +++++++++++++++++++++++--------------------------------- 1 file changed, 43 insertions(+), 62 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 396f04b..988d91e 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -174,134 +174,115 @@ func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Log func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { // 1. Tokenize - encoding, err := e.tokenizer.Encode(text, true) // true = add special tokens + encoding, err := e.tokenizer.EncodeSingle(text) if err != nil { return nil, fmt.Errorf("tokenization failed: %w", err) } - // Convert []int32 to []int64 for ONNX - inputIDs := make([]int64, len(encoding.GetIDs())) - for i, id := range encoding.GetIDs() { + // 2. Convert to int64 and create attention mask + ids := encoding.Ids + inputIDs := make([]int64, len(ids)) + attentionMask := make([]int64, len(ids)) + for i, id := range ids { inputIDs[i] = int64(id) + attentionMask[i] = 1 } - attentionMask := make([]int64, len(encoding.GetAttentionMask())) - for i, m := range encoding.GetAttentionMask() { - attentionMask[i] = int64(m) - } - // 2. Create input tensors (shape: [1, seq_len]) + // 3. Create input tensors (shape: [1, seq_len]) seqLen := int64(len(inputIDs)) - inputIDsTensor, err := onnxruntime_go.NewTensor(onnxruntime_go.NewShape(1, seqLen), inputIDs) + inputIDsTensor, err := onnxruntime_go.NewTensor[int64]( + onnxruntime_go.NewShape(1, seqLen), + inputIDs, + ) if err != nil { return nil, fmt.Errorf("failed to create input_ids tensor: %w", err) } defer inputIDsTensor.Destroy() - maskTensor, err := onnxruntime_go.NewTensor(onnxruntime_go.NewShape(1, seqLen), attentionMask) + maskTensor, err := onnxruntime_go.NewTensor[int64]( + onnxruntime_go.NewShape(1, seqLen), + attentionMask, + ) if err != nil { return nil, fmt.Errorf("failed to create attention_mask tensor: %w", err) } defer maskTensor.Destroy() - // 3. Create output tensor (shape: [1, dims]) - outputTensor, err := onnxruntime_go.NewEmptyTensor[float32](onnxruntime_go.NewShape(1, int64(e.dims))) + // 4. Create output tensor + outputTensor, err := onnxruntime_go.NewEmptyTensor[float32]( + onnxruntime_go.NewShape(1, int64(e.dims)), + ) if err != nil { return nil, fmt.Errorf("failed to create output tensor: %w", err) } defer outputTensor.Destroy() - // 4. Run inference + // 5. Run inference err = e.session.Run( - map[string]*onnxruntime_go.Tensor{ - "input_ids": inputIDsTensor, - "attention_mask": maskTensor, - }, + []onnxruntime_go.Value{inputIDsTensor, maskTensor}, []string{"sentence_embedding"}, - []*onnxruntime_go.Tensor{outputTensor}, + []onnxruntime_go.Value{outputTensor}, ) if err != nil { return nil, fmt.Errorf("inference failed: %w", err) } - // 5. Extract data + // 6. Copy output data outputData := outputTensor.GetData() - // outputTensor is owned by us, but GetData returns a slice that remains valid until Destroy. - // We need to copy if we want to keep it after Destroy (we defer Destroy, so copy now). 
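	// Why the copy below is still needed: GetData returns a slice backed by
	// memory owned by the ONNX runtime, valid only until the deferred
	// Destroy above runs, so the values are copied into a Go-owned slice.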
embedding := make([]float32, len(outputData)) copy(embedding, outputData) return embedding, nil } -// EmbedSlice (batch) – to be implemented properly func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { - if len(texts) == 0 { - return nil, nil - } - // 1. Tokenize all texts and find max length for padding encodings := make([]*tokenizer.Encoding, len(texts)) maxLen := 0 for i, txt := range texts { - enc, err := e.tokenizer.Encode(txt, true) + enc, err := e.tokenizer.EncodeSingle(txt) if err != nil { - return nil, fmt.Errorf("tokenization failed at index %d: %w", i, err) + return nil, err } encodings[i] = enc - if l := len(enc.GetIDs()); l > maxLen { + if l := len(enc.Ids); l > maxLen { maxLen = l } } - // 2. Build padded input_ids and attention_mask (shape: [batch, maxLen]) batchSize := len(texts) inputIDs := make([]int64, batchSize*maxLen) attentionMask := make([]int64, batchSize*maxLen) for i, enc := range encodings { - ids := enc.GetIDs() - mask := enc.GetAttentionMask() + ids := enc.Ids offset := i * maxLen - // copy actual tokens - for j := 0; j < len(ids); j++ { - inputIDs[offset+j] = int64(ids[j]) - attentionMask[offset+j] = int64(mask[j]) + for j, id := range ids { + inputIDs[offset+j] = int64(id) + attentionMask[offset+j] = 1 } - // remaining positions (padding) are already zero-initialized + // Remaining positions are already zero (padding) } - // 3. Create tensors - inputIDsTensor, err := onnxruntime_go.NewTensor( + // Create tensors with shape [batchSize, maxLen] + inputTensor, _ := onnxruntime_go.NewTensor[int64]( onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), inputIDs, ) - if err != nil { - return nil, err - } - defer inputIDsTensor.Destroy() - maskTensor, err := onnxruntime_go.NewTensor( + defer inputTensor.Destroy() + maskTensor, _ := onnxruntime_go.NewTensor[int64]( onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), attentionMask, ) - if err != nil { - return nil, err - } defer maskTensor.Destroy() - outputTensor, err := onnxruntime_go.NewEmptyTensor[float32]( + outputTensor, _ := onnxruntime_go.NewEmptyTensor[float32]( onnxruntime_go.NewShape(int64(batchSize), int64(e.dims)), ) - if err != nil { - return nil, err - } defer outputTensor.Destroy() - // 4. Run - err = e.session.Run( - map[string]*onnxruntime_go.Tensor{ - "input_ids": inputIDsTensor, - "attention_mask": maskTensor, - }, + err := e.session.Run( + []onnxruntime_go.Value{inputTensor, maskTensor}, []string{"sentence_embedding"}, - []*onnxruntime_go.Tensor{outputTensor}, + []onnxruntime_go.Value{outputTensor}, ) if err != nil { return nil, err } - // 5. Extract batch results - outputData := outputTensor.GetData() + // Extract embeddings per batch item + data := outputTensor.GetData() embeddings := make([][]float32, batchSize) for i := 0; i < batchSize; i++ { start := i * e.dims emb := make([]float32, e.dims) - copy(emb, outputData[start:start+e.dims]) + copy(emb, data[start:start+e.dims]) embeddings[i] = emb } return embeddings, nil -- cgit v1.2.3 From c2757653a3429ab3f9e76081328a3877bc11ed4d Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 14:49:59 +0300 Subject: Fix: buildable --- rag/embedder.go | 2 -- 1 file changed, 2 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 988d91e..6903a5d 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -215,7 +215,6 @@ func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { // 5. 
Run inference err = e.session.Run( []onnxruntime_go.Value{inputIDsTensor, maskTensor}, - []string{"sentence_embedding"}, []onnxruntime_go.Value{outputTensor}, ) if err != nil { @@ -270,7 +269,6 @@ func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { defer outputTensor.Destroy() err := e.session.Run( []onnxruntime_go.Value{inputTensor, maskTensor}, - []string{"sentence_embedding"}, []onnxruntime_go.Value{outputTensor}, ) if err != nil { -- cgit v1.2.3 From ac8c8bb0558a00cf0d025ab8522aaa57b8cba7de Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 19:20:21 +0300 Subject: Enha: onnx config vars --- rag/embedder.go | 23 +++++++++++++++++------ rag/rag.go | 16 ++++++++++++++-- 2 files changed, 31 insertions(+), 8 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 6903a5d..b0a3226 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -9,6 +9,7 @@ import ( "gf-lt/models" "log/slog" "net/http" + "sync" "github.com/sugarme/tokenizer" "github.com/sugarme/tokenizer/pretrained" @@ -148,7 +149,17 @@ type ONNXEmbedder struct { logger *slog.Logger } +var onnxInitOnce sync.Once + func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) { + // Initialize ONNX runtime environment once + onnxInitOnce.Do(func() { + onnxruntime_go.SetSharedLibraryPath("/usr/local/lib/libonnxruntime.so") + err := onnxruntime_go.InitializeEnvironment() + if err != nil { + logger.Error("failed to initialize ONNX runtime", "error", err) + } + }) // Load tokenizer using sugarme/tokenizer tok, err := pretrained.FromFile(tokenizerPath) if err != nil { @@ -195,7 +206,7 @@ func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { if err != nil { return nil, fmt.Errorf("failed to create input_ids tensor: %w", err) } - defer inputIDsTensor.Destroy() + defer func() { _ = inputIDsTensor.Destroy() }() maskTensor, err := onnxruntime_go.NewTensor[int64]( onnxruntime_go.NewShape(1, seqLen), attentionMask, @@ -203,7 +214,7 @@ func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { if err != nil { return nil, fmt.Errorf("failed to create attention_mask tensor: %w", err) } - defer maskTensor.Destroy() + defer func() { _ = maskTensor.Destroy() }() // 4. Create output tensor outputTensor, err := onnxruntime_go.NewEmptyTensor[float32]( onnxruntime_go.NewShape(1, int64(e.dims)), @@ -211,7 +222,7 @@ func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { if err != nil { return nil, fmt.Errorf("failed to create output tensor: %w", err) } - defer outputTensor.Destroy() + defer func() { _ = outputTensor.Destroy() }() // 5. 
Run inference err = e.session.Run( []onnxruntime_go.Value{inputIDsTensor, maskTensor}, @@ -257,16 +268,16 @@ func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), inputIDs, ) - defer inputTensor.Destroy() + defer func() { _ = inputTensor.Destroy() }() maskTensor, _ := onnxruntime_go.NewTensor[int64]( onnxruntime_go.NewShape(int64(batchSize), int64(maxLen)), attentionMask, ) - defer maskTensor.Destroy() + defer func() { _ = maskTensor.Destroy() }() outputTensor, _ := onnxruntime_go.NewEmptyTensor[float32]( onnxruntime_go.NewShape(int64(batchSize), int64(e.dims)), ) - defer outputTensor.Destroy() + defer func() { _ = outputTensor.Destroy() }() err := e.session.Run( []onnxruntime_go.Value{inputTensor, maskTensor}, []onnxruntime_go.Value{outputTensor}, diff --git a/rag/rag.go b/rag/rag.go index 3d0f38f..654afde 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -34,8 +34,20 @@ type RAG struct { } func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG { - // Initialize with API embedder by default, could be configurable later - embedder := NewAPIEmbedder(l, cfg) + var embedder Embedder + if cfg.EmbedModelPath != "" && cfg.EmbedTokenizerPath != "" { + emb, err := NewONNXEmbedder(cfg.EmbedModelPath, cfg.EmbedTokenizerPath, cfg.EmbedDims, l) + if err != nil { + l.Error("failed to create ONNX embedder, falling back to API", "error", err) + embedder = NewAPIEmbedder(l, cfg) + } else { + embedder = emb + l.Info("using ONNX embedder", "model", cfg.EmbedModelPath, "dims", cfg.EmbedDims) + } + } else { + embedder = NewAPIEmbedder(l, cfg) + l.Info("using API embedder", "url", cfg.EmbedURL) + } rag := &RAG{ logger: l, store: s, -- cgit v1.2.3 From efc92d884c36498220e2b8d5ad9e02f84e42d953 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Thu, 5 Mar 2026 20:02:46 +0300 Subject: Chore: onnx library lookup --- rag/embedder.go | 113 ++++++++++++++++++++++++++++++++++++++++++++------------ rag/rag.go | 39 +++++++++++-------- 2 files changed, 113 insertions(+), 39 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index b0a3226..59dbfd2 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -9,6 +9,7 @@ import ( "gf-lt/models" "log/slog" "net/http" + "os" "sync" "github.com/sugarme/tokenizer" @@ -143,47 +144,111 @@ func (a *APIEmbedder) EmbedSlice(lines []string) ([][]float32, error) { // 2. Using a Go ONNX runtime (like gorgonia/onnx or similar) // 3. 
Converting text to embeddings without external API calls type ONNXEmbedder struct { - session *onnxruntime_go.DynamicAdvancedSession - tokenizer *tokenizer.Tokenizer - dims int // embedding dimension (e.g., 768) - logger *slog.Logger + session *onnxruntime_go.DynamicAdvancedSession + tokenizer *tokenizer.Tokenizer + tokenizerPath string + dims int + logger *slog.Logger + mu sync.Mutex + modelPath string } var onnxInitOnce sync.Once +var onnxReady bool +var onnxLibPath string + +var onnxLibPaths = []string{ + "/usr/lib/libonnxruntime.so", + "/usr/local/lib/libonnxruntime.so", + "/usr/lib/x86_64-linux-gnu/libonnxruntime.so", + "/opt/onnxruntime/lib/libonnxruntime.so", +} + +func findONNXLibrary() string { + for _, path := range onnxLibPaths { + if _, err := os.Stat(path); err == nil { + return path + } + } + return "" +} func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) { - // Initialize ONNX runtime environment once - onnxInitOnce.Do(func() { - onnxruntime_go.SetSharedLibraryPath("/usr/local/lib/libonnxruntime.so") - err := onnxruntime_go.InitializeEnvironment() + // Check if model and tokenizer files exist + if _, err := os.Stat(modelPath); err != nil { + return nil, fmt.Errorf("ONNX model not found: %w", err) + } + if _, err := os.Stat(tokenizerPath); err != nil { + return nil, fmt.Errorf("tokenizer not found: %w", err) + } + + // Find ONNX library + onnxLibPath = findONNXLibrary() + if onnxLibPath == "" { + return nil, errors.New("ONNX runtime library not found in standard locations") + } + + emb := &ONNXEmbedder{ + tokenizerPath: tokenizerPath, + dims: dims, + logger: logger, + modelPath: modelPath, + } + return emb, nil +} + +func (e *ONNXEmbedder) ensureInitialized() error { + if e.session != nil { + return nil + } + e.mu.Lock() + defer e.mu.Unlock() + if e.session != nil { + return nil + } + + // Load tokenizer lazily + if e.tokenizer == nil { + tok, err := pretrained.FromFile(e.tokenizerPath) if err != nil { - logger.Error("failed to initialize ONNX runtime", "error", err) + return fmt.Errorf("failed to load tokenizer: %w", err) + } + e.tokenizer = tok + } + + onnxInitOnce.Do(func() { + onnxruntime_go.SetSharedLibraryPath(onnxLibPath) + if err := onnxruntime_go.InitializeEnvironment(); err != nil { + e.logger.Error("failed to initialize ONNX runtime", "error", err) + onnxReady = false + return } + onnxReady = true }) - // Load tokenizer using sugarme/tokenizer - tok, err := pretrained.FromFile(tokenizerPath) - if err != nil { - return nil, fmt.Errorf("failed to load tokenizer: %w", err) + if !onnxReady { + return errors.New("ONNX runtime not ready") } - // Create ONNX session session, err := onnxruntime_go.NewDynamicAdvancedSession( - modelPath, // onnx/embedgemma/model_q4.onnx + e.getModelPath(), []string{"input_ids", "attention_mask"}, []string{"sentence_embedding"}, - nil, // optional options + nil, ) if err != nil { - return nil, fmt.Errorf("failed to create ONNX session: %w", err) - } - return &ONNXEmbedder{ - session: session, - tokenizer: tok, - dims: dims, - logger: logger, - }, nil + return fmt.Errorf("failed to create ONNX session: %w", err) + } + e.session = session + return nil +} + +func (e *ONNXEmbedder) getModelPath() string { + return e.modelPath } func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { + if err := e.ensureInitialized(); err != nil { + return nil, err + } // 1. 
Tokenize encoding, err := e.tokenizer.EncodeSingle(text) if err != nil { diff --git a/rag/rag.go b/rag/rag.go index 654afde..fa30303 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -25,20 +25,23 @@ var ( ) type RAG struct { - logger *slog.Logger - store storage.FullRepo - cfg *config.Config - embedder Embedder - storage *VectorStorage - mu sync.Mutex + logger *slog.Logger + store storage.FullRepo + cfg *config.Config + embedder Embedder + storage *VectorStorage + mu sync.Mutex + fallbackMsg string } -func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG { +func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) { var embedder Embedder + var fallbackMsg string if cfg.EmbedModelPath != "" && cfg.EmbedTokenizerPath != "" { emb, err := NewONNXEmbedder(cfg.EmbedModelPath, cfg.EmbedTokenizerPath, cfg.EmbedDims, l) if err != nil { l.Error("failed to create ONNX embedder, falling back to API", "error", err) + fallbackMsg = err.Error() embedder = NewAPIEmbedder(l, cfg) } else { embedder = emb @@ -49,16 +52,17 @@ func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG { l.Info("using API embedder", "url", cfg.EmbedURL) } rag := &RAG{ - logger: l, - store: s, - cfg: cfg, - embedder: embedder, - storage: NewVectorStorage(l, s), + logger: l, + store: s, + cfg: cfg, + embedder: embedder, + storage: NewVectorStorage(l, s), + fallbackMsg: fallbackMsg, } // Note: Vector tables are created via database migrations, not at runtime - return rag + return rag, nil } func wordCounter(sentence string) int { @@ -449,14 +453,19 @@ var ( ragOnce sync.Once ) +func (r *RAG) FallbackMessage() string { + return r.fallbackMsg +} + func Init(c *config.Config, l *slog.Logger, s storage.FullRepo) error { + var err error ragOnce.Do(func() { if c == nil || l == nil || s == nil { return } - ragInstance = New(l, s, c) + ragInstance, err = New(l, s, c) }) - return nil + return err } func GetInstance() *RAG { -- cgit v1.2.3 From d2caebdb4fd3ad148aad20866503b7d46d546404 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 09:11:25 +0300 Subject: Enha (onnx): use gpu --- rag/embedder.go | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 59dbfd2..13f6a6e 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -156,14 +156,22 @@ type ONNXEmbedder struct { var onnxInitOnce sync.Once var onnxReady bool var onnxLibPath string +var cudaLibPath string var onnxLibPaths = []string{ "/usr/lib/libonnxruntime.so", + "/usr/lib/libonnxruntime.so.1.24.2", "/usr/local/lib/libonnxruntime.so", "/usr/lib/x86_64-linux-gnu/libonnxruntime.so", "/opt/onnxruntime/lib/libonnxruntime.so", } +var cudaLibPaths = []string{ + "/usr/lib/libonnxruntime_providers_cuda.so", + "/usr/local/lib/libonnxruntime_providers_cuda.so", + "/opt/onnxruntime/lib/libonnxruntime_providers_cuda.so", +} + func findONNXLibrary() string { for _, path := range onnxLibPaths { if _, err := os.Stat(path); err == nil { @@ -173,6 +181,15 @@ func findONNXLibrary() string { return "" } +func findCUDALibrary() string { + for _, path := range cudaLibPaths { + if _, err := os.Stat(path); err == nil { + return path + } + } + return "" +} + func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) { // Check if model and tokenizer files exist if _, err := os.Stat(modelPath); err != nil { @@ -188,6 +205,12 @@ func NewONNXEmbedder(modelPath, tokenizerPath 
string, dims int, logger *slog.Log return nil, errors.New("ONNX runtime library not found in standard locations") } + // Find CUDA provider library (optional) + cudaLibPath = findCUDALibrary() + if cudaLibPath == "" { + fmt.Println("WARNING: CUDA provider library not found, will use CPU") + } + emb := &ONNXEmbedder{ tokenizerPath: tokenizerPath, dims: dims, @@ -223,16 +246,56 @@ func (e *ONNXEmbedder) ensureInitialized() error { onnxReady = false return } + // Register CUDA provider if available + if cudaLibPath != "" { + if err := onnxruntime_go.RegisterExecutionProviderLibrary("CUDA", cudaLibPath); err != nil { + e.logger.Warn("failed to register CUDA provider", "error", err) + } + } onnxReady = true }) if !onnxReady { return errors.New("ONNX runtime not ready") } + + // Create session options + opts, err := onnxruntime_go.NewSessionOptions() + if err != nil { + return fmt.Errorf("failed to create session options: %w", err) + } + defer opts.Destroy() + + // Try to add CUDA provider + useCUDA := cudaLibPath != "" + if useCUDA { + cudaOpts, err := onnxruntime_go.NewCUDAProviderOptions() + if err != nil { + e.logger.Warn("failed to create CUDA provider options, falling back to CPU", "error", err) + useCUDA = false + } else { + defer cudaOpts.Destroy() + if err := cudaOpts.Update(map[string]string{"device_id": "0"}); err != nil { + e.logger.Warn("failed to update CUDA options, falling back to CPU", "error", err) + useCUDA = false + } else if err := opts.AppendExecutionProviderCUDA(cudaOpts); err != nil { + e.logger.Warn("failed to append CUDA provider, falling back to CPU", "error", err) + useCUDA = false + } + } + } + + if useCUDA { + e.logger.Info("Using CUDA for ONNX inference") + } else { + e.logger.Info("Using CPU for ONNX inference") + } + + // Create session with options session, err := onnxruntime_go.NewDynamicAdvancedSession( e.getModelPath(), []string{"input_ids", "attention_mask"}, []string{"sentence_embedding"}, - nil, + opts, ) if err != nil { return fmt.Errorf("failed to create ONNX session: %w", err) @@ -304,6 +367,9 @@ func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { } func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) { + if err := e.ensureInitialized(); err != nil { + return nil, err + } encodings := make([]*tokenizer.Encoding, len(texts)) maxLen := 0 for i, txt := range texts { -- cgit v1.2.3 From 4ef0a215119924347c2219f4677f11a96358307f Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 09:32:45 +0300 Subject: Enha (onnx): unload model if noop for 30s --- rag/embedder.go | 13 +++++++++++++ rag/rag.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 13f6a6e..39f4b5c 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -308,6 +308,19 @@ func (e *ONNXEmbedder) getModelPath() string { return e.modelPath } +func (e *ONNXEmbedder) Destroy() error { + e.mu.Lock() + defer e.mu.Unlock() + if e.session != nil { + if err := e.session.Destroy(); err != nil { + return fmt.Errorf("failed to destroy ONNX session: %w", err) + } + e.session = nil + e.logger.Info("ONNX session destroyed, VRAM freed") + } + return nil +} + func (e *ONNXEmbedder) Embed(text string) ([]float32, error) { if err := e.ensureInitialized(); err != nil { return nil, err diff --git a/rag/rag.go b/rag/rag.go index fa30303..d64a3e1 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -12,6 +12,7 @@ import ( "sort" "strings" "sync" + "time" 
"github.com/neurosnap/sentences/english" ) @@ -32,6 +33,8 @@ type RAG struct { storage *VectorStorage mu sync.Mutex fallbackMsg string + idleTimer *time.Timer + idleTimeout time.Duration } func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) { @@ -58,6 +61,7 @@ func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) { embedder: embedder, storage: NewVectorStorage(l, s), fallbackMsg: fallbackMsg, + idleTimeout: 30 * time.Second, } // Note: Vector tables are created via database migrations, not at runtime @@ -187,6 +191,7 @@ func (r *RAG) LoadRAG(fpath string) error { } } r.logger.Debug("finished writing vectors", "batches", batchCount) + r.resetIdleTimer() select { case LongJobStatusCh <- FinishedRAGStatus: default: @@ -196,10 +201,12 @@ func (r *RAG) LoadRAG(fpath string) error { } func (r *RAG) LineToVector(line string) ([]float32, error) { + r.resetIdleTimer() return r.embedder.Embed(line) } func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) { + r.resetIdleTimer() return r.storage.SearchClosest(emb.Embedding) } @@ -208,6 +215,7 @@ func (r *RAG) ListLoaded() ([]string, error) { } func (r *RAG) RemoveFile(filename string) error { + r.resetIdleTimer() return r.storage.RemoveEmbByFileName(filename) } @@ -471,3 +479,38 @@ func Init(c *config.Config, l *slog.Logger, s storage.FullRepo) error { func GetInstance() *RAG { return ragInstance } + +func (r *RAG) resetIdleTimer() { + if r.idleTimer != nil { + r.idleTimer.Stop() + } + r.idleTimer = time.AfterFunc(r.idleTimeout, func() { + r.freeONNXMemory() + }) +} + +func (r *RAG) freeONNXMemory() { + r.mu.Lock() + defer r.mu.Unlock() + if onnx, ok := r.embedder.(*ONNXEmbedder); ok { + if err := onnx.Destroy(); err != nil { + r.logger.Error("failed to free ONNX memory", "error", err) + } else { + r.logger.Info("freed ONNX VRAM after idle timeout") + } + } +} + +func (r *RAG) Destroy() { + r.mu.Lock() + defer r.mu.Unlock() + if r.idleTimer != nil { + r.idleTimer.Stop() + r.idleTimer = nil + } + if onnx, ok := r.embedder.(*ONNXEmbedder); ok { + if err := onnx.Destroy(); err != nil { + r.logger.Error("failed to destroy ONNX embedder", "error", err) + } + } +} -- cgit v1.2.3 From f9866bcf5a7369e28246d51b951e81b5b2a8489f Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 11:20:50 +0300 Subject: Feat (rag): hybrid search attempt --- rag/rag.go | 176 +++++++++++++++++++++++++++++++++++++++++++++------------ rag/storage.go | 119 ++++++++++++++++++++++++++++++++------ 2 files changed, 240 insertions(+), 55 deletions(-) (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index d64a3e1..4e11a0d 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -73,6 +73,74 @@ func wordCounter(sentence string) int { return len(strings.Split(strings.TrimSpace(sentence), " ")) } +func createChunks(sentences []string, wordLimit, overlapWords uint32) []string { + if len(sentences) == 0 { + return nil + } + if overlapWords >= wordLimit { + overlapWords = wordLimit / 2 + } + var chunks []string + i := 0 + for i < len(sentences) { + var chunkWords []string + wordCount := 0 + j := i + for j < len(sentences) && wordCount <= int(wordLimit) { + sentence := sentences[j] + words := strings.Fields(sentence) + chunkWords = append(chunkWords, sentence) + wordCount += len(words) + j++ + // If this sentence alone exceeds limit, still include it and stop + if wordCount > int(wordLimit) { + break + } + } + if len(chunkWords) == 0 { + break + } + chunk := strings.Join(chunkWords, " ") + chunks = append(chunks, 
chunk) + if j >= len(sentences) { + break + } + // Move i forward by skipping overlap + if overlapWords == 0 { + i = j + continue + } + // Calculate how many sentences to skip to achieve overlapWords + overlapRemaining := int(overlapWords) + newI := i + for newI < j && overlapRemaining > 0 { + words := len(strings.Fields(sentences[newI])) + overlapRemaining -= words + if overlapRemaining >= 0 { + newI++ + } + } + if newI == i { + newI = j + } + i = newI + } + return chunks +} + +func sanitizeFTSQuery(query string) string { + // Remove double quotes and other problematic characters for FTS5 + query = strings.ReplaceAll(query, "\"", " ") + query = strings.ReplaceAll(query, "'", " ") + query = strings.ReplaceAll(query, ";", " ") + query = strings.ReplaceAll(query, "\\", " ") + query = strings.TrimSpace(query) + if query == "" { + return "*" // match all + } + return query +} + func (r *RAG) LoadRAG(fpath string) error { r.mu.Lock() defer r.mu.Unlock() @@ -95,31 +163,8 @@ func (r *RAG) LoadRAG(fpath string) error { for i, s := range sentences { sents[i] = s.Text } - // Group sentences into paragraphs based on word limit - paragraphs := []string{} - par := strings.Builder{} - for i := 0; i < len(sents); i++ { - if strings.TrimSpace(sents[i]) != "" { - if par.Len() > 0 { - par.WriteString(" ") - } - par.WriteString(sents[i]) - } - if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) { - paragraph := strings.TrimSpace(par.String()) - if paragraph != "" { - paragraphs = append(paragraphs, paragraph) - } - par.Reset() - } - } - // Handle any remaining content in the paragraph buffer - if par.Len() > 0 { - paragraph := strings.TrimSpace(par.String()) - if paragraph != "" { - paragraphs = append(paragraphs, paragraph) - } - } + // Create chunks with overlap + paragraphs := createChunks(sents, r.cfg.RAGWordLimit, r.cfg.RAGOverlapWords) // Adjust batch size if needed if len(paragraphs) < r.cfg.RAGBatchSize && len(paragraphs) > 0 { r.cfg.RAGBatchSize = len(paragraphs) @@ -205,9 +250,15 @@ func (r *RAG) LineToVector(line string) ([]float32, error) { return r.embedder.Embed(line) } -func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) { +func (r *RAG) SearchEmb(emb *models.EmbeddingResp, limit int) ([]models.VectorRow, error) { r.resetIdleTimer() - return r.storage.SearchClosest(emb.Embedding) + return r.storage.SearchClosest(emb.Embedding, limit) +} + +func (r *RAG) SearchKeyword(query string, limit int) ([]models.VectorRow, error) { + r.resetIdleTimer() + sanitized := sanitizeFTSQuery(query) + return r.storage.SearchKeyword(sanitized, limit) } func (r *RAG) ListLoaded() ([]string, error) { @@ -393,7 +444,7 @@ func (r *RAG) SynthesizeAnswer(results []models.VectorRow, query string) (string Embedding: emb, Index: 0, } - topResults, err := r.SearchEmb(embResp) + topResults, err := r.SearchEmb(embResp, 1) if err != nil { r.logger.Error("failed to search for synthesis context", "error", err) return "", err @@ -422,7 +473,9 @@ func truncateString(s string, maxLen int) string { func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { refined := r.RefineQuery(query) variations := r.GenerateQueryVariations(refined) - allResults := make([]models.VectorRow, 0) + + // Collect embedding search results from all variations + var embResults []models.VectorRow seen := make(map[string]bool) for _, q := range variations { emb, err := r.LineToVector(q) @@ -430,29 +483,78 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { r.logger.Error("failed to 
embed query variation", "error", err, "query", q) continue } - embResp := &models.EmbeddingResp{ Embedding: emb, Index: 0, } - - results, err := r.SearchEmb(embResp) + results, err := r.SearchEmb(embResp, limit*2) // Get more candidates if err != nil { r.logger.Error("failed to search embeddings", "error", err, "query", q) continue } - for _, row := range results { if !seen[row.Slug] { seen[row.Slug] = true - allResults = append(allResults, row) + embResults = append(embResults, row) } } } - reranked := r.RerankResults(allResults, query) - if len(reranked) > limit { - reranked = reranked[:limit] + // Sort embedding results by distance (lower is better) + sort.Slice(embResults, func(i, j int) bool { + return embResults[i].Distance < embResults[j].Distance + }) + + // Perform keyword search + kwResults, err := r.SearchKeyword(refined, limit*2) + if err != nil { + r.logger.Warn("keyword search failed, using only embeddings", "error", err) + kwResults = nil + } + // Sort keyword results by distance (already sorted by BM25 score) + // kwResults already sorted by distance (lower is better) + + // Combine using Reciprocal Rank Fusion (RRF) + const rrfK = 60 + type scoredRow struct { + row models.VectorRow + score float64 + } + scoreMap := make(map[string]float64) + // Add embedding results + for rank, row := range embResults { + score := 1.0 / (float64(rank) + rrfK) + scoreMap[row.Slug] += score + } + // Add keyword results + for rank, row := range kwResults { + score := 1.0 / (float64(rank) + rrfK) + scoreMap[row.Slug] += score + // Ensure row exists in combined results + if _, exists := seen[row.Slug]; !exists { + embResults = append(embResults, row) + } + } + // Create slice of scored rows + scoredRows := make([]scoredRow, 0, len(embResults)) + for _, row := range embResults { + score := scoreMap[row.Slug] + scoredRows = append(scoredRows, scoredRow{row: row, score: score}) + } + // Sort by descending RRF score + sort.Slice(scoredRows, func(i, j int) bool { + return scoredRows[i].score > scoredRows[j].score + }) + // Take top limit + if len(scoredRows) > limit { + scoredRows = scoredRows[:limit] + } + // Convert back to VectorRow + finalResults := make([]models.VectorRow, len(scoredRows)) + for i, sr := range scoredRows { + finalResults[i] = sr.row } + // Apply reranking heuristics + reranked := r.RerankResults(finalResults, query) return reranked, nil } diff --git a/rag/storage.go b/rag/storage.go index 52f6859..08e9d2a 100644 --- a/rag/storage.go +++ b/rag/storage.go @@ -62,6 +62,18 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { if err != nil { return err } + embeddingSize := len(row.Embeddings) + + // Start transaction + tx, err := vs.sqlxDB.Beginx() + if err != nil { + return err + } + defer func() { + if err != nil { + tx.Rollback() + } + }() // Serialize the embeddings to binary serializedEmbeddings := SerializeVector(row.Embeddings) @@ -69,10 +81,23 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { "INSERT INTO %s (embeddings, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName, ) - if _, err := vs.sqlxDB.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName); err != nil { + if _, err := tx.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName); err != nil { vs.logger.Error("failed to write vector", "error", err, "slug", row.Slug) return err } + + // Insert into FTS table + ftsQuery := `INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES (?, ?, ?, ?)` + if _, err := 
tx.Exec(ftsQuery, row.Slug, row.RawText, row.FileName, embeddingSize); err != nil { + vs.logger.Error("failed to write to FTS table", "error", err, "slug", row.Slug) + return err + } + + err = tx.Commit() + if err != nil { + vs.logger.Error("failed to commit transaction", "error", err) + return err + } return nil } @@ -98,16 +123,15 @@ func (vs *VectorStorage) getTableName(emb []float32) (string, error) { } // SearchClosest finds vectors closest to the query vector using efficient cosine similarity calculation -func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, error) { +func (vs *VectorStorage) SearchClosest(query []float32, limit int) ([]models.VectorRow, error) { + if limit <= 0 { + limit = 10 + } tableName, err := vs.getTableName(query) if err != nil { return nil, err } - // For better performance, instead of loading all vectors at once, - // we'll implement batching and potentially add L2 distance-based pre-filtering - // since cosine similarity is related to L2 distance for normalized vectors - querySQL := "SELECT embeddings, slug, raw_text, filename FROM " + tableName rows, err := vs.sqlxDB.Query(querySQL) if err != nil { @@ -115,13 +139,11 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err } defer rows.Close() - // Use a min-heap or simple slice to keep track of top 3 closest vectors type SearchResult struct { vector models.VectorRow distance float32 } var topResults []SearchResult - // Process vectors one by one to avoid loading everything into memory for rows.Next() { var ( embeddingsBlob []byte @@ -134,10 +156,8 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err } storedEmbeddings := DeserializeVector(embeddingsBlob) - - // Calculate cosine similarity (returns value between -1 and 1, where 1 is most similar) similarity := cosineSimilarity(query, storedEmbeddings) - distance := 1 - similarity // Convert to distance where 0 is most similar + distance := 1 - similarity result := SearchResult{ vector: models.VectorRow{ @@ -149,20 +169,15 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err distance: distance, } - // Add to top results and maintain only top 3 topResults = append(topResults, result) - - // Sort and keep only top 3 sort.Slice(topResults, func(i, j int) bool { return topResults[i].distance < topResults[j].distance }) - - if len(topResults) > 3 { - topResults = topResults[:3] // Keep only closest 3 + if len(topResults) > limit { + topResults = topResults[:limit] } } - // Convert back to VectorRow slice results := make([]models.VectorRow, 0, len(topResults)) for _, result := range topResults { result.vector.Distance = result.distance @@ -171,6 +186,70 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err return results, nil } +// GetVectorBySlug retrieves a vector row by its slug +func (vs *VectorStorage) GetVectorBySlug(slug string) (*models.VectorRow, error) { + embeddingSizes := []int{384, 768, 1024, 1536, 2048, 3072, 4096, 5120} + for _, size := range embeddingSizes { + table := fmt.Sprintf("embeddings_%d", size) + query := fmt.Sprintf("SELECT embeddings, slug, raw_text, filename FROM %s WHERE slug = ?", table) + row := vs.sqlxDB.QueryRow(query, slug) + var ( + embeddingsBlob []byte + retrievedSlug, rawText, fileName string + ) + if err := row.Scan(&embeddingsBlob, &retrievedSlug, &rawText, &fileName); err != nil { + // No row in this table, continue to next size + continue + } + storedEmbeddings := 
DeserializeVector(embeddingsBlob) + return &models.VectorRow{ + Embeddings: storedEmbeddings, + Slug: retrievedSlug, + RawText: rawText, + FileName: fileName, + }, nil + } + return nil, fmt.Errorf("vector with slug %s not found", slug) +} + +// SearchKeyword performs full-text search using FTS5 +func (vs *VectorStorage) SearchKeyword(query string, limit int) ([]models.VectorRow, error) { + // Use FTS5 bm25 ranking. bm25 returns negative values where more negative is better. + // We'll order by bm25 (ascending) and limit. + ftsQuery := `SELECT slug, raw_text, filename, bm25(fts_embeddings) as score + FROM fts_embeddings + WHERE fts_embeddings MATCH ? + ORDER BY score + LIMIT ?` + rows, err := vs.sqlxDB.Query(ftsQuery, query, limit) + if err != nil { + return nil, fmt.Errorf("FTS search failed: %w", err) + } + defer rows.Close() + var results []models.VectorRow + for rows.Next() { + var slug, rawText, fileName string + var score float64 + if err := rows.Scan(&slug, &rawText, &fileName, &score); err != nil { + vs.logger.Error("failed to scan FTS row", "error", err) + continue + } + // Convert BM25 score to distance-like metric (lower is better) + // BM25 is negative, more negative is better. We'll normalize to positive distance. + distance := float32(-score) // Make positive (since score is negative) + if distance < 0 { + distance = 0 + } + results = append(results, models.VectorRow{ + Slug: slug, + RawText: rawText, + FileName: fileName, + Distance: distance, + }) + } + return results, nil +} + // ListFiles returns a list of all loaded files func (vs *VectorStorage) ListFiles() ([]string, error) { fileLists := make([][]string, 0) @@ -215,6 +294,10 @@ func (vs *VectorStorage) ListFiles() ([]string, error) { // RemoveEmbByFileName removes all embeddings associated with a specific filename func (vs *VectorStorage) RemoveEmbByFileName(filename string) error { var errors []string + // Delete from FTS table first + if _, err := vs.sqlxDB.Exec("DELETE FROM fts_embeddings WHERE filename = ?", filename); err != nil { + errors = append(errors, err.Error()) + } embeddingSizes := []int{384, 768, 1024, 1536, 2048, 3072, 4096, 5120} for _, size := range embeddingSizes { table := fmt.Sprintf("embeddings_%d", size) -- cgit v1.2.3 From 62ec55505ca07701ee6a976895d910b051e725b9 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 13:17:49 +0300 Subject: Enha (rag): query each doc --- rag/rag.go | 46 ++++++++++++++++++++++++++++++++++++++++++---- rag/storage.go | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 5 deletions(-) (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index 4e11a0d..9271b60 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -286,10 +286,13 @@ func (r *RAG) RefineQuery(query string) string { return original } query = strings.ToLower(query) - for _, stopWord := range stopWords { - wordPattern := `\b` + stopWord + `\b` - re := regexp.MustCompile(wordPattern) - query = re.ReplaceAllString(query, "") + words := strings.Fields(query) + if len(words) >= 3 { + for _, stopWord := range stopWords { + wordPattern := `\b` + stopWord + `\b` + re := regexp.MustCompile(wordPattern) + query = re.ReplaceAllString(query, "") + } } query = strings.TrimSpace(query) if len(query) < 5 { @@ -340,6 +343,36 @@ func (r *RAG) GenerateQueryVariations(query string) []string { if len(parts) == 0 { return variations } + // Get loaded filenames to filter out filename terms + filenames, err := r.storage.ListFiles() + if err == nil && len(filenames) > 0 { + // Convert to lowercase for 
case-insensitive matching + lowerFilenames := make([]string, len(filenames)) + for i, f := range filenames { + lowerFilenames[i] = strings.ToLower(f) + } + filteredParts := make([]string, 0, len(parts)) + for _, part := range parts { + partLower := strings.ToLower(part) + skip := false + for _, fn := range lowerFilenames { + if strings.Contains(fn, partLower) || strings.Contains(partLower, fn) { + skip = true + break + } + } + if !skip { + filteredParts = append(filteredParts, part) + } + } + // If filteredParts not empty and different from original, add filtered query + if len(filteredParts) > 0 && len(filteredParts) != len(parts) { + filteredQuery := strings.Join(filteredParts, " ") + if len(filteredQuery) >= 5 { + variations = append(variations, filteredQuery) + } + } + } if len(parts) >= 2 { trimmed := strings.Join(parts[:len(parts)-1], " ") if len(trimmed) >= 5 { @@ -403,9 +436,14 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V }) unique := make([]models.VectorRow, 0) seen := make(map[string]bool) + fileCounts := make(map[string]int) for i := range scored { if !seen[scored[i].row.Slug] { + if fileCounts[scored[i].row.FileName] >= 2 { + continue + } seen[scored[i].row.Slug] = true + fileCounts[scored[i].row.FileName]++ unique = append(unique, scored[i].row) } } diff --git a/rag/storage.go b/rag/storage.go index 08e9d2a..110cea2 100644 --- a/rag/storage.go +++ b/rag/storage.go @@ -1,6 +1,7 @@ package rag import ( + "database/sql" "encoding/binary" "fmt" "gf-lt/models" @@ -221,11 +222,41 @@ func (vs *VectorStorage) SearchKeyword(query string, limit int) ([]models.Vector WHERE fts_embeddings MATCH ? ORDER BY score LIMIT ?` + + // Try original query first rows, err := vs.sqlxDB.Query(ftsQuery, query, limit) if err != nil { return nil, fmt.Errorf("FTS search failed: %w", err) } - defer rows.Close() + results, err := vs.scanRows(rows) + rows.Close() + if err != nil { + return nil, err + } + + // If no results and query contains multiple terms, try OR fallback + if len(results) == 0 && strings.Contains(query, " ") && !strings.Contains(strings.ToUpper(query), " OR ") { + // Build OR query: term1 OR term2 OR term3 + terms := strings.Fields(query) + if len(terms) > 1 { + orQuery := strings.Join(terms, " OR ") + rows, err := vs.sqlxDB.Query(ftsQuery, orQuery, limit) + if err != nil { + // Return original empty results rather than error + return results, nil + } + orResults, err := vs.scanRows(rows) + rows.Close() + if err == nil { + results = orResults + } + } + } + return results, nil +} + +// scanRows converts SQL rows to VectorRow slice +func (vs *VectorStorage) scanRows(rows *sql.Rows) ([]models.VectorRow, error) { var results []models.VectorRow for rows.Next() { var slug, rawText, fileName string -- cgit v1.2.3 From 17b68bc21fae99c17ec48e046e67a643b9d159bb Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 18:58:23 +0300 Subject: Enha (rag): async writes --- rag/embedder.go | 7 +- rag/rag.go | 440 +++++++++++++++++++++++++++++++++++++++++++++++--------- rag/storage.go | 86 +++++++++++ 3 files changed, 464 insertions(+), 69 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index 39f4b5c..fd4cfa7 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "sync" + "time" "github.com/sugarme/tokenizer" "github.com/sugarme/tokenizer/pretrained" @@ -33,8 +34,10 @@ type APIEmbedder struct { func NewAPIEmbedder(l *slog.Logger, cfg *config.Config) *APIEmbedder { return &APIEmbedder{ 
logger: l, - client: &http.Client{}, - cfg: cfg, + client: &http.Client{ + Timeout: 30 * time.Second, + }, + cfg: cfg, } } diff --git a/rag/rag.go b/rag/rag.go index 9271b60..180ad50 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -1,6 +1,7 @@ package rag import ( + "context" "errors" "fmt" "gf-lt/config" @@ -9,6 +10,7 @@ import ( "log/slog" "path" "regexp" + "runtime" "sort" "strings" "sync" @@ -17,9 +19,14 @@ import ( "github.com/neurosnap/sentences/english" ) +const ( + // batchTimeout is the maximum time allowed for embedding a single batch + batchTimeout = 2 * time.Minute +) + var ( // Status messages for TUI integration - LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking + LongJobStatusCh = make(chan string, 100) // Increased buffer size for parallel batch updates FinishedRAGStatus = "finished loading RAG file; press Enter" LoadedFileRAGStatus = "loaded file" ErrRAGStatus = "some error occurred; failed to transfer data to vector db" @@ -31,12 +38,38 @@ type RAG struct { cfg *config.Config embedder Embedder storage *VectorStorage - mu sync.Mutex + mu sync.RWMutex + idleMu sync.Mutex fallbackMsg string idleTimer *time.Timer idleTimeout time.Duration } +// batchTask represents a single batch to be embedded +type batchTask struct { + batchIndex int + paragraphs []string + filename string + totalBatches int +} + +// batchResult represents the result of embedding a batch +type batchResult struct { + batchIndex int + embeddings [][]float32 + paragraphs []string + filename string +} + +// sendStatusNonBlocking sends a status message without blocking +func (r *RAG) sendStatusNonBlocking(status string) { + select { + case LongJobStatusCh <- status: + default: + r.logger.Warn("LongJobStatusCh channel is full or closed, dropping status message", "message", status) + } +} + func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) { var embedder Embedder var fallbackMsg string @@ -142,18 +175,22 @@ func sanitizeFTSQuery(query string) string { } func (r *RAG) LoadRAG(fpath string) error { + return r.LoadRAGWithContext(context.Background(), fpath) +} + +func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { r.mu.Lock() defer r.mu.Unlock() + fileText, err := ExtractText(fpath) if err != nil { return err } r.logger.Debug("rag: loaded file", "fp", fpath) - select { - case LongJobStatusCh <- LoadedFileRAGStatus: - default: - r.logger.Warn("LongJobStatusCh channel is full or closed, dropping status message", "message", LoadedFileRAGStatus) - } + + // Send initial status (non-blocking with retry) + r.sendStatusNonBlocking(LoadedFileRAGStatus) + tokenizer, err := english.NewSentenceTokenizer(nil) if err != nil { return err @@ -163,6 +200,7 @@ func (r *RAG) LoadRAG(fpath string) error { for i, s := range sentences { sents[i] = s.Text } + // Create chunks with overlap paragraphs := createChunks(sents, r.cfg.RAGWordLimit, r.cfg.RAGOverlapWords) // Adjust batch size if needed @@ -172,76 +210,332 @@ func (r *RAG) LoadRAG(fpath string) error { if len(paragraphs) == 0 { return errors.New("no valid paragraphs found in file") } - // Process paragraphs in batches synchronously - batchCount := 0 - for i := 0; i < len(paragraphs); i += r.cfg.RAGBatchSize { - end := i + r.cfg.RAGBatchSize - if end > len(paragraphs) { - end = len(paragraphs) - } - batch := paragraphs[i:end] - batchCount++ - // Filter empty paragraphs - nonEmptyBatch := make([]string, 0, len(batch)) - for _, p := range batch { - if strings.TrimSpace(p) != "" { - nonEmptyBatch = 
append(nonEmptyBatch, strings.TrimSpace(p)) + + totalBatches := (len(paragraphs) + r.cfg.RAGBatchSize - 1) / r.cfg.RAGBatchSize + r.logger.Debug("starting parallel embedding", "total_batches", totalBatches, "batch_size", r.cfg.RAGBatchSize) + + // Determine concurrency level + concurrency := runtime.NumCPU() + if concurrency > totalBatches { + concurrency = totalBatches + } + if concurrency < 1 { + concurrency = 1 + } + // If using ONNX embedder, limit concurrency to 1 due to mutex serialization + isONNX := false + if _, isONNX = r.embedder.(*ONNXEmbedder); isONNX { + concurrency = 1 + } + embedderType := "API" + if isONNX { + embedderType = "ONNX" + } + r.logger.Debug("parallel embedding setup", + "total_batches", totalBatches, + "concurrency", concurrency, + "embedder", embedderType, + "batch_size", r.cfg.RAGBatchSize) + + // Create context with timeout (30 minutes) and cancellation for error handling + ctx, cancel := context.WithTimeout(ctx, 30*time.Minute) + defer cancel() + + // Channels for task distribution and results + taskCh := make(chan batchTask, totalBatches) + resultCh := make(chan batchResult, totalBatches) + errorCh := make(chan error, totalBatches) + + // Start worker goroutines + var wg sync.WaitGroup + for w := 0; w < concurrency; w++ { + wg.Add(1) + go r.embeddingWorker(ctx, w, taskCh, resultCh, errorCh, &wg) + } + + // Close task channel after all tasks are sent (by separate goroutine) + go func() { + // Ensure task channel is closed when this goroutine exits + defer close(taskCh) + r.logger.Debug("task distributor started", "total_batches", totalBatches) + + for i := 0; i < totalBatches; i++ { + start := i * r.cfg.RAGBatchSize + end := start + r.cfg.RAGBatchSize + if end > len(paragraphs) { + end = len(paragraphs) } - } - if len(nonEmptyBatch) == 0 { - continue - } - // Embed the batch - embeddings, err := r.embedder.EmbedSlice(nonEmptyBatch) - if err != nil { - r.logger.Error("failed to embed batch", "error", err, "batch", batchCount) + batch := paragraphs[start:end] + + // Filter empty paragraphs + nonEmptyBatch := make([]string, 0, len(batch)) + for _, p := range batch { + if strings.TrimSpace(p) != "" { + nonEmptyBatch = append(nonEmptyBatch, strings.TrimSpace(p)) + } + } + + task := batchTask{ + batchIndex: i, + paragraphs: nonEmptyBatch, + filename: path.Base(fpath), + totalBatches: totalBatches, + } + select { - case LongJobStatusCh <- ErrRAGStatus: - default: - r.logger.Warn("LongJobStatusCh channel full, dropping message") + case taskCh <- task: + r.logger.Debug("task distributor sent batch", "batch", i, "paragraphs", len(nonEmptyBatch)) + case <-ctx.Done(): + r.logger.Debug("task distributor cancelled", "batches_sent", i+1, "total_batches", totalBatches) + return } - return fmt.Errorf("failed to embed batch %d: %w", batchCount, err) - } - if len(embeddings) != len(nonEmptyBatch) { - err := errors.New("embedding count mismatch") - r.logger.Error("embedding mismatch", "expected", len(nonEmptyBatch), "got", len(embeddings)) - return err - } - // Write vectors to storage - filename := path.Base(fpath) - for j, text := range nonEmptyBatch { - vector := models.VectorRow{ - Embeddings: embeddings[j], - RawText: text, - Slug: fmt.Sprintf("%s_%d_%d", filename, batchCount, j), - FileName: filename, + } + r.logger.Debug("task distributor finished", "batches_sent", totalBatches) + }() + + // Wait for workers to finish and close result channel + go func() { + wg.Wait() + close(resultCh) + }() + + // Process results in order and write to database + nextExpectedBatch := 0 
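// nextExpectedBatch (above) and resultsBuffer (below) form a reorder
// buffer: workers finish batches in arbitrary order, but batches are
// committed strictly by index, so progress messages stay monotonic and a
// failure leaves a contiguous prefix of batches in the database. The
// pattern in isolation (a sketch; names here are illustrative, not part
// of this codebase):
//
//	buf := make(map[int]batchResult)
//	next := 0
//	for res := range resultCh {
//		buf[res.batchIndex] = res
//		for r, ok := buf[next]; ok; r, ok = buf[next] {
//			writeBatch(r) // stand-in for the real write step
//			delete(buf, next)
//			next++
//		}
//	}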
+ resultsBuffer := make(map[int]batchResult) + filename := path.Base(fpath) + batchesProcessed := 0 + + for { + select { + case <-ctx.Done(): + return ctx.Err() + + case err := <-errorCh: + // First error from any worker, cancel everything + cancel() + r.logger.Error("embedding worker failed", "error", err) + r.sendStatusNonBlocking(ErrRAGStatus) + return fmt.Errorf("embedding failed: %w", err) + + case result, ok := <-resultCh: + if !ok { + // All results processed + resultCh = nil + r.logger.Debug("result channel closed", "batches_processed", batchesProcessed, "total_batches", totalBatches) + continue + } + + // Store result in buffer + resultsBuffer[result.batchIndex] = result + + // Process buffered results in order + for { + if res, exists := resultsBuffer[nextExpectedBatch]; exists { + // Write this batch to database + if err := r.writeBatchToStorage(ctx, res, filename); err != nil { + cancel() + return err + } + + batchesProcessed++ + // Send progress update + statusMsg := fmt.Sprintf("processed batch %d/%d", batchesProcessed, totalBatches) + r.sendStatusNonBlocking(statusMsg) + + delete(resultsBuffer, nextExpectedBatch) + nextExpectedBatch++ + } else { + break + } } - if err := r.storage.WriteVector(&vector); err != nil { - r.logger.Error("failed to write vector to DB", "error", err, "slug", vector.Slug) + + default: + // No channels ready, check for deadlock conditions + if resultCh == nil && nextExpectedBatch < totalBatches { + // Missing batch results after result channel closed + r.logger.Error("missing batch results", + "expected", totalBatches, + "received", nextExpectedBatch, + "missing", totalBatches-nextExpectedBatch) + + // Wait a short time for any delayed errors, then cancel select { - case LongJobStatusCh <- ErrRAGStatus: - default: - r.logger.Warn("LongJobStatusCh channel full, dropping message") + case <-time.After(5 * time.Second): + cancel() + return fmt.Errorf("missing batch results: expected %d, got %d", totalBatches, nextExpectedBatch) + case <-ctx.Done(): + return ctx.Err() + case err := <-errorCh: + cancel() + r.logger.Error("embedding worker failed after result channel closed", "error", err) + r.sendStatusNonBlocking(ErrRAGStatus) + return fmt.Errorf("embedding failed: %w", err) } - return fmt.Errorf("failed to write vector: %w", err) + } + // If we reach here, no deadlock yet, just busy loop prevention + time.Sleep(100 * time.Millisecond) + } + + // Check if we're done + if resultCh == nil && nextExpectedBatch >= totalBatches { + r.logger.Debug("all batches processed successfully", "total", totalBatches) + break + } + } + + r.logger.Debug("finished writing vectors", "batches", batchesProcessed) + r.resetIdleTimer() + r.sendStatusNonBlocking(FinishedRAGStatus) + return nil +} + +// embeddingWorker processes batch embedding tasks +func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan batchTask, resultCh chan<- batchResult, errorCh chan<- error, wg *sync.WaitGroup) { + defer wg.Done() + r.logger.Debug("embedding worker started", "worker", workerID) + + // Panic recovery to ensure worker doesn't crash silently + defer func() { + if rec := recover(); rec != nil { + r.logger.Error("embedding worker panicked", "worker", workerID, "panic", rec) + // Try to send error, but don't block if channel is full + select { + case errorCh <- fmt.Errorf("worker %d panicked: %v", workerID, rec): + default: + r.logger.Warn("error channel full, dropping panic error", "worker", workerID) } } - r.logger.Debug("wrote batch to db", "batch", batchCount, "size", 
len(nonEmptyBatch)) - // Send progress status - statusMsg := fmt.Sprintf("processed batch %d/%d", batchCount, (len(paragraphs)+r.cfg.RAGBatchSize-1)/r.cfg.RAGBatchSize) + }() + + for task := range taskCh { select { - case LongJobStatusCh <- statusMsg: + case <-ctx.Done(): + r.logger.Debug("embedding worker cancelled", "worker", workerID) + return default: - r.logger.Warn("LongJobStatusCh channel full, dropping message") } + r.logger.Debug("worker processing batch", "worker", workerID, "batch", task.batchIndex, "paragraphs", len(task.paragraphs), "total_batches", task.totalBatches) + + // Skip empty batches + if len(task.paragraphs) == 0 { + select { + case resultCh <- batchResult{ + batchIndex: task.batchIndex, + embeddings: nil, + paragraphs: nil, + filename: task.filename, + }: + case <-ctx.Done(): + r.logger.Debug("embedding worker cancelled while sending empty batch", "worker", workerID) + return + } + r.logger.Debug("worker sent empty batch", "worker", workerID, "batch", task.batchIndex) + continue + } + + // Embed with retry for API embedder + embeddings, err := r.embedWithRetry(ctx, task.paragraphs, 3) + if err != nil { + // Try to send error, but don't block indefinitely + select { + case errorCh <- fmt.Errorf("worker %d batch %d: %w", workerID, task.batchIndex, err): + case <-ctx.Done(): + r.logger.Debug("embedding worker cancelled while sending error", "worker", workerID) + } + return + } + + // Send result with context awareness + select { + case resultCh <- batchResult{ + batchIndex: task.batchIndex, + embeddings: embeddings, + paragraphs: task.paragraphs, + filename: task.filename, + }: + case <-ctx.Done(): + r.logger.Debug("embedding worker cancelled while sending result", "worker", workerID) + return + } + r.logger.Debug("worker completed batch", "worker", workerID, "batch", task.batchIndex, "embeddings", len(embeddings)) } - r.logger.Debug("finished writing vectors", "batches", batchCount) - r.resetIdleTimer() + r.logger.Debug("embedding worker finished", "worker", workerID) +} + +// embedWithRetry attempts embedding with exponential backoff for API embedder +func (r *RAG) embedWithRetry(ctx context.Context, paragraphs []string, maxRetries int) ([][]float32, error) { + var lastErr error + + for attempt := 0; attempt < maxRetries; attempt++ { + if attempt > 0 { + // Exponential backoff + backoff := time.Duration(attempt*attempt) * time.Second + if backoff > 10*time.Second { + backoff = 10 * time.Second + } + + select { + case <-time.After(backoff): + case <-ctx.Done(): + return nil, ctx.Err() + } + + r.logger.Debug("retrying embedding", "attempt", attempt, "max_retries", maxRetries) + } + + embeddings, err := r.embedder.EmbedSlice(paragraphs) + if err == nil { + // Validate embedding count + if len(embeddings) != len(paragraphs) { + return nil, fmt.Errorf("embedding count mismatch: expected %d, got %d", len(paragraphs), len(embeddings)) + } + return embeddings, nil + } + + lastErr = err + // Only retry for API embedder errors (network/timeout) + // For ONNX embedder, fail fast + if _, isAPI := r.embedder.(*APIEmbedder); !isAPI { + break + } + } + + return nil, fmt.Errorf("embedding failed after %d attempts: %w", maxRetries, lastErr) +} + +// writeBatchToStorage writes a single batch of vectors to the database +func (r *RAG) writeBatchToStorage(ctx context.Context, result batchResult, filename string) error { + if len(result.embeddings) == 0 { + // Empty batch, skip + return nil + } + + // Check context before starting select { - case LongJobStatusCh <- FinishedRAGStatus: + 
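// A select with a default case is a non-blocking poll: it returns
// ctx.Err() when the load has already been cancelled and otherwise falls
// through immediately, so no write transaction is opened for a dead job.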
case <-ctx.Done(): + return ctx.Err() default: - r.logger.Warn("LongJobStatusCh channel is full or closed, dropping status message", "message", FinishedRAGStatus) } + + // Build all vectors for batch write + vectors := make([]*models.VectorRow, 0, len(result.paragraphs)) + for j, text := range result.paragraphs { + vectors = append(vectors, &models.VectorRow{ + Embeddings: result.embeddings[j], + RawText: text, + Slug: fmt.Sprintf("%s_%d_%d", filename, result.batchIndex+1, j), + FileName: filename, + }) + } + + // Write all vectors in a single transaction + if err := r.storage.WriteVectors(vectors); err != nil { + r.logger.Error("failed to write vectors batch to DB", "error", err, "batch", result.batchIndex+1, "size", len(vectors)) + r.sendStatusNonBlocking(ErrRAGStatus) + return fmt.Errorf("failed to write vectors batch: %w", err) + } + + r.logger.Debug("wrote batch to db", "batch", result.batchIndex+1, "size", len(result.paragraphs)) return nil } @@ -250,22 +544,26 @@ func (r *RAG) LineToVector(line string) ([]float32, error) { return r.embedder.Embed(line) } -func (r *RAG) SearchEmb(emb *models.EmbeddingResp, limit int) ([]models.VectorRow, error) { +func (r *RAG) searchEmb(emb *models.EmbeddingResp, limit int) ([]models.VectorRow, error) { r.resetIdleTimer() return r.storage.SearchClosest(emb.Embedding, limit) } -func (r *RAG) SearchKeyword(query string, limit int) ([]models.VectorRow, error) { +func (r *RAG) searchKeyword(query string, limit int) ([]models.VectorRow, error) { r.resetIdleTimer() sanitized := sanitizeFTSQuery(query) return r.storage.SearchKeyword(sanitized, limit) } func (r *RAG) ListLoaded() ([]string, error) { + r.mu.RLock() + defer r.mu.RUnlock() return r.storage.ListFiles() } func (r *RAG) RemoveFile(filename string) error { + r.mu.Lock() + defer r.mu.Unlock() r.resetIdleTimer() return r.storage.RemoveEmbByFileName(filename) } @@ -454,6 +752,9 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V } func (r *RAG) SynthesizeAnswer(results []models.VectorRow, query string) (string, error) { + r.mu.RLock() + defer r.mu.RUnlock() + r.resetIdleTimer() if len(results) == 0 { return "No relevant information found in the vector database.", nil } @@ -482,7 +783,7 @@ func (r *RAG) SynthesizeAnswer(results []models.VectorRow, query string) (string Embedding: emb, Index: 0, } - topResults, err := r.SearchEmb(embResp, 1) + topResults, err := r.searchEmb(embResp, 1) if err != nil { r.logger.Error("failed to search for synthesis context", "error", err) return "", err @@ -509,6 +810,9 @@ func truncateString(s string, maxLen int) string { } func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { + r.mu.RLock() + defer r.mu.RUnlock() + r.resetIdleTimer() refined := r.RefineQuery(query) variations := r.GenerateQueryVariations(refined) @@ -525,7 +829,7 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { Embedding: emb, Index: 0, } - results, err := r.SearchEmb(embResp, limit*2) // Get more candidates + results, err := r.searchEmb(embResp, limit*2) // Get more candidates if err != nil { r.logger.Error("failed to search embeddings", "error", err, "query", q) continue @@ -543,7 +847,7 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { }) // Perform keyword search - kwResults, err := r.SearchKeyword(refined, limit*2) + kwResults, err := r.searchKeyword(refined, limit*2) if err != nil { r.logger.Warn("keyword search failed, using only embeddings", "error", err) kwResults = nil @@ -621,6 
+925,8 @@ func GetInstance() *RAG { } func (r *RAG) resetIdleTimer() { + r.idleMu.Lock() + defer r.idleMu.Unlock() if r.idleTimer != nil { r.idleTimer.Stop() } diff --git a/rag/storage.go b/rag/storage.go index 110cea2..1e6b013 100644 --- a/rag/storage.go +++ b/rag/storage.go @@ -102,6 +102,92 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { return nil } +// WriteVectors stores multiple embedding vectors in a single transaction +func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error { + if len(rows) == 0 { + return nil + } + // SQLite has limit of 999 parameters per statement, each row uses 4 parameters + const maxBatchSize = 200 // 200 * 4 = 800 < 999 + if len(rows) > maxBatchSize { + // Process in chunks + for i := 0; i < len(rows); i += maxBatchSize { + end := i + maxBatchSize + if end > len(rows) { + end = len(rows) + } + if err := vs.WriteVectors(rows[i:end]); err != nil { + return err + } + } + return nil + } + // All rows should have same embedding size (same model) + firstSize := len(rows[0].Embeddings) + for i, row := range rows { + if len(row.Embeddings) != firstSize { + return fmt.Errorf("embedding size mismatch: row %d has size %d, expected %d", i, len(row.Embeddings), firstSize) + } + } + tableName, err := vs.getTableName(rows[0].Embeddings) + if err != nil { + return err + } + + // Start transaction + tx, err := vs.sqlxDB.Beginx() + if err != nil { + return err + } + defer func() { + if err != nil { + tx.Rollback() + } + }() + + // Build batch insert for embeddings table + embeddingPlaceholders := make([]string, 0, len(rows)) + embeddingArgs := make([]any, 0, len(rows)*4) + for _, row := range rows { + embeddingPlaceholders = append(embeddingPlaceholders, "(?, ?, ?, ?)") + embeddingArgs = append(embeddingArgs, SerializeVector(row.Embeddings), row.Slug, row.RawText, row.FileName) + } + embeddingQuery := fmt.Sprintf( + "INSERT INTO %s (embeddings, slug, raw_text, filename) VALUES %s", + tableName, + strings.Join(embeddingPlaceholders, ", "), + ) + if _, err := tx.Exec(embeddingQuery, embeddingArgs...); err != nil { + vs.logger.Error("failed to write vectors batch", "error", err, "batch_size", len(rows)) + return err + } + + // Build batch insert for FTS table + ftsPlaceholders := make([]string, 0, len(rows)) + ftsArgs := make([]any, 0, len(rows)*4) + embeddingSize := len(rows[0].Embeddings) + for _, row := range rows { + ftsPlaceholders = append(ftsPlaceholders, "(?, ?, ?, ?)") + ftsArgs = append(ftsArgs, row.Slug, row.RawText, row.FileName, embeddingSize) + } + ftsQuery := fmt.Sprintf( + "INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES %s", + strings.Join(ftsPlaceholders, ", "), + ) + if _, err := tx.Exec(ftsQuery, ftsArgs...); err != nil { + vs.logger.Error("failed to write FTS batch", "error", err, "batch_size", len(rows)) + return err + } + + err = tx.Commit() + if err != nil { + vs.logger.Error("failed to commit transaction", "error", err) + return err + } + vs.logger.Debug("wrote vectors batch", "batch_size", len(rows)) + return nil +} + // getTableName determines which table to use based on embedding size func (vs *VectorStorage) getTableName(emb []float32) (string, error) { size := len(emb) -- cgit v1.2.3 From 014e297ae3497d07b5c46c234a9157db8dfce198 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Fri, 6 Mar 2026 19:57:44 +0300 Subject: Chore: linter complaints --- rag/embedder.go | 13 ++++++------- rag/rag.go | 26 ++------------------------ rag/storage.go | 20 ++++---------------- 3 files changed, 12 
insertions(+), 47 deletions(-) (limited to 'rag') diff --git a/rag/embedder.go b/rag/embedder.go index fd4cfa7..5a4aae0 100644 --- a/rag/embedder.go +++ b/rag/embedder.go @@ -213,7 +213,6 @@ func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Log if cudaLibPath == "" { fmt.Println("WARNING: CUDA provider library not found, will use CPU") } - emb := &ONNXEmbedder{ tokenizerPath: tokenizerPath, dims: dims, @@ -232,7 +231,6 @@ func (e *ONNXEmbedder) ensureInitialized() error { if e.session != nil { return nil } - // Load tokenizer lazily if e.tokenizer == nil { tok, err := pretrained.FromFile(e.tokenizerPath) @@ -241,7 +239,6 @@ func (e *ONNXEmbedder) ensureInitialized() error { } e.tokenizer = tok } - onnxInitOnce.Do(func() { onnxruntime_go.SetSharedLibraryPath(onnxLibPath) if err := onnxruntime_go.InitializeEnvironment(); err != nil { @@ -260,13 +257,14 @@ func (e *ONNXEmbedder) ensureInitialized() error { if !onnxReady { return errors.New("ONNX runtime not ready") } - // Create session options opts, err := onnxruntime_go.NewSessionOptions() if err != nil { return fmt.Errorf("failed to create session options: %w", err) } - defer opts.Destroy() + defer func() { + _ = opts.Destroy() + }() // Try to add CUDA provider useCUDA := cudaLibPath != "" @@ -276,7 +274,9 @@ func (e *ONNXEmbedder) ensureInitialized() error { e.logger.Warn("failed to create CUDA provider options, falling back to CPU", "error", err) useCUDA = false } else { - defer cudaOpts.Destroy() + defer func() { + _ = cudaOpts.Destroy() + }() if err := cudaOpts.Update(map[string]string{"device_id": "0"}); err != nil { e.logger.Warn("failed to update CUDA options, falling back to CPU", "error", err) useCUDA = false @@ -286,7 +286,6 @@ func (e *ONNXEmbedder) ensureInitialized() error { } } } - if useCUDA { e.logger.Info("Using CUDA for ONNX inference") } else { diff --git a/rag/rag.go b/rag/rag.go index 180ad50..3db4303 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -19,10 +19,7 @@ import ( "github.com/neurosnap/sentences/english" ) -const ( - // batchTimeout is the maximum time allowed for embedding a single batch - batchTimeout = 2 * time.Minute -) +const () var ( // Status messages for TUI integration @@ -102,10 +99,6 @@ func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) (*RAG, error) { return rag, nil } -func wordCounter(sentence string) int { - return len(strings.Split(strings.TrimSpace(sentence), " ")) -} - func createChunks(sentences []string, wordLimit, overlapWords uint32) []string { if len(sentences) == 0 { return nil @@ -181,7 +174,6 @@ func (r *RAG) LoadRAG(fpath string) error { func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { r.mu.Lock() defer r.mu.Unlock() - fileText, err := ExtractText(fpath) if err != nil { return err @@ -190,7 +182,6 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { // Send initial status (non-blocking with retry) r.sendStatusNonBlocking(LoadedFileRAGStatus) - tokenizer, err := english.NewSentenceTokenizer(nil) if err != nil { return err @@ -210,7 +201,6 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { if len(paragraphs) == 0 { return errors.New("no valid paragraphs found in file") } - totalBatches := (len(paragraphs) + r.cfg.RAGBatchSize - 1) / r.cfg.RAGBatchSize r.logger.Debug("starting parallel embedding", "total_batches", totalBatches, "batch_size", r.cfg.RAGBatchSize) @@ -223,7 +213,7 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { concurrency = 1 
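// Rationale for the ONNX clamp just below: the shared ONNX session
// serializes inference behind a mutex, so extra workers would only queue
// on that lock. The API embedder keeps up to NumCPU workers because its
// HTTP requests can overlap on the network (that overlap point is an
// inference, not something stated in the commit).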
} // If using ONNX embedder, limit concurrency to 1 due to mutex serialization - isONNX := false + var isONNX bool if _, isONNX = r.embedder.(*ONNXEmbedder); isONNX { concurrency = 1 } @@ -258,7 +248,6 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { // Ensure task channel is closed when this goroutine exits defer close(taskCh) r.logger.Debug("task distributor started", "total_batches", totalBatches) - for i := 0; i < totalBatches; i++ { start := i * r.cfg.RAGBatchSize end := start + r.cfg.RAGBatchSize @@ -304,7 +293,6 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { resultsBuffer := make(map[int]batchResult) filename := path.Base(fpath) batchesProcessed := 0 - for { select { case <-ctx.Done(): @@ -382,7 +370,6 @@ func (r *RAG) LoadRAGWithContext(ctx context.Context, fpath string) error { break } } - r.logger.Debug("finished writing vectors", "batches", batchesProcessed) r.resetIdleTimer() r.sendStatusNonBlocking(FinishedRAGStatus) @@ -406,7 +393,6 @@ func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan b } } }() - for task := range taskCh { select { case <-ctx.Done(): @@ -432,7 +418,6 @@ func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan b r.logger.Debug("worker sent empty batch", "worker", workerID, "batch", task.batchIndex) continue } - // Embed with retry for API embedder embeddings, err := r.embedWithRetry(ctx, task.paragraphs, 3) if err != nil { @@ -444,7 +429,6 @@ func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan b } return } - // Send result with context awareness select { case resultCh <- batchResult{ @@ -465,7 +449,6 @@ func (r *RAG) embeddingWorker(ctx context.Context, workerID int, taskCh <-chan b // embedWithRetry attempts embedding with exponential backoff for API embedder func (r *RAG) embedWithRetry(ctx context.Context, paragraphs []string, maxRetries int) ([][]float32, error) { var lastErr error - for attempt := 0; attempt < maxRetries; attempt++ { if attempt > 0 { // Exponential backoff @@ -473,13 +456,11 @@ func (r *RAG) embedWithRetry(ctx context.Context, paragraphs []string, maxRetrie if backoff > 10*time.Second { backoff = 10 * time.Second } - select { case <-time.After(backoff): case <-ctx.Done(): return nil, ctx.Err() } - r.logger.Debug("retrying embedding", "attempt", attempt, "max_retries", maxRetries) } @@ -499,7 +480,6 @@ func (r *RAG) embedWithRetry(ctx context.Context, paragraphs []string, maxRetrie break } } - return nil, fmt.Errorf("embedding failed after %d attempts: %w", maxRetries, lastErr) } @@ -509,7 +489,6 @@ func (r *RAG) writeBatchToStorage(ctx context.Context, result batchResult, filen // Empty batch, skip return nil } - // Check context before starting select { case <-ctx.Done(): @@ -534,7 +513,6 @@ func (r *RAG) writeBatchToStorage(ctx context.Context, result batchResult, filen r.sendStatusNonBlocking(ErrRAGStatus) return fmt.Errorf("failed to write vectors batch: %w", err) } - r.logger.Debug("wrote batch to db", "batch", result.batchIndex+1, "size", len(result.paragraphs)) return nil } diff --git a/rag/storage.go b/rag/storage.go index 1e6b013..62477b6 100644 --- a/rag/storage.go +++ b/rag/storage.go @@ -64,7 +64,6 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { return err } embeddingSize := len(row.Embeddings) - // Start transaction tx, err := vs.sqlxDB.Beginx() if err != nil { @@ -72,7 +71,7 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { } defer func() { if 
err != nil { - tx.Rollback() + _ = tx.Rollback() } }() @@ -86,14 +85,12 @@ func (vs *VectorStorage) WriteVector(row *models.VectorRow) error { vs.logger.Error("failed to write vector", "error", err, "slug", row.Slug) return err } - // Insert into FTS table ftsQuery := `INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES (?, ?, ?, ?)` if _, err := tx.Exec(ftsQuery, row.Slug, row.RawText, row.FileName, embeddingSize); err != nil { vs.logger.Error("failed to write to FTS table", "error", err, "slug", row.Slug) return err } - err = tx.Commit() if err != nil { vs.logger.Error("failed to commit transaction", "error", err) @@ -133,7 +130,6 @@ func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error { if err != nil { return err } - // Start transaction tx, err := vs.sqlxDB.Beginx() if err != nil { @@ -141,7 +137,7 @@ func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error { } defer func() { if err != nil { - tx.Rollback() + _ = tx.Rollback() } }() @@ -161,7 +157,6 @@ func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error { vs.logger.Error("failed to write vectors batch", "error", err, "batch_size", len(rows)) return err } - // Build batch insert for FTS table ftsPlaceholders := make([]string, 0, len(rows)) ftsArgs := make([]any, 0, len(rows)*4) @@ -170,15 +165,12 @@ func (vs *VectorStorage) WriteVectors(rows []*models.VectorRow) error { ftsPlaceholders = append(ftsPlaceholders, "(?, ?, ?, ?)") ftsArgs = append(ftsArgs, row.Slug, row.RawText, row.FileName, embeddingSize) } - ftsQuery := fmt.Sprintf( - "INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES %s", - strings.Join(ftsPlaceholders, ", "), - ) + ftsQuery := "INSERT INTO fts_embeddings (slug, raw_text, filename, embedding_size) VALUES " + + strings.Join(ftsPlaceholders, ", ") if _, err := tx.Exec(ftsQuery, ftsArgs...); err != nil { vs.logger.Error("failed to write FTS batch", "error", err, "batch_size", len(rows)) return err } - err = tx.Commit() if err != nil { vs.logger.Error("failed to commit transaction", "error", err) @@ -218,14 +210,12 @@ func (vs *VectorStorage) SearchClosest(query []float32, limit int) ([]models.Vec if err != nil { return nil, err } - querySQL := "SELECT embeddings, slug, raw_text, filename FROM " + tableName rows, err := vs.sqlxDB.Query(querySQL) if err != nil { return nil, err } defer rows.Close() - type SearchResult struct { vector models.VectorRow distance float32 @@ -241,7 +231,6 @@ func (vs *VectorStorage) SearchClosest(query []float32, limit int) ([]models.Vec vs.logger.Error("failed to scan row", "error", err) continue } - storedEmbeddings := DeserializeVector(embeddingsBlob) similarity := cosineSimilarity(query, storedEmbeddings) distance := 1 - similarity @@ -264,7 +253,6 @@ func (vs *VectorStorage) SearchClosest(query []float32, limit int) ([]models.Vec topResults = topResults[:limit] } } - results := make([]models.VectorRow, 0, len(topResults)) for _, result := range topResults { result.vector.Distance = result.distance -- cgit v1.2.3 From e0201886f80528790c3a05864da66bafdf07f9d8 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Mar 2026 08:50:50 +0300 Subject: Enha (rag): keep page open until user closes it --- rag/rag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index 3db4303..e47e3d6 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -24,7 +24,7 @@ const () var ( // Status messages for TUI integration LongJobStatusCh = make(chan string, 100) // 
Increased buffer size for parallel batch updates - FinishedRAGStatus = "finished loading RAG file; press Enter" + FinishedRAGStatus = "finished loading RAG file; press x to exit" LoadedFileRAGStatus = "loaded file" ErrRAGStatus = "some error occurred; failed to transfer data to vector db" ) -- cgit v1.2.3 From b6e802c12e37aeaf19bd449cf2877df5ae04d389 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Mar 2026 11:38:56 +0300 Subject: Enha (rag): bigger default batch --- rag/rag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index e47e3d6..ef85e7f 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -156,7 +156,7 @@ func createChunks(sentences []string, wordLimit, overlapWords uint32) []string { func sanitizeFTSQuery(query string) string { // Remove double quotes and other problematic characters for FTS5 - query = strings.ReplaceAll(query, "\"", " ") + // query = strings.ReplaceAll(query, "\"", " ") query = strings.ReplaceAll(query, "'", " ") query = strings.ReplaceAll(query, ";", " ") query = strings.ReplaceAll(query, "\\", " ") -- cgit v1.2.3 From e74ff8c03faaf156ad684eda48c8cfa35082ce1a Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Mar 2026 13:27:09 +0300 Subject: Enha (rag): semantic hybrid search --- rag/rag.go | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 164 insertions(+), 3 deletions(-) (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index ef85e7f..6f12dd9 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -12,6 +12,7 @@ import ( "regexp" "runtime" "sort" + "strconv" "strings" "sync" "time" @@ -27,8 +28,101 @@ var ( FinishedRAGStatus = "finished loading RAG file; press x to exit" LoadedFileRAGStatus = "loaded file" ErrRAGStatus = "some error occurred; failed to transfer data to vector db" + + // stopWords are common words that can be removed from queries when not part of phrases + stopWords = []string{"the", "a", "an", "and", "or", "but", "in", "on", "at", "to", "for", "of", "with", "by", "from", "up", "down", "left", "right", "about", "like", "such", "than", "then", "also", "too"} ) +// isStopWord checks if a word is in the stop words list +func isStopWord(word string) bool { + for _, stop := range stopWords { + if strings.EqualFold(word, stop) { + return true + } + } + return false +} + +// detectPhrases returns multi-word phrases from a query that should be treated as units +func detectPhrases(query string) []string { + words := strings.Fields(strings.ToLower(query)) + var phrases []string + + for i := 0; i < len(words)-1; i++ { + word1 := strings.Trim(words[i], ".,!?;:'\"()[]{}") + word2 := strings.Trim(words[i+1], ".,!?;:'\"()[]{}") + + // Skip if either word is a stop word or too short + if isStopWord(word1) || isStopWord(word2) || len(word1) < 2 || len(word2) < 2 { + continue + } + + // Check if this pair appears to be a meaningful phrase + // Simple heuristic: consecutive non-stop words of reasonable length + phrase := word1 + " " + word2 + phrases = append(phrases, phrase) + + // Optionally check for 3-word phrases + if i < len(words)-2 { + word3 := strings.Trim(words[i+2], ".,!?;:'\"()[]{}") + if !isStopWord(word3) && len(word3) >= 2 { + phrases = append(phrases, word1+" "+word2+" "+word3) + } + } + } + + return phrases +} + +// parseSlugIndices extracts batch and chunk indices from a slug +// slug format: filename_batch_chunk (e.g., "kjv_bible.epub_1786_0") +func parseSlugIndices(slug string) (batch, chunk int, ok bool) { + // Find the last two numbers separated 
by underscores + re := regexp.MustCompile(`_(\d+)_(\d+)$`) + matches := re.FindStringSubmatch(slug) + if matches == nil || len(matches) != 3 { + return 0, 0, false + } + batch, err1 := strconv.Atoi(matches[1]) + chunk, err2 := strconv.Atoi(matches[2]) + if err1 != nil || err2 != nil { + return 0, 0, false + } + return batch, chunk, true +} + +// areSlugsAdjacent returns true if two slugs are from the same file and have sequential indices +func areSlugsAdjacent(slug1, slug2 string) bool { + // Extract filename prefix (everything before the last underscore sequence) + parts1 := strings.Split(slug1, "_") + parts2 := strings.Split(slug2, "_") + if len(parts1) < 3 || len(parts2) < 3 { + return false + } + + // Compare filename prefixes (all parts except last two) + prefix1 := strings.Join(parts1[:len(parts1)-2], "_") + prefix2 := strings.Join(parts2[:len(parts2)-2], "_") + if prefix1 != prefix2 { + return false + } + + batch1, chunk1, ok1 := parseSlugIndices(slug1) + batch2, chunk2, ok2 := parseSlugIndices(slug2) + if !ok1 || !ok2 { + return false + } + + // Check if they're in same batch and chunks are sequential + if batch1 == batch2 && (chunk1 == chunk2+1 || chunk2 == chunk1+1) { + return true + } + + // Check if they're in sequential batches and chunk indices suggest continuity + // This is heuristic but useful for cross-batch adjacency + return false +} + type RAG struct { logger *slog.Logger store storage.FullRepo @@ -155,8 +249,8 @@ func createChunks(sentences []string, wordLimit, overlapWords uint32) []string { } func sanitizeFTSQuery(query string) string { - // Remove double quotes and other problematic characters for FTS5 - // query = strings.ReplaceAll(query, "\"", " ") + // Keep double quotes for FTS5 phrase matching + // Remove other problematic characters query = strings.ReplaceAll(query, "'", " ") query = strings.ReplaceAll(query, ";", " ") query = strings.ReplaceAll(query, "\\", " ") @@ -549,7 +643,6 @@ func (r *RAG) RemoveFile(filename string) error { var ( queryRefinementPattern = regexp.MustCompile(`(?i)(based on my (vector db|vector db|vector database|rags?|past (conversations?|chat|messages?))|from my (files?|documents?|data|information|memory)|search (in|my) (vector db|database|rags?)|rag search for)`) importantKeywords = []string{"project", "architecture", "code", "file", "chat", "conversation", "topic", "summary", "details", "history", "previous", "my", "user", "me"} - stopWords = []string{"the", "a", "an", "and", "or", "but", "in", "on", "at", "to", "for", "of", "with", "by", "from", "up", "down", "left", "right"} ) func (r *RAG) RefineQuery(query string) string { @@ -564,7 +657,20 @@ func (r *RAG) RefineQuery(query string) string { query = strings.ToLower(query) words := strings.Fields(query) if len(words) >= 3 { + // Detect phrases and protect words that are part of phrases + phrases := detectPhrases(query) + protectedWords := make(map[string]bool) + for _, phrase := range phrases { + for _, word := range strings.Fields(phrase) { + protectedWords[word] = true + } + } + + // Remove stop words that are not protected for _, stopWord := range stopWords { + if protectedWords[stopWord] { + continue + } wordPattern := `\b` + stopWord + `\b` re := regexp.MustCompile(wordPattern) query = re.ReplaceAllString(query, "") @@ -673,6 +779,45 @@ func (r *RAG) GenerateQueryVariations(query string) []string { if !strings.HasSuffix(query, " summary") { variations = append(variations, query+" summary") } + + // Add phrase-quoted variations for better FTS5 matching + phrases := 
detectPhrases(query) + if len(phrases) > 0 { + // Sort phrases by length descending to prioritize longer phrases + sort.Slice(phrases, func(i, j int) bool { + return len(phrases[i]) > len(phrases[j]) + }) + + // Create a version with all phrases quoted + quotedQuery := query + for _, phrase := range phrases { + // Only quote if not already quoted + quotedPhrase := "\"" + phrase + "\"" + if !strings.Contains(strings.ToLower(quotedQuery), strings.ToLower(quotedPhrase)) { + // Case-insensitive replacement of phrase with quoted version + re := regexp.MustCompile(`(?i)\b` + regexp.QuoteMeta(phrase) + `\b`) + quotedQuery = re.ReplaceAllString(quotedQuery, quotedPhrase) + } + } + if quotedQuery != query { + variations = append(variations, quotedQuery) + } + + // Also add individual phrase variations for short queries + if len(phrases) <= 3 { + for _, phrase := range phrases { + // Create a focused query with just this phrase quoted + // Keep original context but emphasize this phrase + quotedPhrase := "\"" + phrase + "\"" + re := regexp.MustCompile(`(?i)\b` + regexp.QuoteMeta(phrase) + `\b`) + focusedQuery := re.ReplaceAllString(query, quotedPhrase) + if focusedQuery != query && focusedQuery != quotedQuery { + variations = append(variations, focusedQuery) + } + } + } + } + return variations } @@ -704,6 +849,22 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V if row.FileName == "chat" || strings.Contains(strings.ToLower(row.FileName), "conversation") { score += 3 } + + // Cross-chunk adjacency bonus: if this chunk has adjacent siblings in results, + // boost score to promote narrative continuity + adjacentCount := 0 + for _, other := range results { + if other.Slug == row.Slug { + continue + } + if areSlugsAdjacent(row.Slug, other.Slug) { + adjacentCount++ + } + } + if adjacentCount > 0 { + // Bonus per adjacent chunk, but diminishing returns + score += float32(adjacentCount) * 4 + } distance := row.Distance - score/100 scored = append(scored, scoredResult{row: row, distance: distance}) } -- cgit v1.2.3 From a1b5f9cdc59938901123650fc0900067ac3447ca Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Sun, 8 Mar 2026 16:12:32 +0300 Subject: Enha: rag tuning and tests --- rag/rag.go | 136 ++++++++++++--- rag/rag_integration_test.go | 409 ++++++++++++++++++++++++++++++++++++++++++++ rag/rag_real_test.go | 131 ++++++++++++++ rag/rag_test.go | 155 +++++++++++++++++ rag/storage.go | 8 +- 5 files changed, 814 insertions(+), 25 deletions(-) create mode 100644 rag/rag_integration_test.go create mode 100644 rag/rag_real_test.go create mode 100644 rag/rag_test.go (limited to 'rag') diff --git a/rag/rag.go b/rag/rag.go index 6f12dd9..3a771d4 100644 --- a/rag/rag.go +++ b/rag/rag.go @@ -74,6 +74,22 @@ func detectPhrases(query string) []string { return phrases } +// countPhraseMatches returns the number of query phrases found in text +func countPhraseMatches(text, query string) int { + phrases := detectPhrases(query) + if len(phrases) == 0 { + return 0 + } + textLower := strings.ToLower(text) + count := 0 + for _, phrase := range phrases { + if strings.Contains(textLower, phrase) { + count++ + } + } + return count +} + // parseSlugIndices extracts batch and chunk indices from a slug // slug format: filename_batch_chunk (e.g., "kjv_bible.epub_1786_0") func parseSlugIndices(slug string) (batch, chunk int, ok bool) { @@ -120,6 +136,9 @@ func areSlugsAdjacent(slug1, slug2 string) bool { // Check if they're in sequential batches and chunk indices suggest continuity // This is 
heuristic but useful for cross-batch adjacency + if (batch1 == batch2+1 && chunk1 == 0) || (batch2 == batch1+1 && chunk2 == 0) { + return true + } return false } @@ -654,6 +673,10 @@ func (r *RAG) RefineQuery(query string) string { if len(query) <= 3 { return original } + // If query already contains double quotes, assume it's a phrase query and skip refinement + if strings.Contains(query, "\"") { + return original + } query = strings.ToLower(query) words := strings.Fields(query) if len(words) >= 3 { @@ -799,12 +822,13 @@ func (r *RAG) GenerateQueryVariations(query string) []string { quotedQuery = re.ReplaceAllString(quotedQuery, quotedPhrase) } } - if quotedQuery != query { - variations = append(variations, quotedQuery) - } + // Disabled malformed quoted query for now + // if quotedQuery != query { + // variations = append(variations, quotedQuery) + // } // Also add individual phrase variations for short queries - if len(phrases) <= 3 { + if len(phrases) <= 5 { for _, phrase := range phrases { // Create a focused query with just this phrase quoted // Keep original context but emphasize this phrase @@ -814,6 +838,8 @@ func (r *RAG) GenerateQueryVariations(query string) []string { if focusedQuery != query && focusedQuery != quotedQuery { variations = append(variations, focusedQuery) } + // Add the phrase alone (quoted) as a separate variation + variations = append(variations, quotedPhrase) } } } @@ -822,9 +848,11 @@ func (r *RAG) GenerateQueryVariations(query string) []string { } func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.VectorRow { + phraseCount := len(detectPhrases(query)) type scoredResult struct { - row models.VectorRow - distance float32 + row models.VectorRow + distance float32 + phraseMatches int } scored := make([]scoredResult, 0, len(results)) for i := range results { @@ -850,6 +878,14 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V score += 3 } + // Phrase match bonus: extra points for containing detected phrases + phraseMatches := countPhraseMatches(row.RawText, query) + if phraseMatches > 0 { + // Significant bonus per phrase to prioritize exact phrase matches + r.logger.Debug("phrase match bonus", "slug", row.Slug, "phraseMatches", phraseMatches, "score", score) + score += float32(phraseMatches) * 100 + } + // Cross-chunk adjacency bonus: if this chunk has adjacent siblings in results, // boost score to promote narrative continuity adjacentCount := 0 @@ -866,17 +902,27 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V score += float32(adjacentCount) * 4 } distance := row.Distance - score/100 - scored = append(scored, scoredResult{row: row, distance: distance}) + scored = append(scored, scoredResult{row: row, distance: distance, phraseMatches: phraseMatches}) } sort.Slice(scored, func(i, j int) bool { return scored[i].distance < scored[j].distance }) unique := make([]models.VectorRow, 0) seen := make(map[string]bool) + maxPerFile := 2 + if phraseCount > 0 { + maxPerFile = 10 + } fileCounts := make(map[string]int) for i := range scored { if !seen[scored[i].row.Slug] { - if fileCounts[scored[i].row.FileName] >= 2 { + // Allow phrase-matching chunks to bypass per-file limit (up to +5 extra) + allowed := fileCounts[scored[i].row.FileName] < maxPerFile + if !allowed && scored[i].phraseMatches > 0 { + // If chunk has phrase matches, allow extra slots (up to maxPerFile + 5) + allowed = fileCounts[scored[i].row.FileName] < maxPerFile+5 + } + if !allowed { continue } 
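// Worked example of the cap above: for a query with no detected phrases,
// maxPerFile is 2, so a file contributes at most 2 chunks. For a phrase
// query, maxPerFile is 10, and chunks that themselves match a phrase may
// take up to 5 extra slots, so a single file can contribute at most 15
// chunks, with slots 11-15 reserved for phrase-matching chunks.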
seen[scored[i].row.Slug] = true @@ -884,8 +930,8 @@ func (r *RAG) RerankResults(results []models.VectorRow, query string) []models.V unique = append(unique, scored[i].row) } } - if len(unique) > 10 { - unique = unique[:10] + if len(unique) > 30 { + unique = unique[:30] } return unique } @@ -954,6 +1000,7 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { r.resetIdleTimer() refined := r.RefineQuery(query) variations := r.GenerateQueryVariations(refined) + r.logger.Debug("query variations", "original", query, "refined", refined, "variations", variations) // Collect embedding search results from all variations var embResults []models.VectorRow @@ -985,17 +1032,35 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { return embResults[i].Distance < embResults[j].Distance }) - // Perform keyword search - kwResults, err := r.searchKeyword(refined, limit*2) - if err != nil { - r.logger.Warn("keyword search failed, using only embeddings", "error", err) - kwResults = nil + // Perform keyword search on all variations + var kwResults []models.VectorRow + seenKw := make(map[string]bool) + for _, q := range variations { + results, err := r.searchKeyword(q, limit) + if err != nil { + r.logger.Debug("keyword search failed for variation", "error", err, "query", q) + continue + } + for _, row := range results { + if !seenKw[row.Slug] { + seenKw[row.Slug] = true + kwResults = append(kwResults, row) + } + } } - // Sort keyword results by distance (already sorted by BM25 score) - // kwResults already sorted by distance (lower is better) + // Sort keyword results by distance (lower is better) + sort.Slice(kwResults, func(i, j int) bool { + return kwResults[i].Distance < kwResults[j].Distance + }) // Combine using Reciprocal Rank Fusion (RRF) - const rrfK = 60 + // Use smaller K for phrase-heavy queries to give more weight to top ranks + phraseCount := len(detectPhrases(query)) + rrfK := 60.0 + if phraseCount > 0 { + rrfK = 30.0 + } + r.logger.Debug("RRF parameters", "phraseCount", phraseCount, "rrfK", rrfK, "query", query) type scoredRow struct { row models.VectorRow score float64 @@ -1005,11 +1070,22 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { for rank, row := range embResults { score := 1.0 / (float64(rank) + rrfK) scoreMap[row.Slug] += score + if row.Slug == "kjv_bible.epub_1786_0" { + r.logger.Debug("target chunk embedding rank", "rank", rank, "score", score) + } } - // Add keyword results + // Add keyword results with weight boost when phrases are present + kwWeight := 1.0 + if phraseCount > 0 { + kwWeight = 100.0 + } + r.logger.Debug("keyword weight", "kwWeight", kwWeight, "phraseCount", phraseCount) for rank, row := range kwResults { - score := 1.0 / (float64(rank) + rrfK) + score := kwWeight * (1.0 / (float64(rank) + rrfK)) scoreMap[row.Slug] += score + if row.Slug == "kjv_bible.epub_1786_0" { + r.logger.Debug("target chunk keyword rank", "rank", rank, "score", score, "kwWeight", kwWeight, "rrfK", rrfK) + } // Ensure row exists in combined results if _, exists := seen[row.Slug]; !exists { embResults = append(embResults, row) @@ -1021,6 +1097,18 @@ func (r *RAG) Search(query string, limit int) ([]models.VectorRow, error) { score := scoreMap[row.Slug] scoredRows = append(scoredRows, scoredRow{row: row, score: score}) } + // Debug: log scores for target chunk and top chunks + if strings.Contains(strings.ToLower(query), "bald") || strings.Contains(strings.ToLower(query), "she bears") { + for _, sr := range scoredRows { 
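// Note: this probe is debug-only and hard-wired to the bible test query.
// The scores it prints come from the reciprocal rank fusion above: each
// list contributes 1/(rank+K) per chunk. For a phrase query, K = 30 and
// kwWeight = 100, so a keyword hit at rank 0 adds 100/30 ≈ 3.33 while an
// embedding hit at rank 0 adds only 1/30 ≈ 0.033; a single top keyword
// match therefore outweighs roughly a hundred top embedding appearances.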
+ if sr.row.Slug == "kjv_bible.epub_1786_0" { + r.logger.Debug("target chunk score", "slug", sr.row.Slug, "score", sr.score, "distance", sr.row.Distance) + } + } + // Log top 5 scores + for i := 0; i < len(scoredRows) && i < 5; i++ { + r.logger.Debug("top scored row", "rank", i+1, "slug", scoredRows[i].row.Slug, "score", scoredRows[i].score, "distance", scoredRows[i].row.Distance) + } + } // Sort by descending RRF score sort.Slice(scoredRows, func(i, j int) bool { return scoredRows[i].score > scoredRows[j].score @@ -1099,3 +1187,11 @@ func (r *RAG) Destroy() { } } } + +// SetEmbedderForTesting replaces the internal embedder with a mock. +// This function is only available when compiling with the "test" build tag. +func (r *RAG) SetEmbedderForTesting(e Embedder) { + r.mu.Lock() + defer r.mu.Unlock() + r.embedder = e +} diff --git a/rag/rag_integration_test.go b/rag/rag_integration_test.go new file mode 100644 index 0000000..f3405eb --- /dev/null +++ b/rag/rag_integration_test.go @@ -0,0 +1,409 @@ +package rag + +import ( + "fmt" + "gf-lt/config" + "gf-lt/models" + "gf-lt/storage" + "log/slog" + "testing" + + _ "github.com/glebarez/go-sqlite" + "github.com/jmoiron/sqlx" +) + +// mockEmbedder returns zero vectors of a fixed dimension. +type mockEmbedder struct { + dim int +} + +func (m *mockEmbedder) Embed(text string) ([]float32, error) { + vec := make([]float32, m.dim) + return vec, nil +} + +func (m *mockEmbedder) EmbedSlice(texts []string) ([][]float32, error) { + vecs := make([][]float32, len(texts)) + for i := range vecs { + vecs[i] = make([]float32, m.dim) + } + return vecs, nil +} + +// dummyStore implements storage.FullRepo with a minimal set of methods. +// Only DB() is used by VectorStorage; other methods return empty values. +type dummyStore struct { + db *sqlx.DB +} + +func (d dummyStore) DB() *sqlx.DB { return d.db } + +// ChatHistory methods +func (d dummyStore) ListChats() ([]models.Chat, error) { return nil, nil } +func (d dummyStore) GetChatByID(id uint32) (*models.Chat, error) { return nil, nil } +func (d dummyStore) GetChatByChar(char string) ([]models.Chat, error) { return nil, nil } +func (d dummyStore) GetLastChat() (*models.Chat, error) { return nil, nil } +func (d dummyStore) GetLastChatByAgent(agent string) (*models.Chat, error) { return nil, nil } +func (d dummyStore) UpsertChat(chat *models.Chat) (*models.Chat, error) { return chat, nil } +func (d dummyStore) RemoveChat(id uint32) error { return nil } +func (d dummyStore) ChatGetMaxID() (uint32, error) { return 0, nil } + +// Memories methods +func (d dummyStore) Memorise(m *models.Memory) (*models.Memory, error) { return m, nil } +func (d dummyStore) Recall(agent, topic string) (string, error) { return "", nil } +func (d dummyStore) RecallTopics(agent string) ([]string, error) { return nil, nil } + +// VectorRepo methods (not used but required by interface) +func (d dummyStore) WriteVector(row *models.VectorRow) error { return nil } +func (d dummyStore) SearchClosest(q []float32, limit int) ([]models.VectorRow, error) { + return nil, nil +} +func (d dummyStore) ListFiles() ([]string, error) { return nil, nil } +func (d dummyStore) RemoveEmbByFileName(filename string) error { return nil } + +var _ storage.FullRepo = dummyStore{} + +// setupTestRAG creates an in‑memory SQLite database, creates the necessary tables, +// inserts the provided chunks, and returns a RAG instance with a mock embedder. 
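// The blank import of github.com/glebarez/go-sqlite registers the pure-Go
// "sqlite" driver, so these tests run without cgo. One caveat in the setup
// below: slog.NewTextHandler is handed a nil writer, which is only safe
// while nothing is logged at the configured Error level; any Error-level
// log would panic on the nil io.Writer, so io.Discard would be the safer
// argument.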
+func setupTestRAG(t *testing.T, chunks []*models.VectorRow) (*RAG, error) { + t.Helper() + db, err := sqlx.Open("sqlite", ":memory:") + if err != nil { + return nil, fmt.Errorf("open in‑memory db: %w", err) + } + // Create the required tables (embeddings_768 and fts_embeddings). + // Use the same schema as production. + _, err = db.Exec(` + CREATE TABLE embeddings_768 ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + embeddings BLOB NOT NULL, + slug TEXT NOT NULL, + raw_text TEXT NOT NULL, + filename TEXT NOT NULL DEFAULT '' + ); + `) + if err != nil { + return nil, fmt.Errorf("create embeddings table: %w", err) + } + _, err = db.Exec(` + CREATE VIRTUAL TABLE fts_embeddings USING fts5( + slug UNINDEXED, + raw_text, + filename UNINDEXED, + embedding_size UNINDEXED, + tokenize='porter unicode61' + ); + `) + if err != nil { + return nil, fmt.Errorf("create FTS table: %w", err) + } + // Create a logger that discards output. + logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError})) + store := dummyStore{db: db} + // Create config with embedding dimension 768. + cfg := &config.Config{ + EmbedDims: 768, + RAGWordLimit: 250, + RAGOverlapWords: 25, + RAGBatchSize: 1, + } + // Create a RAG instance using New, which will create an embedder based on config. + // We'll override the embedder afterwards via reflection. + rag, err := New(logger, store, cfg) + if err != nil { + return nil, fmt.Errorf("create RAG: %w", err) + } + // Replace the embedder with our mock. + rag.SetEmbedderForTesting(&mockEmbedder{dim: cfg.EmbedDims}) + // Insert the provided chunks using the storage directly. + if len(chunks) > 0 { + // Ensure each chunk has embeddings of correct dimension (zero vector). + for _, chunk := range chunks { + if len(chunk.Embeddings) != cfg.EmbedDims { + chunk.Embeddings = make([]float32, cfg.EmbedDims) + } + } + err = rag.storage.WriteVectors(chunks) + if err != nil { + return nil, fmt.Errorf("write test chunks: %w", err) + } + } + return rag, nil +} + +// createTestChunks returns a slice of VectorRow representing the target chunk +// (kjv_bible.epub_1786_0), several bald‑related noise chunks, and unrelated chunks. +func createTestChunks() []*models.VectorRow { + // Target chunk: 2 Kings 2:23‑24 containing "bald head" and "two she bears". + targetRaw := `And he said, Ye shall not send. + + +2:17 And when they urged him till he was ashamed, he said, Send. They sent +therefore fifty men; and they sought three days, but found him not. + + +2:18 And when they came again to him, (for he tarried at Jericho,) he said unto +them, Did I not say unto you, Go not? 2:19 And the men of the city said unto +Elisha, Behold, I pray thee, the situation of this city is pleasant, as my lord +seeth: but the water is naught, and the ground barren. + + +2:20 And he said, Bring me a new cruse, and put salt therein. And they brought +it to him. + + +2:21 And he went forth unto the spring of the waters, and cast the salt in +there, and said, Thus saith the LORD, I have healed these waters; there shall +not be from thence any more death or barren land. + + +2:22 So the waters were healed unto this day, according to the saying of Elisha +which he spake. + + +2:23 And he went up from thence unto Bethel: and as he was going up by the way, +there came forth little children out of the city, and mocked him, and said unto +him, Go up, thou bald head; go up, thou bald head. + + +2:24 And he turned back, and looked on them, and cursed them in the name of the +LORD. 
And there came forth two she bears out of the wood, and tare forty and +two children of them.` + // Noise chunk 1: Leviticus containing "bald locust" + noise1Raw := `11:12 Whatsoever hath no fins nor scales in the waters, that shall be an +abomination unto you. + + +11:13 And these are they which ye shall have in abomination among the fowls; +they shall not be eaten, they are an abomination: the eagle, and the ossifrage, +and the ospray, 11:14 And the vulture, and the kite after his kind; 11:15 Every +raven after his kind; 11:16 And the owl, and the night hawk, and the cuckow, +and the hawk after his kind, 11:17 And the little owl, and the cormorant, and +the great owl, 11:18 And the swan, and the pelican, and the gier eagle, 11:19 +And the stork, the heron after her kind, and the lapwing, and the bat. + + +11:20 All fowls that creep, going upon all four, shall be an abomination unto +you. + + +11:21 Yet these may ye eat of every flying creeping thing that goeth upon all +four, which have legs above their feet, to leap withal upon the earth; 11:22 +Even these of them ye may eat; the locust after his kind, and the bald locust +after his kind, and the beetle after his kind, and the grasshopper after his +kind. + + +11:23 But all other flying creeping things, which have four feet, shall be an +abomination unto you. + + +11:24 And for these ye shall be unclean: whosoever toucheth the carcase of them +shall be unclean until the even.` + // Noise chunk 2: Leviticus containing "bald" + noise2Raw := `11:13 And these are they which ye shall have in abomination among the fowls; +they shall not be eaten, they are an abomination: the eagle, and the ossifrage, +and the ospray, 11:14 And the vulture, and the kite after his kind; 11:15 Every +raven after his kind; 11:16 And the owl, and the night hawk, and the cuckow, +and the hawk after his kind, 11:17 And the little owl, and the cormorant, and +the great owl, 11:18 And the swan, and the pelican, and the gier eagle, 11:19 +And the stork, the heron after her kind, and the lapwing, and the bat. + + +11:20 All fowls that creep, going upon all four, shall be an abomination unto +you. + + +11:21 Yet these may ye eat of every flying creeping thing that goeth upon all +four, which have legs above their feet, to leap withal upon the earth; 11:22 +Even these of them ye may eat; the locust after his kind, and the bald locust +after his kind, and the beetle after his kind, and the grasshopper after his +kind. + + +11:23 But all other flying creeping things, which have four feet, shall be an +abomination unto you. + + +11:24 And for these ye shall be unclean: whosoever toucheth the carcase of them +shall be unclean until the even.` + // Additional Leviticus noise chunks (simulating 28 bald-related chunks) + // Using variations of the same text with different slugs + leviticusSlugs := []string{ + "kjv_bible.epub_564_0", + "kjv_bible.epub_565_0", + "kjv_bible.epub_579_0", + "kjv_bible.epub_580_0", + "kjv_bible.epub_581_0", + "kjv_bible.epub_582_0", + "kjv_bible.epub_583_0", + "kjv_bible.epub_584_0", + "kjv_bible.epub_585_0", + "kjv_bible.epub_586_0", + "kjv_bible.epub_587_0", + "kjv_bible.epub_588_0", + "kjv_bible.epub_589_0", + "kjv_bible.epub_590_0", + } + leviticusTexts := []string{ + noise1Raw, + noise2Raw, + `13:40 And the man whose hair is fallen off his head, he is bald; yet is he +clean. 
+ + +13:41 And he that hath his hair fallen off from the part of his head toward his +face, he is forehead bald; yet is he clean.`, + `13:42 And if there be in the bald head, or bald forehead, a white reddish sore; +it is a leprosy sprung up in his bald head, or his bald forehead.`, + `13:43 Then the priest shall look upon it: and, behold, if the rising of the +sore be white reddish in his bald head, or in his bald forehead, as the leprosy +appearedh in the skin of the flesh;`, + `13:44 He is a leprous man, he is unclean: the priest shall pronounce him utterly +unclean; his plague is in his head.`, + `13:45 And the leper in whom the plague is, his clothes shall be rent, and his +head bare, and he shall put a covering upon his upper lip, and shall cry, +Unclean, unclean.`, + `13:46 All the days wherein the plague shall be in him he shall be defiled; he +is unclean: he shall dwell alone; without the camp shall his habitation be.`, + `13:47 The garment also that the plague of leprosy is in, whether it be a woollen +garment, or a linen garment;`, + `13:48 Whether it be in the warp, or woof; of linen, or of woollen; whether in a +skin, or in any thing made of skin;`, + `13:49 And if the plague be greenish or reddish in the garment, or in the skin, +either in the warp, or in the woof, or in any thing of skin; it is a plague of +leprosy, and shall be shewed unto the priest:`, + `13:50 And the priest shall look upon the plague, and shut up it that hath the +plague seven days:`, + `13:51 And he shall look on the plague on the seventh day: if the plague be spread +in the garment, either in the warp, or in the woof, or in a skin, or in any work +that is made of skin; the plague is a fretting leprosy; it is unclean.`, + `13:52 He shall therefore burn that garment, whether warp or woof, in woollen or +in linen, or any thing of skin, wherein the plague is: for it is a fretting +leprosy; it shall be burnt in the fire.`, + } + // Unrelated chunk 1: ghost_7.txt_777_0 + unrelated1Raw := `Doesn’t he have any pride as a hunter?! + +I didn’t see what other choice I had. I would just have to grovel and be ready to flee at any given moment. +The Hidden Curse clan house was in the central region of the imperial capital. It was a high-class area with extraordinary property values that hosted the residences of people like Lord Gladis. This district was near the Imperial Castle, though “near” was a +relative term as it was still a few kilometers away. + +The clan house was made of brick and conformed to an older style of architecture.` + // Unrelated chunk 2: ghost_7.txt_778_0 + unrelated2Raw := `I would just have to grovel and be ready to flee at any given moment. +The Hidden Curse clan house was in the central region of the imperial capital. It was a high-class area with extraordinary property values that hosted the residences of people like Lord Gladis. This district was near the Imperial Castle, though “near” was a +relative term as it was still a few kilometers away. + +The clan house was made of brick and conformed to an older style of architecture. Nearly everyone knew about this mansion and its clock tower. It stood tall over the neighboring mansions and rumor had it that you could see the whole capital from the top. 
+spoke to this clan’s renown and history that they were able to get away with building something that dwarfed the mansions of the nobility.`
+
+	chunks := []*models.VectorRow{
+		{
+			Slug:       "kjv_bible.epub_1786_0",
+			RawText:    targetRaw,
+			FileName:   "kjv_bible.epub",
+			Embeddings: nil, // will be filled with zero vector later
+		},
+	}
+	// Add Leviticus noise chunks
+	for i, slug := range leviticusSlugs {
+		text := leviticusTexts[i%len(leviticusTexts)]
+		chunks = append(chunks, &models.VectorRow{
+			Slug:       slug,
+			RawText:    text,
+			FileName:   "kjv_bible.epub",
+			Embeddings: nil,
+		})
+	}
+	// Add unrelated chunks
+	chunks = append(chunks,
+		&models.VectorRow{
+			Slug:       "ghost_7.txt_777_0",
+			RawText:    unrelated1Raw,
+			FileName:   "ghost_7.txt",
+			Embeddings: nil,
+		},
+		&models.VectorRow{
+			Slug:       "ghost_7.txt_778_0",
+			RawText:    unrelated2Raw,
+			FileName:   "ghost_7.txt",
+			Embeddings: nil,
+		},
+	)
+	return chunks
+}
+func assertTargetInTopN(t *testing.T, results []models.VectorRow, topN int) bool {
+	t.Helper()
+	for i, row := range results {
+		if i >= topN {
+			break
+		}
+		if row.Slug == "kjv_bible.epub_1786_0" {
+			return true
+		}
+	}
+	return false
+}
+
+func TestBiblicalQuery(t *testing.T) {
+	chunks := createTestChunks()
+	rag, err := setupTestRAG(t, chunks)
+	if err != nil {
+		t.Fatalf("setup failed: %v", err)
+	}
+	query := "bald prophet and two she bears"
+	results, err := rag.Search(query, 10)
+	if err != nil {
+		t.Fatalf("search failed: %v", err)
+	}
+	// The target chunk should be in the top results.
+	if !assertTargetInTopN(t, results, 5) {
+		t.Errorf("target chunk not found in top 5 results for query %q", query)
+		t.Logf("results slugs: %v", func() []string {
+			slugs := make([]string, len(results))
+			for i, r := range results {
+				slugs[i] = r.Slug
+			}
+			return slugs
+		}())
+	}
+}
+
+func TestQueryVariations(t *testing.T) {
+	chunks := createTestChunks()
+	rag, err := setupTestRAG(t, chunks)
+	if err != nil {
+		t.Fatalf("setup failed: %v", err)
+	}
+	tests := []struct {
+		name  string
+		query string
+		topN  int
+	}{
+		{"she bears", "she bears", 5},
+		{"bald head", "bald head", 5},
+		{"two she bears out of the wood", "two she bears out of the wood", 5},
+		{"bald prophet", "bald prophet", 10},
+		{"go up thou bald head", "\"go up thou bald head\"", 5},
+		{"two she bears", "\"two she bears\"", 5},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			results, err := rag.Search(tt.query, 10)
+			if err != nil {
+				t.Fatalf("search failed: %v", err)
+			}
+			if !assertTargetInTopN(t, results, tt.topN) {
+				t.Errorf("target chunk not found in top %d results for query %q", tt.topN, tt.query)
+				t.Logf("results slugs: %v", func() []string {
+					slugs := make([]string, len(results))
+					for i, r := range results {
+						slugs[i] = r.Slug
+					}
+					return slugs
+				}())
+			}
+		})
+	}
+}
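Note: setupTestRAG is not included in this patch, so the fixture's contract is only implied: rows are stored with Embeddings: nil and zero-filled later, which makes vector similarity a constant and leaves ranking entirely to the BM25/phrase-match path these tests exercise. A minimal sketch of a stub that would achieve the same thing, assuming the package's Embedder interface exposes Embed and EmbedSlice; zeroEmbedder and its wiring are illustrative, not part of the patch:

	package rag

	// zeroEmbedder returns all-zero vectors, so every chunk embeds identically
	// and only the lexical (BM25/phrase) scoring can differentiate results.
	type zeroEmbedder struct{ dims int }

	func (z zeroEmbedder) Embed(text string) ([]float32, error) {
		return make([]float32, z.dims), nil
	}

	func (z zeroEmbedder) EmbedSlice(lines []string) ([][]float32, error) {
		out := make([][]float32, len(lines))
		for i := range out {
			out[i] = make([]float32, z.dims)
		}
		return out, nil
	}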
"tokenizer.json") + dbPath := filepath.Join("..", "gflt.db") + if _, err := os.Stat(dbPath); os.IsNotExist(err) { + t.Skipf("database not found at %s; skipping real embedder test", dbPath) + } + cfg := &config.Config{ + EmbedModelPath: modelPath, + EmbedTokenizerPath: tokenizerPath, + EmbedDims: 768, + RAGWordLimit: 250, + RAGOverlapWords: 25, + RAGBatchSize: 1, + } + logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError})) + store := storage.NewProviderSQL(dbPath, logger) + if store == nil { + t.Fatal("failed to create storage provider") + } + rag, err := New(logger, store, cfg) + if err != nil { + t.Fatalf("failed to create RAG instance: %v", err) + } + t.Cleanup(func() { rag.Destroy() }) + + query := "bald prophet and two she bears" + results, err := rag.Search(query, 30) + if err != nil { + t.Fatalf("search failed: %v", err) + } + found := false + for i, row := range results { + if row.Slug == "kjv_bible.epub_1786_0" { + found = true + t.Logf("target chunk found at rank %d", i+1) + break + } + } + if !found { + t.Errorf("target chunk not found in search results for query %q", query) + t.Logf("results slugs:") + for i, r := range results { + t.Logf("%d: %s", i+1, r.Slug) + } + } +} + +func TestRealQueryVariations(t *testing.T) { + if testing.Short() { + t.Skip("skipping real embedder test in short mode") + } + modelPath := filepath.Join("..", "onnx", "embedgemma", "model_q4.onnx") + if _, err := os.Stat(modelPath); os.IsNotExist(err) { + t.Skipf("embedder model not found at %s; skipping real embedder test", modelPath) + } + tokenizerPath := filepath.Join("..", "onnx", "embedgemma", "tokenizer.json") + dbPath := filepath.Join("..", "gflt.db") + if _, err := os.Stat(dbPath); os.IsNotExist(err) { + t.Skipf("database not found at %s; skipping real embedder test", dbPath) + } + cfg := &config.Config{ + EmbedModelPath: modelPath, + EmbedTokenizerPath: tokenizerPath, + EmbedDims: 768, + RAGWordLimit: 250, + RAGOverlapWords: 25, + RAGBatchSize: 1, + } + logger := slog.New(slog.NewTextHandler(nil, &slog.HandlerOptions{Level: slog.LevelError})) + store := storage.NewProviderSQL(dbPath, logger) + if store == nil { + t.Fatal("failed to create storage provider") + } + rag, err := New(logger, store, cfg) + if err != nil { + t.Fatalf("failed to create RAG instance: %v", err) + } + t.Cleanup(func() { rag.Destroy() }) + + tests := []struct { + name string + query string + }{ + {"she bears", "she bears"}, + {"bald head", "bald head"}, + {"two she bears out of the wood", "two she bears out of the wood"}, + {"bald prophet", "bald prophet"}, + {"go up thou bald head", "\"go up thou bald head\""}, + {"two she bears", "\"two she bears\""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + results, err := rag.Search(tt.query, 10) + if err != nil { + t.Fatalf("search failed: %v", err) + } + found := false + for _, row := range results { + if row.Slug == "kjv_bible.epub_1786_0" { + found = true + break + } + } + if !found { + t.Errorf("target chunk not found for query %q", tt.query) + for i, r := range results { + t.Logf("%d: %s", i+1, r.Slug) + } + } + }) + } +} diff --git a/rag/rag_test.go b/rag/rag_test.go new file mode 100644 index 0000000..4944007 --- /dev/null +++ b/rag/rag_test.go @@ -0,0 +1,155 @@ +package rag + +import ( + "testing" +) + +func TestDetectPhrases(t *testing.T) { + tests := []struct { + query string + expect []string + }{ + { + query: "bald prophet and two she bears", + expect: []string{"bald prophet", "two she", "two she bears", 
"she bears"}, + }, + { + query: "she bears", + expect: []string{"she bears"}, + }, + { + query: "the quick brown fox", + expect: []string{"quick brown", "quick brown fox", "brown fox"}, + }, + { + query: "in the house", // stop words + expect: []string{}, // "in" and "the" are stop words + }, + { + query: "a", // short + expect: []string{}, + }, + } + + for _, tt := range tests { + got := detectPhrases(tt.query) + if len(got) != len(tt.expect) { + t.Errorf("detectPhrases(%q) = %v, want %v", tt.query, got, tt.expect) + continue + } + for i := range got { + if got[i] != tt.expect[i] { + t.Errorf("detectPhrases(%q) = %v, want %v", tt.query, got, tt.expect) + break + } + } + } +} + +func TestCountPhraseMatches(t *testing.T) { + tests := []struct { + text string + query string + expect int + }{ + { + text: "two she bears came out of the wood", + query: "she bears", + expect: 1, + }, + { + text: "bald head and she bears", + query: "bald prophet and two she bears", + expect: 1, // only "she bears" matches + }, + { + text: "no match here", + query: "she bears", + expect: 0, + }, + { + text: "she bears and bald prophet", + query: "bald prophet she bears", + expect: 2, // "she bears" and "bald prophet" + }, + } + + for _, tt := range tests { + got := countPhraseMatches(tt.text, tt.query) + if got != tt.expect { + t.Errorf("countPhraseMatches(%q, %q) = %d, want %d", tt.text, tt.query, got, tt.expect) + } + } +} + +func TestAreSlugsAdjacent(t *testing.T) { + tests := []struct { + slug1 string + slug2 string + expect bool + }{ + { + slug1: "kjv_bible.epub_1786_0", + slug2: "kjv_bible.epub_1787_0", + expect: true, + }, + { + slug1: "kjv_bible.epub_1787_0", + slug2: "kjv_bible.epub_1786_0", + expect: true, + }, + { + slug1: "kjv_bible.epub_1786_0", + slug2: "kjv_bible.epub_1788_0", + expect: false, + }, + { + slug1: "otherfile.txt_1_0", + slug2: "kjv_bible.epub_1786_0", + expect: false, + }, + { + slug1: "file_1_0", + slug2: "file_1_1", + expect: true, + }, + { + slug1: "file_1_0", + slug2: "file_2_0", // different batch + expect: true, // sequential batches with same chunk index are adjacent + }, + } + + for _, tt := range tests { + got := areSlugsAdjacent(tt.slug1, tt.slug2) + if got != tt.expect { + t.Errorf("areSlugsAdjacent(%q, %q) = %v, want %v", tt.slug1, tt.slug2, got, tt.expect) + } + } +} + +func TestParseSlugIndices(t *testing.T) { + tests := []struct { + slug string + wantBatch int + wantChunk int + wantOk bool + }{ + {"kjv_bible.epub_1786_0", 1786, 0, true}, + {"file_1_5", 1, 5, true}, + {"no_underscore", 0, 0, false}, + {"file_abc_def", 0, 0, false}, + {"file_123_456_extra", 456, 0, false}, // regex matches last two numbers + } + + for _, tt := range tests { + batch, chunk, ok := parseSlugIndices(tt.slug) + if ok != tt.wantOk { + t.Errorf("parseSlugIndices(%q) ok = %v, want %v", tt.slug, ok, tt.wantOk) + continue + } + if ok && (batch != tt.wantBatch || chunk != tt.wantChunk) { + t.Errorf("parseSlugIndices(%q) = (%d, %d), want (%d, %d)", tt.slug, batch, chunk, tt.wantBatch, tt.wantChunk) + } + } +} diff --git a/rag/storage.go b/rag/storage.go index 62477b6..a53f767 100644 --- a/rag/storage.go +++ b/rag/storage.go @@ -340,11 +340,9 @@ func (vs *VectorStorage) scanRows(rows *sql.Rows) ([]models.VectorRow, error) { continue } // Convert BM25 score to distance-like metric (lower is better) - // BM25 is negative, more negative is better. We'll normalize to positive distance. 
diff --git a/rag/storage.go b/rag/storage.go
index 62477b6..a53f767 100644
--- a/rag/storage.go
+++ b/rag/storage.go
@@ -340,11 +340,9 @@ func (vs *VectorStorage) scanRows(rows *sql.Rows) ([]models.VectorRow, error) {
 			continue
 		}
 		// Convert BM25 score to distance-like metric (lower is better)
-		// BM25 is negative, more negative is better. We'll normalize to positive distance.
-		distance := float32(-score) // Make positive (since score is negative)
-		if distance < 0 {
-			distance = 0
-		}
+		// BM25 is negative, more negative is better. Keep as negative.
+		distance := float32(score) // Keep negative, more negative is better
+		// No clamping needed; negative distances are fine
 		results = append(results, models.VectorRow{
 			Slug:    slug,
 			RawText: rawText,
-- cgit v1.2.3
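Note: the removed code negated the score, which inverted the ranking under "lower is better": a strong match at score -7.3 became distance 7.3 and sorted after a weak match at 2.1. Keeping the raw negative score preserves the intended order. A small self-contained illustration, assuming callers sort candidates by ascending distance; the field name Distance is a stand-in, as the actual VectorRow field is outside this hunk:

	package main

	import (
		"fmt"
		"sort"
	)

	type row struct {
		Slug     string
		Distance float32 // negative BM25 score; more negative = stronger match
	}

	func main() {
		rows := []row{
			{"weak", -2.1},
			{"strong", -7.3}, // best BM25 match
			{"mid", -4.8},
		}
		sort.Slice(rows, func(i, j int) bool { return rows[i].Distance < rows[j].Distance })
		fmt.Println(rows[0].Slug) // strong: the most negative score now ranks first
	}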