Files
iven 1cf3f585d3 refactor(store): split gatewayStore into specialized domain stores
Major restructuring:
- Split monolithic gatewayStore into 5 focused stores:
  - connectionStore: WebSocket connection and gateway lifecycle
  - configStore: quickConfig, workspaceInfo, MCP services
  - agentStore: clones, usage stats, agent management
  - handStore: hands, approvals, triggers, hand runs
  - workflowStore: workflows, workflow runs, execution

- Update all components to use new stores with selector pattern
- Remove
2026-03-20 22:14:13 +08:00

386 lines
11 KiB
TypeScript

/**
* Vector Memory - Semantic search wrapper for L4 self-evolution
*
* Provides vector-based semantic search over agent memories using OpenViking.
* This enables finding conceptually similar memories rather than just keyword matches.
*
* Key capabilities:
* - Semantic search: Find memories by meaning, not just keywords
* - Relevance scoring: Get similarity scores for search results
* - Context-aware: Search at different context levels (L0/L1/L2)
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.4.2
*/
import { getVikingClient, type VikingHttpClient } from './viking-client';
import { getMemoryManager, type MemoryEntry, type MemoryType } from './agent-memory';
// === Types ===
/** One semantic-search hit: the memory reconstructed from a Viking result plus its relevance. */
export interface VectorSearchResult {
memory: MemoryEntry; // Memory entry rebuilt from the Viking find() result
score: number; // Relevance score as reported by Viking
uri: string; // Viking URI the result was found at
highlights?: string[]; // Matched snippets, when present in result metadata
}
/** Tuning options for semanticSearch(); unset fields fall back to the service's VectorMemoryConfig defaults. */
export interface VectorSearchOptions {
topK?: number; // Number of results to return (default: 10)
minScore?: number; // Minimum relevance score (default: 0.3, per DEFAULT_VECTOR_CONFIG)
types?: MemoryType[]; // Filter by memory types (applied client-side, after the topK limit)
agentId?: string; // Filter by agent (scopes the search to memories/<agentId>)
level?: 'L0' | 'L1' | 'L2'; // Context level to search
}
/**
* A cached embedding vector.
* NOTE(review): currently unused at runtime — getEmbedding() always returns null
* because OpenViking computes embeddings server-side; kept for future extensibility.
*/
export interface VectorEmbedding {
id: string; // Cache identifier
vector: number[]; // Raw embedding components
dimension: number; // Length of `vector`
model: string; // Embedding model that produced the vector
}
/** Runtime configuration for VectorMemoryService. */
export interface VectorMemoryConfig {
enabled: boolean; // Master switch; when false every operation short-circuits to an empty/null result
defaultTopK: number; // Result count used when VectorSearchOptions.topK is unset
defaultMinScore: number; // Score floor used when VectorSearchOptions.minScore is unset
defaultLevel: 'L0' | 'L1' | 'L2'; // Context level used when VectorSearchOptions.level is unset
embeddingModel: string; // Embedding model name (informational; Viking handles embedding internally)
cacheEmbeddings: boolean; // Whether getEmbedding() consults the in-memory embedding cache
}
// === Default Config ===
// Baseline configuration; constructor overrides are spread on top of this.
export const DEFAULT_VECTOR_CONFIG: VectorMemoryConfig = {
enabled: true,
defaultTopK: 10,
defaultMinScore: 0.3, // Note: lower than the 0.5 sometimes quoted elsewhere; this value is authoritative
defaultLevel: 'L1',
embeddingModel: 'text-embedding-ada-002',
cacheEmbeddings: true,
};
// === Vector Memory Service ===
/**
 * Semantic-search wrapper over the OpenViking client plus the agent memory
 * store. All public methods degrade gracefully: when the feature is disabled
 * or the Viking client is unavailable they log and return an empty/zero/null
 * result rather than throwing.
 */
export class VectorMemoryService {
  private config: VectorMemoryConfig;
  private vikingClient: VikingHttpClient | null = null;
  // Keyed by hashText(content). NOTE(review): nothing currently writes to this
  // cache — getEmbedding() cannot compute embeddings locally — so reads always
  // miss; retained for future extensibility.
  private embeddingCache: Map<string, VectorEmbedding> = new Map();

  constructor(config?: Partial<VectorMemoryConfig>) {
    this.config = { ...DEFAULT_VECTOR_CONFIG, ...config };
    // Intentionally not awaited: initializeClient() catches its own errors and
    // every public entry point lazily retries initialization. `void` marks the
    // deliberately floating promise (fixes an unmarked floating promise).
    void this.initializeClient();
  }

  /** Try to obtain the Viking HTTP client; warns and leaves it null on failure. */
  private async initializeClient(): Promise<void> {
    try {
      this.vikingClient = getVikingClient();
    } catch (error) {
      console.warn('[VectorMemory] Failed to initialize Viking client:', error);
    }
  }

  // === Semantic Search ===

  /**
   * Perform semantic search over memories using OpenViking's built-in vector
   * search.
   *
   * @param query Free-text query, matched by meaning rather than keywords.
   * @param options Result count, score floor, and type/agent/level filters.
   * @returns Matching memories with relevance scores; empty array when the
   *          feature is disabled, the client is unavailable, or the call fails.
   */
  async semanticSearch(
    query: string,
    options?: VectorSearchOptions
  ): Promise<VectorSearchResult[]> {
    if (!this.config.enabled) {
      console.warn('[VectorMemory] Semantic search is disabled');
      return [];
    }
    if (!this.vikingClient) {
      // Lazy retry in case construction-time initialization failed.
      await this.initializeClient();
      if (!this.vikingClient) {
        console.warn('[VectorMemory] Viking client not available');
        return [];
      }
    }
    try {
      const results = await this.vikingClient.find(query, {
        limit: options?.topK ?? this.config.defaultTopK,
        minScore: options?.minScore ?? this.config.defaultMinScore,
        level: options?.level ?? this.config.defaultLevel,
        // Scope to one agent's memory subtree when requested.
        scope: options?.agentId ? `memories/${options.agentId}` : undefined,
      });
      // Convert each Viking FindResult into a VectorSearchResult.
      const searchResults: VectorSearchResult[] = [];
      for (const result of results) {
        const memory: MemoryEntry = {
          id: this.extractMemoryId(result.uri),
          agentId: options?.agentId ?? 'unknown',
          content: result.content,
          type: this.inferMemoryType(result.uri),
          // NOTE(review): this maps HIGHER scores to LOWER importance. If
          // Viking scores are similarities (as the minScore floor suggests)
          // rather than distances, the inversion looks backwards — confirm
          // the score semantics before changing.
          importance: Math.round((1 - result.score) * 10),
          createdAt: new Date().toISOString(),
          source: 'auto',
          tags: this.metadataStringArray(result.metadata, 'tags') ?? [],
          lastAccessedAt: new Date().toISOString(),
          accessCount: 0,
        };
        searchResults.push({
          memory,
          score: result.score,
          uri: result.uri,
          highlights: this.metadataStringArray(result.metadata, 'highlights'),
        });
      }
      // Type filter runs AFTER the topK limit, so filtered queries may return
      // fewer than topK results.
      if (options?.types && options.types.length > 0) {
        return searchResults.filter(r => options.types!.includes(r.memory.type));
      }
      return searchResults;
    } catch (error) {
      console.error('[VectorMemory] Semantic search failed:', error);
      return [];
    }
  }

  /**
   * Find memories semantically similar to an existing memory, using that
   * memory's content as the query text. The seed memory is excluded from the
   * results.
   */
  async findSimilar(
    memoryId: string,
    options?: Omit<VectorSearchOptions, 'types'>
  ): Promise<VectorSearchResult[]> {
    const requested = options?.topK ?? 10;
    // Look the seed memory up so its content can serve as the query.
    const memoryManager = getMemoryManager();
    const memories = await memoryManager.getAll(options?.agentId ?? 'default');
    const memory = memories.find((m: MemoryEntry) => m.id === memoryId);
    if (!memory) {
      console.warn(`[VectorMemory] Memory not found: ${memoryId}`);
      return [];
    }
    // Over-fetch by one so dropping the seed below does not shrink the set.
    const results = await this.semanticSearch(memory.content, {
      ...options,
      topK: requested + 1,
    });
    // Exclude the seed, then cap at the requested count — without the slice,
    // callers received topK + 1 results whenever the seed was not among the
    // hits (fix).
    return results.filter(r => r.memory.id !== memoryId).slice(0, requested);
  }

  /** Find memories related to a topic/concept (thin alias for semanticSearch). */
  async findByConcept(
    concept: string,
    options?: VectorSearchOptions
  ): Promise<VectorSearchResult[]> {
    return this.semanticSearch(concept, options);
  }

  /**
   * Cluster memories by semantic similarity using a greedy seed-and-expand
   * strategy: each unclustered memory seeds a cluster of its nearest
   * neighbours. Returns at most `clusterCount` groups; singleton memories
   * (no similar neighbours) are omitted.
   */
  async clusterMemories(
    agentId: string,
    clusterCount: number = 5
  ): Promise<VectorSearchResult[][]> {
    const memoryManager = getMemoryManager();
    const memories = await memoryManager.getAll(agentId);
    if (memories.length === 0) {
      return [];
    }
    const clusters: VectorSearchResult[][] = [];
    const usedIds = new Set<string>();
    for (const memory of memories) {
      if (usedIds.has(memory.id)) continue;
      // Sequential on purpose: each iteration depends on usedIds from the last.
      const similar = await this.findSimilar(memory.id, { agentId, topK: clusterCount });
      if (similar.length > 0) {
        const cluster: VectorSearchResult[] = [
          // Seed gets a synthetic perfect-score entry.
          { memory, score: 1.0, uri: `memory://${memory.id}` },
          ...similar.filter(r => !usedIds.has(r.memory.id)),
        ];
        cluster.forEach(r => usedIds.add(r.memory.id));
        clusters.push(cluster);
        if (clusters.length >= clusterCount) break;
      }
    }
    return clusters;
  }

  // === Embedding Operations ===

  /**
   * Get or compute an embedding for a text.
   * OpenViking computes embeddings server-side, so this currently only serves
   * cache hits (and the cache is never populated) — it always returns null.
   * Provided for future extensibility.
   */
  async getEmbedding(text: string): Promise<VectorEmbedding | null> {
    if (!this.config.enabled) return null;
    const cacheKey = this.hashText(text);
    if (this.config.cacheEmbeddings && this.embeddingCache.has(cacheKey)) {
      return this.embeddingCache.get(cacheKey)!;
    }
    console.warn('[VectorMemory] Direct embedding computation not available - OpenViking handles this internally');
    return null;
  }

  /**
   * Approximate the similarity of two texts: search for text1 and, if any hit
   * textually contains (or is contained by) text2, return that hit's score.
   * Returns 0 when disabled, unavailable, on error, or when no overlap found.
   */
  async computeSimilarity(text1: string, text2: string): Promise<number> {
    if (!this.config.enabled || !this.vikingClient) return 0;
    try {
      const results = await this.vikingClient.find(text1, { limit: 20 });
      for (const result of results) {
        if (result.content.includes(text2) || text2.includes(result.content)) {
          return result.score;
        }
      }
      return 0;
    } catch {
      // Best-effort metric: treat any failure as "no similarity".
      return 0;
    }
  }

  // === Utility Methods ===

  /** Check whether vector search is enabled and the Viking client responds. */
  async isAvailable(): Promise<boolean> {
    if (!this.config.enabled) return false;
    if (!this.vikingClient) {
      await this.initializeClient();
    }
    return this.vikingClient?.isAvailable() ?? false;
  }

  /** Get a defensive copy of the current configuration. */
  getConfig(): VectorMemoryConfig {
    return { ...this.config };
  }

  /** Merge partial updates into the configuration. */
  updateConfig(updates: Partial<VectorMemoryConfig>): void {
    this.config = { ...this.config, ...updates };
  }

  /** Clear the embedding cache. */
  clearCache(): void {
    this.embeddingCache.clear();
  }

  // === Private Helpers ===

  /** Safely pull a string[] field out of untyped Viking result metadata. */
  private metadataStringArray(metadata: unknown, key: string): string[] | undefined {
    const value = (metadata as Record<string, unknown> | undefined)?.[key];
    return Array.isArray(value) ? (value as string[]) : undefined;
  }

  /** Extract the memory ID (last path segment) from a Viking URI such as memories/agent-id/memory-id. */
  private extractMemoryId(uri: string): string {
    const parts = uri.split('/');
    return parts[parts.length - 1] || uri;
  }

  /** Infer the memory type from URI substrings; defaults to 'fact'. */
  private inferMemoryType(uri: string): MemoryType {
    if (uri.includes('preference')) return 'preference';
    if (uri.includes('fact')) return 'fact';
    if (uri.includes('task')) return 'task';
    if (uri.includes('lesson')) return 'lesson';
    return 'fact'; // Default
  }

  /** Cheap non-cryptographic hash (djb2-style) used only for cache keys. */
  private hashText(text: string): string {
    let hash = 0;
    for (let i = 0; i < text.length; i++) {
      const char = text.charCodeAt(i);
      hash = ((hash << 5) - hash) + char;
      hash = hash & hash; // Force back into 32-bit int range
    }
    return hash.toString(16);
  }
}
// === Singleton ===
let _instance: VectorMemoryService | null = null;

/** Lazily create and return the shared VectorMemoryService instance. */
export function getVectorMemory(): VectorMemoryService {
  _instance ??= new VectorMemoryService();
  return _instance;
}

/** Drop the shared instance so the next getVectorMemory() builds a fresh one (useful in tests). */
export function resetVectorMemory(): void {
  _instance = null;
}
// === Helper Functions ===
/**
 * Convenience wrapper: run a semantic search through the shared
 * VectorMemoryService singleton.
 */
export async function semanticSearch(
  query: string,
  options?: VectorSearchOptions
): Promise<VectorSearchResult[]> {
  const service = getVectorMemory();
  return service.semanticSearch(query, options);
}
/**
 * Convenience wrapper: find memories similar to `memoryId` via the shared
 * singleton, optionally scoped to a single agent.
 */
export async function findSimilarMemories(
  memoryId: string,
  agentId?: string
): Promise<VectorSearchResult[]> {
  const service = getVectorMemory();
  return service.findSimilar(memoryId, { agentId });
}
/**
 * Convenience wrapper: report whether vector search is enabled and the
 * backing Viking client is reachable.
 */
export async function isVectorSearchAvailable(): Promise<boolean> {
  const service = getVectorMemory();
  return service.isAvailable();
}