Files
zclaw_openfang/crates/zclaw-growth/src/retrieval/cache.rs
iven b7f3d94950
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
fix(presentation): 修复 presentation 模块类型错误和语法问题
- 创建 types.ts 定义完整的类型系统
- 重写 DocumentRenderer.tsx 修复语法错误
- 重写 QuizRenderer.tsx 修复语法错误
- 重写 PresentationContainer.tsx 添加类型守卫
- 重写 TypeSwitcher.tsx 修复类型引用
- 更新 index.ts 移除不存在的 ChartRenderer 导出

审计结果:
- 类型检查: 通过
- 单元测试: 222 passed
- 构建: 成功
2026-03-26 17:19:28 +08:00

366 lines
9.5 KiB
Rust

//! Memory Cache
//!
//! Provides caching for frequently accessed memories to improve
//! retrieval performance.
use crate::types::{MemoryEntry, MemoryType};
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
/// Cache entry with metadata
///
/// Wraps a `MemoryEntry` together with the bookkeeping fields used for
/// TTL expiry and eviction ranking.
struct CacheEntry {
    /// The memory entry
    entry: MemoryEntry,
    /// Last access time; doubles as the TTL reference point in `get`
    last_accessed: Instant,
    /// Access count; lower counts are evicted first
    access_count: u32,
}
/// Cache key for efficient lookups
///
/// Built from a memory URI via `From<&MemoryEntry>`.
/// NOTE(review): not referenced anywhere in the visible portion of this
/// file — presumably used elsewhere; confirm before removing.
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
struct CacheKey {
    /// Agent that owns the memory (first URI path segment)
    agent_id: String,
    /// Memory type copied from the entry
    memory_type: MemoryType,
    /// Category (third URI path segment)
    category: String,
}
impl From<&MemoryEntry> for CacheKey {
    /// Derive a cache key from a memory entry's URI.
    ///
    /// The URI is expected to look like `agent://{agent_id}/{...}/{category}`;
    /// missing segments fall back to the empty string.
    fn from(entry: &MemoryEntry) -> Self {
        // Strip the scheme, then walk the path segments lazily.
        let path = entry.uri.trim_start_matches("agent://");
        let mut segments = path.split('/');
        let agent_id = segments.next().unwrap_or("").to_string();
        // Skip one segment so we land on the third path component.
        let category = segments.nth(1).unwrap_or("").to_string();
        Self {
            agent_id,
            memory_type: entry.memory_type,
            category,
        }
    }
}
/// Memory cache configuration
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of entries held at once; inserting past this
    /// bound evicts the coldest entry first
    pub max_entries: usize,
    /// Time-to-live: an entry untouched for longer than this is treated
    /// as absent on its next lookup
    pub ttl: Duration,
    /// Enable/disable caching; when false, `get` always returns `None`
    /// and `put` is a no-op
    pub enabled: bool,
}
impl Default for CacheConfig {
    /// Sensible defaults: up to 1000 entries, one-hour TTL, caching enabled.
    fn default() -> Self {
        const DEFAULT_MAX_ENTRIES: usize = 1000;
        const DEFAULT_TTL_SECS: u64 = 3600; // one hour
        Self {
            max_entries: DEFAULT_MAX_ENTRIES,
            ttl: Duration::from_secs(DEFAULT_TTL_SECS),
            enabled: true,
        }
    }
}
/// Memory cache for hot memories
///
/// An async, TTL- and capacity-bounded cache keyed by memory URI.
/// All interior state sits behind tokio `RwLock`s, so a shared
/// `&MemoryCache` can be used from many tasks concurrently.
pub struct MemoryCache {
    /// Cache storage, keyed by the entry's URI
    cache: RwLock<HashMap<String, CacheEntry>>,
    /// Configuration (capacity, TTL, enable flag)
    config: CacheConfig,
    /// Running hit/miss/eviction counters
    stats: RwLock<CacheStats>,
}
/// Cache statistics
///
/// Monotonically increasing counters for the lifetime of the cache;
/// note that `MemoryCache::clear()` drops entries but does NOT reset
/// these counters.
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    /// Total cache hits
    pub hits: u64,
    /// Total cache misses
    pub misses: u64,
    /// Total entries evicted
    pub evictions: u64,
}
impl MemoryCache {
    /// Create a new memory cache with the given configuration.
    pub fn new(config: CacheConfig) -> Self {
        Self {
            cache: RwLock::new(HashMap::new()),
            config,
            stats: RwLock::new(CacheStats::default()),
        }
    }

    /// Create a cache with the default configuration.
    pub fn default_config() -> Self {
        Self::new(CacheConfig::default())
    }

    /// Look up a memory by URI.
    ///
    /// Returns `None` when caching is disabled, the URI is absent, or the
    /// entry's TTL has elapsed. Expired entries are removed eagerly and
    /// counted as both a miss and an eviction, so `hit_rate()` reflects
    /// every real lookup. A hit refreshes the recency/frequency metadata
    /// used by eviction.
    pub async fn get(&self, uri: &str) -> Option<MemoryEntry> {
        if !self.config.enabled {
            return None;
        }
        let mut cache = self.cache.write().await;
        if let Some(cached) = cache.get_mut(uri) {
            if cached.last_accessed.elapsed() > self.config.ttl {
                // Expired: drop now rather than waiting for capacity
                // pressure, and record the lookup as a miss (previously
                // this path updated no statistics at all).
                cache.remove(uri);
                let mut stats = self.stats.write().await;
                stats.misses += 1;
                stats.evictions += 1;
                return None;
            }
            // Refresh the metadata that evict_lru() ranks by.
            cached.last_accessed = Instant::now();
            cached.access_count += 1;
            let entry = cached.entry.clone();
            let mut stats = self.stats.write().await;
            stats.hits += 1;
            return Some(entry);
        }
        let mut stats = self.stats.write().await;
        stats.misses += 1;
        None
    }

    /// Insert (or replace) a memory, keyed by its URI.
    ///
    /// When the cache is at capacity and the URI is new, the coldest
    /// entry is evicted first. Replacing an existing URI never evicts:
    /// the map does not grow in that case (the original code would evict
    /// an unrelated entry on every replacement at capacity).
    pub async fn put(&self, entry: MemoryEntry) {
        if !self.config.enabled {
            return;
        }
        let mut cache = self.cache.write().await;
        // Evict only for genuinely new keys; see doc comment.
        if cache.len() >= self.config.max_entries && !cache.contains_key(&entry.uri) {
            self.evict_lru(&mut cache).await;
        }
        cache.insert(
            entry.uri.clone(),
            CacheEntry {
                entry,
                last_accessed: Instant::now(),
                access_count: 0,
            },
        );
    }

    /// Remove a memory from cache (no-op if absent).
    pub async fn remove(&self, uri: &str) {
        let mut cache = self.cache.write().await;
        cache.remove(uri);
    }

    /// Drop every cached entry. Statistics are NOT reset.
    pub async fn clear(&self) {
        let mut cache = self.cache.write().await;
        cache.clear();
    }

    /// Evict the coldest entry and bump the eviction counter.
    ///
    /// Despite the name, the policy is frequency-first (lowest
    /// `access_count`) with recency (`last_accessed`) as the tie-break —
    /// i.e. LFU with an LRU tie-break. The eviction test depends on this
    /// ordering, so it is kept as-is.
    async fn evict_lru(&self, cache: &mut HashMap<String, CacheEntry>) {
        let victim = cache
            .iter()
            .min_by_key(|(_, v)| (v.access_count, v.last_accessed))
            .map(|(k, _)| k.clone());
        if let Some(key) = victim {
            cache.remove(&key);
            let mut stats = self.stats.write().await;
            stats.evictions += 1;
        }
    }

    /// Snapshot of the running hit/miss/eviction counters.
    pub async fn stats(&self) -> CacheStats {
        self.stats.read().await.clone()
    }

    /// Fraction of lookups that hit, in [0.0, 1.0]; 0.0 before any lookup.
    pub async fn hit_rate(&self) -> f32 {
        let stats = self.stats.read().await;
        let total = stats.hits + stats.misses;
        if total == 0 {
            return 0.0;
        }
        stats.hits as f32 / total as f32
    }

    /// Current number of cached entries.
    pub async fn size(&self) -> usize {
        self.cache.read().await.len()
    }

    /// Warm up the cache by inserting the given entries.
    pub async fn warmup(&self, entries: Vec<MemoryEntry>) {
        for entry in entries {
            self.put(entry).await;
        }
    }

    /// Return up to `limit` entries, most-accessed first (for preloading).
    pub async fn get_hot_entries(&self, limit: usize) -> Vec<MemoryEntry> {
        let cache = self.cache.read().await;
        // Rank by reference first so only the surviving `limit` entries
        // are cloned (previously every entry was cloned before truncate).
        let mut ranked: Vec<&CacheEntry> = cache.values().collect();
        ranked.sort_unstable_by(|a, b| b.access_count.cmp(&a.access_count));
        ranked.truncate(limit);
        ranked.iter().map(|c| c.entry.clone()).collect()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::MemoryType;

    /// Round-trip: an inserted entry comes back intact when looked up by URI.
    #[tokio::test]
    async fn test_cache_put_and_get() {
        let cache = MemoryCache::default_config();
        let entry = MemoryEntry::new(
            "test-agent",
            MemoryType::Preference,
            "style",
            "User prefers concise responses".to_string(),
        );
        cache.put(entry.clone()).await;
        let retrieved = cache.get(&entry.uri).await;
        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap().content, "User prefers concise responses");
    }

    /// A lookup for an unknown URI returns None and counts one miss.
    #[tokio::test]
    async fn test_cache_miss() {
        let cache = MemoryCache::default_config();
        let retrieved = cache.get("nonexistent").await;
        assert!(retrieved.is_none());
        let stats = cache.stats().await;
        assert_eq!(stats.misses, 1);
    }

    /// remove() makes a previously cached URI miss on the next lookup.
    #[tokio::test]
    async fn test_cache_remove() {
        let cache = MemoryCache::default_config();
        let entry = MemoryEntry::new(
            "test-agent",
            MemoryType::Preference,
            "style",
            "test".to_string(),
        );
        cache.put(entry.clone()).await;
        cache.remove(&entry.uri).await;
        let retrieved = cache.get(&entry.uri).await;
        assert!(retrieved.is_none());
    }

    /// clear() empties the cache entirely.
    #[tokio::test]
    async fn test_cache_clear() {
        let cache = MemoryCache::default_config();
        let entry = MemoryEntry::new(
            "test-agent",
            MemoryType::Preference,
            "style",
            "test".to_string(),
        );
        cache.put(entry).await;
        cache.clear().await;
        let size = cache.size().await;
        assert_eq!(size, 0);
    }

    /// One hit plus one miss yields matching counters and a 0.5 hit rate.
    #[tokio::test]
    async fn test_cache_stats() {
        let cache = MemoryCache::default_config();
        let entry = MemoryEntry::new(
            "test-agent",
            MemoryType::Preference,
            "style",
            "test".to_string(),
        );
        cache.put(entry.clone()).await;
        // Hit
        cache.get(&entry.uri).await;
        // Miss
        cache.get("nonexistent").await;
        let stats = cache.stats().await;
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 1);
        let hit_rate = cache.hit_rate().await;
        // f32 comparison via tolerance, not exact equality.
        assert!((hit_rate - 0.5).abs() < 0.001);
    }

    /// At capacity, inserting a new key evicts the coldest entry.
    /// Note: the policy is least-accessed first (LFU, with recency as
    /// tie-break), so the never-read entry2 is the victim here.
    #[tokio::test]
    async fn test_cache_eviction() {
        let config = CacheConfig {
            max_entries: 2,
            ttl: Duration::from_secs(3600),
            enabled: true,
        };
        let cache = MemoryCache::new(config);
        let entry1 = MemoryEntry::new("test", MemoryType::Preference, "1", "1".to_string());
        let entry2 = MemoryEntry::new("test", MemoryType::Preference, "2", "2".to_string());
        let entry3 = MemoryEntry::new("test", MemoryType::Preference, "3", "3".to_string());
        cache.put(entry1.clone()).await;
        cache.put(entry2.clone()).await;
        // Access entry1 to make it hot
        cache.get(&entry1.uri).await;
        // Add entry3: should evict entry2 (lowest access count)
        cache.put(entry3).await;
        let size = cache.size().await;
        assert_eq!(size, 2);
        let stats = cache.stats().await;
        assert_eq!(stats.evictions, 1);
    }

    /// get_hot_entries() ranks by access count, most-accessed first.
    #[tokio::test]
    async fn test_get_hot_entries() {
        let cache = MemoryCache::default_config();
        let entry1 = MemoryEntry::new("test", MemoryType::Preference, "1", "1".to_string());
        let entry2 = MemoryEntry::new("test", MemoryType::Preference, "2", "2".to_string());
        cache.put(entry1.clone()).await;
        cache.put(entry2.clone()).await;
        // Access entry1 multiple times
        cache.get(&entry1.uri).await;
        cache.get(&entry1.uri).await;
        let hot = cache.get_hot_entries(10).await;
        assert_eq!(hot.len(), 2);
        // entry1 should be first (more accesses)
        assert_eq!(hot[0].uri, entry1.uri);
    }
}