refactor: 统一项目名称从OpenFang到ZCLAW
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

重构所有代码和文档中的项目名称,将OpenFang统一更新为ZCLAW。包括:
- 配置文件中的项目名称
- 代码注释和文档引用
- 环境变量和路径
- 类型定义和接口名称
- 测试用例和模拟数据

同时优化部分代码结构,移除未使用的模块,并更新相关依赖项。
This commit is contained in:
iven
2026-03-27 07:36:03 +08:00
parent 4b08804aa9
commit 0d4fa96b82
226 changed files with 7288 additions and 5788 deletions

View File

@@ -3,7 +3,7 @@
//! Phase 1 of Intelligence Layer Migration:
//! Provides frontend API for memory storage and retrieval
use crate::memory::{PersistentMemory, PersistentMemoryStore, MemorySearchQuery, MemoryStats, generate_memory_id};
use crate::memory::{PersistentMemory, PersistentMemoryStore, MemorySearchQuery, MemoryStats, generate_memory_id, configure_embedding_client, is_embedding_configured, EmbedFn};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tauri::{AppHandle, State};
@@ -52,6 +52,9 @@ pub async fn memory_init(
}
/// Store a new memory
///
/// Writes to both PersistentMemoryStore (backward compat) and SqliteStorage (FTS5+Embedding).
/// SqliteStorage write failure is logged but does not block the operation.
#[tauri::command]
pub async fn memory_store(
entry: MemoryEntryInput,
@@ -64,28 +67,61 @@ pub async fn memory_store(
.ok_or_else(|| "Memory store not initialized. Call memory_init first.".to_string())?;
let now = Utc::now().to_rfc3339();
let id = generate_memory_id();
let memory = PersistentMemory {
id: generate_memory_id(),
agent_id: entry.agent_id,
memory_type: entry.memory_type,
content: entry.content,
id: id.clone(),
agent_id: entry.agent_id.clone(),
memory_type: entry.memory_type.clone(),
content: entry.content.clone(),
importance: entry.importance.unwrap_or(5),
source: entry.source.unwrap_or_else(|| "auto".to_string()),
tags: serde_json::to_string(&entry.tags.unwrap_or_default())
tags: serde_json::to_string(&entry.tags.clone().unwrap_or_default())
.unwrap_or_else(|_| "[]".to_string()),
conversation_id: entry.conversation_id,
conversation_id: entry.conversation_id.clone(),
created_at: now.clone(),
last_accessed_at: now,
access_count: 0,
embedding: None,
overview: None,
};
let id = memory.id.clone();
// Write to PersistentMemoryStore (primary)
store.store(&memory).await?;
// Also write to SqliteStorage via VikingStorage for FTS5 + Embedding search
if let Ok(storage) = crate::viking_commands::get_storage().await {
let memory_type = parse_memory_type(&entry.memory_type);
let keywords = entry.tags.unwrap_or_default();
let viking_entry = zclaw_growth::MemoryEntry::new(
&entry.agent_id,
memory_type,
&entry.memory_type,
entry.content,
)
.with_importance(entry.importance.unwrap_or(5) as u8)
.with_keywords(keywords);
match zclaw_growth::VikingStorage::store(storage.as_ref(), &viking_entry).await {
Ok(()) => tracing::debug!("[memory_store] Also stored in SqliteStorage"),
Err(e) => tracing::warn!("[memory_store] SqliteStorage write failed (non-blocking): {}", e),
}
}
Ok(id)
}
/// Parse a string memory_type into zclaw_growth::MemoryType
fn parse_memory_type(type_str: &str) -> zclaw_growth::MemoryType {
match type_str.to_lowercase().as_str() {
"preference" => zclaw_growth::MemoryType::Preference,
"knowledge" | "fact" | "task" | "todo" | "lesson" | "event" => zclaw_growth::MemoryType::Knowledge,
"skill" | "experience" => zclaw_growth::MemoryType::Experience,
"session" | "conversation" => zclaw_growth::MemoryType::Session,
_ => zclaw_growth::MemoryType::Knowledge,
}
}
/// Get a memory by ID
#[tauri::command]
pub async fn memory_get(
@@ -213,3 +249,223 @@ pub async fn memory_db_path(
Ok(store.path().to_string_lossy().to_string())
}
/// Configure embedding for PersistentMemoryStore (chat memory search)
///
/// Called alongside viking_configure_embedding to enable vector search in
/// the chat flow. Builds an `llm::EmbeddingClient` from the given provider
/// settings and installs it as the store's global embed function.
#[tauri::command]
pub async fn memory_configure_embedding(
    provider: String,
    api_key: String,
    model: Option<String>,
    endpoint: Option<String>,
) -> Result<bool, String> {
    // Build the embedding client once; the closure below shares it via Arc.
    let client = std::sync::Arc::new(crate::llm::EmbeddingClient::new(
        crate::llm::EmbeddingConfig {
            provider,
            api_key,
            endpoint,
            model,
        },
    ));
    // Adapt the client to the EmbedFn shape: each invocation clones the Arc,
    // owns the input text, and returns a boxed future yielding the vector.
    let embed_fn: EmbedFn = Arc::new(move |text: &str| {
        let client = client.clone();
        let owned = text.to_string();
        Box::pin(async move {
            let response = client.embed(&owned).await?;
            Ok(response.embedding)
        })
    });
    configure_embedding_client(embed_fn);
    tracing::info!("[MemoryCommands] Embedding configured for PersistentMemoryStore");
    Ok(true)
}
/// Check if embedding is configured for PersistentMemoryStore
///
/// Thin synchronous wrapper over `is_embedding_configured` so the frontend
/// can query embedding readiness without touching the store state.
#[tauri::command]
pub fn memory_is_embedding_configured() -> bool {
    is_embedding_configured()
}
/// Build layered memory context for chat prompt injection
///
/// Uses SqliteStorage (FTS5 + TF-IDF + Embedding) for high-quality semantic search,
/// with fallback to PersistentMemoryStore if Viking storage is unavailable.
///
/// Performs L0→L1→L2 progressive loading:
/// - L0: Search all matching memories (vector similarity when available)
/// - L1: Use overview/summary when available, fall back to truncated content
/// - L2: Full content only for top-ranked items
#[tauri::command]
pub async fn memory_build_context(
agent_id: String,
query: String,
max_tokens: Option<usize>,
state: State<'_, MemoryStoreState>,
) -> Result<BuildContextResult, String> {
let budget = max_tokens.unwrap_or(500);
// Try SqliteStorage (Viking) first — has FTS5 + TF-IDF + Embedding
let entries = match crate::viking_commands::get_storage().await {
Ok(storage) => {
let options = zclaw_growth::FindOptions {
scope: Some(format!("agent://{}", agent_id)),
limit: Some((budget / 25).max(8)),
min_similarity: Some(0.2),
};
match zclaw_growth::VikingStorage::find(storage.as_ref(), &query, options).await {
Ok(entries) => entries,
Err(e) => {
tracing::warn!("[memory_build_context] Viking search failed, falling back: {}", e);
Vec::new()
}
}
}
Err(_) => {
tracing::debug!("[memory_build_context] Viking storage unavailable, falling back to PersistentMemoryStore");
Vec::new()
}
};
// If Viking found results, use them (they have overview/embedding ranking)
if !entries.is_empty() {
let mut used_tokens = 0;
let mut items: Vec<String> = Vec::new();
let mut memories_used = 0;
for entry in &entries {
if used_tokens >= budget {
break;
}
// Prefer overview (L1 summary) over full content
let overview_str = entry.overview.as_deref().unwrap_or("");
let display_content = if !overview_str.is_empty() {
overview_str.to_string()
} else {
truncate_for_l1(&entry.content)
};
let item_tokens = estimate_tokens_text(&display_content);
if used_tokens + item_tokens > budget {
continue;
}
items.push(format!("- [{}] {}", entry.memory_type, display_content));
used_tokens += item_tokens;
memories_used += 1;
}
let system_prompt_addition = if items.is_empty() {
String::new()
} else {
format!("## 相关记忆\n{}", items.join("\n"))
};
return Ok(BuildContextResult {
system_prompt_addition,
total_tokens: used_tokens,
memories_used,
});
}
// Fallback: PersistentMemoryStore (LIKE-based search)
let state_guard = state.lock().await;
let store = state_guard
.as_ref()
.ok_or_else(|| "Memory store not initialized".to_string())?;
let limit = budget / 25;
let search_query = MemorySearchQuery {
agent_id: Some(agent_id.clone()),
query: Some(query.clone()),
limit: Some(limit.max(20)),
min_importance: Some(3),
..Default::default()
};
let memories = store.search(search_query).await?;
if memories.is_empty() {
return Ok(BuildContextResult {
system_prompt_addition: String::new(),
total_tokens: 0,
memories_used: 0,
});
}
// Build layered context with token budget
let mut used_tokens = 0;
let mut items: Vec<String> = Vec::new();
let mut memories_used = 0;
for memory in &memories {
if used_tokens >= budget {
break;
}
let display_content = if let Some(ref overview) = memory.overview {
if !overview.is_empty() {
overview.clone()
} else {
truncate_for_l1(&memory.content)
}
} else {
truncate_for_l1(&memory.content)
};
let item_tokens = estimate_tokens_text(&display_content);
if used_tokens + item_tokens > budget {
continue;
}
items.push(format!("- [{}] {}", memory.memory_type, display_content));
used_tokens += item_tokens;
memories_used += 1;
}
let system_prompt_addition = if items.is_empty() {
String::new()
} else {
format!("## 相关记忆\n{}", items.join("\n"))
};
Ok(BuildContextResult {
system_prompt_addition,
total_tokens: used_tokens,
memories_used,
})
}
/// Result of building layered memory context
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BuildContextResult {
    /// Markdown section ("## 相关记忆" plus bullet list) to append to the
    /// system prompt; empty string when no memories matched.
    pub system_prompt_addition: String,
    /// Estimated token cost of the injected context (heuristic estimate).
    pub total_tokens: usize,
    /// Number of memory items actually included within the budget.
    pub memories_used: usize,
}
/// Truncate content for L1 overview display (~50 tokens)
///
/// Keeps the first 100 characters (char-based, so CJK-safe) and appends
/// "..." only when something was actually cut off.
fn truncate_for_l1(content: &str) -> String {
    const CHAR_LIMIT: usize = 100; // ~50 tokens for mixed CJK/ASCII
    let mut chars = content.chars();
    let head: String = chars.by_ref().take(CHAR_LIMIT).collect();
    if chars.next().is_none() {
        // Everything fit within the limit — return the original text.
        content.to_string()
    } else {
        format!("{}...", head)
    }
}
/// Estimate token count for text
///
/// Heuristic: characters in the CJK Unified Ideographs block
/// (U+4E00..=U+9FFF) weigh ~1.5 tokens each, every other character ~0.4,
/// rounded up to a whole token.
fn estimate_tokens_text(text: &str) -> usize {
    let mut cjk_count = 0usize;
    let mut other_count = 0usize;
    for c in text.chars() {
        if ('\u{4E00}'..='\u{9FFF}').contains(&c) {
            cjk_count += 1;
        } else {
            other_count += 1;
        }
    }
    (cjk_count as f32 * 1.5 + other_count as f32 * 0.4).ceil() as usize
}