Files
zclaw_openfang/desktop/src-tauri/src/intelligence_hooks.rs
iven 1e65b56a0f
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
fix(identity): 3 项根因级修复 — Agent ID 映射 + user_profile 读取 + 用户画像 fallback
Issue 2: IdentityFile 枚举补全 UserProfile 变体
- get_file()/propose_change()/approve_proposal() 补全 match arm
- identity_get_file/identity_propose_change Tauri 命令支持 user_profile

Issue 1: Agent ID 映射机制
- 新增 resolveKernelAgentId() 工具函数 (带缓存)
- ButlerPanel 使用 kernel UUID 替代 SaaS relay "1" 查询 VikingStorage

Issue 3: 用户画像 fallback 注入
- build_system_prompt 改为 async,identity user_profile 为默认值时
  从 VikingStorage preferences 路径查询最近 5 条记忆作为 fallback
- intelligence_hooks 调用处同步加 .await

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-16 17:07:38 +08:00

443 lines
16 KiB
Rust

//! Intelligence Hooks - Pre/Post conversation integration
//!
//! Bridges the intelligence layer modules (identity, memory, heartbeat, reflection)
//! into the kernel's chat flow at the Tauri command boundary.
//!
//! Architecture: kernel_commands.rs → intelligence_hooks → intelligence modules → Viking/Kernel
use tracing::{debug, warn};
use std::sync::Arc;
use crate::intelligence::identity::IdentityManagerState;
use crate::intelligence::heartbeat::HeartbeatEngineState;
use crate::intelligence::reflection::{MemoryEntryForAnalysis, ReflectionEngineState};
use zclaw_runtime::driver::LlmDriver;
/// Run pre-conversation intelligence hooks.
///
/// Builds an identity-enhanced system prompt (SOUL.md + instructions) and
/// appends cross-session continuity context (pain revisit, experience hints).
///
/// Returns the combined prompt text. Identity-prompt failures are logged and
/// degrade to an empty prompt rather than aborting the conversation.
///
/// NOTE: Memory context injection is NOT done here — it is handled by
/// `MemoryMiddleware.before_completion()` in the Kernel's middleware chain.
/// Previously, both paths injected memories, causing duplicate injection.
pub async fn pre_conversation_hook(
    agent_id: &str,
    // Renamed from `_user_message`: the parameter IS used (continuity lookup
    // below), so the "unused" underscore prefix was misleading.
    user_message: &str,
    identity_state: &IdentityManagerState,
) -> Result<String, String> {
    // Build identity-enhanced system prompt (SOUL.md + instructions).
    // Memory context is injected by MemoryMiddleware in the kernel middleware
    // chain, not here, to avoid duplicate injection.
    let enhanced_prompt = match build_identity_prompt(agent_id, "", identity_state).await {
        Ok(prompt) => prompt,
        Err(e) => {
            warn!(
                "[intelligence_hooks] Failed to build identity prompt for agent {}: {}",
                agent_id, e
            );
            String::new()
        }
    };
    // Cross-session continuity: unresolved pain points and recent experiences.
    let continuity_context = build_continuity_context(agent_id, user_message).await;
    let mut result = enhanced_prompt;
    if !continuity_context.is_empty() {
        result.push_str(&continuity_context);
    }
    Ok(result)
}
/// Run post-conversation intelligence hooks.
///
/// Pipeline (all steps are best-effort; failures are logged, never propagated):
/// 1.   Record the interaction for the heartbeat engine.
/// 1.5  Detect personality-adjustment signals and persist the updated config.
/// 1.6  Detect pain signals in the user message and record pain points.
/// 1.7  Evaluate rule-based learning triggers (zero LLM cost) and store
///      lightweight trigger experiences.
/// 2.   Record the conversation for the reflection engine and run reflection
///      once the threshold is reached.
///
/// `_heartbeat_state` is currently unused — heartbeat recording goes through
/// the free function `heartbeat::record_interaction` instead.
pub async fn post_conversation_hook(
    agent_id: &str,
    // Renamed from `_user_message`: the parameter is used throughout the
    // hook, so the "unused" underscore prefix was misleading.
    user_message: &str,
    _heartbeat_state: &HeartbeatEngineState,
    reflection_state: &ReflectionEngineState,
    llm_driver: Option<Arc<dyn LlmDriver>>,
) {
    // Step 1: Record interaction for heartbeat
    crate::intelligence::heartbeat::record_interaction(agent_id);
    debug!("[intelligence_hooks] Recorded interaction for agent: {}", agent_id);

    // Step 1.5: Detect personality adjustment signals
    if !user_message.is_empty() {
        let config = crate::intelligence::personality_detector::load_personality_config(agent_id);
        let adjustments = crate::intelligence::personality_detector::detect_personality_signals(
            user_message, &config,
        );
        if !adjustments.is_empty() {
            let new_config = crate::intelligence::personality_detector::apply_personality_adjustments(
                &config, &adjustments,
            );
            crate::intelligence::personality_detector::save_personality_config(agent_id, &new_config);
            for adj in &adjustments {
                debug!(
                    "[intelligence_hooks] Personality adjusted: {} {} -> {} (trigger: {})",
                    adj.dimension, adj.from_value, adj.to_value, adj.trigger
                );
            }
        }
    }

    // Step 1.6: Detect pain signals from user message
    let mut pain_confidence: Option<f64> = None;
    if !user_message.is_empty() {
        let messages = vec![zclaw_types::Message::user(user_message)];
        if let Some(analysis) =
            crate::intelligence::pain_aggregator::analyze_for_pain_signals(&messages)
        {
            let severity_str = match analysis.severity {
                crate::intelligence::pain_aggregator::PainSeverity::High => "high",
                crate::intelligence::pain_aggregator::PainSeverity::Medium => "medium",
                crate::intelligence::pain_aggregator::PainSeverity::Low => "low",
            };
            match crate::intelligence::pain_aggregator::butler_record_pain_point(
                agent_id.to_string(),
                "default_user".to_string(),
                analysis.summary,
                analysis.category,
                severity_str.to_string(),
                user_message.to_string(),
                analysis.evidence,
            )
            .await
            {
                Ok(pain) => {
                    debug!(
                        "[intelligence_hooks] Pain point recorded: {} (confidence: {:.2}, count: {})",
                        pain.summary, pain.confidence, pain.occurrence_count
                    );
                    // Feed the recorded confidence into the learning-trigger
                    // evaluation below.
                    pain_confidence = Some(pain.confidence);
                }
                Err(e) => {
                    warn!("[intelligence_hooks] Failed to record pain point: {}", e);
                }
            }
        }
    }

    // Step 1.7: Evaluate learning triggers (rule-based, zero LLM cost)
    if !user_message.is_empty() {
        let trigger_ctx = crate::intelligence::triggers::TriggerContext {
            user_message: user_message.to_string(),
            tool_call_count: 0,
            conversation_messages: vec![user_message.to_string()],
            pain_confidence,
            industry_keywords: crate::viking_commands::get_industry_keywords_flat(),
        };
        let signals = crate::intelligence::triggers::evaluate_triggers(&trigger_ctx);
        if !signals.is_empty() {
            let signal_names: Vec<&str> = signals
                .iter()
                .map(crate::intelligence::triggers::signal_description)
                .collect();
            debug!(
                "[intelligence_hooks] Learning triggers activated: {:?}",
                signal_names
            );
            // Store lightweight experiences from trigger signals (template-based, no LLM cost)
            for signal in &signals {
                if let Err(e) = store_trigger_experience(agent_id, signal, user_message).await {
                    warn!(
                        "[intelligence_hooks] Failed to store trigger experience: {}",
                        e
                    );
                }
            }
        }
    }

    // Step 2: Record conversation for reflection
    let mut engine = reflection_state.lock().await;
    // Apply restored state on first call (peek-then-pop to avoid race with getHistory)
    if let Some(restored_state) = crate::intelligence::reflection::peek_restored_state(agent_id) {
        engine.apply_restored_state(restored_state);
        // Pop after successful apply to prevent re-processing
        crate::intelligence::reflection::pop_restored_state(agent_id);
    }
    if let Some(restored_result) = crate::intelligence::reflection::peek_restored_result(agent_id) {
        engine.apply_restored_result(restored_result);
        crate::intelligence::reflection::pop_restored_result(agent_id);
    }
    engine.record_conversation();
    debug!(
        "[intelligence_hooks] Conversation count updated for agent: {}",
        agent_id
    );
    if engine.should_reflect() {
        debug!(
            "[intelligence_hooks] Reflection threshold reached for agent: {}",
            agent_id
        );
        // Query actual memories from VikingStorage for reflection analysis;
        // degrade to an empty set on failure rather than skipping reflection.
        let memories = match query_memories_for_reflection(agent_id).await {
            Ok(m) => m,
            Err(e) => {
                warn!(
                    "[intelligence_hooks] Failed to query memories for reflection (agent {}): {}",
                    agent_id, e
                );
                Vec::new()
            }
        };
        debug!(
            "[intelligence_hooks] Fetched {} memories for reflection",
            memories.len()
        );
        let reflection_result = engine.reflect(agent_id, &memories, llm_driver.clone()).await;
        debug!(
            "[intelligence_hooks] Reflection completed: {} patterns, {} suggestions",
            reflection_result.patterns.len(),
            reflection_result.improvements.len()
        );
    }
}
/// Search VikingStorage for memories relevant to `user_message` and format
/// them into a prompt-ready context string.
///
/// NOTE: Memory injection is now handled by MemoryMiddleware in the Kernel
/// middleware chain. This function is kept as a utility for ad-hoc queries.
///
/// Returns an empty string when nothing matches; otherwise a "## 相关记忆"
/// section capped by a rough token estimate (~500 tokens).
#[allow(dead_code)]
async fn build_memory_context(
    agent_id: &str,
    user_message: &str,
) -> Result<String, String> {
    // Viking storage provides FTS5 + TF-IDF + embedding search.
    let storage = crate::viking_commands::get_storage().await?;
    let options = zclaw_growth::FindOptions {
        scope: Some(format!("agent://{}", agent_id)),
        limit: Some(8),
        min_similarity: Some(0.2),
    };
    // `find` lives on the VikingStorage trait — dispatch via the trait path.
    let entries: Vec<zclaw_growth::MemoryEntry> =
        zclaw_growth::VikingStorage::find(storage.as_ref(), user_message, options)
            .await
            .map_err(|e| format!("Memory search failed: {}", e))?;
    if entries.is_empty() {
        return Ok(String::new());
    }
    const MAX_TOKENS: usize = 500;
    let mut out = String::from("## 相关记忆\n\n");
    let mut used: usize = 0;
    for entry in &entries {
        // Prefer the L1 overview summary; fall back to the full content when
        // the overview is absent or empty.
        let text = match entry.overview.as_deref() {
            Some(s) if !s.is_empty() => s,
            _ => entry.content.as_str(),
        };
        // Char-safe truncation (CJK text must never be sliced by byte index).
        let snippet = if text.chars().count() > 100 {
            format!("{}...", text.chars().take(100).collect::<String>())
        } else {
            text.to_string()
        };
        // Crude token estimate: 1 per ASCII char, 2 per non-ASCII char.
        let cost: usize = snippet
            .chars()
            .map(|c| if c.is_ascii() { 1usize } else { 2 })
            .sum();
        if used + cost > MAX_TOKENS {
            break;
        }
        out.push_str(&format!("- [{}] {}\n", entry.memory_type, snippet));
        used += cost;
    }
    Ok(out)
}
/// Build the identity-enhanced system prompt for `agent_id`.
///
/// `memory_context` is forwarded to the identity manager only when non-empty.
async fn build_identity_prompt(
    agent_id: &str,
    memory_context: &str,
    identity_state: &IdentityManagerState,
) -> Result<String, String> {
    // IdentityManagerState is Arc<tokio::sync::Mutex<AgentIdentityManager>>;
    // a tokio mutex guard may safely be held across the await below.
    let mut manager = identity_state.lock().await;
    let context = (!memory_context.is_empty()).then_some(memory_context);
    Ok(manager.build_system_prompt(agent_id, context).await)
}
/// Fetch up to 50 recent memories for `agent_id` from VikingStorage and
/// convert them into `MemoryEntryForAnalysis` for the reflection engine.
///
/// Unlike `build_memory_context`, results are not token-truncated — the
/// reflection engine analyzes full content.
async fn query_memories_for_reflection(
    agent_id: &str,
) -> Result<Vec<MemoryEntryForAnalysis>, String> {
    let storage = crate::viking_commands::get_storage().await?;
    let options = zclaw_growth::FindOptions {
        scope: Some(format!("agent://{}", agent_id)),
        limit: Some(50),
        // No similarity cutoff: fetch everything within the agent scope.
        min_similarity: Some(0.0),
    };
    let found: Vec<zclaw_growth::MemoryEntry> =
        zclaw_growth::VikingStorage::find(storage.as_ref(), "", options)
            .await
            .map_err(|e| format!("Memory query for reflection failed: {}", e))?;
    Ok(found
        .into_iter()
        .map(|entry| MemoryEntryForAnalysis {
            memory_type: entry.memory_type.to_string(),
            content: entry.content,
            importance: entry.importance as usize,
            access_count: entry.access_count as usize,
            tags: entry.keywords,
        })
        .collect())
}
/// Build cross-session continuity context for the current conversation.
///
/// Injects context carried over from previous sessions:
/// - Active pain points (High severity only, top 3)
/// - Past experiences relevant to the user's input
///
/// Output is wrapped in `<butler-context>` XML fencing; an empty string is
/// returned when there is nothing to inject. All lookups are best-effort —
/// storage errors silently produce no context.
async fn build_continuity_context(agent_id: &str, user_message: &str) -> String {
    let mut sections: Vec<String> = Vec::new();

    // 1. Active pain points
    if let Ok(pains) =
        crate::intelligence::pain_aggregator::butler_list_pain_points(agent_id.to_string()).await
    {
        // High-severity only, capped at 3, rendered directly to lines.
        let pain_lines: Vec<String> = pains
            .iter()
            .filter(|p| {
                matches!(
                    p.severity,
                    crate::intelligence::pain_aggregator::PainSeverity::High
                )
            })
            .take(3)
            .map(|p| {
                format!(
                    "- {} (出现{}次, 置信度 {}%)",
                    xml_escape(&p.summary),
                    p.occurrence_count,
                    (p.confidence * 100.0) as u8
                )
            })
            .collect();
        if !pain_lines.is_empty() {
            sections.push(format!(
                "<active-pain>\n{}\n</active-pain>",
                pain_lines.join("\n")
            ));
        }
    }

    // 2. Relevant experiences — only for non-trivial input (>= 4 chars).
    if user_message.chars().count() >= 4 {
        if let Ok(storage) = crate::viking_commands::get_storage().await {
            let options = zclaw_growth::FindOptions {
                scope: Some(format!("agent://{}", agent_id)),
                limit: Some(3),
                min_similarity: Some(0.3),
            };
            let found =
                zclaw_growth::VikingStorage::find(storage.as_ref(), user_message, options).await;
            if let Ok(entries) = found {
                if !entries.is_empty() {
                    let exp_lines: Vec<String> = entries
                        .iter()
                        .map(|e| {
                            let text = e.overview.as_deref().unwrap_or(&e.content);
                            let snippet: String = text.chars().take(60).collect();
                            format!("- {}", xml_escape(&snippet))
                        })
                        .collect();
                    sections.push(format!(
                        "<experience>\n{}\n</experience>",
                        exp_lines.join("\n")
                    ));
                }
            }
        }
    }

    if sections.is_empty() {
        return String::new();
    }
    format!(
        "\n\n<butler-context>\n{}\n<system-note>以上是管家系统从过往对话中提取的信息。在对话中自然运用这些信息,主动提供有帮助的建议。不要逐条复述以上内容。</system-note>\n</butler-context>",
        sections.join("\n")
    )
}
/// Escape XML special characters (`&`, `<`, `>`) in content injected into
/// `<butler-context>`. Quotes are deliberately left alone: the output goes
/// into element text, not attribute values.
fn xml_escape(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            other => out.push(other),
        }
    }
    out
}
/// Store a lightweight experience entry derived from a trigger signal.
///
/// Template-based and LLM-free: records the signal name, a truncated copy of
/// the user message, and an RFC 3339 timestamp directly into VikingStorage
/// for future retrieval.
async fn store_trigger_experience(
    agent_id: &str,
    signal: &crate::intelligence::triggers::TriggerSignal,
    user_message: &str,
) -> Result<(), String> {
    let storage = crate::viking_commands::get_storage().await?;
    let signal_name = crate::intelligence::triggers::signal_description(signal);
    // Cap the quoted user message at 200 chars (char-safe for CJK text).
    let excerpt: String = user_message.chars().take(200).collect();
    let content = format!(
        "[触发信号: {}]\n用户消息: {}\n时间: {}",
        signal_name,
        excerpt,
        chrono::Utc::now().to_rfc3339(),
    );
    let entry = zclaw_growth::MemoryEntry::new(
        agent_id,
        zclaw_growth::MemoryType::Experience,
        &format!("trigger/{}", signal_name),
        content,
    );
    zclaw_growth::VikingStorage::store(storage.as_ref(), &entry)
        .await
        .map_err(|e| format!("Failed to store trigger experience: {}", e))?;
    debug!(
        "[intelligence_hooks] Stored trigger experience: {} for agent {}",
        signal_name, agent_id
    );
    Ok(())
}