fix(growth,hands,kernel,desktop): Phase 1 用户可感知修复 — 6 项断链修复
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

Phase 1 修复内容:
1. Hand 执行前端字段映射 — instance_id → runId,修复 Hand 状态追踪
2. Heartbeat 痛点感知 — PAIN_POINTS_CACHE + VikingStorage 持久化 + 未解决痛点检查
3. Browser Hand 委托消息 — pending_execution → delegated_to_frontend + 中文摘要
4. 跨会话记忆检索增强 — 扩展 IdentityRecall 模式 26→43 + 弱身份信号检测 + 低结果 fallback
5. Twitter Hand 凭据持久化 — SetCredentials action + 文件持久化 + 启动恢复
6. Browser 测试修复 — 适配新的 delegated_to_frontend 响应格式

验证: cargo check ✅ | cargo test 912 PASS ✅ | tsc --noEmit ✅
This commit is contained in:
iven
2026-04-21 10:18:25 +08:00
parent 2f5e9f1755
commit 9a2611d122
10 changed files with 435 additions and 165 deletions

View File

@@ -19,6 +19,8 @@ pub struct AnalyzedQuery {
pub target_types: Vec<MemoryType>,
/// Expanded search terms
pub expansions: Vec<String>,
/// Whether weak identity signals were detected (personal pronouns, possessives)
pub weak_identity: bool,
}
/// Query intent classification
@@ -55,6 +57,8 @@ pub struct QueryAnalyzer {
stop_words: HashSet<String>,
/// Patterns indicating identity/personal recall queries
identity_patterns: Vec<String>,
/// Weak identity signals (pronouns, possessives) that boost broad retrieval
weak_identity_indicators: Vec<String>,
}
impl QueryAnalyzer {
@@ -105,15 +109,33 @@ impl QueryAnalyzer {
.map(|s| s.to_string())
.collect(),
identity_patterns: [
// Chinese identity recall patterns
"我是谁", "我叫什么", "之前", "告诉过你", "之前告诉",
"还记得", "你还记得", "我的名字", "我的身份", "我的信息",
"我的工作", "我在哪", "我的偏好", "我喜欢什么",
"关于", "了解", "记得", "我之前说过",
// Chinese identity recall patterns — direct identity queries
"我是谁", "我叫什么", "的名字", "的身份", "的信息",
"关于", "了解我", "记得我",
// Chinese — cross-session recall ("what did we discuss before")
"之前", "告诉过你", "之前告诉", "我之前说过",
"还记得我", "你还记得", "你记得吗", "记得之前",
"我们之前聊过", "我们讨论过", "我们聊过", "上次聊",
"之前说过", "之前告诉", "以前说过", "以前聊过",
// Chinese — preferences/settings queries
"我的偏好", "我喜欢什么", "我的工作", "我在哪",
"我的设置", "我的习惯", "我的爱好", "我的职业",
"我记得", "我想起来", "我忘了",
// English identity recall patterns
"who am i", "what is my name", "what do you know about me",
"what did i tell", "do you remember me", "what do you remember",
"my preferences", "about me", "what have i shared",
"remind me", "what we discussed", "my settings", "my profile",
"tell me about myself", "what did we talk about", "what was my",
"i mentioned before", "we talked about", "i told you before",
]
.iter()
.map(|s| s.to_string())
.collect(),
// Weak identity signals — pronouns that hint at personal context
weak_identity_indicators: [
"我的", "我之前", "我们之前", "我们上次",
"my ", "i told", "i said", "we discussed", "we talked",
]
.iter()
.map(|s| s.to_string())
@@ -130,6 +152,10 @@ impl QueryAnalyzer {
let is_identity = self.identity_patterns.iter()
.any(|pattern| query_lower.contains(&pattern.to_lowercase()));
// Check for weak identity signals (personal pronouns, possessives)
let weak_identity = !is_identity && self.weak_identity_indicators.iter()
.any(|indicator| query_lower.contains(&indicator.to_lowercase()));
let intent = if is_identity {
QueryIntent::IdentityRecall
} else {
@@ -145,6 +171,7 @@ impl QueryAnalyzer {
intent,
target_types,
expansions,
weak_identity,
}
}
@@ -400,4 +427,48 @@ mod tests {
// Chinese characters should be extracted
assert!(!keywords.is_empty());
}
#[test]
fn test_identity_recall_expanded_patterns() {
    let analyzer = QueryAnalyzer::new();
    // Data-driven instead of seven copy-pasted asserts: each of the newly
    // added Chinese and English patterns must classify as IdentityRecall,
    // and a failure names the exact query that misclassified.
    let recall_queries = [
        // New Chinese cross-session / settings patterns
        "我们之前聊过什么",
        "你记得吗上次说的",
        "我的设置是什么",
        "我们讨论过这个话题",
        // New English patterns
        "what did we talk about yesterday",
        "remind me what I said",
        "my settings",
    ];
    for query in recall_queries {
        assert_eq!(
            analyzer.analyze(query).intent,
            QueryIntent::IdentityRecall,
            "expected IdentityRecall for query: {query}"
        );
    }
}
#[test]
fn test_weak_identity_detection() {
    let analyzer = QueryAnalyzer::new();

    // "我的" alone is only a weak signal: the flag is set, yet the
    // intent must NOT escalate to a full IdentityRecall.
    let with_pronoun = analyzer.analyze("我的项目进度怎么样了");
    assert!(with_pronoun.weak_identity, "Should detect weak identity from '我的'");
    assert_ne!(with_pronoun.intent, QueryIntent::IdentityRecall);

    // A purely technical question carries no personal signal at all.
    let neutral = analyzer.analyze("解释一下Rust的所有权");
    assert!(!neutral.weak_identity);

    // A strong identity pattern wins outright: weak_identity stays false
    // because the query is already classified as IdentityRecall.
    let strong = analyzer.analyze("我是谁");
    assert!(!strong.weak_identity);
    assert_eq!(strong.intent, QueryIntent::IdentityRecall);
}
#[test]
fn test_no_false_identity_on_general_queries() {
    let analyzer = QueryAnalyzer::new();
    // Analyze once and assert both properties on the same result —
    // the original ran the analyzer twice on an identical query.
    // A generic knowledge question carries no personal signal, so it
    // must be neither a full IdentityRecall nor a weak-identity match.
    let analyzed = analyzer.analyze("什么是机器学习");
    assert_ne!(analyzed.intent, QueryIntent::IdentityRecall);
    assert!(!analyzed.weak_identity);
}
}

View File

@@ -106,6 +106,25 @@ impl MemoryRetriever {
)
.await?;
let total_found = preferences.len() + knowledge.len() + experience.len();
// Fallback: if keyword-based retrieval returns too few results AND weak identity
// signals are present (e.g. "我的xxx", "我之前xxx"), supplement with broad retrieval
// to ensure cross-session memories are found even without exact keyword match.
let (preferences, knowledge, experience) = if total_found < 3 && analyzed.weak_identity {
tracing::info!(
"[MemoryRetriever] Weak identity + low results ({}), supplementing with broad retrieval",
total_found
);
let broad = self.retrieve_broad_identity(agent_id).await?;
let prefs = Self::merge_results(preferences, broad.preferences);
let knows = Self::merge_results(knowledge, broad.knowledge);
let exps = Self::merge_results(experience, broad.experience);
(prefs, knows, exps)
} else {
(preferences, knowledge, experience)
};
let total_tokens = preferences.iter()
.chain(knowledge.iter())
.chain(experience.iter())
@@ -153,6 +172,7 @@ impl MemoryRetriever {
intent: crate::retrieval::query::QueryIntent::General,
target_types: vec![],
expansions: vec![],
weak_identity: false,
};
let search_queries = self.analyzer.generate_search_queries(&analyzed_for_search);
@@ -198,6 +218,20 @@ impl MemoryRetriever {
Ok(filtered)
}
/// Merge keyword-based and broad-retrieval results, deduplicating by URI.
/// Keyword results take precedence (appear first), broad results fill gaps.
fn merge_results(keyword_results: Vec<MemoryEntry>, broad_results: Vec<MemoryEntry>) -> Vec<MemoryEntry> {
    // `insert` returns false for an already-seen URI, so `filter` keeps
    // only the first occurrence; chaining keyword results first preserves
    // their precedence over the broad-retrieval supplements.
    let mut seen = std::collections::HashSet::new();
    keyword_results
        .into_iter()
        .chain(broad_results)
        .filter(|entry| seen.insert(entry.uri.clone()))
        .collect()
}
/// Rerank entries using semantic similarity
async fn rerank_entries(
&self,