feat: implement ZCLAW Agent Intelligence Evolution Phase 1-3

Phase 1: Persistent Memory + Identity Dynamic Evolution
- agent-memory.ts: MemoryManager with localStorage persistence, keyword search, deduplication, importance scoring, pruning, markdown export
- agent-identity.ts: AgentIdentityManager with per-agent SOUL/AGENTS/USER.md, change proposals with approval workflow, snapshot rollback
- memory-extractor.ts: Rule-based conversation memory extraction (Phase 1), LLM extraction prompt ready for Phase 2
- MemoryPanel.tsx: Memory browsing UI with search, type filter, delete, export (integrated as 4th tab in RightPanel)

Phase 2: Context Governance
- context-compactor.ts: Token estimation, threshold monitoring (soft/hard), memory flush before compaction, rule-based summarization
- chatStore integration: auto-compact when approaching token limits

Phase 3: Proactive Intelligence + Self-Reflection
- heartbeat-engine.ts: Periodic checks (pending tasks, memory health, idle greeting), quiet hours, proactivity levels (silent/light/standard/autonomous)
- reflection-engine.ts: Pattern analysis from memory corpus, improvement suggestions, identity change proposals, meta-memory creation

Chat Flow Integration (chatStore.ts):
- Pre-send: context compaction check -> memory search -> identity system prompt injection
- Post-complete: async memory extraction -> reflection conversation tracking -> auto-trigger reflection

Tests: 274 passing across 12 test files
- agent-memory.test.ts: 42 tests
- context-compactor.test.ts: 23 tests
- heartbeat-reflection.test.ts: 28 tests
- chatStore.test.ts: 11 tests (no regressions)

Refs: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md updated with implementation progress
This commit is contained in:
iven
2026-03-15 22:24:57 +08:00
parent 4862e79b2b
commit 04ddf94123
13 changed files with 3949 additions and 26 deletions

View File

@@ -0,0 +1,319 @@
/**
* Memory Extractor - Automatically extract memorable information from conversations
*
* Uses LLM to analyze completed conversations and extract:
* - Facts the user shared
* - User preferences discovered
* - Lessons learned during problem-solving
* - Pending tasks or commitments
*
* Also handles auto-updating USER.md with discovered preferences.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.2
*/
import { getMemoryManager, type MemoryType } from './agent-memory';
import { getAgentIdentityManager } from './agent-identity';
// === Types ===

/** One piece of memorable information pulled out of a conversation. */
export interface ExtractedItem {
  /** The memory text itself (for rule-based extraction: the matched sentence). */
  content: string;
  /** Memory category; the extractor emits fact / preference / lesson / task. */
  type: MemoryType;
  /** Importance score from 1 (minor) to 10 (critical); see EXTRACTION_PROMPT's rubric. */
  importance: number;
  /** Labels attached to the memory (e.g. 'auto-extracted'). */
  tags: string[];
}

/** Outcome of one extraction pass over a conversation. */
export interface ExtractionResult {
  /** Every item the extractor found, including ones that failed to save. */
  items: ExtractedItem[];
  /** Count of items successfully persisted via the MemoryManager. */
  saved: number;
  /** Count of items whose save threw (presumably dedupe rejections — TODO confirm against MemoryManager.save). */
  skipped: number;
  /** True when discovered preferences were appended to the agent's USER.md. */
  userProfileUpdated: boolean;
}

/** Minimal chat-message shape; roles other than 'user'/'assistant' are ignored. */
export interface ConversationMessage {
  role: string;
  content: string;
}
// === Extraction Prompt ===

/**
 * Prompt for Phase 2 LLM-based extraction (Chinese, matching the product
 * locale). Instructs the model to output a bare JSON array of
 * {content, type, importance, tags[]} items restricted to the four
 * supported memory types; `parseExtractionResponse` parses the reply.
 *
 * Fix: the four example clauses previously opened a fullwidth parenthesis
 * without closing it; closing parens added so the prompt reads correctly.
 */
const EXTRACTION_PROMPT = `请从以下对话中提取值得长期记住的信息。
只提取以下类型:
- fact: 用户告知的事实(如"我的公司叫 XXX"、"我在做 YYY 项目")
- preference: 用户的偏好(如"我喜欢简洁的回答"、"请用中文")
- lesson: 本次对话的经验教训(如"调用 API 前需要先验证 token")
- task: 未完成的任务或承诺(如"下次帮我检查 XXX")
评估规则:
- importance 1-3: 临时性、不太重要的信息
- importance 4-6: 有一定参考价值的信息
- importance 7-9: 重要的持久信息
- importance 10: 极其关键的信息
输出**纯 JSON 数组**,每项包含 content, type, importance, tags[]。
如果没有值得记忆的内容,返回空数组 []。
不要输出任何其他内容,只输出 JSON。
对话内容:
`;
// === Memory Extractor ===

/**
 * Extracts long-term memories from completed conversations.
 *
 * Phase 1 uses rule-based pattern matching over user/assistant messages —
 * no LLM call. The Phase 2 LLM round-trip is prepared by
 * `buildExtractionPrompt` and parsed by `parseExtractionResponse`.
 */
export class MemoryExtractor {
  /**
   * The only memory types EXTRACTION_PROMPT instructs the LLM to emit.
   * Used to reject hallucinated types in `parseExtractionResponse`.
   */
  private static readonly VALID_TYPES: ReadonlySet<string> = new Set([
    'fact',
    'preference',
    'lesson',
    'task',
  ]);

  /** Conversations with fewer user/assistant messages than this are skipped. */
  private minMessagesForExtraction = 4;
  /** Minimum interval between extraction attempts. */
  private extractionCooldownMs = 30_000; // 30 seconds between extractions
  /** Epoch ms of the last extraction attempt (armed even if nothing was saved). */
  private lastExtractionTime = 0;

  /** Canonical "nothing happened" result used by the early-exit paths. */
  private emptyResult(): ExtractionResult {
    return { items: [], saved: 0, skipped: 0, userProfileUpdated: false };
  }

  /**
   * Extract memories from a conversation using rule-based heuristics.
   * This is the Phase 1 approach — no LLM call needed.
   * Phase 2 will add LLM-based extraction using EXTRACTION_PROMPT.
   *
   * Side effects: persists extracted items through the MemoryManager and
   * may append discovered preferences to the agent's USER.md profile.
   *
   * @param messages Full transcript; only user/assistant turns are considered.
   * @param agentId Agent that owns the saved memories.
   * @param conversationId Optional conversation id recorded on each memory.
   * @returns Extracted items plus save/skip counts and a profile-update flag.
   */
  async extractFromConversation(
    messages: ConversationMessage[],
    agentId: string,
    conversationId?: string
  ): Promise<ExtractionResult> {
    // Cooldown check — throttles back-to-back extraction runs.
    if (Date.now() - this.lastExtractionTime < this.extractionCooldownMs) {
      return this.emptyResult();
    }
    // Minimum message threshold (system/tool messages do not count).
    const chatMessages = messages.filter(m => m.role === 'user' || m.role === 'assistant');
    if (chatMessages.length < this.minMessagesForExtraction) {
      return this.emptyResult();
    }
    // Arm the cooldown on every attempt, even if nothing ends up extracted.
    this.lastExtractionTime = Date.now();

    // Phase 1: Rule-based extraction (pattern matching)
    const extracted = this.ruleBasedExtraction(chatMessages);

    // Persist each item; a failed save (e.g. dedupe rejection) counts as skipped.
    const memoryManager = getMemoryManager();
    let saved = 0;
    let skipped = 0;
    for (const item of extracted) {
      try {
        await memoryManager.save({
          agentId,
          content: item.content,
          type: item.type,
          importance: item.importance,
          source: 'auto',
          tags: item.tags,
          conversationId,
        });
        saved++;
      } catch {
        skipped++;
      }
    }

    // Auto-update USER.md with sufficiently important preferences.
    let userProfileUpdated = false;
    const preferences = extracted.filter(e => e.type === 'preference' && e.importance >= 5);
    if (preferences.length > 0) {
      try {
        const identityManager = getAgentIdentityManager();
        const prefSummary = preferences.map(p => `- ${p.content}`).join('\n');
        identityManager.appendToUserProfile(agentId, `### 自动发现的偏好 (${new Date().toLocaleDateString('zh-CN')})\n${prefSummary}`);
        userProfileUpdated = true;
      } catch (err) {
        // Profile update is best-effort; the extraction result still stands.
        console.warn('[MemoryExtractor] Failed to update USER.md:', err);
      }
    }
    if (saved > 0) {
      console.log(`[MemoryExtractor] Extracted ${saved} memories from conversation (${skipped} skipped)`);
    }
    return { items: extracted, saved, skipped, userProfileUpdated };
  }

  /**
   * Phase 1: Rule-based extraction using pattern matching.
   * Facts/preferences/tasks come from user messages; lessons from
   * assistant messages.
   */
  private ruleBasedExtraction(messages: ConversationMessage[]): ExtractedItem[] {
    const items: ExtractedItem[] = [];
    const userMessages = messages.filter(m => m.role === 'user').map(m => m.content);
    for (const msg of userMessages) {
      this.extractFacts(msg, items);
      this.extractPreferences(msg, items);
      this.extractTasks(msg, items);
    }
    // Lesson extraction from assistant messages (error corrections, solutions)
    const assistantMessages = messages.filter(m => m.role === 'assistant').map(m => m.content);
    this.extractLessons(userMessages, assistantMessages, items);
    return items;
  }

  /**
   * Shared matcher: run each regex over `msg`, trim every full match, keep
   * matches whose length is strictly inside (minLen, maxLen), and push them
   * as ExtractedItems. Deduplicates the loop previously repeated in all
   * four extract* methods.
   *
   * @param opts.format Optional transform applied to the matched text
   *                    before storing (e.g. the preference prefix).
   */
  private collectMatches(
    msg: string,
    patterns: RegExp[],
    items: ExtractedItem[],
    opts: {
      type: MemoryType;
      importance: number;
      tags: string[];
      minLen: number;
      maxLen: number;
      format?: (content: string) => string;
    }
  ): void {
    for (const pattern of patterns) {
      for (const match of msg.matchAll(pattern)) {
        const content = match[0].trim();
        if (content.length > opts.minLen && content.length < opts.maxLen) {
          items.push({
            content: opts.format ? opts.format(content) : content,
            type: opts.type,
            importance: opts.importance,
            tags: [...opts.tags], // fresh array per item so consumers can't share mutations
          });
        }
      }
    }
  }

  /** Facts: "我的/我们的 X 是/叫 Y" and similar self-description patterns. */
  private extractFacts(msg: string, items: ExtractedItem[]): void {
    const factPatterns = [
      /我(?:的|们的|们)(\S{1,20})(?:是|叫|名叫|名字是)(.{2,50})/g,
      /(?:公司|团队|项目|产品)(?:名|名称)?(?:是|叫)(.{2,30})/g,
      /我(?:在|正在)(?:做|开发|使用|学习)(.{2,40})/g,
      /我(?:是|做)(.{2,30})(?:的|工作)/g,
    ];
    this.collectMatches(msg, factPatterns, items, {
      type: 'fact',
      importance: 6,
      tags: ['auto-extracted'],
      minLen: 5,
      maxLen: 100,
    });
  }

  /** Preferences: likes/dislikes and standing instructions, stored with a prefix. */
  private extractPreferences(msg: string, items: ExtractedItem[]): void {
    const prefPatterns = [
      /(?:我喜欢|我偏好|我习惯|请用|请使用|默认用|我更愿意)(.{2,50})/g,
      /(?:不要|别|不用)(.{2,30})(?:了|吧)?/g,
      /(?:以后|下次|每次)(?:都)?(.{2,40})/g,
      /(?:用中文|用英文|简洁|详细|简短)(?:一点|回复|回答)?/g,
    ];
    this.collectMatches(msg, prefPatterns, items, {
      type: 'preference',
      importance: 5,
      tags: ['auto-extracted', 'preference'],
      minLen: 3,
      maxLen: 80,
      format: content => `用户偏好: ${content}`,
    });
  }

  /** Tasks: reminders, commitments, and TODO/FIXME markers. */
  private extractTasks(msg: string, items: ExtractedItem[]): void {
    const taskPatterns = [
      /(?:帮我|帮忙|记得|别忘了|下次|以后|待办)(.{5,60})/g,
      /(?:TODO|todo|FIXME|fixme)[:\s]*(.{5,60})/g,
    ];
    this.collectMatches(msg, taskPatterns, items, {
      type: 'task',
      importance: 7,
      tags: ['auto-extracted', 'task'],
      minLen: 5,
      maxLen: 100,
    });
  }

  /**
   * Lessons: error-resolution phrasing ("问题是/原因是/…") in assistant
   * messages. User messages are currently unused but kept in the signature
   * for future correlation.
   */
  private extractLessons(
    _userMessages: string[],
    assistantMessages: string[],
    items: ExtractedItem[]
  ): void {
    const lessonPatterns = [
      /(?:问题是|原因是|根因是|解决方法是|关键是)(.{10,100})/g,
      /(?:需要注意|要注意|注意事项)[:](.{10,80})/g,
    ];
    for (const msg of assistantMessages) {
      this.collectMatches(msg, lessonPatterns, items, {
        type: 'lesson',
        importance: 6,
        tags: ['auto-extracted', 'lesson'],
        minLen: 10,
        maxLen: 150,
      });
    }
  }

  /**
   * Build the LLM extraction prompt for a conversation.
   * For Phase 2: send this to the LLM and parse the JSON response.
   */
  buildExtractionPrompt(messages: ConversationMessage[]): string {
    const conversationText = messages
      .filter(m => m.role === 'user' || m.role === 'assistant')
      .map(m => `[${m.role === 'user' ? '用户' : '助手'}]: ${m.content}`)
      .join('\n\n');
    return EXTRACTION_PROMPT + conversationText;
  }

  /**
   * Parse an LLM extraction response into validated items.
   *
   * Fixes over the previous version: items with a non-numeric importance
   * (Number("abc") → NaN used to slip through the `!== undefined` check and
   * survive the clamp as NaN) or an unknown `type` (previously blindly
   * asserted via `as MemoryType`) are now dropped.
   *
   * @returns Valid items with importance clamped to [1, 10]; [] on any
   *          parse failure or malformed payload.
   */
  parseExtractionResponse(response: string): ExtractedItem[] {
    try {
      // Grab the outermost JSON array; the LLM may wrap it in prose/fences.
      const jsonMatch = response.match(/\[[\s\S]*\]/);
      if (!jsonMatch) return [];
      const parsed: unknown = JSON.parse(jsonMatch[0]);
      if (!Array.isArray(parsed)) return [];
      const items: ExtractedItem[] = [];
      for (const raw of parsed as Record<string, unknown>[]) {
        if (raw === null || typeof raw !== 'object') continue;
        const importance = Number(raw.importance);
        if (
          !raw.content ||
          typeof raw.type !== 'string' ||
          !MemoryExtractor.VALID_TYPES.has(raw.type) ||
          !Number.isFinite(importance)
        ) {
          continue; // malformed or hallucinated item — skip it
        }
        items.push({
          content: String(raw.content),
          type: raw.type as MemoryType, // safe: membership checked against VALID_TYPES
          importance: Math.max(1, Math.min(10, importance)),
          tags: Array.isArray(raw.tags) ? raw.tags.map(String) : [],
        });
      }
      return items;
    } catch {
      console.warn('[MemoryExtractor] Failed to parse LLM extraction response');
      return [];
    }
  }
}
// === Singleton ===

/** Lazily-created shared extractor; null until first access or after reset. */
let _instance: MemoryExtractor | null = null;

/** Return the shared MemoryExtractor, creating it on first use. */
export function getMemoryExtractor(): MemoryExtractor {
  _instance ??= new MemoryExtractor();
  return _instance;
}

/** Drop the shared instance so the next access builds a fresh one (used by tests). */
export function resetMemoryExtractor(): void {
  _instance = null;
}