Files
zclaw_openfang/tests/desktop/context-compactor.test.ts
commit 04ddf94123 feat: implement ZCLAW Agent Intelligence Evolution Phase 1-3
Phase 1: Persistent Memory + Identity Dynamic Evolution
- agent-memory.ts: MemoryManager with localStorage persistence, keyword search, deduplication, importance scoring, pruning, markdown export
- agent-identity.ts: AgentIdentityManager with per-agent SOUL/AGENTS/USER.md, change proposals with approval workflow, snapshot rollback
- memory-extractor.ts: Rule-based conversation memory extraction (Phase 1), LLM extraction prompt ready for Phase 2
- MemoryPanel.tsx: Memory browsing UI with search, type filter, delete, export (integrated as 4th tab in RightPanel)

Phase 2: Context Governance
- context-compactor.ts: Token estimation, threshold monitoring (soft/hard), memory flush before compaction, rule-based summarization
- chatStore integration: auto-compact when approaching token limits

Phase 3: Proactive Intelligence + Self-Reflection
- heartbeat-engine.ts: Periodic checks (pending tasks, memory health, idle greeting), quiet hours, proactivity levels (silent/light/standard/autonomous)
- reflection-engine.ts: Pattern analysis from memory corpus, improvement suggestions, identity change proposals, meta-memory creation

Chat Flow Integration (chatStore.ts):
- Pre-send: context compaction check -> memory search -> identity system prompt injection
- Post-complete: async memory extraction -> reflection conversation tracking -> auto-trigger reflection

Tests: 274 passing across 12 test files
- agent-memory.test.ts: 42 tests
- context-compactor.test.ts: 23 tests
- heartbeat-reflection.test.ts: 28 tests
- chatStore.test.ts: 11 tests (no regressions)

Refs: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md updated with implementation progress
2026-03-15 22:24:57 +08:00

310 lines
11 KiB
TypeScript
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

/**
* Tests for Context Compactor (Phase 2)
*
* Covers: token estimation, threshold checking, memory flush, compaction
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import {
ContextCompactor,
resetContextCompactor,
estimateTokens,
estimateMessagesTokens,
DEFAULT_COMPACTION_CONFIG,
type CompactableMessage,
} from '../../desktop/src/lib/context-compactor';
import { resetMemoryManager } from '../../desktop/src/lib/agent-memory';
import { resetAgentIdentityManager } from '../../desktop/src/lib/agent-identity';
import { resetMemoryExtractor } from '../../desktop/src/lib/memory-extractor';
// === Mock localStorage ===
// === Mock localStorage ===
// Minimal in-memory stand-in for the Web Storage API. Backed by a Map so
// lookups never collide with Object.prototype keys; clear() empties it so
// every test starts from a clean slate.
const localStorageMock = (() => {
  const backing = new Map<string, string>();
  return {
    getItem: (key: string) => backing.get(key) ?? null,
    setItem: (key: string, value: string) => { backing.set(key, value); },
    removeItem: (key: string) => { backing.delete(key); },
    clear: () => { backing.clear(); },
  };
})();
vi.stubGlobal('localStorage', localStorageMock);
// === Helpers ===
// === Helpers ===
/**
 * Build a deterministic alternating user/assistant conversation for tests.
 *
 * Content is CJK filler trimmed to exactly `contentLength` characters;
 * timestamps count backwards one minute per message so the last entry is
 * the newest.
 */
function makeMessages(count: number, contentLength: number = 100): CompactableMessage[] {
  // The 6-char filler is repeated just far enough, then sliced to length.
  const filler = '测试消息内容'.repeat(Math.ceil(contentLength / 6)).slice(0, contentLength);
  return Array.from({ length: count }, (_, idx): CompactableMessage => ({
    role: idx % 2 === 0 ? 'user' : 'assistant',
    content: filler,
    id: `msg_${idx}`,
    timestamp: new Date(Date.now() - (count - idx) * 60000),
  }));
}
/**
 * Grow a synthetic QA conversation until its estimated token footprint
 * reaches `targetTokens`.
 *
 * NOTE: the total is deliberately re-estimated over the full list after
 * every push (same as production would see), rather than accumulated
 * incrementally — `estimateMessagesTokens` may add per-message framing
 * that simple addition would miss.
 */
function makeLargeConversation(targetTokens: number): CompactableMessage[] {
  const conversation: CompactableMessage[] = [];
  for (let idx = 0; estimateMessagesTokens(conversation) < targetTokens; idx++) {
    const isUser = idx % 2 === 0;
    conversation.push({
      role: isUser ? 'user' : 'assistant',
      content: isUser
        ? `用户问题 ${idx}: 请帮我分析一下这个技术方案的可行性,包括性能、安全性和可维护性方面`
        : `助手回答 ${idx}: 好的,我来从三个维度分析这个方案。首先从性能角度来看,这个方案使用了异步处理机制,能够有效提升吞吐量。其次从安全性方面,建议增加输入验证和权限控制。最后从可维护性来看,模块化设计使得后续修改更加方便。`,
      id: `msg_${idx}`,
      timestamp: new Date(Date.now() - (1000 - idx) * 60000),
    });
  }
  return conversation;
}
// =============================================
// Token Estimation Tests
// =============================================
describe('Token Estimation', () => {
  it('returns 0 for empty string', () => {
    expect(estimateTokens('')).toBe(0);
  });

  it('estimates CJK text at ~1.5 tokens per char', () => {
    // '你好世界测试' is 6 CJK chars; 6 × 1.5 = 9 tokens expected exactly.
    expect(estimateTokens('你好世界测试')).toBe(9);
  });

  it('estimates English text at ~0.3 tokens per char', () => {
    const estimate = estimateTokens('hello world test');
    // Roughly: 13 ASCII chars × 0.3 + 2 spaces × 0.25 ≈ 4.4 — bound it loosely.
    expect(estimate).toBeGreaterThan(3);
    expect(estimate).toBeLessThan(10);
  });

  it('estimates mixed CJK+English text', () => {
    expect(estimateTokens('用户的项目叫 ZCLAW Desktop')).toBeGreaterThan(5);
  });

  it('estimateMessagesTokens includes framing overhead', () => {
    const conversation: CompactableMessage[] = [
      { role: 'user', content: '你好' },
      { role: 'assistant', content: '你好!' },
    ];
    // Per-message framing (4 tokens × 2 messages) should push the total
    // above the bare content estimate.
    const bareContent = estimateTokens('你好') + estimateTokens('你好!');
    expect(estimateMessagesTokens(conversation)).toBeGreaterThan(bareContent);
  });
});
// =============================================
// ContextCompactor Tests
// =============================================
describe('ContextCompactor', () => {
  let compactor: ContextCompactor;

  beforeEach(() => {
    // Fresh storage and singletons per test so no state leaks between cases.
    localStorageMock.clear();
    resetContextCompactor();
    resetMemoryManager();
    resetAgentIdentityManager();
    resetMemoryExtractor();
    compactor = new ContextCompactor();
  });

  // checkThreshold: maps the current token estimate onto an urgency level
  // (none / soft / hard) against the configured thresholds.
  describe('checkThreshold', () => {
    it('returns none urgency for small conversations', () => {
      const msgs = makeMessages(4);
      const check = compactor.checkThreshold(msgs);
      expect(check.shouldCompact).toBe(false);
      expect(check.urgency).toBe('none');
    });
    it('returns soft urgency when approaching threshold', () => {
      // Conversation sized to land at (or just past) the soft threshold.
      const msgs = makeLargeConversation(DEFAULT_COMPACTION_CONFIG.softThresholdTokens);
      const check = compactor.checkThreshold(msgs);
      expect(check.shouldCompact).toBe(true);
      expect(check.urgency).toBe('soft');
    });
    it('returns hard urgency when exceeding hard threshold', () => {
      const msgs = makeLargeConversation(DEFAULT_COMPACTION_CONFIG.hardThresholdTokens);
      const check = compactor.checkThreshold(msgs);
      expect(check.shouldCompact).toBe(true);
      expect(check.urgency).toBe('hard');
    });
    it('reports current token count', () => {
      const msgs = makeMessages(10);
      const check = compactor.checkThreshold(msgs);
      expect(check.currentTokens).toBeGreaterThan(0);
    });
  });

  // compact: replaces old history with one system summary message while
  // keeping the most recent `keepRecentMessages` messages verbatim.
  describe('compact', () => {
    it('retains keepRecentMessages recent messages', async () => {
      const config = { keepRecentMessages: 4 };
      const comp = new ContextCompactor(config);
      const msgs = makeMessages(20);
      const result = await comp.compact(msgs, 'agent-1');
      // Should have: 1 summary + 4 recent = 5
      expect(result.retainedCount).toBe(5);
      expect(result.compactedMessages).toHaveLength(5);
      expect(result.compactedMessages[0].role).toBe('system'); // summary
    });
    it('generates a summary that mentions message count', async () => {
      const msgs = makeMessages(20);
      const result = await compactor.compact(msgs, 'agent-1');
      // Summary text is Chinese; assert on its fixed phrasing fragments.
      expect(result.summary).toContain('压缩');
      expect(result.summary).toContain('条消息');
    });
    it('reduces token count significantly', async () => {
      const msgs = makeLargeConversation(16000);
      const result = await compactor.compact(msgs, 'agent-1');
      expect(result.tokensAfterCompaction).toBeLessThan(result.tokensBeforeCompaction);
    });
    it('preserves most recent messages in order', async () => {
      const msgs: CompactableMessage[] = [
        { role: 'user', content: 'old message 1', id: 'old1' },
        { role: 'assistant', content: 'old reply 1', id: 'old2' },
        { role: 'user', content: 'old message 2', id: 'old3' },
        { role: 'assistant', content: 'old reply 2', id: 'old4' },
        { role: 'user', content: 'recent message 1', id: 'recent1' },
        { role: 'assistant', content: 'recent reply 1', id: 'recent2' },
        { role: 'user', content: 'recent message 2', id: 'recent3' },
        { role: 'assistant', content: 'recent reply 2', id: 'recent4' },
      ];
      const comp = new ContextCompactor({ keepRecentMessages: 4 });
      const result = await comp.compact(msgs, 'agent-1');
      // Last 4 messages should be preserved
      const retained = result.compactedMessages.slice(1); // skip summary
      expect(retained).toHaveLength(4);
      expect(retained[0].content).toBe('recent message 1');
      expect(retained[3].content).toBe('recent reply 2');
    });
    it('handles empty message list', async () => {
      const result = await compactor.compact([], 'agent-1');
      expect(result.retainedCount).toBe(1); // just the summary
      expect(result.summary).toContain('对话开始');
    });
    it('handles fewer messages than keepRecentMessages', async () => {
      const msgs = makeMessages(3);
      const result = await compactor.compact(msgs, 'agent-1');
      // All messages kept + summary
      expect(result.compactedMessages.length).toBeLessThanOrEqual(msgs.length + 1);
    });
  });

  // memoryFlush: extracts durable memories from messages about to be
  // compacted away, so information survives the history truncation.
  describe('memoryFlush', () => {
    it('returns 0 when disabled', async () => {
      const comp = new ContextCompactor({ memoryFlushEnabled: false });
      const flushed = await comp.memoryFlush(makeMessages(10), 'agent-1');
      expect(flushed).toBe(0);
    });
    it('extracts memories from conversation messages', async () => {
      // Fixture deliberately includes fact/preference-style user statements
      // that the rule-based extractor is designed to pick up.
      const msgs: CompactableMessage[] = [
        { role: 'user', content: '我的公司叫字节跳动我在做AI项目' },
        { role: 'assistant', content: '好的,了解了。' },
        { role: 'user', content: '我喜欢简洁的代码风格' },
        { role: 'assistant', content: '明白。' },
        { role: 'user', content: '帮我看看这个问题' },
        { role: 'assistant', content: '好的。' },
      ];
      const flushed = await compactor.memoryFlush(msgs, 'agent-1');
      // Should extract at least some memories
      expect(flushed).toBeGreaterThanOrEqual(0); // May or may not match patterns
    });
  });

  // Summary generation is internal; exercised indirectly through compact().
  describe('generateSummary (via compact)', () => {
    it('includes topic extraction from user messages', async () => {
      const msgs: CompactableMessage[] = [
        { role: 'user', content: '帮我分析一下React性能优化方案' },
        { role: 'assistant', content: '好的React性能优化主要从以下几个方面入手1. 使用React.memo 2. 使用useMemo' },
        { role: 'user', content: '那TypeScript的类型推导呢' },
        { role: 'assistant', content: 'TypeScript类型推导是一个重要特性...' },
        ...makeMessages(4), // pad to exceed keepRecentMessages
      ];
      const comp = new ContextCompactor({ keepRecentMessages: 2 });
      const result = await comp.compact(msgs, 'agent-1');
      // Summary should mention topics
      expect(result.summary).toContain('讨论主题');
    });
    it('includes technical context when code blocks present', async () => {
      const msgs: CompactableMessage[] = [
        { role: 'user', content: '帮我写一个函数' },
        { role: 'assistant', content: '好的,这是实现:\n```typescript\nfunction hello() { return "world"; }\n```' },
        ...makeMessages(6),
      ];
      const comp = new ContextCompactor({ keepRecentMessages: 2 });
      const result = await comp.compact(msgs, 'agent-1');
      expect(result.summary).toContain('技术上下文');
    });
  });

  // buildCompactionPrompt: the Phase-2 LLM summarization prompt — should
  // embed both the instruction and the transcript content.
  describe('buildCompactionPrompt', () => {
    it('generates a valid LLM prompt', () => {
      const msgs: CompactableMessage[] = [
        { role: 'user', content: '帮我优化数据库查询' },
        { role: 'assistant', content: '好的,我建议使用索引...' },
      ];
      const prompt = compactor.buildCompactionPrompt(msgs);
      expect(prompt).toContain('压缩为简洁摘要');
      expect(prompt).toContain('优化数据库');
      expect(prompt).toContain('用户');
      expect(prompt).toContain('助手');
    });
  });

  // Config plumbing: defaults, runtime updates, and partial overrides.
  describe('config management', () => {
    it('uses default config', () => {
      const config = compactor.getConfig();
      expect(config.softThresholdTokens).toBe(15000);
      expect(config.keepRecentMessages).toBe(6);
    });
    it('allows config updates', () => {
      compactor.updateConfig({ softThresholdTokens: 10000 });
      expect(compactor.getConfig().softThresholdTokens).toBe(10000);
    });
    it('accepts partial config in constructor', () => {
      const comp = new ContextCompactor({ keepRecentMessages: 10 });
      const config = comp.getConfig();
      expect(config.keepRecentMessages).toBe(10);
      expect(config.softThresholdTokens).toBe(15000); // default preserved
    });
  });
});