feat(intelligence): complete migration to Rust backend

- Unify all intelligence modules to use intelligenceClient
- Delete legacy TS implementations (agent-memory, reflection-engine, heartbeat-engine, context-compactor, agent-identity, memory-index)
- Update all consumers to use snake_case backend types
- Remove deprecated llm-integration.test.ts

This eliminates code duplication between frontend and backend, resolves
localStorage limitations, and enables persistent intelligence features.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-03-21 15:17:39 +08:00
parent 17fb1e69aa
commit f3ec3c8d4c
24 changed files with 1172 additions and 3095 deletions

View File

@@ -18,6 +18,7 @@ import { useConnectionStore } from './store/connectionStore';
import { useHandStore, type HandRun } from './store/handStore';
import { useTeamStore } from './store/teamStore';
import { useChatStore } from './store/chatStore';
import { initializeStores } from './store';
import { getStoredGatewayToken } from './lib/gateway-client';
import { pageVariants, defaultTransition, fadeInVariants } from './lib/animations';
import { Users, Loader2, Settings } from 'lucide-react';
@@ -156,7 +157,10 @@ function App() {
setShowOnboarding(true);
}
// Step 4: Bootstrap complete
// Step 4: Initialize stores with gateway client
initializeStores();
// Step 5: Bootstrap complete
setBootstrapping(false);
} catch (err) {
console.error('[App] Bootstrap failed:', err);

View File

@@ -9,7 +9,7 @@
import { useState, useEffect, useCallback, useMemo } from 'react';
import { useHandStore } from '../../store/handStore';
import { useWorkflowStore, type Workflow } from '../../store/workflowStore';
import { useWorkflowStore } from '../../store/workflowStore';
import {
type AutomationItem,
type CategoryType,
@@ -54,7 +54,9 @@ export function AutomationPanel({
// Store state - use domain stores
const hands = useHandStore((s) => s.hands);
const workflows = useWorkflowStore((s) => s.workflows);
const isLoading = useHandStore((s) => s.isLoading) || useWorkflowStore((s) => s.isLoading);
const handLoading = useHandStore((s) => s.isLoading);
const workflowLoading = useWorkflowStore((s) => s.isLoading);
const isLoading = handLoading || workflowLoading;
const loadHands = useHandStore((s) => s.loadHands);
const loadWorkflows = useWorkflowStore((s) => s.loadWorkflows);
const triggerHand = useHandStore((s) => s.triggerHand);

View File

@@ -11,7 +11,6 @@ import {
X,
AlertCircle,
AlertTriangle,
Info,
Bug,
WifiOff,
ShieldAlert,
@@ -44,14 +43,14 @@ interface ErrorNotificationProps {
const categoryIcons: Record<ErrorCategory, typeof AlertCircle> = {
network: WifiOff,
authentication: ShieldAlert,
authorization: ShieldAlert,
auth: ShieldAlert,
permission: ShieldAlert,
validation: AlertTriangle,
configuration: AlertTriangle,
internal: Bug,
external: AlertCircle,
config: AlertTriangle,
server: Bug,
client: AlertCircle,
timeout: Clock,
unknown: AlertCircle,
system: Bug,
};
const severityColors: Record<ErrorSeverity, {

View File

@@ -26,11 +26,23 @@ import {
RefreshCw,
} from 'lucide-react';
import {
HeartbeatEngine,
DEFAULT_HEARTBEAT_CONFIG,
intelligenceClient,
type HeartbeatConfig as HeartbeatConfigType,
type HeartbeatResult,
} from '../lib/heartbeat-engine';
type HeartbeatAlert,
} from '../lib/intelligence-client';
// === Default Config ===
const DEFAULT_HEARTBEAT_CONFIG: HeartbeatConfigType = {
enabled: false,
interval_minutes: 30,
quiet_hours_start: null,
quiet_hours_end: null,
notify_channel: 'ui',
proactivity_level: 'standard',
max_alerts_per_tick: 5,
};
// === Types ===
@@ -309,8 +321,8 @@ export function HeartbeatConfig({ className = '', onConfigChange }: HeartbeatCon
const handleTestHeartbeat = useCallback(async () => {
setIsTesting(true);
try {
const engine = new HeartbeatEngine('zclaw-main', config);
const result = await engine.tick();
await intelligenceClient.heartbeat.init('zclaw-main', config);
const result = await intelligenceClient.heartbeat.tick('zclaw-main');
setLastResult(result);
} catch (error) {
console.error('[HeartbeatConfig] Test failed:', error);
@@ -408,12 +420,12 @@ export function HeartbeatConfig({ className = '', onConfigChange }: HeartbeatCon
min="5"
max="120"
step="5"
value={config.intervalMinutes}
onChange={(e) => updateConfig({ intervalMinutes: parseInt(e.target.value) })}
value={config.interval_minutes}
onChange={(e) => updateConfig({ interval_minutes: parseInt(e.target.value) })}
className="flex-1 h-2 bg-gray-200 dark:bg-gray-700 rounded-lg appearance-none cursor-pointer accent-pink-500"
/>
<span className="text-sm font-medium text-gray-900 dark:text-gray-100 w-16 text-right">
{config.intervalMinutes}
{config.interval_minutes}
</span>
</div>
</div>
@@ -428,8 +440,8 @@ export function HeartbeatConfig({ className = '', onConfigChange }: HeartbeatCon
</div>
<div className="pl-6">
<ProactivityLevelSelector
value={config.proactivityLevel}
onChange={(level) => updateConfig({ proactivityLevel: level })}
value={config.proactivity_level}
onChange={(level) => updateConfig({ proactivity_level: level })}
/>
</div>
</div>
@@ -437,15 +449,15 @@ export function HeartbeatConfig({ className = '', onConfigChange }: HeartbeatCon
{/* Quiet Hours */}
<div className="space-y-2">
<QuietHoursConfig
start={config.quietHoursStart}
end={config.quietHoursEnd}
enabled={!!config.quietHoursStart}
onStartChange={(time) => updateConfig({ quietHoursStart: time })}
onEndChange={(time) => updateConfig({ quietHoursEnd: time })}
start={config.quiet_hours_start ?? undefined}
end={config.quiet_hours_end ?? undefined}
enabled={!!config.quiet_hours_start}
onStartChange={(time) => updateConfig({ quiet_hours_start: time })}
onEndChange={(time) => updateConfig({ quiet_hours_end: time })}
onToggle={(enabled) =>
updateConfig({
quietHoursStart: enabled ? '22:00' : undefined,
quietHoursEnd: enabled ? '08:00' : undefined,
quiet_hours_start: enabled ? '22:00' : null,
quiet_hours_end: enabled ? '08:00' : null,
})
}
/>
@@ -484,12 +496,12 @@ export function HeartbeatConfig({ className = '', onConfigChange }: HeartbeatCon
</span>
</div>
<div className="text-xs text-gray-500 dark:text-gray-400">
{lastResult.checkedItems}
{lastResult.checked_items}
{lastResult.alerts.length > 0 && ` · ${lastResult.alerts.length} 个提醒`}
</div>
{lastResult.alerts.length > 0 && (
<div className="mt-2 space-y-1">
{lastResult.alerts.map((alert, i) => (
{lastResult.alerts.map((alert: HeartbeatAlert, i: number) => (
<div
key={i}
className={`text-xs p-2 rounded ${

View File

@@ -7,11 +7,11 @@ import {
import { cardHover, defaultTransition } from '../lib/animations';
import { Button, Badge, EmptyState } from './ui';
import {
getMemoryManager,
intelligenceClient,
type MemoryEntry,
type MemoryType,
type MemoryStats,
} from '../lib/agent-memory';
} from '../lib/intelligence-client';
import { useChatStore } from '../store/chatStore';
const TYPE_LABELS: Record<MemoryType, { label: string; emoji: string; color: string }> = {
@@ -34,22 +34,26 @@ export function MemoryPanel() {
const [isExporting, setIsExporting] = useState(false);
const loadMemories = useCallback(async () => {
const mgr = getMemoryManager();
const typeFilter = filterType !== 'all' ? { type: filterType as MemoryType } : {};
if (searchQuery.trim()) {
const results = await mgr.search(searchQuery, {
const results = await intelligenceClient.memory.search({
agentId,
query: searchQuery,
limit: 50,
...typeFilter,
});
setMemories(results);
} else {
const all = await mgr.getAll(agentId, { ...typeFilter, limit: 50 });
setMemories(all);
const results = await intelligenceClient.memory.search({
agentId,
limit: 50,
...typeFilter,
});
setMemories(results);
}
const s = await mgr.stats(agentId);
const s = await intelligenceClient.memory.stats();
setStats(s);
}, [agentId, searchQuery, filterType]);
@@ -58,15 +62,22 @@ export function MemoryPanel() {
}, [loadMemories]);
const handleDelete = async (id: string) => {
await getMemoryManager().forget(id);
await intelligenceClient.memory.delete(id);
loadMemories();
};
const handleExport = async () => {
setIsExporting(true);
try {
const md = await getMemoryManager().exportToMarkdown(agentId);
const blob = new Blob([md], { type: 'text/markdown' });
const memories = await intelligenceClient.memory.export();
const filtered = memories.filter(m => m.agentId === agentId);
const md = filtered.map(m =>
`## [${m.type}] ${m.content}\n` +
`- 重要度: ${m.importance}\n` +
`- 标签: ${m.tags.join(', ') || '无'}\n` +
`- 创建时间: ${m.createdAt}\n`
).join('\n---\n\n');
const blob = new Blob([md || '# 无记忆数据'], { type: 'text/markdown' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
@@ -79,12 +90,20 @@ export function MemoryPanel() {
};
const handlePrune = async () => {
const pruned = await getMemoryManager().prune({
// Find old, low-importance memories and delete them
const memories = await intelligenceClient.memory.search({
agentId,
maxAgeDays: 30,
minImportance: 3,
minImportance: 0,
limit: 1000,
});
if (pruned > 0) {
const thirtyDaysAgo = Date.now() - 30 * 24 * 60 * 60 * 1000;
const toDelete = memories.filter(m =>
new Date(m.createdAt).getTime() < thirtyDaysAgo && m.importance < 3
);
for (const m of toDelete) {
await intelligenceClient.memory.delete(m.id);
}
if (toDelete.length > 0) {
loadMemories();
}
};

View File

@@ -29,14 +29,13 @@ import {
Settings,
} from 'lucide-react';
import {
ReflectionEngine,
intelligenceClient,
type ReflectionResult,
type IdentityChangeProposal,
type ReflectionConfig,
type PatternObservation,
type ImprovementSuggestion,
type ReflectionConfig,
DEFAULT_REFLECTION_CONFIG,
} from '../lib/reflection-engine';
import { getAgentIdentityManager, type IdentityChangeProposal } from '../lib/agent-identity';
} from '../lib/intelligence-client';
// === Types ===
@@ -231,8 +230,8 @@ function ProposalCard({
</h5>
<pre className="text-xs text-gray-600 dark:text-gray-300 bg-white dark:bg-gray-800 p-2 rounded overflow-x-auto whitespace-pre-wrap">
{proposal.currentContent.slice(0, 500)}
{proposal.currentContent.length > 500 && '...'}
{proposal.current_content.slice(0, 500)}
{proposal.current_content.length > 500 && '...'}
</pre>
</div>
<div>
@@ -240,8 +239,8 @@ function ProposalCard({
</h5>
<pre className="text-xs text-gray-600 dark:text-gray-300 bg-white dark:bg-gray-800 p-2 rounded overflow-x-auto whitespace-pre-wrap">
{proposal.suggestedContent.slice(0, 500)}
{proposal.suggestedContent.length > 500 && '...'}
{proposal.suggested_content.slice(0, 500)}
{proposal.suggested_content.length > 500 && '...'}
</pre>
</div>
</div>
@@ -309,9 +308,9 @@ function ReflectionEntry({
<span className="text-gray-500 dark:text-gray-400">
{result.improvements.length}
</span>
{result.identityProposals.length > 0 && (
{result.identity_proposals.length > 0 && (
<span className="text-yellow-600 dark:text-yellow-400">
{result.identityProposals.length}
{result.identity_proposals.length}
</span>
)}
</div>
@@ -362,8 +361,8 @@ function ReflectionEntry({
{/* Meta */}
<div className="flex items-center gap-4 text-xs text-gray-500 dark:text-gray-400 pt-2 border-t border-gray-200 dark:border-gray-700">
<span>: {result.newMemories}</span>
<span>: {result.identityProposals.length}</span>
<span>: {result.new_memories}</span>
<span>: {result.identity_proposals.length}</span>
</div>
</div>
</motion.div>
@@ -381,56 +380,63 @@ export function ReflectionLog({
onProposalApprove,
onProposalReject,
}: ReflectionLogProps) {
const [engine] = useState(() => new ReflectionEngine());
const [history, setHistory] = useState<ReflectionResult[]>([]);
const [pendingProposals, setPendingProposals] = useState<IdentityChangeProposal[]>([]);
const [expandedId, setExpandedId] = useState<string | null>(null);
const [isReflecting, setIsReflecting] = useState(false);
const [config, setConfig] = useState<ReflectionConfig>(DEFAULT_REFLECTION_CONFIG);
const [showConfig, setShowConfig] = useState(false);
const [config, setConfig] = useState<ReflectionConfig>({
trigger_after_conversations: 5,
allow_soul_modification: true,
require_approval: true,
});
// Load history and pending proposals
useEffect(() => {
const loadedHistory = engine.getHistory();
setHistory([...loadedHistory].reverse()); // Most recent first
const loadData = async () => {
try {
const loadedHistory = await intelligenceClient.reflection.getHistory();
setHistory([...loadedHistory].reverse()); // Most recent first
const identityManager = getAgentIdentityManager();
const proposals = identityManager.getPendingProposals(agentId);
setPendingProposals(proposals);
}, [engine, agentId]);
const proposals = await intelligenceClient.identity.getPendingProposals(agentId);
setPendingProposals(proposals);
} catch (error) {
console.error('[ReflectionLog] Failed to load data:', error);
}
};
loadData();
}, [agentId]);
const handleReflect = useCallback(async () => {
setIsReflecting(true);
try {
const result = await engine.reflect(agentId);
const result = await intelligenceClient.reflection.reflect(agentId, []);
setHistory((prev) => [result, ...prev]);
// Update pending proposals
if (result.identityProposals.length > 0) {
setPendingProposals((prev) => [...prev, ...result.identityProposals]);
if (result.identity_proposals.length > 0) {
setPendingProposals((prev) => [...prev, ...result.identity_proposals]);
}
} catch (error) {
console.error('[ReflectionLog] Reflection failed:', error);
} finally {
setIsReflecting(false);
}
}, [engine, agentId]);
}, [agentId]);
const handleApproveProposal = useCallback(
(proposal: IdentityChangeProposal) => {
const identityManager = getAgentIdentityManager();
identityManager.approveProposal(proposal.id);
setPendingProposals((prev) => prev.filter((p) => p.id !== proposal.id));
async (proposal: IdentityChangeProposal) => {
await intelligenceClient.identity.approveProposal(proposal.id);
setPendingProposals((prev: IdentityChangeProposal[]) => prev.filter((p: IdentityChangeProposal) => p.id !== proposal.id));
onProposalApprove?.(proposal);
},
[onProposalApprove]
);
const handleRejectProposal = useCallback(
(proposal: IdentityChangeProposal) => {
const identityManager = getAgentIdentityManager();
identityManager.rejectProposal(proposal.id);
setPendingProposals((prev) => prev.filter((p) => p.id !== proposal.id));
async (proposal: IdentityChangeProposal) => {
await intelligenceClient.identity.rejectProposal(proposal.id);
setPendingProposals((prev: IdentityChangeProposal[]) => prev.filter((p: IdentityChangeProposal) => p.id !== proposal.id));
onProposalReject?.(proposal);
},
[onProposalReject]
@@ -438,9 +444,9 @@ export function ReflectionLog({
const stats = useMemo(() => {
const totalReflections = history.length;
const totalPatterns = history.reduce((sum, r) => sum + r.patterns.length, 0);
const totalImprovements = history.reduce((sum, r) => sum + r.improvements.length, 0);
const totalIdentityChanges = history.reduce((sum, r) => sum + r.identityProposals.length, 0);
const totalPatterns = history.reduce((sum: number, r: ReflectionResult) => sum + r.patterns.length, 0);
const totalImprovements = history.reduce((sum: number, r: ReflectionResult) => sum + r.improvements.length, 0);
const totalIdentityChanges = history.reduce((sum: number, r: ReflectionResult) => sum + r.identity_proposals.length, 0);
return { totalReflections, totalPatterns, totalImprovements, totalIdentityChanges };
}, [history]);
@@ -507,9 +513,9 @@ export function ReflectionLog({
type="number"
min="1"
max="20"
value={config.triggerAfterConversations}
value={config.trigger_after_conversations || 5}
onChange={(e) =>
setConfig((prev) => ({ ...prev, triggerAfterConversations: parseInt(e.target.value) || 5 }))
setConfig((prev) => ({ ...prev, trigger_after_conversations: parseInt(e.target.value) || 5 }))
}
className="w-16 px-2 py-1 text-sm border border-gray-300 dark:border-gray-600 rounded bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100"
/>
@@ -517,13 +523,13 @@ export function ReflectionLog({
<div className="flex items-center justify-between">
<span className="text-sm text-gray-700 dark:text-gray-300"> SOUL.md</span>
<button
onClick={() => setConfig((prev) => ({ ...prev, allowSoulModification: !prev.allowSoulModification }))}
onClick={() => setConfig((prev) => ({ ...prev, allow_soul_modification: !prev.allow_soul_modification }))}
className={`relative w-9 h-5 rounded-full transition-colors ${
config.allowSoulModification ? 'bg-purple-500' : 'bg-gray-300 dark:bg-gray-600'
config.allow_soul_modification ? 'bg-purple-500' : 'bg-gray-300 dark:bg-gray-600'
}`}
>
<motion.div
animate={{ x: config.allowSoulModification ? 18 : 0 }}
animate={{ x: config.allow_soul_modification ? 18 : 0 }}
className="absolute top-0.5 left-0.5 w-4 h-4 bg-white rounded-full shadow"
/>
</button>
@@ -531,13 +537,13 @@ export function ReflectionLog({
<div className="flex items-center justify-between">
<span className="text-sm text-gray-700 dark:text-gray-300"></span>
<button
onClick={() => setConfig((prev) => ({ ...prev, requireApproval: !prev.requireApproval }))}
onClick={() => setConfig((prev) => ({ ...prev, require_approval: !prev.require_approval }))}
className={`relative w-9 h-5 rounded-full transition-colors ${
config.requireApproval ? 'bg-purple-500' : 'bg-gray-300 dark:bg-gray-600'
config.require_approval ? 'bg-purple-500' : 'bg-gray-300 dark:bg-gray-600'
}`}
>
<motion.div
animate={{ x: config.requireApproval ? 18 : 0 }}
animate={{ x: config.require_approval ? 18 : 0 }}
className="absolute top-0.5 left-0.5 w-4 h-4 bg-white rounded-full shadow"
/>
</button>

View File

@@ -11,6 +11,7 @@
*/
import { useState, useEffect, useMemo, useCallback } from 'react';
import { motion, AnimatePresence } from 'framer-motion';
import {
Search,
Package,
@@ -23,7 +24,7 @@ import {
ChevronRight,
RefreshCw,
} from 'lucide-react';
import { useConfigStore, type SkillInfo } from '../store/configStore';
import { useConfigStore } from '../store/configStore';
import {
adaptSkillsCatalog,
type SkillDisplay,

View File

@@ -1,220 +0,0 @@
/**
* LLM Integration Tests - Phase 2 Engine Upgrades
*
* Tests for LLM-powered features:
* - ReflectionEngine with LLM semantic analysis
* - ContextCompactor with LLM summarization
* - MemoryExtractor with LLM importance scoring
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
ReflectionEngine,
} from '../reflection-engine';
import {
ContextCompactor,
} from '../context-compactor';
import {
MemoryExtractor,
} from '../memory-extractor';
import {
type LLMProvider,
} from '../llm-service';
// === Mock LLM Adapter ===
// Shared mock adapter; individual tests toggle `isAvailable` to simulate the
// LLM being reachable or not. Call state is reset via vi.clearAllMocks() in
// each suite's beforeEach.
const mockLLMAdapter = {
  complete: vi.fn(),
  isAvailable: vi.fn(() => true),
  getProvider: vi.fn(() => 'mock' as LLMProvider),
};
// NOTE: Vitest hoists vi.mock() calls above imports; referencing the local
// `mockLLMAdapter` from the factory works only because Vitest permits factory
// access to variables whose names start with "mock" — keep that prefix.
vi.mock('../llm-service', () => ({
  getLLMAdapter: vi.fn(() => mockLLMAdapter),
  resetLLMAdapter: vi.fn(),
  // Canned reflection payload: one pattern + one improvement, no identity
  // proposals (JSON string, as the real llmReflect returns).
  llmReflect: vi.fn(async () => JSON.stringify({
    patterns: [
      {
        observation: '用户经常询问代码优化问题',
        frequency: 5,
        sentiment: 'positive',
        evidence: ['多次讨论性能优化'],
      },
    ],
    improvements: [
      {
        area: '代码解释',
        suggestion: '可以提供更详细的代码注释',
        priority: 'medium',
      },
    ],
    identityProposals: [],
  })),
  // Canned compaction summary (plain string).
  llmCompact: vi.fn(async () => '[LLM摘要]\n讨论主题: 代码优化\n关键决策: 使用缓存策略\n待办事项: 完成性能测试'),
  // Canned extraction output: two candidate memories (JSON string).
  llmExtract: vi.fn(async () => JSON.stringify([
    { content: '用户偏好简洁的回答', type: 'preference', importance: 7, tags: ['style'] },
    { content: '项目使用 TypeScript', type: 'fact', importance: 6, tags: ['tech'] },
  ])),
}));
// === ReflectionEngine Tests ===
describe('ReflectionEngine with LLM', () => {
  let engine: ReflectionEngine;

  beforeEach(() => {
    vi.clearAllMocks();
    engine = new ReflectionEngine({ useLLM: true });
  });

  afterEach(() => {
    engine?.updateConfig({ useLLM: false });
  });

  it('should initialize with LLM config', () => {
    expect(engine.getConfig().useLLM).toBe(true);
  });

  it('should have llmFallbackToRules enabled by default', () => {
    expect(engine.getConfig().llmFallbackToRules).toBe(true);
  });

  it('should track conversations for reflection trigger', () => {
    engine.recordConversation();
    engine.recordConversation();
    expect(engine.shouldReflect()).toBe(false);
    // Bring the total to the default trigger threshold of 5 conversations.
    Array.from({ length: 4 }).forEach(() => engine.recordConversation());
    expect(engine.shouldReflect()).toBe(true);
  });

  it('should use LLM when enabled and available', async () => {
    mockLLMAdapter.isAvailable.mockReturnValue(true);
    const result = await engine.reflect('test-agent', { forceLLM: true });
    expect(result.patterns.length).toBeGreaterThan(0);
    expect(result.timestamp).toBeDefined();
  });

  it('should fallback to rules when LLM fails', async () => {
    mockLLMAdapter.isAvailable.mockReturnValue(false);
    const result = await engine.reflect('test-agent');
    // The rule-based fallback must still yield a well-formed result.
    expect(result).toBeDefined();
    expect(result.timestamp).toBeDefined();
  });
});
// === ContextCompactor Tests ===
describe('ContextCompactor with LLM', () => {
  let compactor: ContextCompactor;

  beforeEach(() => {
    vi.clearAllMocks();
    compactor = new ContextCompactor({ useLLM: true });
  });

  it('should initialize with LLM config', () => {
    expect(compactor.getConfig().useLLM).toBe(true);
  });

  it('should have llmFallbackToRules enabled by default', () => {
    expect(compactor.getConfig().llmFallbackToRules).toBe(true);
  });

  it('should check threshold correctly', () => {
    // Two medium-size messages stay well under the soft threshold.
    const messages = [
      { role: 'user', content: 'Hello'.repeat(1000) },
      { role: 'assistant', content: 'Response'.repeat(1000) },
    ];
    const check = compactor.checkThreshold(messages);
    expect(check.shouldCompact).toBe(false);
    expect(check.urgency).toBe('none');
  });

  it('should trigger soft threshold', () => {
    // 20 messages x 600 CJK chars at ~1.5 tokens/char ~= 18000 tokens:
    // above the 15000 soft threshold but below the 20000 hard threshold.
    const messages = Array.from({ length: 20 }, (_, i) => ({
      role: i % 2 === 0 ? 'user' : 'assistant',
      content: '测试内容'.repeat(150),
    }));
    const check = compactor.checkThreshold(messages);
    expect(check.shouldCompact).toBe(true);
    expect(check.urgency).toBe('soft');
  });
});
// === MemoryExtractor Tests ===
describe('MemoryExtractor with LLM', () => {
  let extractor: MemoryExtractor;
  beforeEach(() => {
    vi.clearAllMocks();
    extractor = new MemoryExtractor({ useLLM: true });
  });
  it('should initialize with LLM config', () => {
    // MemoryExtractor doesn't expose config directly, but we can test behavior
    expect(extractor).toBeDefined();
  });
  it('should skip extraction with too few messages', async () => {
    // Two messages is below the extractor's minimum conversation length,
    // so nothing should be saved.
    const messages = [
      { role: 'user', content: 'Hi' },
      { role: 'assistant', content: 'Hello!' },
    ];
    const result = await extractor.extractFromConversation(messages, 'test-agent');
    expect(result.saved).toBe(0);
  });
  it('should extract with enough messages', async () => {
    const messages = [
      { role: 'user', content: '我喜欢简洁的回答' },
      { role: 'assistant', content: '好的,我会简洁一些' },
      { role: 'user', content: '我的项目使用 TypeScript' },
      { role: 'assistant', content: 'TypeScript 是个好选择' },
      { role: 'user', content: '继续' },
      { role: 'assistant', content: '继续...' },
    ];
    const result = await extractor.extractFromConversation(messages, 'test-agent');
    // NOTE(review): `toBeGreaterThanOrEqual(0)` can never fail for a length —
    // this only asserts the call completes. The mocked llmExtract returns two
    // items; consider asserting on them if the extractor is meant to save
    // them — TODO confirm against the extractor's filtering rules.
    expect(result.items.length).toBeGreaterThanOrEqual(0);
  });
});
// === Integration Test ===
describe('LLM Integration Full Flow', () => {
  it('should work end-to-end with all engines', async () => {
    // Set up all three engines with LLM enabled and rule fallback on.
    const engine = new ReflectionEngine({ useLLM: true, llmFallbackToRules: true });
    const compactor = new ContextCompactor({ useLLM: true, llmFallbackToRules: true });
    const extractor = new MemoryExtractor({ useLLM: true, llmFallbackToRules: true });
    // Verify they all have LLM support
    expect(engine.getConfig().useLLM).toBe(true);
    expect(compactor.getConfig().useLLM).toBe(true);
    // BUG FIX: the original wrote `await expect(p).resolves;` — `.resolves`
    // without a trailing matcher asserts nothing, so rejected promises went
    // unnoticed. A matcher is required for the promise to be awaited/checked.
    await expect(engine.reflect('test-agent')).resolves.toBeDefined();
    await expect(compactor.compact([], 'test-agent')).resolves.toBeDefined();
    await expect(extractor.extractFromConversation([], 'test-agent')).resolves.toBeDefined();
  });
});

View File

@@ -1,350 +0,0 @@
/**
* Agent Identity Manager - Per-agent dynamic identity files
*
* Manages SOUL.md, AGENTS.md, USER.md per agent with:
* - Per-agent isolated identity directories
* - USER.md auto-update by agent (stores learned preferences)
* - SOUL.md/AGENTS.md change proposals (require user approval)
* - Snapshot history for rollback
*
* Phase 1: localStorage-based storage (same as agent-memory.ts)
* Upgrade path: Tauri filesystem API for real .md files
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.3
*/
import { canAutoExecute } from './autonomy-manager';
// === Types ===

/** The editable identity documents kept per agent (see file header mapping
 *  to SOUL.md / AGENTS.md / USER.md). */
export interface IdentityFiles {
  soul: string;          // personality / core traits (SOUL.md)
  instructions: string;  // operating instructions (AGENTS.md)
  userProfile: string;   // learned user preferences (USER.md, auto-updated)
  heartbeat?: string;    // optional heartbeat content
}

/** A queued change to an approval-gated identity file. */
export interface IdentityChangeProposal {
  id: string;
  agentId: string;
  file: 'soul' | 'instructions'; // only these two files require approval
  reason: string;
  currentContent: string;   // content at proposal time
  suggestedContent: string; // content to apply on approval
  status: 'pending' | 'approved' | 'rejected';
  createdAt: string; // ISO timestamp
}

/** Point-in-time copy of an agent's identity files, used for rollback. */
export interface IdentitySnapshot {
  id: string;
  agentId: string;
  files: IdentityFiles;
  timestamp: string; // ISO timestamp
  reason: string;    // why the snapshot was taken (approval, manual edit, ...)
}

// === Storage Keys ===
// localStorage keys (Phase 1 storage backend; see file header).
const IDENTITY_STORAGE_KEY = 'zclaw-agent-identities';
const PROPOSALS_STORAGE_KEY = 'zclaw-identity-proposals';
const SNAPSHOTS_STORAGE_KEY = 'zclaw-identity-snapshots';

// === Default Identity Content ===
// Seed content for agents seen for the first time. These are runtime strings
// consumed by getIdentity()/buildSystemPrompt() — do not reformat.
const DEFAULT_SOUL = `# ZCLAW 人格
你是 ZCLAW小龙虾一个基于 OpenClaw 定制的中文 AI 助手。
## 核心特质
- **高效执行**: 你不只是出主意,你会真正动手完成任务
- **中文优先**: 默认使用中文交流,必要时切换英文
- **专业可靠**: 对技术问题给出精确答案,不确定时坦诚说明
- **持续成长**: 你会记住与用户的交互,不断改进自己的服务方式
## 语气
简洁、专业、友好。避免过度客套,直接给出有用信息。`;
const DEFAULT_INSTRUCTIONS = `# Agent 指令
## 操作规范
1. 执行文件操作前,先确认目标路径
2. 执行 Shell 命令前,评估安全风险
3. 长时间任务需定期汇报进度
4. 优先使用中文回复
## 记忆管理
- 重要的用户偏好自动记录
- 项目上下文保存到工作区
- 对话结束时总结关键信息`;
const DEFAULT_USER_PROFILE = `# 用户画像
_尚未收集到用户偏好信息。随着交互积累此文件将自动更新。_`;
// === AgentIdentityManager Implementation ===
export class AgentIdentityManager {
private identities: Map<string, IdentityFiles> = new Map();
private proposals: IdentityChangeProposal[] = [];
private snapshots: IdentitySnapshot[] = [];
constructor() {
this.load();
}
// === Persistence ===
private load(): void {
try {
const rawIdentities = localStorage.getItem(IDENTITY_STORAGE_KEY);
if (rawIdentities) {
const parsed = JSON.parse(rawIdentities) as Record<string, IdentityFiles>;
this.identities = new Map(Object.entries(parsed));
}
const rawProposals = localStorage.getItem(PROPOSALS_STORAGE_KEY);
if (rawProposals) {
this.proposals = JSON.parse(rawProposals);
}
const rawSnapshots = localStorage.getItem(SNAPSHOTS_STORAGE_KEY);
if (rawSnapshots) {
this.snapshots = JSON.parse(rawSnapshots);
}
} catch (err) {
console.warn('[AgentIdentity] Failed to load:', err);
}
}
private persist(): void {
try {
const obj: Record<string, IdentityFiles> = {};
for (const [key, value] of this.identities) {
obj[key] = value;
}
localStorage.setItem(IDENTITY_STORAGE_KEY, JSON.stringify(obj));
localStorage.setItem(PROPOSALS_STORAGE_KEY, JSON.stringify(this.proposals));
localStorage.setItem(SNAPSHOTS_STORAGE_KEY, JSON.stringify(this.snapshots.slice(-50))); // Keep last 50 snapshots
} catch (err) {
console.warn('[AgentIdentity] Failed to persist:', err);
}
}
// === Read Identity ===
getIdentity(agentId: string): IdentityFiles {
const existing = this.identities.get(agentId);
if (existing) return { ...existing };
// Initialize with defaults
const defaults: IdentityFiles = {
soul: DEFAULT_SOUL,
instructions: DEFAULT_INSTRUCTIONS,
userProfile: DEFAULT_USER_PROFILE,
};
this.identities.set(agentId, defaults);
this.persist();
return { ...defaults };
}
getFile(agentId: string, file: keyof IdentityFiles): string {
const identity = this.getIdentity(agentId);
return identity[file] || '';
}
// === Build System Prompt ===
buildSystemPrompt(agentId: string, memoryContext?: string): string {
const identity = this.getIdentity(agentId);
const sections: string[] = [];
if (identity.soul) sections.push(identity.soul);
if (identity.instructions) sections.push(identity.instructions);
if (identity.userProfile && identity.userProfile !== DEFAULT_USER_PROFILE) {
sections.push(`## 用户画像\n${identity.userProfile}`);
}
if (memoryContext) {
sections.push(memoryContext);
}
return sections.join('\n\n');
}
// === Update USER.md (auto, no approval needed) ===
updateUserProfile(agentId: string, newContent: string): void {
const identity = this.getIdentity(agentId);
const oldContent = identity.userProfile;
// Create snapshot before update
this.createSnapshot(agentId, 'Auto-update USER.md');
identity.userProfile = newContent;
this.identities.set(agentId, identity);
this.persist();
console.log(`[AgentIdentity] Updated USER.md for ${agentId} (${oldContent.length}${newContent.length} chars)`);
}
appendToUserProfile(agentId: string, addition: string): void {
const identity = this.getIdentity(agentId);
const updated = identity.userProfile.trimEnd() + '\n\n' + addition;
this.updateUserProfile(agentId, updated);
}
// === Update SOUL.md / AGENTS.md (requires approval) ===
proposeChange(
agentId: string,
file: 'soul' | 'instructions',
suggestedContent: string,
reason: string,
options?: { skipAutonomyCheck?: boolean }
): IdentityChangeProposal | null {
// Autonomy check - identity updates are high-risk, always require approval
if (!options?.skipAutonomyCheck) {
const { decision } = canAutoExecute('identity_update', 8);
console.log(`[AgentIdentity] Autonomy check for identity update: ${decision.reason}`);
// Identity updates always require approval regardless of autonomy level
// But we log the decision for audit purposes
}
const identity = this.getIdentity(agentId);
const currentContent = file === 'soul' ? identity.soul : identity.instructions;
const proposal: IdentityChangeProposal = {
id: `prop_${Date.now()}_${Math.random().toString(36).slice(2, 6)}`,
agentId,
file,
reason,
currentContent,
suggestedContent,
status: 'pending',
createdAt: new Date().toISOString(),
};
this.proposals.push(proposal);
this.persist();
return proposal;
}
approveProposal(proposalId: string): boolean {
const proposal = this.proposals.find(p => p.id === proposalId);
if (!proposal || proposal.status !== 'pending') return false;
const identity = this.getIdentity(proposal.agentId);
this.createSnapshot(proposal.agentId, `Approved proposal: ${proposal.reason}`);
if (proposal.file === 'soul') {
identity.soul = proposal.suggestedContent;
} else {
identity.instructions = proposal.suggestedContent;
}
this.identities.set(proposal.agentId, identity);
proposal.status = 'approved';
this.persist();
return true;
}
rejectProposal(proposalId: string): boolean {
const proposal = this.proposals.find(p => p.id === proposalId);
if (!proposal || proposal.status !== 'pending') return false;
proposal.status = 'rejected';
this.persist();
return true;
}
getPendingProposals(agentId?: string): IdentityChangeProposal[] {
return this.proposals.filter(p =>
p.status === 'pending' && (!agentId || p.agentId === agentId)
);
}
// === Direct Edit (user explicitly edits in UI) ===
updateFile(agentId: string, file: keyof IdentityFiles, content: string): void {
const identity = this.getIdentity(agentId);
this.createSnapshot(agentId, `Manual edit: ${file}`);
identity[file] = content;
this.identities.set(agentId, identity);
this.persist();
}
// === Snapshots ===
private snapshotCounter = 0;
private createSnapshot(agentId: string, reason: string): void {
const identity = this.getIdentity(agentId);
this.snapshotCounter++;
this.snapshots.push({
id: `snap_${Date.now()}_${this.snapshotCounter}_${Math.random().toString(36).slice(2, 6)}`,
agentId,
files: { ...identity },
timestamp: new Date().toISOString(),
reason,
});
}
getSnapshots(agentId: string, limit: number = 10): IdentitySnapshot[] {
// Return newest first; use array index as tiebreaker for same-millisecond snapshots
const filtered = this.snapshots
.map((s, idx) => ({ s, idx }))
.filter(({ s }) => s.agentId === agentId)
.sort((a, b) => {
const timeDiff = new Date(b.s.timestamp).getTime() - new Date(a.s.timestamp).getTime();
return timeDiff !== 0 ? timeDiff : b.idx - a.idx;
})
.map(({ s }) => s);
return filtered.slice(0, limit);
}
/**
 * Roll an agent's identity back to a previous snapshot.
 * The pre-rollback state is itself snapshotted, so a rollback can be undone.
 * @returns false when no matching snapshot exists for this agent.
 */
restoreSnapshot(agentId: string, snapshotId: string): boolean {
  const snapshot = this.snapshots.find(
    (s) => s.id === snapshotId && s.agentId === agentId
  );
  if (!snapshot) {
    return false;
  }
  this.createSnapshot(agentId, `Rollback to ${snapshot.timestamp}`);
  this.identities.set(agentId, { ...snapshot.files });
  this.persist();
  return true;
}
// === List agents ===

/** Ids of every agent that currently has a stored identity. */
listAgents(): string[] {
  return Array.from(this.identities.keys());
}
// === Delete agent identity ===

/** Remove an agent's identity plus every proposal and snapshot tied to it. */
deleteAgent(agentId: string): void {
  this.identities.delete(agentId);
  const belongsToOther = (ownerId: string): boolean => ownerId !== agentId;
  this.proposals = this.proposals.filter((p) => belongsToOther(p.agentId));
  this.snapshots = this.snapshots.filter((s) => belongsToOther(s.agentId));
  this.persist();
}
}
// === Singleton ===
let _instance: AgentIdentityManager | null = null;

/** Lazily construct and return the process-wide identity manager. */
export function getAgentIdentityManager(): AgentIdentityManager {
  _instance ??= new AgentIdentityManager();
  return _instance;
}
/** Drop the cached singleton so the next getter call builds a fresh one (test helper). */
export function resetAgentIdentityManager(): void {
  _instance = null;
}

View File

@@ -1,486 +0,0 @@
/**
* Agent Memory System - Persistent cross-session memory for ZCLAW agents
*
* Phase 1 implementation: zustand persist (localStorage) with keyword search.
* Optimized with inverted index for sub-20ms retrieval on 1000+ memories.
* Designed for easy upgrade to SQLite + FTS5 + vector search in Phase 2.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.1
*/
import { MemoryIndex, getMemoryIndex, tokenize } from './memory-index';
// === Types ===

/** Category of a stored memory entry. */
export type MemoryType = 'fact' | 'preference' | 'lesson' | 'context' | 'task';

/** Provenance of a memory: automatic extraction, the user, or a reflection pass. */
export type MemorySource = 'auto' | 'user' | 'reflection' | 'llm-reflection';

/** A single persisted memory record belonging to one agent. */
export interface MemoryEntry {
  id: string;
  agentId: string;
  content: string;
  type: MemoryType;
  importance: number; // 0-10
  source: MemorySource;
  tags: string[];
  createdAt: string; // ISO timestamp
  lastAccessedAt: string; // ISO timestamp; bumped on (non-cached) search hits
  accessCount: number; // how many times search() returned this entry
  conversationId?: string; // optional link back to the originating conversation
}

/** Filters accepted by MemoryManager.search(); extra keys are passed to the index. */
export interface MemorySearchOptions {
  agentId?: string;
  type?: MemoryType;
  types?: MemoryType[];
  tags?: string[];
  limit?: number; // max results, default 10
  minImportance?: number; // inclusive lower bound on entry.importance
  [key: string]: unknown;
}

/** Aggregate counts returned by MemoryManager.stats(). */
export interface MemoryStats {
  totalEntries: number;
  byType: Record<string, number>;
  byAgent: Record<string, number>;
  oldestEntry: string | null; // ISO timestamp of the oldest entry, null when empty
  newestEntry: string | null; // ISO timestamp of the newest entry, null when empty
  indexStats?: {
    cacheHitRate: number;
    avgQueryTime: number;
  };
}
// === Memory ID Generator ===

/** Unique-ish memory id: millisecond timestamp plus a short random base-36 suffix. */
function generateMemoryId(): string {
  const suffix = Math.random().toString(36).slice(2, 8);
  return `mem_${Date.now()}_${suffix}`;
}
// === Keyword Search Scoring ===

/**
 * Rank an entry against query tokens: a weighted blend of
 * relevance (60%), importance (25%) and recency (15%).
 * Returns 0 when no query token overlaps the entry's content or tags.
 */
function searchScore(
  entry: MemoryEntry,
  queryTokens: string[],
  cachedTokens?: string[]
): number {
  // Prefer tokens cached by the index; tokenize on demand otherwise.
  const contentTokens = cachedTokens ?? tokenize(entry.content);
  const haystack = contentTokens.concat(entry.tags.flatMap((t) => tokenize(t)));
  // Substring match in either direction counts as a hit.
  const matched = queryTokens.filter((qt) =>
    haystack.some((t) => t.includes(qt) || qt.includes(t))
  ).length;
  if (matched === 0) return 0;
  const relevance = matched / queryTokens.length;
  const importanceBoost = entry.importance / 10;
  // Recency decays linearly to zero over 30 days since last access.
  const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;
  const age = Date.now() - new Date(entry.lastAccessedAt).getTime();
  const recencyBoost = Math.max(0, 1 - age / THIRTY_DAYS_MS);
  return relevance * 0.6 + importanceBoost * 0.25 + recencyBoost * 0.15;
}
// === MemoryManager Implementation ===

// localStorage key under which the serialized entry array is persisted.
const STORAGE_KEY = 'zclaw-agent-memories';

/**
 * Keyword-searchable, localStorage-backed memory store.
 *
 * Keeps three structures in sync:
 * - `entries`: the flat array that is persisted as JSON;
 * - `entryIndex`: id -> array position, for O(1) lookup by id;
 * - `memoryIndex`: shared inverted index (built lazily on first use) that
 *   supplies search candidates, token caches, and a query-result cache.
 */
export class MemoryManager {
  private entries: MemoryEntry[] = [];
  private entryIndex: Map<string, number> = new Map(); // id -> array index for O(1) lookup
  private memoryIndex: MemoryIndex;
  private indexInitialized = false; // inverted index is rebuilt lazily on first use

  constructor() {
    this.load();
    this.memoryIndex = getMemoryIndex();
  }

  // === Persistence ===

  /** Load entries from localStorage; any failure resets to an empty store. */
  private load(): void {
    try {
      const raw = localStorage.getItem(STORAGE_KEY);
      if (raw) {
        this.entries = JSON.parse(raw);
        // Build entry index for O(1) lookups
        this.entries.forEach((entry, index) => {
          this.entryIndex.set(entry.id, index);
        });
      }
    } catch (err) {
      console.warn('[MemoryManager] Failed to load memories:', err);
      this.entries = [];
    }
  }

  /** Serialize the whole entry array; best-effort (quota errors are only logged). */
  private persist(): void {
    try {
      localStorage.setItem(STORAGE_KEY, JSON.stringify(this.entries));
    } catch (err) {
      console.warn('[MemoryManager] Failed to persist memories:', err);
    }
  }

  // === Index Management ===

  /** Build the inverted index from all entries the first time it is needed. */
  private ensureIndexInitialized(): void {
    if (!this.indexInitialized) {
      this.memoryIndex.rebuild(this.entries);
      this.indexInitialized = true;
    }
  }

  /** Add or refresh one entry in the inverted index. */
  private indexEntry(entry: MemoryEntry): void {
    this.ensureIndexInitialized();
    this.memoryIndex.index(entry);
  }

  /** Remove an entry from the inverted index (no-op if the index was never built). */
  private removeEntryFromIndex(id: string): void {
    if (this.indexInitialized) {
      this.memoryIndex.remove(id);
    }
  }

  // === Write ===

  /**
   * Save a new memory. If a same-agent, same-type entry with >= 0.8 content
   * similarity already exists, that entry is updated in place (content
   * replaced, importance max'ed, tags merged) instead of creating a duplicate.
   */
  async save(
    entry: Omit<MemoryEntry, 'id' | 'createdAt' | 'lastAccessedAt' | 'accessCount'>
  ): Promise<MemoryEntry> {
    const now = new Date().toISOString();
    const newEntry: MemoryEntry = {
      ...entry,
      id: generateMemoryId(),
      createdAt: now,
      lastAccessedAt: now,
      accessCount: 0,
    };
    // Deduplicate: check if very similar content already exists for this agent
    const duplicate = this.entries.find(e =>
      e.agentId === entry.agentId &&
      e.type === entry.type &&
      this.contentSimilarity(e.content, entry.content) >= 0.8
    );
    if (duplicate) {
      // Update existing entry instead of creating duplicate
      duplicate.content = entry.content;
      duplicate.importance = Math.max(duplicate.importance, entry.importance);
      duplicate.lastAccessedAt = now;
      duplicate.accessCount++;
      duplicate.tags = [...new Set([...duplicate.tags, ...entry.tags])];
      // Re-index the updated entry
      this.indexEntry(duplicate);
      this.persist();
      return duplicate;
    }
    this.entries.push(newEntry);
    this.entryIndex.set(newEntry.id, this.entries.length - 1);
    this.indexEntry(newEntry);
    this.persist();
    return newEntry;
  }

  // === Search (Optimized with Index) ===

  /**
   * Ranked keyword search. Flow: tokenize query -> consult query cache ->
   * gather candidates from the inverted index (fallback: linear scan) ->
   * score (relevance/importance/recency) -> cache and return top `limit`.
   * NOTE(review): cache hits return early and do NOT bump lastAccessedAt /
   * accessCount — only freshly-scored results update access metadata.
   */
  async search(query: string, options?: MemorySearchOptions): Promise<MemoryEntry[]> {
    const startTime = performance.now();
    const queryTokens = tokenize(query);
    if (queryTokens.length === 0) return [];
    this.ensureIndexInitialized();
    // Check query cache first
    const cached = this.memoryIndex.getCached(query, options);
    if (cached) {
      // Retrieve entries by IDs
      const results = cached
        .map(id => this.entries[this.entryIndex.get(id) ?? -1])
        .filter((e): e is MemoryEntry => e !== undefined);
      this.memoryIndex.recordQueryTime(performance.now() - startTime);
      return results;
    }
    // Get candidate IDs using index (O(1) lookups)
    const candidateIds = this.memoryIndex.getCandidates(options || {});
    // If no candidates from index, return empty
    if (candidateIds && candidateIds.size === 0) {
      this.memoryIndex.setCached(query, options, []);
      this.memoryIndex.recordQueryTime(performance.now() - startTime);
      return [];
    }
    // Build candidates list
    let candidates: MemoryEntry[];
    if (candidateIds) {
      // Use indexed candidates
      candidates = [];
      for (const id of candidateIds) {
        const idx = this.entryIndex.get(id);
        if (idx !== undefined) {
          const entry = this.entries[idx];
          // Additional filter for minImportance (not handled by index)
          if (options?.minImportance !== undefined && entry.importance < options.minImportance) {
            continue;
          }
          candidates.push(entry);
        }
      }
    } else {
      // Fallback: no index-based candidates, use all entries
      candidates = [...this.entries];
      // Apply minImportance filter
      if (options?.minImportance !== undefined) {
        candidates = candidates.filter(e => e.importance >= options.minImportance!);
      }
    }
    // Score and rank using cached tokens
    const scored = candidates
      .map(entry => {
        const cachedTokens = this.memoryIndex.getTokens(entry.id);
        return { entry, score: searchScore(entry, queryTokens, cachedTokens) };
      })
      .filter(item => item.score > 0)
      .sort((a, b) => b.score - a.score);
    const limit = options?.limit ?? 10;
    const results = scored.slice(0, limit).map(item => item.entry);
    // Cache the results
    this.memoryIndex.setCached(query, options, results.map(r => r.id));
    // Update access metadata
    const now = new Date().toISOString();
    for (const entry of results) {
      entry.lastAccessedAt = now;
      entry.accessCount++;
    }
    if (results.length > 0) {
      this.persist();
    }
    this.memoryIndex.recordQueryTime(performance.now() - startTime);
    return results;
  }

  // === Get All (for an agent) - Optimized with Index ===

  /** All memories for an agent, newest-created first, optionally filtered by type / capped. */
  async getAll(agentId: string, options?: { type?: MemoryType; limit?: number }): Promise<MemoryEntry[]> {
    this.ensureIndexInitialized();
    // Use index to get candidates for this agent
    const candidateIds = this.memoryIndex.getCandidates({
      agentId,
      type: options?.type,
    });
    let results: MemoryEntry[];
    if (candidateIds) {
      results = [];
      for (const id of candidateIds) {
        const idx = this.entryIndex.get(id);
        if (idx !== undefined) {
          results.push(this.entries[idx]);
        }
      }
    } else {
      // Fallback to linear scan
      results = this.entries.filter(e => e.agentId === agentId);
      if (options?.type) {
        results = results.filter(e => e.type === options.type);
      }
    }
    results.sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime());
    if (options?.limit) {
      results = results.slice(0, options.limit);
    }
    return results;
  }

  // === Get by ID (O(1) with index) ===

  /** Fetch one entry by id, or null when unknown. */
  async get(id: string): Promise<MemoryEntry | null> {
    const idx = this.entryIndex.get(id);
    return idx !== undefined ? this.entries[idx] ?? null : null;
  }

  // === Forget ===

  /** Delete one entry by id; silently does nothing for an unknown id. */
  async forget(id: string): Promise<void> {
    const idx = this.entryIndex.get(id);
    if (idx !== undefined) {
      this.removeEntryFromIndex(id);
      this.entries.splice(idx, 1);
      // Rebuild entry index since positions changed
      this.entryIndex.clear();
      this.entries.forEach((entry, i) => {
        this.entryIndex.set(entry.id, i);
      });
      this.persist();
    }
  }

  // === Prune (bulk cleanup) ===

  /**
   * Bulk cleanup. An entry is removed only when it is BOTH older than
   * maxAgeDays (by last access) AND below minImportance; entries belonging
   * to other agents are always kept when agentId is given.
   * @returns the number of entries removed.
   */
  async prune(options: {
    maxAgeDays?: number;
    minImportance?: number;
    agentId?: string;
  }): Promise<number> {
    const before = this.entries.length;
    const now = Date.now();
    const toRemove: string[] = [];
    this.entries = this.entries.filter(entry => {
      if (options.agentId && entry.agentId !== options.agentId) return true; // keep other agents
      const ageDays = (now - new Date(entry.lastAccessedAt).getTime()) / (24 * 60 * 60 * 1000);
      const tooOld = options.maxAgeDays !== undefined && ageDays > options.maxAgeDays;
      const tooLow = options.minImportance !== undefined && entry.importance < options.minImportance;
      // Only prune if both conditions met (old AND low importance)
      if (tooOld && tooLow) {
        toRemove.push(entry.id);
        return false;
      }
      return true;
    });
    // Remove from index
    for (const id of toRemove) {
      this.removeEntryFromIndex(id);
    }
    // Rebuild entry index
    this.entryIndex.clear();
    this.entries.forEach((entry, i) => {
      this.entryIndex.set(entry.id, i);
    });
    const pruned = before - this.entries.length;
    if (pruned > 0) {
      this.persist();
    }
    return pruned;
  }

  // === Export to Markdown ===

  /** Render one agent's memories as a Markdown document, grouped by type, newest first. */
  async exportToMarkdown(agentId: string): Promise<string> {
    const agentEntries = this.entries
      .filter(e => e.agentId === agentId)
      .sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime());
    if (agentEntries.length === 0) {
      return `# Agent Memory Export\n\n_No memories recorded._\n`;
    }
    const sections: string[] = [`# Agent Memory Export\n\n> Agent: ${agentId}\n> Exported: ${new Date().toISOString()}\n> Total entries: ${agentEntries.length}\n`];
    const byType = new Map<string, MemoryEntry[]>();
    for (const entry of agentEntries) {
      const list = byType.get(entry.type) || [];
      list.push(entry);
      byType.set(entry.type, list);
    }
    const typeLabels: Record<string, string> = {
      fact: '📋 事实',
      preference: '⭐ 偏好',
      lesson: '💡 经验教训',
      context: '📌 上下文',
      task: '📝 任务',
    };
    for (const [type, entries] of byType) {
      sections.push(`\n## ${typeLabels[type] || type}\n`);
      for (const entry of entries) {
        const tags = entry.tags.length > 0 ? ` [${entry.tags.join(', ')}]` : '';
        sections.push(`- **[重要性:${entry.importance}]** ${entry.content}${tags}`);
        sections.push(`  _创建: ${entry.createdAt} | 访问: ${entry.accessCount}次_\n`);
      }
    }
    return sections.join('\n');
  }

  // === Stats ===

  /** Aggregate counts for one agent, or for the whole store when agentId is omitted. */
  async stats(agentId?: string): Promise<MemoryStats> {
    const entries = agentId
      ? this.entries.filter(e => e.agentId === agentId)
      : this.entries;
    const byType: Record<string, number> = {};
    const byAgent: Record<string, number> = {};
    for (const entry of entries) {
      byType[entry.type] = (byType[entry.type] || 0) + 1;
      byAgent[entry.agentId] = (byAgent[entry.agentId] || 0) + 1;
    }
    const sorted = [...entries].sort((a, b) =>
      new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()
    );
    return {
      totalEntries: entries.length,
      byType,
      byAgent,
      oldestEntry: sorted[0]?.createdAt ?? null,
      newestEntry: sorted[sorted.length - 1]?.createdAt ?? null,
    };
  }

  // === Update importance ===

  /** Set an entry's importance, clamped to the valid 0-10 range. */
  async updateImportance(id: string, importance: number): Promise<void> {
    const entry = this.entries.find(e => e.id === id);
    if (entry) {
      entry.importance = Math.max(0, Math.min(10, importance));
      this.persist();
    }
  }

  // === Helpers ===

  /** Sørensen–Dice coefficient over the two strings' token sets (0..1). */
  private contentSimilarity(a: string, b: string): number {
    const tokensA = new Set(tokenize(a));
    const tokensB = new Set(tokenize(b));
    if (tokensA.size === 0 || tokensB.size === 0) return 0;
    let intersection = 0;
    for (const t of tokensA) {
      if (tokensB.has(t)) intersection++;
    }
    return (2 * intersection) / (tokensA.size + tokensB.size);
  }
}
// === Singleton ===
let _instance: MemoryManager | null = null;

/** Lazily construct and return the process-wide memory manager. */
export function getMemoryManager(): MemoryManager {
  _instance ??= new MemoryManager();
  return _instance;
}
/** Drop the cached singleton so the next getter call builds a fresh one (test helper). */
export function resetMemoryManager(): void {
  _instance = null;
}

View File

@@ -11,7 +11,7 @@
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.5.1
*/
import { getMemoryManager } from './agent-memory';
import { intelligenceClient } from './intelligence-client';
// === Types ===
@@ -199,10 +199,10 @@ export class AgentSwarm {
// Save task result as memory
try {
await getMemoryManager().save({
agentId: this.config.coordinator,
await intelligenceClient.memory.store({
agent_id: this.config.coordinator,
memory_type: 'lesson',
content: `协作任务完成: "${task.description}" — ${task.subtasks.length}个子任务, 模式: ${task.communicationStyle}, 结果: ${(task.finalResult || '').slice(0, 200)}`,
type: 'lesson',
importance: 6,
source: 'auto',
tags: ['swarm', task.communicationStyle],

View File

@@ -1,442 +0,0 @@
/**
* Context Compactor - Manages infinite-length conversations without losing key info
*
* Flow:
* 1. Monitor token count against soft threshold
* 2. When threshold approached: flush memories from old messages
* 3. Summarize old messages into a compact system message
* 4. Replace old messages with summary — user sees no interruption
*
* Phase 2 implementation: heuristic token estimation + rule-based summarization.
* Phase 4 upgrade: LLM-powered summarization + semantic importance scoring.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.3.1
*/
import { getMemoryExtractor, type ConversationMessage } from './memory-extractor';
import {
getLLMAdapter,
llmCompact,
type LLMServiceAdapter,
type LLMProvider,
} from './llm-service';
import { canAutoExecute } from './autonomy-manager';
// === Types ===

/** Tunables controlling when and how a conversation is compacted. */
export interface CompactionConfig {
  softThresholdTokens: number; // Trigger compaction when approaching this (default 15000)
  hardThresholdTokens: number; // Force compaction at this limit (default 20000)
  reserveTokens: number; // Reserve for new messages (default 4000)
  memoryFlushEnabled: boolean; // Extract memories before compacting (default true)
  keepRecentMessages: number; // Always keep this many recent messages (default 6)
  summaryMaxTokens: number; // Max tokens for the compaction summary (default 800)
  useLLM: boolean; // Use LLM for high-quality summarization (Phase 4)
  llmProvider?: LLMProvider; // Preferred LLM provider
  llmFallbackToRules: boolean; // Fall back to rules if LLM fails
}

/** Minimal message shape the compactor operates on. */
export interface CompactableMessage {
  role: string;
  content: string;
  id?: string;
  timestamp?: Date;
}

/** What a compaction run produced, including before/after token estimates. */
export interface CompactionResult {
  compactedMessages: CompactableMessage[]; // summary message + retained recent messages
  summary: string;
  originalCount: number;
  retainedCount: number;
  flushedMemories: number; // memories extracted from the discarded messages
  tokensBeforeCompaction: number;
  tokensAfterCompaction: number;
}

/** Result of a threshold probe: whether to compact and how urgently. */
export interface CompactionCheck {
  shouldCompact: boolean;
  currentTokens: number;
  threshold: number; // the threshold that was compared against
  urgency: 'none' | 'soft' | 'hard';
}
// === Default Config ===

// Defaults favor the cheap path: rule-based summarization (useLLM: false)
// with fallback enabled, memory flush on, and the last 6 messages retained.
export const DEFAULT_COMPACTION_CONFIG: CompactionConfig = {
  softThresholdTokens: 15000,
  hardThresholdTokens: 20000,
  reserveTokens: 4000,
  memoryFlushEnabled: true,
  keepRecentMessages: 6,
  summaryMaxTokens: 800,
  useLLM: false,
  llmFallbackToRules: true,
};
// === Token Estimation ===

/**
 * Heuristic token count for a string.
 * Weights per code point: CJK ideographs 1.5, CJK punctuation 1,
 * whitespace 0.25, everything else 0.3 (roughly 4 ASCII chars per token).
 * Intentionally conservative (overestimates) to avoid hitting real limits.
 */
export function estimateTokens(text: string): number {
  if (!text) return 0;
  let total = 0;
  for (const char of text) {
    const code = char.codePointAt(0) ?? 0;
    const isCjkIdeograph =
      (code >= 0x4e00 && code <= 0x9fff) || // CJK unified ideographs
      (code >= 0x3400 && code <= 0x4dbf); // CJK Extension A
    if (isCjkIdeograph) {
      total += 1.5;
    } else if (code >= 0x3000 && code <= 0x303f) {
      total += 1; // CJK punctuation
    } else if (char === ' ' || char === '\n' || char === '\t') {
      total += 0.25; // whitespace
    } else {
      total += 0.3; // ASCII and everything else
    }
  }
  return Math.ceil(total);
}
/** Token estimate for a whole message list, including per-message framing overhead. */
export function estimateMessagesTokens(messages: CompactableMessage[]): number {
  const FRAMING_OVERHEAD = 4; // role tag + separators per message
  return messages.reduce(
    (sum, msg) => sum + estimateTokens(msg.content) + FRAMING_OVERHEAD,
    0
  );
}
// === Context Compactor ===

/**
 * Compacts long conversations in place: old messages are summarized into one
 * system message (after optionally flushing memories out of them) while the
 * most recent messages are kept verbatim. Summarization is rule-based by
 * default, with an optional LLM path and rule-based fallback.
 */
export class ContextCompactor {
  private config: CompactionConfig;
  private llmAdapter: LLMServiceAdapter | null = null;

  constructor(config?: Partial<CompactionConfig>) {
    this.config = { ...DEFAULT_COMPACTION_CONFIG, ...config };
    // Initialize LLM adapter if configured
    if (this.config.useLLM) {
      try {
        this.llmAdapter = getLLMAdapter();
      } catch (error) {
        console.warn('[ContextCompactor] Failed to initialize LLM adapter:', error);
      }
    }
  }

  /**
   * Check if compaction is needed based on current message token count.
   * Hard threshold wins over soft; `threshold` reports the one compared against.
   */
  checkThreshold(messages: CompactableMessage[]): CompactionCheck {
    const currentTokens = estimateMessagesTokens(messages);
    if (currentTokens >= this.config.hardThresholdTokens) {
      return { shouldCompact: true, currentTokens, threshold: this.config.hardThresholdTokens, urgency: 'hard' };
    }
    if (currentTokens >= this.config.softThresholdTokens) {
      return { shouldCompact: true, currentTokens, threshold: this.config.softThresholdTokens, urgency: 'soft' };
    }
    return { shouldCompact: false, currentTokens, threshold: this.config.softThresholdTokens, urgency: 'none' };
  }

  /**
   * Execute memory flush: extract memories from messages about to be compacted.
   * Best-effort — returns 0 when disabled or when extraction fails.
   */
  async memoryFlush(
    messagesToCompact: CompactableMessage[],
    agentId: string,
    conversationId?: string
  ): Promise<number> {
    if (!this.config.memoryFlushEnabled) return 0;
    try {
      const extractor = getMemoryExtractor();
      const convMessages: ConversationMessage[] = messagesToCompact.map(m => ({
        role: m.role,
        content: m.content,
      }));
      const result = await extractor.extractFromConversation(convMessages, agentId, conversationId);
      return result.saved;
    } catch (err) {
      console.warn('[ContextCompactor] Memory flush failed:', err);
      return 0;
    }
  }

  /**
   * Execute compaction: summarize old messages, keep recent ones.
   *
   * Phase 2: Rule-based summarization (extract key points heuristically).
   * Phase 4: LLM-powered summarization for higher quality summaries.
   *
   * When the autonomy check denies the run, the input is returned unchanged
   * (empty summary, zero flushed memories) rather than throwing.
   */
  async compact(
    messages: CompactableMessage[],
    agentId: string,
    conversationId?: string,
    options?: { forceLLM?: boolean; skipAutonomyCheck?: boolean }
  ): Promise<CompactionResult> {
    // Autonomy check - verify if compaction is allowed
    if (!options?.skipAutonomyCheck) {
      const { canProceed, decision } = canAutoExecute('compaction_run', 5);
      if (!canProceed) {
        console.log(`[ContextCompactor] Autonomy check failed: ${decision.reason}`);
        // Return result without compaction
        return {
          compactedMessages: messages,
          summary: '',
          originalCount: messages.length,
          retainedCount: messages.length,
          flushedMemories: 0,
          tokensBeforeCompaction: estimateMessagesTokens(messages),
          tokensAfterCompaction: estimateMessagesTokens(messages),
        };
      }
      console.log(`[ContextCompactor] Autonomy check passed: ${decision.reason}`);
    }
    const tokensBeforeCompaction = estimateMessagesTokens(messages);
    const keepCount = Math.min(this.config.keepRecentMessages, messages.length);
    // Split: old messages to compact vs recent to keep
    const splitIndex = messages.length - keepCount;
    const oldMessages = messages.slice(0, splitIndex);
    const recentMessages = messages.slice(splitIndex);
    // Step 1: Memory flush from old messages
    let flushedMemories = 0;
    if (oldMessages.length > 0) {
      flushedMemories = await this.memoryFlush(oldMessages, agentId, conversationId);
    }
    // Step 2: Generate summary of old messages
    let summary: string;
    if ((this.config.useLLM || options?.forceLLM) && this.llmAdapter?.isAvailable()) {
      try {
        console.log('[ContextCompactor] Using LLM-powered summarization');
        summary = await this.llmGenerateSummary(oldMessages);
      } catch (error) {
        console.error('[ContextCompactor] LLM summarization failed:', error);
        if (!this.config.llmFallbackToRules) {
          throw error;
        }
        console.log('[ContextCompactor] Falling back to rule-based summarization');
        summary = this.generateSummary(oldMessages);
      }
    } else {
      summary = this.generateSummary(oldMessages);
    }
    // Step 3: Build compacted message list
    const summaryMessage: CompactableMessage = {
      role: 'system',
      content: summary,
      id: `compaction_${Date.now()}`,
      timestamp: new Date(),
    };
    const compactedMessages = [summaryMessage, ...recentMessages];
    const tokensAfterCompaction = estimateMessagesTokens(compactedMessages);
    console.log(
      `[ContextCompactor] Compacted: ${messages.length}${compactedMessages.length} messages, ` +
      `${tokensBeforeCompaction}${tokensAfterCompaction} tokens, ` +
      `${flushedMemories} memories flushed`
    );
    return {
      compactedMessages,
      summary,
      originalCount: messages.length,
      retainedCount: compactedMessages.length,
      flushedMemories,
      tokensBeforeCompaction,
      tokensAfterCompaction,
    };
  }

  /**
   * LLM-powered summary generation for high-quality compaction.
   * Only user/assistant messages are summarized; output is capped at
   * summaryMaxTokens (truncated with a marker when exceeded).
   */
  private async llmGenerateSummary(messages: CompactableMessage[]): Promise<string> {
    if (messages.length === 0) return '[对话开始]';
    // Build conversation text for LLM
    const conversationText = messages
      .filter(m => m.role === 'user' || m.role === 'assistant')
      .map(m => `[${m.role === 'user' ? '用户' : '助手'}]: ${m.content}`)
      .join('\n\n');
    // Use llmCompact helper from llm-service
    const llmSummary = await llmCompact(conversationText, this.llmAdapter!);
    // Enforce token limit
    const summaryTokens = estimateTokens(llmSummary);
    if (summaryTokens > this.config.summaryMaxTokens) {
      return llmSummary.slice(0, this.config.summaryMaxTokens * 2) + '\n...(摘要已截断)';
    }
    return `[LLM摘要]\n${llmSummary}`;
  }

  /**
   * Phase 2: Rule-based summary generation.
   * Extracts key topics, decisions, and action items from old messages.
   */
  private generateSummary(messages: CompactableMessage[]): string {
    if (messages.length === 0) return '[对话开始]';
    const sections: string[] = ['[以下是之前对话的摘要]'];
    // Extract user questions/topics
    const userMessages = messages.filter(m => m.role === 'user');
    const assistantMessages = messages.filter(m => m.role === 'assistant');
    // Summarize topics discussed
    if (userMessages.length > 0) {
      const topics = userMessages
        .map(m => this.extractTopic(m.content))
        .filter(Boolean);
      if (topics.length > 0) {
        sections.push(`讨论主题: ${topics.join('; ')}`);
      }
    }
    // Extract key decisions/conclusions from assistant
    if (assistantMessages.length > 0) {
      const conclusions = assistantMessages
        .flatMap(m => this.extractConclusions(m.content))
        .slice(0, 5);
      if (conclusions.length > 0) {
        sections.push(`关键结论:\n${conclusions.map(c => `- ${c}`).join('\n')}`);
      }
    }
    // Extract any code/technical context
    const technicalContext = messages
      .filter(m => m.content.includes('```') || m.content.includes('function ') || m.content.includes('class '))
      .map(m => {
        const codeMatch = m.content.match(/```(\w+)?[\s\S]*?```/);
        return codeMatch ? `代码片段 (${codeMatch[1] || 'code'})` : null;
      })
      .filter(Boolean);
    if (technicalContext.length > 0) {
      sections.push(`技术上下文: ${technicalContext.join(', ')}`);
    }
    // Message count summary
    sections.push(`(已压缩 ${messages.length} 条消息,其中用户 ${userMessages.length} 条,助手 ${assistantMessages.length} 条)`);
    const summary = sections.join('\n');
    // Enforce token limit
    const summaryTokens = estimateTokens(summary);
    if (summaryTokens > this.config.summaryMaxTokens) {
      return summary.slice(0, this.config.summaryMaxTokens * 2) + '\n...(摘要已截断)';
    }
    return summary;
  }

  /**
   * Extract the main topic from a user message (first 50 chars or first sentence).
   */
  private extractTopic(content: string): string {
    const trimmed = content.trim();
    // First sentence or first 50 chars
    const sentenceEnd = trimmed.search(/[。!?\n]/);
    if (sentenceEnd > 0 && sentenceEnd <= 80) {
      return trimmed.slice(0, sentenceEnd + 1);
    }
    if (trimmed.length <= 50) return trimmed;
    return trimmed.slice(0, 50) + '...';
  }

  /**
   * Extract key conclusions/decisions from assistant messages.
   * Pattern-matched phrases of reasonable length; at most 3 returned.
   */
  private extractConclusions(content: string): string[] {
    const conclusions: string[] = [];
    const patterns = [
      /(?:总结|结论|关键点|建议|方案)[:]\s*(.{10,100})/g,
      /(?:\d+\.\s+)(.{10,80})/g,
      /(?:需要|应该|可以|建议)(.{5,60})/g,
    ];
    for (const pattern of patterns) {
      const matches = content.matchAll(pattern);
      for (const match of matches) {
        const text = match[1]?.trim() || match[0].trim();
        if (text.length > 10 && text.length < 100) {
          conclusions.push(text);
        }
      }
    }
    return conclusions.slice(0, 3);
  }

  /**
   * Build the LLM compaction prompt for Phase 3.
   * Returns the prompt to send to LLM for generating a high-quality summary.
   */
  buildCompactionPrompt(messages: CompactableMessage[]): string {
    const conversationText = messages
      .filter(m => m.role === 'user' || m.role === 'assistant')
      .map(m => `[${m.role === 'user' ? '用户' : '助手'}]: ${m.content}`)
      .join('\n\n');
    return `请将以下对话压缩为简洁摘要,保留:
1. 用户提出的所有问题和需求
2. 达成的关键决策和结论
3. 重要的技术上下文(文件路径、配置、代码片段名称)
4. 未完成的任务或待办事项
输出格式:
- 讨论主题: ...
- 关键决策: ...
- 技术上下文: ...
- 待办事项: ...
请用中文输出,控制在 300 字以内。
对话内容:
${conversationText}`;
  }

  // === Config Management ===

  /** Copy of the active configuration (mutating it does not affect the compactor). */
  getConfig(): CompactionConfig {
    return { ...this.config };
  }

  /** Merge partial overrides into the active configuration. */
  updateConfig(updates: Partial<CompactionConfig>): void {
    this.config = { ...this.config, ...updates };
  }
}
// === Singleton ===
let _instance: ContextCompactor | null = null;

/**
 * Lazily construct and return the process-wide compactor.
 * NOTE(review): `config` only takes effect on the FIRST call; later calls
 * silently ignore it. Callers wanting to reconfigure an existing instance
 * should use updateConfig() instead.
 */
export function getContextCompactor(config?: Partial<CompactionConfig>): ContextCompactor {
  if (!_instance) {
    _instance = new ContextCompactor(config);
  }
  return _instance;
}

/** Drop the cached singleton so the next getter call builds a fresh one (test helper). */
export function resetContextCompactor(): void {
  _instance = null;
}

View File

@@ -17,6 +17,8 @@ import {
export interface StoredError extends AppError {
dismissed: boolean;
reported: boolean;
stack?: string;
context?: Record<string, unknown>;
}
// === Error Store ===
@@ -303,6 +305,13 @@ export function dismissAllErrors(): void {
errorStore.dismissAll();
}
/**
* Dismiss all active errors (alias for dismissAllErrors).
*/
export function dismissAll(): void {
errorStore.dismissAll();
}
/**
* Mark an error as reported.
*/
@@ -317,6 +326,13 @@ export function getActiveErrors(): StoredError[] {
return errorStore.getUndismissedErrors();
}
/**
* Get all undismissed errors (alias for getActiveErrors).
*/
export function getUndismissedErrors(): StoredError[] {
return errorStore.getUndismissedErrors();
}
/**
* Get the count of active errors.
*/

View File

@@ -1,346 +0,0 @@
/**
* Heartbeat Engine - Periodic proactive checks for ZCLAW agents
*
* Runs on a configurable interval, executing a checklist of items.
* Each check can produce alerts that surface via desktop notification or UI.
* Supports quiet hours (no notifications during sleep time).
*
* Phase 3 implementation: rule-based checks with configurable checklist.
* Phase 4 upgrade: LLM-powered interpretation of HEARTBEAT.md checklists.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.4.1
*/
import { getMemoryManager } from './agent-memory';
// === Types ===

/** Tunables for the periodic heartbeat loop. */
export interface HeartbeatConfig {
  enabled: boolean; // master switch; start() is a no-op when false
  intervalMinutes: number;
  quietHoursStart?: string; // "22:00" format
  quietHoursEnd?: string; // "08:00" format
  notifyChannel: 'desktop' | 'ui' | 'all';
  proactivityLevel: 'silent' | 'light' | 'standard' | 'autonomous';
  maxAlertsPerTick: number; // cap on alerts gathered in a single tick
}

/** One noteworthy finding produced by a heartbeat check. */
export interface HeartbeatAlert {
  title: string;
  content: string;
  urgency: 'low' | 'medium' | 'high';
  source: string; // identifier of the check that produced this alert
  timestamp: string; // ISO timestamp
}

/** Outcome of a single heartbeat tick. */
export interface HeartbeatResult {
  status: 'ok' | 'alert'; // 'alert' when at least one alert survived filtering
  alerts: HeartbeatAlert[];
  checkedItems: number; // number of registered checks that were run
  timestamp: string; // ISO timestamp
}

/** A single heartbeat check; resolves to an alert or null when nothing is noteworthy. */
export type HeartbeatCheckFn = (agentId: string) => Promise<HeartbeatAlert | null>;
// === Default Config ===

// Conservative defaults: heartbeat is OFF until explicitly enabled, checks run
// every 30 minutes, quiet hours span 22:00-08:00, and at most 5 alerts per
// tick are surfaced through the UI channel only.
export const DEFAULT_HEARTBEAT_CONFIG: HeartbeatConfig = {
  enabled: false,
  intervalMinutes: 30,
  quietHoursStart: '22:00',
  quietHoursEnd: '08:00',
  notifyChannel: 'ui',
  proactivityLevel: 'light',
  maxAlertsPerTick: 5,
};
// === Built-in Checks ===

/** Alert when the agent has high-importance (>= 6) task memories outstanding. */
async function checkPendingTasks(agentId: string): Promise<HeartbeatAlert | null> {
  const tasks = await getMemoryManager().getAll(agentId, { type: 'task', limit: 10 });
  const pending = tasks.filter((t) => t.importance >= 6);
  if (pending.length === 0) {
    return null;
  }
  const preview = pending.slice(0, 3).map((t) => t.content).join('');
  return {
    title: '待办任务提醒',
    content: `${pending.length} 个待处理任务:${preview}`,
    // importance >= 8 anywhere in the list escalates the alert
    urgency: pending.some((t) => t.importance >= 8) ? 'high' : 'medium',
    source: 'pending-tasks',
    timestamp: new Date().toISOString(),
  };
}
/** Suggest pruning when an agent's memory store grows past 500 entries. */
async function checkMemoryHealth(agentId: string): Promise<HeartbeatAlert | null> {
  const stats = await getMemoryManager().stats(agentId);
  if (stats.totalEntries <= 500) {
    return null;
  }
  return {
    title: '记忆存储提醒',
    content: `已积累 ${stats.totalEntries} 条记忆,建议清理低重要性的旧记忆以保持检索效率。`,
    urgency: stats.totalEntries > 1000 ? 'high' : 'low',
    source: 'memory-health',
    timestamp: new Date().toISOString(),
  };
}
/**
 * Offer help when the user has been idle for more than 4 hours,
 * but only during weekday working hours (Mon-Fri, 9:00-18:00).
 * Reads the last-interaction timestamp from localStorage; silently
 * returns null when localStorage is unavailable (e.g. in tests).
 */
async function checkIdleGreeting(_agentId: string): Promise<HeartbeatAlert | null> {
  try {
    const stored = localStorage.getItem('zclaw-last-interaction');
    if (!stored) {
      return null;
    }
    const idleHours = (Date.now() - parseInt(stored, 10)) / (1000 * 60 * 60);
    const now = new Date();
    const isWeekday = now.getDay() >= 1 && now.getDay() <= 5;
    const isWorkHours = now.getHours() >= 9 && now.getHours() <= 18;
    if (isWeekday && isWorkHours && idleHours > 4) {
      return {
        title: '闲置提醒',
        content: `已有 ${Math.floor(idleHours)} 小时未交互。需要我帮你处理什么吗?`,
        urgency: 'low',
        source: 'idle-greeting',
        timestamp: new Date().toISOString(),
      };
    }
  } catch {
    // localStorage not available in test
  }
  return null;
}
// === Heartbeat Engine ===
// localStorage key for persisted tick history — presumably read/written by
// loadHistory()/saveHistory(), whose bodies are outside this chunk; verify.
const HISTORY_STORAGE_KEY = 'zclaw-heartbeat-history';
/**
 * Periodic background "pulse" for a single agent.
 *
 * On a fixed interval the engine runs its registered check functions,
 * filters the resulting alerts by the configured proactivity level,
 * appends the tick result to a rolling history persisted in localStorage,
 * and forwards surviving alerts to the `onAlert` callback given to `start()`.
 *
 * NOTE(review): history is stored under the single shared HISTORY_STORAGE_KEY,
 * so multiple engines (one per agent via getHeartbeatEngine) overwrite each
 * other's persisted history — confirm whether the key should include agentId.
 */
export class HeartbeatEngine {
  // Effective configuration: DEFAULT_HEARTBEAT_CONFIG overlaid with ctor overrides.
  private config: HeartbeatConfig;
  // Active setInterval handle; null whenever the engine is stopped.
  private timerId: ReturnType<typeof setInterval> | null = null;
  // Check functions executed on every tick, in registration order.
  private checks: HeartbeatCheckFn[] = [];
  // Rolling tick results; trimmed to the most recent 50 once it exceeds 100.
  private history: HeartbeatResult[] = [];
  private agentId: string;
  // Receiver for filtered alerts; captured by start().
  private onAlert?: (alerts: HeartbeatAlert[]) => void;
  constructor(agentId: string, config?: Partial<HeartbeatConfig>) {
    this.config = { ...DEFAULT_HEARTBEAT_CONFIG, ...config };
    this.agentId = agentId;
    this.loadHistory();
    // Register built-in checks
    this.checks = [
      checkPendingTasks,
      checkMemoryHealth,
      checkIdleGreeting,
    ];
  }
  // === Lifecycle ===
  /**
   * Begin periodic ticking. Restarts cleanly if already running; no-op when
   * `config.enabled` is false. Note the first tick only fires after one full
   * interval has elapsed (setInterval semantics) — there is no immediate tick.
   */
  start(onAlert?: (alerts: HeartbeatAlert[]) => void): void {
    if (this.timerId) this.stop();
    if (!this.config.enabled) return;
    this.onAlert = onAlert;
    const intervalMs = this.config.intervalMinutes * 60 * 1000;
    this.timerId = setInterval(() => {
      // tick() is async; failures are logged rather than left as unhandled rejections.
      this.tick().catch(err =>
        console.warn('[Heartbeat] Tick failed:', err)
      );
    }, intervalMs);
    console.log(`[Heartbeat] Started for ${this.agentId}, interval: ${this.config.intervalMinutes}min`);
  }
  /** Cancel the interval timer, if any. Safe to call when already stopped. */
  stop(): void {
    if (this.timerId) {
      clearInterval(this.timerId);
      this.timerId = null;
      console.log(`[Heartbeat] Stopped for ${this.agentId}`);
    }
  }
  /** True while the interval timer is armed. */
  isRunning(): boolean {
    return this.timerId !== null;
  }
  // === Single Tick ===
  /**
   * Run all checks once and return the result.
   * During quiet hours an 'ok' result is returned WITHOUT running checks,
   * and that result is neither recorded in history nor reported via onAlert.
   * Individual check failures are logged and skipped; collection stops early
   * once `maxAlertsPerTick` raw alerts have been gathered.
   */
  async tick(): Promise<HeartbeatResult> {
    // Quiet hours check
    if (this.isQuietHours()) {
      const result: HeartbeatResult = {
        status: 'ok',
        alerts: [],
        checkedItems: 0,
        timestamp: new Date().toISOString(),
      };
      return result;
    }
    const alerts: HeartbeatAlert[] = [];
    for (const check of this.checks) {
      try {
        const alert = await check(this.agentId);
        if (alert) {
          alerts.push(alert);
          if (alerts.length >= this.config.maxAlertsPerTick) break;
        }
      } catch (err) {
        console.warn('[Heartbeat] Check failed:', err);
      }
    }
    // Filter by proactivity level
    const filteredAlerts = this.filterByProactivity(alerts);
    const result: HeartbeatResult = {
      status: filteredAlerts.length > 0 ? 'alert' : 'ok',
      alerts: filteredAlerts,
      checkedItems: this.checks.length,
      timestamp: new Date().toISOString(),
    };
    // Store history
    this.history.push(result);
    if (this.history.length > 100) {
      this.history = this.history.slice(-50);
    }
    this.saveHistory();
    // Notify
    if (filteredAlerts.length > 0 && this.onAlert) {
      this.onAlert(filteredAlerts);
    }
    return result;
  }
  // === Custom Checks ===
  /** Append an additional check; it runs after the built-ins on every tick. */
  registerCheck(check: HeartbeatCheckFn): void {
    this.checks.push(check);
  }
  // === History ===
  /** Most recent `limit` tick results, oldest first. */
  getHistory(limit: number = 20): HeartbeatResult[] {
    return this.history.slice(-limit);
  }
  /** Result of the latest recorded tick, or null before the first one. */
  getLastResult(): HeartbeatResult | null {
    return this.history.length > 0 ? this.history[this.history.length - 1] : null;
  }
  // === Quiet Hours ===
  /**
   * True when the current local time falls inside the configured quiet window.
   * Supports both same-day ranges (13:00-17:00) and cross-midnight ranges
   * (22:00-08:00). Start is inclusive, end exclusive. Disabled unless both
   * quietHoursStart and quietHoursEnd ("HH:MM") are set.
   */
  isQuietHours(): boolean {
    if (!this.config.quietHoursStart || !this.config.quietHoursEnd) return false;
    const now = new Date();
    const currentMinutes = now.getHours() * 60 + now.getMinutes();
    const [startH, startM] = this.config.quietHoursStart.split(':').map(Number);
    const [endH, endM] = this.config.quietHoursEnd.split(':').map(Number);
    const startMinutes = startH * 60 + startM;
    const endMinutes = endH * 60 + endM;
    if (startMinutes <= endMinutes) {
      // Same-day range (e.g., 13:00-17:00)
      return currentMinutes >= startMinutes && currentMinutes < endMinutes;
    } else {
      // Cross-midnight range (e.g., 22:00-08:00)
      return currentMinutes >= startMinutes || currentMinutes < endMinutes;
    }
  }
  // === Config ===
  /** Shallow copy of the current configuration. */
  getConfig(): HeartbeatConfig {
    return { ...this.config };
  }
  /**
   * Merge `updates` into the config, restarting or starting the timer as needed.
   * NOTE(review): the restart condition uses truthiness on `updates.intervalMinutes`,
   * so passing 0 (or changing only e.g. quiet hours) does not restart — confirm
   * this is intentional for all callers.
   */
  updateConfig(updates: Partial<HeartbeatConfig>): void {
    const wasEnabled = this.config.enabled;
    this.config = { ...this.config, ...updates };
    // Restart if interval changed or enabled/disabled
    if (this.timerId && (updates.intervalMinutes || updates.enabled === false)) {
      this.stop();
      if (this.config.enabled) {
        this.start(this.onAlert);
      }
    } else if (!wasEnabled && this.config.enabled) {
      this.start(this.onAlert);
    }
  }
  // === Internal ===
  // Map the configured proactivity level to a minimum-urgency alert filter.
  private filterByProactivity(alerts: HeartbeatAlert[]): HeartbeatAlert[] {
    switch (this.config.proactivityLevel) {
      case 'silent':
        return []; // Never alert
      case 'light':
        return alerts.filter(a => a.urgency === 'high');
      case 'standard':
        return alerts.filter(a => a.urgency === 'high' || a.urgency === 'medium');
      case 'autonomous':
        return alerts; // Show everything
      default:
        return alerts.filter(a => a.urgency === 'high');
    }
  }
  // Restore persisted history; any failure (no localStorage, bad JSON) resets it.
  // NOTE(review): the parsed value is not validated to be an array — a corrupted
  // value would flow into history unchecked.
  private loadHistory(): void {
    try {
      const raw = localStorage.getItem(HISTORY_STORAGE_KEY);
      if (raw) {
        this.history = JSON.parse(raw);
      }
    } catch {
      this.history = [];
    }
  }
  // Persist at most the 50 newest results; storage failures are silent best-effort.
  private saveHistory(): void {
    try {
      localStorage.setItem(HISTORY_STORAGE_KEY, JSON.stringify(this.history.slice(-50)));
    } catch {
      // silent
    }
  }
}
// === Singleton ===
// One lazily-created engine per agent id.
let _instances: Map<string, HeartbeatEngine> = new Map();

/**
 * Return the heartbeat engine for `agentId`, constructing it on first use.
 * `config` is applied only when the engine is first created; later calls
 * return the cached instance unchanged.
 */
export function getHeartbeatEngine(agentId: string, config?: Partial<HeartbeatConfig>): HeartbeatEngine {
  const existing = _instances.get(agentId);
  if (existing) {
    return existing;
  }
  const created = new HeartbeatEngine(agentId, config);
  _instances.set(agentId, created);
  return created;
}
/**
 * Stop every running engine and drop all cached instances.
 * Intended for tests and teardown; subsequent getHeartbeatEngine calls
 * construct fresh engines.
 */
export function resetHeartbeatEngines(): void {
  _instances.forEach((engine) => engine.stop());
  _instances = new Map();
}

View File

@@ -0,0 +1,955 @@
/**
* Intelligence Layer Unified Client
*
* Provides a unified API for intelligence operations that:
* - Uses Rust backend (via Tauri commands) when running in Tauri environment
* - Falls back to localStorage-based implementation in browser environment
*
* This replaces direct usage of:
* - agent-memory.ts
* - heartbeat-engine.ts
* - context-compactor.ts
* - reflection-engine.ts
* - agent-identity.ts
*
* Usage:
* ```typescript
* import { intelligenceClient, toFrontendMemory, toBackendMemoryInput } from './intelligence-client';
*
* // Store memory
* const id = await intelligenceClient.memory.store({
* agent_id: 'agent-1',
* memory_type: 'fact',
* content: 'User prefers concise responses',
* importance: 7,
* });
*
* // Search memories
* const memories = await intelligenceClient.memory.search({
* agent_id: 'agent-1',
* query: 'user preference',
* limit: 10,
* });
*
* // Convert to frontend format if needed
* const frontendMemories = memories.map(toFrontendMemory);
* ```
*/
import {
intelligence,
type MemoryEntryInput,
type PersistentMemory,
type MemorySearchOptions as BackendSearchOptions,
type MemoryStats as BackendMemoryStats,
type HeartbeatConfig,
type HeartbeatResult,
type CompactableMessage,
type CompactionResult,
type CompactionCheck,
type CompactionConfig,
type MemoryEntryForAnalysis,
type ReflectionResult,
type ReflectionState,
type ReflectionConfig,
type IdentityFiles,
type IdentityChangeProposal,
type IdentitySnapshot,
} from './intelligence-backend';
// === Environment Detection ===
/**
 * True when the app runs inside a Tauri shell (native Rust backend reachable),
 * detected by the injected `__TAURI__` global on `window`.
 */
export function isTauriEnv(): boolean {
  if (typeof window === 'undefined') {
    return false;
  }
  return '__TAURI__' in window;
}
// === Frontend Types (for backward compatibility) ===
// Category of a stored memory (mirrors the backend's memory_type field).
export type MemoryType = 'fact' | 'preference' | 'lesson' | 'context' | 'task';
// Provenance of a memory: auto-extracted, user-entered, or produced by reflection.
export type MemorySource = 'auto' | 'user' | 'reflection' | 'llm-reflection';
// camelCase view of a persisted memory, as consumed by legacy frontend code.
export interface MemoryEntry {
  id: string;
  agentId: string;
  content: string;
  type: MemoryType;
  // Relative weight; presumably a 1-10 scale (store() defaults to 5) — confirm against backend.
  importance: number;
  source: MemorySource;
  tags: string[];
  // ISO-8601 timestamp strings.
  createdAt: string;
  lastAccessedAt: string;
  accessCount: number;
  // Conversation the memory originated from, when known.
  conversationId?: string;
}
// Search filters; all fields optional and AND-combined by the fallback search.
export interface MemorySearchOptions {
  agentId?: string;
  type?: MemoryType;
  // NOTE(review): `types` has no counterpart in toBackendSearchOptions — confirm
  // whether the Rust backend supports a multi-type filter.
  types?: MemoryType[];
  tags?: string[];
  // Free-text filter matched against content and tags (case-insensitive).
  query?: string;
  limit?: number;
  minImportance?: number;
}
// Aggregate counters over the memory store (camelCase view of backend stats).
export interface MemoryStats {
  totalEntries: number;
  byType: Record<string, number>;
  byAgent: Record<string, number>;
  // ISO timestamps of the extreme entries; null when the store is empty.
  oldestEntry: string | null;
  newestEntry: string | null;
}
// === Re-export types from intelligence-backend ===
export type {
HeartbeatConfig,
HeartbeatResult,
HeartbeatAlert,
CompactableMessage,
CompactionResult,
CompactionCheck,
CompactionConfig,
PatternObservation,
ImprovementSuggestion,
ReflectionResult,
ReflectionState,
ReflectionConfig,
IdentityFiles,
IdentityChangeProposal,
IdentitySnapshot,
} from './intelligence-backend';
// === Type Conversion Utilities ===
/**
 * Convert a backend PersistentMemory (snake_case) into the legacy frontend
 * MemoryEntry shape (camelCase). Tags may arrive as a JSON string or an
 * array; a null conversation id becomes `undefined`.
 */
export function toFrontendMemory(backend: PersistentMemory): MemoryEntry {
  const {
    id,
    agent_id,
    content,
    memory_type,
    importance,
    source,
    tags,
    created_at,
    last_accessed_at,
    access_count,
    conversation_id,
  } = backend;
  return {
    id,
    agentId: agent_id,
    content,
    type: memory_type as MemoryType,
    importance,
    source: source as MemorySource,
    tags: parseTags(tags),
    createdAt: created_at,
    lastAccessedAt: last_accessed_at,
    accessCount: access_count,
    conversationId: conversation_id ?? undefined,
  };
}
/**
 * Convert a new (not-yet-persisted) frontend memory into the snake_case
 * MemoryEntryInput shape the backend's store command expects.
 */
export function toBackendMemoryInput(entry: Omit<MemoryEntry, 'id' | 'createdAt' | 'lastAccessedAt' | 'accessCount'>): MemoryEntryInput {
  const { agentId, type, content, importance, source, tags, conversationId } = entry;
  return {
    agent_id: agentId,
    memory_type: type,
    content,
    importance,
    source,
    tags,
    conversation_id: conversationId,
  };
}
/**
 * Convert frontend search options (camelCase) to the backend's snake_case
 * search shape.
 *
 * NOTE(review): the frontend-only `options.types` multi-type filter has no
 * backend counterpart here and is silently dropped — confirm whether the
 * Rust API accepts a type list.
 */
export function toBackendSearchOptions(options: MemorySearchOptions): BackendSearchOptions {
  const { agentId, type, tags, query, limit, minImportance } = options;
  return {
    agent_id: agentId,
    memory_type: type,
    tags,
    query,
    limit,
    min_importance: minImportance,
  };
}
/**
 * Convert backend memory statistics (snake_case) into the frontend
 * MemoryStats shape (camelCase). Pure field renaming, no aggregation.
 */
export function toFrontendStats(backend: BackendMemoryStats): MemoryStats {
  const { total_memories, by_type, by_agent, oldest_memory, newest_memory } = backend;
  return {
    totalEntries: total_memories,
    byType: by_type,
    byAgent: by_agent,
    oldestEntry: oldest_memory,
    newestEntry: newest_memory,
  };
}
/**
 * Parse the backend `tags` field, which may arrive either as a JSON-encoded
 * string (raw DB column) or as an already-decoded array.
 *
 * Returns [] for empty, malformed, or non-array input.
 */
function parseTags(tags: string | string[]): string[] {
  if (Array.isArray(tags)) return tags;
  if (!tags) return [];
  try {
    const parsed: unknown = JSON.parse(tags);
    // Fix: valid JSON that is not an array (e.g. "5" or '"x"') previously
    // leaked through as a non-array value despite the string[] return type.
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
}
// === LocalStorage Fallback Implementation ===
const FALLBACK_STORAGE_KEY = 'zclaw-intelligence-fallback';
// Shape of the JSON blob persisted under FALLBACK_STORAGE_KEY.
interface FallbackMemoryStore {
  memories: MemoryEntry[];
}
/** Load the fallback store; returns an empty store when localStorage is unavailable or corrupt. */
function getFallbackStore(): FallbackMemoryStore {
  try {
    const stored = localStorage.getItem(FALLBACK_STORAGE_KEY);
    if (stored) {
      return JSON.parse(stored);
    }
  } catch {
    // ignore — e.g. SSR/test environment without localStorage, or bad JSON
  }
  return { memories: [] };
}
/** Persist the fallback store; failures (quota, no localStorage) are logged and swallowed. */
function saveFallbackStore(store: FallbackMemoryStore): void {
  try {
    localStorage.setItem(FALLBACK_STORAGE_KEY, JSON.stringify(store));
  } catch {
    console.warn('[IntelligenceClient] Failed to save to localStorage');
  }
}
// Fallback Memory API — browser-only stand-in for the Rust memory store.
const fallbackMemory = {
  async init(): Promise<void> {
    // No-op for localStorage
  },
  /** Persist a new memory and return its generated id. */
  async store(entry: MemoryEntryInput): Promise<string> {
    const store = getFallbackStore();
    const id = `mem_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
    const now = new Date().toISOString();
    const memory: MemoryEntry = {
      id,
      agentId: entry.agent_id,
      content: entry.content,
      type: entry.memory_type as MemoryType,
      importance: entry.importance ?? 5,
      source: (entry.source as MemorySource) ?? 'auto',
      tags: entry.tags ?? [],
      createdAt: now,
      lastAccessedAt: now,
      accessCount: 0,
      conversationId: entry.conversation_id,
    };
    store.memories.push(memory);
    saveFallbackStore(store);
    return id;
  },
  /** Fetch a memory by id; access metadata is NOT updated by the fallback. */
  async get(id: string): Promise<MemoryEntry | null> {
    const store = getFallbackStore();
    return store.memories.find(m => m.id === id) ?? null;
  },
  /**
   * Filter memories by the given options (filters are AND-combined; `limit`
   * is applied last).
   *
   * Fix: the declared `types` and `tags` options were previously ignored on
   * this fallback path. They now filter as documented: `types` requires the
   * entry's type to be one of the listed types, and `tags` requires at least
   * one shared tag (any-of semantics, matching the free-text `query`
   * behaviour — confirm against the Rust backend's exact semantics).
   */
  async search(options: MemorySearchOptions): Promise<MemoryEntry[]> {
    const store = getFallbackStore();
    let results = store.memories;
    if (options.agentId) {
      results = results.filter(m => m.agentId === options.agentId);
    }
    if (options.type) {
      results = results.filter(m => m.type === options.type);
    }
    if (options.types && options.types.length > 0) {
      const allowed = new Set<MemoryType>(options.types);
      results = results.filter(m => allowed.has(m.type));
    }
    if (options.tags && options.tags.length > 0) {
      const wanted = new Set(options.tags);
      results = results.filter(m => m.tags.some(t => wanted.has(t)));
    }
    if (options.minImportance !== undefined) {
      results = results.filter(m => m.importance >= options.minImportance!);
    }
    if (options.query) {
      const queryLower = options.query.toLowerCase();
      results = results.filter(m =>
        m.content.toLowerCase().includes(queryLower) ||
        m.tags.some(t => t.toLowerCase().includes(queryLower))
      );
    }
    if (options.limit) {
      results = results.slice(0, options.limit);
    }
    return results;
  },
  /** Remove a single memory by id (no-op when absent). */
  async delete(id: string): Promise<void> {
    const store = getFallbackStore();
    store.memories = store.memories.filter(m => m.id !== id);
    saveFallbackStore(store);
  },
  /** Remove all memories belonging to an agent; returns the number removed. */
  async deleteAll(agentId: string): Promise<number> {
    const store = getFallbackStore();
    const before = store.memories.length;
    store.memories = store.memories.filter(m => m.agentId !== agentId);
    saveFallbackStore(store);
    return before - store.memories.length;
  },
  /** Aggregate counts by type/agent plus oldest/newest creation timestamps. */
  async stats(): Promise<MemoryStats> {
    const store = getFallbackStore();
    const byType: Record<string, number> = {};
    const byAgent: Record<string, number> = {};
    for (const m of store.memories) {
      byType[m.type] = (byType[m.type] ?? 0) + 1;
      byAgent[m.agentId] = (byAgent[m.agentId] ?? 0) + 1;
    }
    const sorted = [...store.memories].sort((a, b) =>
      new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()
    );
    return {
      totalEntries: store.memories.length,
      byType,
      byAgent,
      oldestEntry: sorted[0]?.createdAt ?? null,
      newestEntry: sorted[sorted.length - 1]?.createdAt ?? null,
    };
  },
  /** Dump every stored memory (all agents). */
  async export(): Promise<MemoryEntry[]> {
    const store = getFallbackStore();
    return store.memories;
  },
  /** Append the given memories (no dedup); returns the number appended. */
  async import(memories: MemoryEntry[]): Promise<number> {
    const store = getFallbackStore();
    store.memories.push(...memories);
    saveFallbackStore(store);
    return memories.length;
  },
  /** Pseudo-path identifying the localStorage-backed store. */
  async dbPath(): Promise<string> {
    return 'localStorage://zclaw-intelligence-fallback';
  },
};
// Fallback Compactor API — rule-based approximation of the Rust compactor.
const fallbackCompactor = {
  /**
   * Rough token count: ~4 chars per token for non-CJK text, ~1.5 tokens per
   * CJK character (Han + kana ranges), rounded up.
   */
  async estimateTokens(text: string): Promise<number> {
    const cjkChars = (text.match(/[\u4e00-\u9fff\u3040-\u30ff]/g) ?? []).length;
    const otherChars = text.length - cjkChars;
    return Math.ceil(cjkChars * 1.5 + otherChars / 4);
  },
  /** Sum of estimateTokens over every message's content. */
  async estimateMessagesTokens(messages: CompactableMessage[]): Promise<number> {
    let total = 0;
    for (const m of messages) {
      total += await fallbackCompactor.estimateTokens(m.content);
    }
    return total;
  },
  /**
   * Decide whether the conversation should be compacted.
   * Urgency is 'hard' at/above hard_threshold_tokens (default 20000),
   * 'soft' at/above soft_threshold_tokens (default 15000), else 'none'.
   */
  async checkThreshold(
    messages: CompactableMessage[],
    config?: CompactionConfig
  ): Promise<CompactionCheck> {
    const threshold = config?.soft_threshold_tokens ?? 15000;
    const currentTokens = await fallbackCompactor.estimateMessagesTokens(messages);
    return {
      should_compact: currentTokens >= threshold,
      current_tokens: currentTokens,
      threshold,
      urgency: currentTokens >= (config?.hard_threshold_tokens ?? 20000) ? 'hard' :
               currentTokens >= threshold ? 'soft' : 'none',
    };
  },
  /**
   * Rule-based compaction: keep only the last `keep_recent_messages`
   * messages (default 10) and replace the rest with a one-line summary.
   * No memories are flushed on this fallback path.
   *
   * Fix: `keep_recent_messages: 0` previously kept EVERY message, because
   * Array.prototype.slice(-0) is slice(0); it now retains none.
   */
  async compact(
    messages: CompactableMessage[],
    _agentId: string,
    _conversationId?: string,
    config?: CompactionConfig
  ): Promise<CompactionResult> {
    const keepRecent = config?.keep_recent_messages ?? 10;
    const retained = keepRecent > 0 ? messages.slice(-keepRecent) : [];
    return {
      compacted_messages: retained,
      summary: `[Compacted ${messages.length - retained.length} earlier messages]`,
      original_count: messages.length,
      retained_count: retained.length,
      flushed_memories: 0,
      tokens_before_compaction: await fallbackCompactor.estimateMessagesTokens(messages),
      tokens_after_compaction: await fallbackCompactor.estimateMessagesTokens(retained),
    };
  },
};
// Fallback Reflection API
// In-memory stand-in: counts conversations and signals reflection after 5.
const fallbackReflection = {
  _conversationCount: 0,
  _lastReflection: null as string | null,
  init: async (_config?: ReflectionConfig): Promise<void> => {
    // nothing to set up in the browser fallback
  },
  recordConversation: async (): Promise<void> => {
    fallbackReflection._conversationCount += 1;
  },
  // Reflect once every 5 recorded conversations.
  shouldReflect: async (): Promise<boolean> => {
    return fallbackReflection._conversationCount >= 5;
  },
  // Resets the conversation counter; produces an empty result (no LLM here).
  reflect: async (_agentId: string, _memories: MemoryEntryForAnalysis[]): Promise<ReflectionResult> => {
    fallbackReflection._conversationCount = 0;
    fallbackReflection._lastReflection = new Date().toISOString();
    return {
      patterns: [],
      improvements: [],
      identity_proposals: [],
      new_memories: 0,
      timestamp: new Date().toISOString(),
    };
  },
  getHistory: async (_limit?: number): Promise<ReflectionResult[]> => {
    // the fallback keeps no reflection history
    return [];
  },
  getState: async (): Promise<ReflectionState> => {
    return {
      conversations_since_reflection: fallbackReflection._conversationCount,
      last_reflection_time: fallbackReflection._lastReflection,
      last_reflection_agent_id: null,
    };
  },
};
// Fallback Identity API
// In-memory identity store; default files are seeded on first access.
const fallbackIdentities = new Map<string, IdentityFiles>();
const fallbackProposals: IdentityChangeProposal[] = [];
const fallbackIdentity = {
  /** Get the agent's identity files, seeding defaults on first access. */
  async get(agentId: string): Promise<IdentityFiles> {
    if (!fallbackIdentities.has(agentId)) {
      fallbackIdentities.set(agentId, {
        soul: '# Agent Soul\n\nA helpful AI assistant.',
        instructions: '# Instructions\n\nBe helpful and concise.',
        user_profile: '# User Profile\n\nNo profile yet.',
      });
    }
    return fallbackIdentities.get(agentId)!;
  },
  /** Read one identity file; unknown names yield ''. */
  async getFile(agentId: string, file: string): Promise<string> {
    const files = await fallbackIdentity.get(agentId);
    return files[file as keyof IdentityFiles] ?? '';
  },
  /** Compose the system prompt from identity files plus optional memory context. */
  async buildPrompt(agentId: string, memoryContext?: string): Promise<string> {
    const files = await fallbackIdentity.get(agentId);
    let prompt = `${files.soul}\n\n## Instructions\n${files.instructions}\n\n## User Profile\n${files.user_profile}`;
    if (memoryContext) {
      prompt += `\n\n## Memory Context\n${memoryContext}`;
    }
    return prompt;
  },
  /** Replace the user profile file wholesale. */
  async updateUserProfile(agentId: string, content: string): Promise<void> {
    const files = await fallbackIdentity.get(agentId);
    files.user_profile = content;
    fallbackIdentities.set(agentId, files);
  },
  /** Append a section to the user profile (separated by a blank line). */
  async appendUserProfile(agentId: string, addition: string): Promise<void> {
    const files = await fallbackIdentity.get(agentId);
    files.user_profile += `\n\n${addition}`;
    fallbackIdentities.set(agentId, files);
  },
  /** Record a pending change proposal for soul/instructions; requires approval to apply. */
  async proposeChange(
    agentId: string,
    file: 'soul' | 'instructions',
    suggestedContent: string,
    reason: string
  ): Promise<IdentityChangeProposal> {
    const files = await fallbackIdentity.get(agentId);
    const proposal: IdentityChangeProposal = {
      id: `prop_${Date.now()}`,
      agent_id: agentId,
      file,
      reason,
      current_content: files[file] ?? '',
      suggested_content: suggestedContent,
      status: 'pending',
      created_at: new Date().toISOString(),
    };
    fallbackProposals.push(proposal);
    return proposal;
  },
  /** Apply a proposal's suggested content; throws when the id is unknown. */
  async approveProposal(proposalId: string): Promise<IdentityFiles> {
    const proposal = fallbackProposals.find(p => p.id === proposalId);
    if (!proposal) throw new Error('Proposal not found');
    proposal.status = 'approved';
    const files = await fallbackIdentity.get(proposal.agent_id);
    files[proposal.file] = proposal.suggested_content;
    fallbackIdentities.set(proposal.agent_id, files);
    return files;
  },
  /** Mark a proposal rejected; unknown ids are silently ignored. */
  async rejectProposal(proposalId: string): Promise<void> {
    const proposal = fallbackProposals.find(p => p.id === proposalId);
    if (proposal) {
      proposal.status = 'rejected';
    }
  },
  /** List pending proposals, optionally restricted to one agent. */
  async getPendingProposals(agentId?: string): Promise<IdentityChangeProposal[]> {
    return fallbackProposals.filter(p =>
      p.status === 'pending' && (!agentId || p.agent_id === agentId)
    );
  },
  /**
   * Directly overwrite one identity file; unknown file names are ignored.
   * Cleanup: removed a duplicated `key in files` check that merely repeated
   * the `file in files` guard.
   */
  async updateFile(agentId: string, file: string, content: string): Promise<void> {
    const files = await fallbackIdentity.get(agentId);
    if (file in files) {
      files[file as keyof IdentityFiles] = content;
      fallbackIdentities.set(agentId, files);
    }
  },
  /** Snapshots are not tracked by the fallback. */
  async getSnapshots(_agentId: string, _limit?: number): Promise<IdentitySnapshot[]> {
    return [];
  },
  async restoreSnapshot(_agentId: string, _snapshotId: string): Promise<void> {
    // No-op for fallback
  },
  /** Ids of every agent whose identity has been materialized in memory. */
  async listAgents(): Promise<string[]> {
    return Array.from(fallbackIdentities.keys());
  },
  async deleteAgent(agentId: string): Promise<void> {
    fallbackIdentities.delete(agentId);
  },
};
// Fallback Heartbeat API
// Browser stand-in: stores per-agent config, but runs no background timer.
const fallbackHeartbeat = {
  _configs: new Map<string, HeartbeatConfig>(),
  init: async (agentId: string, config?: HeartbeatConfig): Promise<void> => {
    if (config) {
      fallbackHeartbeat._configs.set(agentId, config);
    }
  },
  start: async (_agentId: string): Promise<void> => {
    // no background tasks in the browser fallback
  },
  stop: async (_agentId: string): Promise<void> => {
    // nothing to stop
  },
  // A fallback tick performs no checks and always reports 'ok'.
  tick: async (_agentId: string): Promise<HeartbeatResult> => {
    return {
      status: 'ok',
      alerts: [],
      checked_items: 0,
      timestamp: new Date().toISOString(),
    };
  },
  getConfig: async (agentId: string): Promise<HeartbeatConfig> => {
    const stored = fallbackHeartbeat._configs.get(agentId);
    if (stored) {
      return stored;
    }
    return {
      enabled: false,
      interval_minutes: 30,
      quiet_hours_start: null,
      quiet_hours_end: null,
      notify_channel: 'ui',
      proactivity_level: 'standard',
      max_alerts_per_tick: 5,
    };
  },
  updateConfig: async (agentId: string, config: HeartbeatConfig): Promise<void> => {
    fallbackHeartbeat._configs.set(agentId, config);
  },
  getHistory: async (_agentId: string, _limit?: number): Promise<HeartbeatResult[]> => {
    return [];
  },
};
// === Unified Client Export ===
/**
* Unified intelligence client that automatically selects backend or fallback
*/
export const intelligenceClient = {
memory: {
init: async (): Promise<void> => {
if (isTauriEnv()) {
await intelligence.memory.init();
} else {
await fallbackMemory.init();
}
},
store: async (entry: MemoryEntryInput): Promise<string> => {
if (isTauriEnv()) {
return intelligence.memory.store(entry);
}
return fallbackMemory.store(entry);
},
get: async (id: string): Promise<MemoryEntry | null> => {
if (isTauriEnv()) {
const result = await intelligence.memory.get(id);
return result ? toFrontendMemory(result) : null;
}
return fallbackMemory.get(id);
},
search: async (options: MemorySearchOptions): Promise<MemoryEntry[]> => {
if (isTauriEnv()) {
const results = await intelligence.memory.search(toBackendSearchOptions(options));
return results.map(toFrontendMemory);
}
return fallbackMemory.search(options);
},
delete: async (id: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.memory.delete(id);
} else {
await fallbackMemory.delete(id);
}
},
deleteAll: async (agentId: string): Promise<number> => {
if (isTauriEnv()) {
return intelligence.memory.deleteAll(agentId);
}
return fallbackMemory.deleteAll(agentId);
},
stats: async (): Promise<MemoryStats> => {
if (isTauriEnv()) {
const stats = await intelligence.memory.stats();
return toFrontendStats(stats);
}
return fallbackMemory.stats();
},
export: async (): Promise<MemoryEntry[]> => {
if (isTauriEnv()) {
const results = await intelligence.memory.export();
return results.map(toFrontendMemory);
}
return fallbackMemory.export();
},
import: async (memories: MemoryEntry[]): Promise<number> => {
if (isTauriEnv()) {
// Convert to backend format
const backendMemories = memories.map(m => ({
...m,
agent_id: m.agentId,
memory_type: m.type,
last_accessed_at: m.lastAccessedAt,
created_at: m.createdAt,
access_count: m.accessCount,
conversation_id: m.conversationId ?? null,
tags: JSON.stringify(m.tags),
embedding: null,
}));
return intelligence.memory.import(backendMemories as PersistentMemory[]);
}
return fallbackMemory.import(memories);
},
dbPath: async (): Promise<string> => {
if (isTauriEnv()) {
return intelligence.memory.dbPath();
}
return fallbackMemory.dbPath();
},
},
heartbeat: {
init: async (agentId: string, config?: HeartbeatConfig): Promise<void> => {
if (isTauriEnv()) {
await intelligence.heartbeat.init(agentId, config);
} else {
await fallbackHeartbeat.init(agentId, config);
}
},
start: async (agentId: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.heartbeat.start(agentId);
} else {
await fallbackHeartbeat.start(agentId);
}
},
stop: async (agentId: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.heartbeat.stop(agentId);
} else {
await fallbackHeartbeat.stop(agentId);
}
},
tick: async (agentId: string): Promise<HeartbeatResult> => {
if (isTauriEnv()) {
return intelligence.heartbeat.tick(agentId);
}
return fallbackHeartbeat.tick(agentId);
},
getConfig: async (agentId: string): Promise<HeartbeatConfig> => {
if (isTauriEnv()) {
return intelligence.heartbeat.getConfig(agentId);
}
return fallbackHeartbeat.getConfig(agentId);
},
updateConfig: async (agentId: string, config: HeartbeatConfig): Promise<void> => {
if (isTauriEnv()) {
await intelligence.heartbeat.updateConfig(agentId, config);
} else {
await fallbackHeartbeat.updateConfig(agentId, config);
}
},
getHistory: async (agentId: string, limit?: number): Promise<HeartbeatResult[]> => {
if (isTauriEnv()) {
return intelligence.heartbeat.getHistory(agentId, limit);
}
return fallbackHeartbeat.getHistory(agentId, limit);
},
},
compactor: {
estimateTokens: async (text: string): Promise<number> => {
if (isTauriEnv()) {
return intelligence.compactor.estimateTokens(text);
}
return fallbackCompactor.estimateTokens(text);
},
estimateMessagesTokens: async (messages: CompactableMessage[]): Promise<number> => {
if (isTauriEnv()) {
return intelligence.compactor.estimateMessagesTokens(messages);
}
return fallbackCompactor.estimateMessagesTokens(messages);
},
checkThreshold: async (
messages: CompactableMessage[],
config?: CompactionConfig
): Promise<CompactionCheck> => {
if (isTauriEnv()) {
return intelligence.compactor.checkThreshold(messages, config);
}
return fallbackCompactor.checkThreshold(messages, config);
},
compact: async (
messages: CompactableMessage[],
agentId: string,
conversationId?: string,
config?: CompactionConfig
): Promise<CompactionResult> => {
if (isTauriEnv()) {
return intelligence.compactor.compact(messages, agentId, conversationId, config);
}
return fallbackCompactor.compact(messages, agentId, conversationId, config);
},
},
reflection: {
init: async (config?: ReflectionConfig): Promise<void> => {
if (isTauriEnv()) {
await intelligence.reflection.init(config);
} else {
await fallbackReflection.init(config);
}
},
recordConversation: async (): Promise<void> => {
if (isTauriEnv()) {
await intelligence.reflection.recordConversation();
} else {
await fallbackReflection.recordConversation();
}
},
shouldReflect: async (): Promise<boolean> => {
if (isTauriEnv()) {
return intelligence.reflection.shouldReflect();
}
return fallbackReflection.shouldReflect();
},
reflect: async (agentId: string, memories: MemoryEntryForAnalysis[]): Promise<ReflectionResult> => {
if (isTauriEnv()) {
return intelligence.reflection.reflect(agentId, memories);
}
return fallbackReflection.reflect(agentId, memories);
},
getHistory: async (limit?: number): Promise<ReflectionResult[]> => {
if (isTauriEnv()) {
return intelligence.reflection.getHistory(limit);
}
return fallbackReflection.getHistory(limit);
},
getState: async (): Promise<ReflectionState> => {
if (isTauriEnv()) {
return intelligence.reflection.getState();
}
return fallbackReflection.getState();
},
},
identity: {
get: async (agentId: string): Promise<IdentityFiles> => {
if (isTauriEnv()) {
return intelligence.identity.get(agentId);
}
return fallbackIdentity.get(agentId);
},
getFile: async (agentId: string, file: string): Promise<string> => {
if (isTauriEnv()) {
return intelligence.identity.getFile(agentId, file);
}
return fallbackIdentity.getFile(agentId, file);
},
buildPrompt: async (agentId: string, memoryContext?: string): Promise<string> => {
if (isTauriEnv()) {
return intelligence.identity.buildPrompt(agentId, memoryContext);
}
return fallbackIdentity.buildPrompt(agentId, memoryContext);
},
updateUserProfile: async (agentId: string, content: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.updateUserProfile(agentId, content);
} else {
await fallbackIdentity.updateUserProfile(agentId, content);
}
},
appendUserProfile: async (agentId: string, addition: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.appendUserProfile(agentId, addition);
} else {
await fallbackIdentity.appendUserProfile(agentId, addition);
}
},
proposeChange: async (
agentId: string,
file: 'soul' | 'instructions',
suggestedContent: string,
reason: string
): Promise<IdentityChangeProposal> => {
if (isTauriEnv()) {
return intelligence.identity.proposeChange(agentId, file, suggestedContent, reason);
}
return fallbackIdentity.proposeChange(agentId, file, suggestedContent, reason);
},
approveProposal: async (proposalId: string): Promise<IdentityFiles> => {
if (isTauriEnv()) {
return intelligence.identity.approveProposal(proposalId);
}
return fallbackIdentity.approveProposal(proposalId);
},
rejectProposal: async (proposalId: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.rejectProposal(proposalId);
} else {
await fallbackIdentity.rejectProposal(proposalId);
}
},
getPendingProposals: async (agentId?: string): Promise<IdentityChangeProposal[]> => {
if (isTauriEnv()) {
return intelligence.identity.getPendingProposals(agentId);
}
return fallbackIdentity.getPendingProposals(agentId);
},
updateFile: async (agentId: string, file: string, content: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.updateFile(agentId, file, content);
} else {
await fallbackIdentity.updateFile(agentId, file, content);
}
},
getSnapshots: async (agentId: string, limit?: number): Promise<IdentitySnapshot[]> => {
if (isTauriEnv()) {
return intelligence.identity.getSnapshots(agentId, limit);
}
return fallbackIdentity.getSnapshots(agentId, limit);
},
restoreSnapshot: async (agentId: string, snapshotId: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.restoreSnapshot(agentId, snapshotId);
} else {
await fallbackIdentity.restoreSnapshot(agentId, snapshotId);
}
},
listAgents: async (): Promise<string[]> => {
if (isTauriEnv()) {
return intelligence.identity.listAgents();
}
return fallbackIdentity.listAgents();
},
deleteAgent: async (agentId: string): Promise<void> => {
if (isTauriEnv()) {
await intelligence.identity.deleteAgent(agentId);
} else {
await fallbackIdentity.deleteAgent(agentId);
}
},
},
};
export default intelligenceClient;

View File

@@ -15,8 +15,10 @@
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.2
*/
import { getMemoryManager, type MemoryType } from './agent-memory';
import { getAgentIdentityManager } from './agent-identity';
import {
intelligenceClient,
type MemoryType,
} from './intelligence-client';
import {
getLLMAdapter,
llmExtract,
@@ -159,20 +161,19 @@ export class MemoryExtractor {
console.log(`[MemoryExtractor] After importance filtering (>= ${this.config.minImportanceThreshold}): ${extracted.length} items`);
// Save to memory
const memoryManager = getMemoryManager();
let saved = 0;
let skipped = 0;
for (const item of extracted) {
try {
await memoryManager.save({
agentId,
await intelligenceClient.memory.store({
agent_id: agentId,
memory_type: item.type,
content: item.content,
type: item.type,
importance: item.importance,
source: 'auto',
tags: item.tags,
conversationId,
conversation_id: conversationId,
});
saved++;
} catch {
@@ -185,9 +186,8 @@ export class MemoryExtractor {
const preferences = extracted.filter(e => e.type === 'preference' && e.importance >= 5);
if (preferences.length > 0) {
try {
const identityManager = getAgentIdentityManager();
const prefSummary = preferences.map(p => `- ${p.content}`).join('\n');
identityManager.appendToUserProfile(agentId, `### 自动发现的偏好 (${new Date().toLocaleDateString('zh-CN')})\n${prefSummary}`);
await intelligenceClient.identity.appendUserProfile(agentId, `### 自动发现的偏好 (${new Date().toLocaleDateString('zh-CN')})\n${prefSummary}`);
userProfileUpdated = true;
} catch (err) {
console.warn('[MemoryExtractor] Failed to update USER.md:', err);

View File

@@ -1,443 +0,0 @@
/**
* Memory Index - High-performance indexing for agent memory retrieval
*
* Implements inverted index + LRU cache for sub-20ms retrieval on 1000+ memories.
*
* Performance targets:
* - Retrieval latency: <20ms (vs ~50ms with linear scan)
* - 1000 memories: smooth operation
* - Memory overhead: ~30% additional for indexes
*
* Reference: Task "Optimize ZCLAW Agent Memory Retrieval Performance"
*/
import type { MemoryEntry, MemoryType } from './agent-memory';
// === Types ===
// Aggregate metrics reported by MemoryIndex for monitoring/tuning.
export interface IndexStats {
  totalEntries: number;
  // Number of distinct keywords in the inverted index.
  keywordCount: number;
  // Fraction of queries served from the LRU cache; presumably 0..1 — confirm.
  cacheHitRate: number;
  cacheSize: number;
  // Average query latency; presumably milliseconds — confirm against producer.
  avgQueryTime: number;
}
// Cached query result: matching memory ids plus the time they were cached.
interface CacheEntry {
  results: string[]; // memory IDs
  timestamp: number;
}
// === Tokenization (shared with agent-memory.ts) ===
/**
 * Lowercase the text, collapse every run of characters that is neither a
 * word character nor CJK (CJK Unified Ideographs + Extension A) into a
 * single space, and return the non-empty tokens.
 */
export function tokenize(text: string): string[] {
  const normalized = text
    .toLowerCase()
    .replace(/[^\w\u4e00-\u9fff\u3400-\u4dbf]+/g, ' ');
  return normalized.split(/\s+/).filter(token => token.length > 0);
}
// === LRU Cache Implementation ===
/**
 * Minimal least-recently-used cache built on Map's insertion order:
 * the first key is always the oldest, re-insertion moves a key to the back.
 */
class LRUCache<K, V> {
  private cache = new Map<K, V>();
  constructor(private maxSize: number) {}
  /** Look up a key; a hit is promoted to most-recently-used. */
  get(key: K): V | undefined {
    const hit = this.cache.get(key);
    if (hit === undefined) {
      return hit;
    }
    // Re-insert so the entry becomes the newest in iteration order.
    this.cache.delete(key);
    this.cache.set(key, hit);
    return hit;
  }
  /** Insert or overwrite a key, evicting the least-recently-used entry when full. */
  set(key: K, value: V): void {
    const existed = this.cache.delete(key);
    if (!existed && this.cache.size >= this.maxSize) {
      // Oldest entry is the first key in Map iteration order.
      const oldest = this.cache.keys().next();
      if (!oldest.done) {
        this.cache.delete(oldest.value);
      }
    }
    this.cache.set(key, value);
  }
  clear(): void {
    this.cache.clear();
  }
  get size(): number {
    return this.cache.size;
  }
}
// === Memory Index Implementation ===
/**
 * In-memory inverted index over MemoryEntry records.
 *
 * Maintains four lookup tables (keyword, type, agent, tag), a per-memory
 * token cache, and an LRU cache of query results. index()/remove() keep
 * the tables incrementally up to date and invalidate the query cache on
 * every change; rebuild()/clear() drop everything and start over.
 */
export class MemoryIndex {
  // Inverted indexes
  private keywordIndex: Map<string, Set<string>> = new Map(); // keyword -> memoryIds
  private typeIndex: Map<MemoryType, Set<string>> = new Map(); // type -> memoryIds
  private agentIndex: Map<string, Set<string>> = new Map(); // agentId -> memoryIds
  private tagIndex: Map<string, Set<string>> = new Map(); // tag -> memoryIds
  // Pre-tokenized content cache
  private tokenCache: Map<string, string[]> = new Map(); // memoryId -> tokens
  // Query result cache
  private queryCache: LRUCache<string, CacheEntry>;
  // Statistics
  private cacheHits = 0;
  private cacheMisses = 0;
  private queryTimes: number[] = []; // rolling window, capped at 100 samples
  constructor(cacheSize = 100) {
    this.queryCache = new LRUCache(cacheSize);
  }
  // === Index Building ===
  /**
   * Build or update index for a memory entry.
   * Call this when adding or updating a memory.
   *
   * Tags are normalized to lowercase before indexing; content keywords come
   * from tokenize(). Any change invalidates the entire query cache.
   */
  index(entry: MemoryEntry): void {
    const { id, agentId, type, tags, content } = entry;
    // Index by agent
    if (!this.agentIndex.has(agentId)) {
      this.agentIndex.set(agentId, new Set());
    }
    this.agentIndex.get(agentId)!.add(id);
    // Index by type
    if (!this.typeIndex.has(type)) {
      this.typeIndex.set(type, new Set());
    }
    this.typeIndex.get(type)!.add(id);
    // Index by tags
    for (const tag of tags) {
      const normalizedTag = tag.toLowerCase();
      if (!this.tagIndex.has(normalizedTag)) {
        this.tagIndex.set(normalizedTag, new Set());
      }
      this.tagIndex.get(normalizedTag)!.add(id);
    }
    // Index by content keywords
    const tokens = tokenize(content);
    this.tokenCache.set(id, tokens);
    for (const token of tokens) {
      if (!this.keywordIndex.has(token)) {
        this.keywordIndex.set(token, new Set());
      }
      this.keywordIndex.get(token)!.add(id);
    }
    // Invalidate query cache on index change
    this.queryCache.clear();
  }
  /**
   * Remove a memory from all indexes.
   *
   * NOTE(review): this scans every bucket of all four indexes — O(total
   * distinct keys) per removal. The tokenCache entry for this id could be
   * used to visit only the relevant keyword buckets; confirm before
   * optimizing, since agent/type/tag buckets have no reverse mapping here.
   */
  remove(memoryId: string): void {
    // Remove from agent index
    for (const [agentId, ids] of this.agentIndex) {
      ids.delete(memoryId);
      if (ids.size === 0) {
        this.agentIndex.delete(agentId);
      }
    }
    // Remove from type index
    for (const [type, ids] of this.typeIndex) {
      ids.delete(memoryId);
      if (ids.size === 0) {
        this.typeIndex.delete(type);
      }
    }
    // Remove from tag index
    for (const [tag, ids] of this.tagIndex) {
      ids.delete(memoryId);
      if (ids.size === 0) {
        this.tagIndex.delete(tag);
      }
    }
    // Remove from keyword index
    for (const [keyword, ids] of this.keywordIndex) {
      ids.delete(memoryId);
      if (ids.size === 0) {
        this.keywordIndex.delete(keyword);
      }
    }
    // Remove token cache
    this.tokenCache.delete(memoryId);
    // Invalidate query cache
    this.queryCache.clear();
  }
  /**
   * Rebuild all indexes from scratch.
   * Use after bulk updates or data corruption.
   */
  rebuild(entries: MemoryEntry[]): void {
    this.clear();
    for (const entry of entries) {
      this.index(entry);
    }
  }
  /**
   * Clear all indexes.
   * Also resets cache-hit/miss counters and the query-time window.
   */
  clear(): void {
    this.keywordIndex.clear();
    this.typeIndex.clear();
    this.agentIndex.clear();
    this.tagIndex.clear();
    this.tokenCache.clear();
    this.queryCache.clear();
    this.cacheHits = 0;
    this.cacheMisses = 0;
    this.queryTimes = [];
  }
  // === Fast Filtering ===
  /**
   * Get candidate memory IDs based on filter options.
   * Uses indexes for O(1) lookups instead of O(n) scans.
   *
   * Returns null when no filters are supplied (meaning "all memories");
   * returns an empty Set when a filter matches nothing. Multiple filters
   * are intersected (AND); the values within `types`/`tags` are unioned (OR).
   */
  getCandidates(options: {
    agentId?: string;
    type?: MemoryType;
    types?: MemoryType[];
    tags?: string[];
  }): Set<string> | null {
    const candidateSets: Set<string>[] = [];
    // Filter by agent
    if (options.agentId) {
      const agentSet = this.agentIndex.get(options.agentId);
      if (!agentSet) return new Set(); // Agent has no memories
      candidateSets.push(agentSet);
    }
    // Filter by single type
    if (options.type) {
      const typeSet = this.typeIndex.get(options.type);
      if (!typeSet) return new Set(); // No memories of this type
      candidateSets.push(typeSet);
    }
    // Filter by multiple types
    if (options.types && options.types.length > 0) {
      const typeUnion = new Set<string>();
      for (const t of options.types) {
        const typeSet = this.typeIndex.get(t);
        if (typeSet) {
          for (const id of typeSet) {
            typeUnion.add(id);
          }
        }
      }
      if (typeUnion.size === 0) return new Set();
      candidateSets.push(typeUnion);
    }
    // Filter by tags (OR logic - match any tag)
    if (options.tags && options.tags.length > 0) {
      const tagUnion = new Set<string>();
      for (const tag of options.tags) {
        const normalizedTag = tag.toLowerCase();
        const tagSet = this.tagIndex.get(normalizedTag);
        if (tagSet) {
          for (const id of tagSet) {
            tagUnion.add(id);
          }
        }
      }
      if (tagUnion.size === 0) return new Set();
      candidateSets.push(tagUnion);
    }
    // Intersect all candidate sets
    if (candidateSets.length === 0) {
      return null; // No filters applied, return null to indicate "all"
    }
    // Start with smallest set for efficiency
    candidateSets.sort((a, b) => a.size - b.size);
    let result = new Set(candidateSets[0]);
    for (let i = 1; i < candidateSets.length; i++) {
      const nextSet = candidateSets[i];
      result = new Set([...result].filter(id => nextSet.has(id)));
      if (result.size === 0) break;
    }
    return result;
  }
  // === Keyword Search ===
  /**
   * Get memory IDs that contain any of the query keywords.
   * Returns a map of memoryId -> match count for ranking.
   *
   * NOTE(review): a token that exactly matches an indexed keyword is
   * counted twice — once by the direct lookup and again by the substring
   * pass (`keyword.includes(token)` holds for equal strings). Confirm
   * whether the extra weight for exact hits is intentional. Also note the
   * substring pass is O(queryTokens x vocabulary), not an index lookup.
   */
  searchKeywords(queryTokens: string[]): Map<string, number> {
    const matchCounts = new Map<string, number>();
    for (const token of queryTokens) {
      const matchingIds = this.keywordIndex.get(token);
      if (matchingIds) {
        for (const id of matchingIds) {
          matchCounts.set(id, (matchCounts.get(id) ?? 0) + 1);
        }
      }
      // Also check for partial matches (token is substring of indexed keyword)
      for (const [keyword, ids] of this.keywordIndex) {
        if (keyword.includes(token) || token.includes(keyword)) {
          for (const id of ids) {
            matchCounts.set(id, (matchCounts.get(id) ?? 0) + 1);
          }
        }
      }
    }
    return matchCounts;
  }
  /**
   * Get pre-tokenized content for a memory.
   */
  getTokens(memoryId: string): string[] | undefined {
    return this.tokenCache.get(memoryId);
  }
  // === Query Cache ===
  /**
   * Generate cache key from query and options.
   *
   * NOTE(review): fields are joined with '|' without escaping, so a query
   * or tag containing '|' could collide with a different key — confirm
   * whether inputs are constrained upstream.
   */
  private getCacheKey(query: string, options?: Record<string, unknown>): string {
    const opts = options ?? {};
    return `${query}|${opts.agentId ?? ''}|${opts.type ?? ''}|${(opts.types as string[])?.join(',') ?? ''}|${(opts.tags as string[])?.join(',') ?? ''}|${opts.minImportance ?? ''}|${opts.limit ?? ''}`;
  }
  /**
   * Get cached query results.
   * Updates hit/miss counters as a side effect; returns null on a miss.
   */
  getCached(query: string, options?: Record<string, unknown>): string[] | null {
    const key = this.getCacheKey(query, options);
    const cached = this.queryCache.get(key);
    if (cached) {
      this.cacheHits++;
      return cached.results;
    }
    this.cacheMisses++;
    return null;
  }
  /**
   * Cache query results.
   */
  setCached(query: string, options: Record<string, unknown> | undefined, results: string[]): void {
    const key = this.getCacheKey(query, options);
    this.queryCache.set(key, {
      results,
      timestamp: Date.now(),
    });
  }
  // === Statistics ===
  /**
   * Record query time for statistics.
   * Only the most recent 100 samples are retained.
   */
  recordQueryTime(timeMs: number): void {
    this.queryTimes.push(timeMs);
    // Keep last 100 query times
    if (this.queryTimes.length > 100) {
      this.queryTimes.shift();
    }
  }
  /**
   * Get index statistics.
   */
  getStats(): IndexStats {
    const avgQueryTime = this.queryTimes.length > 0
      ? this.queryTimes.reduce((a, b) => a + b, 0) / this.queryTimes.length
      : 0;
    const totalRequests = this.cacheHits + this.cacheMisses;
    return {
      totalEntries: this.tokenCache.size,
      keywordCount: this.keywordIndex.size,
      cacheHitRate: totalRequests > 0 ? this.cacheHits / totalRequests : 0,
      cacheSize: this.queryCache.size,
      avgQueryTime,
    };
  }
  /**
   * Get index memory usage estimate.
   * All figures are rough byte heuristics (fixed per-entry costs), not
   * measured allocations — treat them as order-of-magnitude only.
   */
  getMemoryUsage(): { estimated: number; breakdown: Record<string, number> } {
    let keywordIndexSize = 0;
    for (const [keyword, ids] of this.keywordIndex) {
      keywordIndexSize += keyword.length * 2 + ids.size * 50; // rough estimate
    }
    return {
      estimated:
        keywordIndexSize +
        this.typeIndex.size * 100 +
        this.agentIndex.size * 100 +
        this.tagIndex.size * 100 +
        this.tokenCache.size * 200,
      breakdown: {
        keywordIndex: keywordIndexSize,
        typeIndex: this.typeIndex.size * 100,
        agentIndex: this.agentIndex.size * 100,
        tagIndex: this.tagIndex.size * 100,
        tokenCache: this.tokenCache.size * 200,
      },
    };
  }
}
// === Singleton ===
/** Lazily-created, process-wide MemoryIndex instance. */
let _instance: MemoryIndex | null = null;

/** Return the shared MemoryIndex, creating it on first use. */
export function getMemoryIndex(): MemoryIndex {
  _instance ??= new MemoryIndex();
  return _instance;
}

/** Drop the shared instance so the next getMemoryIndex() builds a fresh one. */
export function resetMemoryIndex(): void {
  _instance = null;
}

View File

@@ -1,677 +0,0 @@
/**
* Reflection Engine - Agent self-improvement through conversation analysis
*
* Periodically analyzes recent conversations to:
* - Identify behavioral patterns (positive and negative)
* - Generate improvement suggestions
* - Propose identity file changes (with user approval)
* - Create meta-memories about agent performance
*
* Phase 3 implementation: rule-based pattern detection.
* Phase 4 upgrade: LLM-powered deep reflection.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.4.2
*/
import { getMemoryManager, type MemoryEntry } from './agent-memory';
import { getAgentIdentityManager, type IdentityChangeProposal } from './agent-identity';
import {
getLLMAdapter,
llmReflect,
type LLMServiceAdapter,
type LLMProvider,
} from './llm-service';
import { canAutoExecute } from './autonomy-manager';
// === Types ===
/** Tunables controlling when and how the ReflectionEngine runs. */
export interface ReflectionConfig {
  triggerAfterConversations: number; // Reflect after N conversations (default 5)
  triggerAfterHours: number; // Reflect after N hours (default 24)
  allowSoulModification: boolean; // Can propose SOUL.md changes
  requireApproval: boolean; // Identity changes need user OK
  useLLM: boolean; // Use LLM for deep reflection (Phase 4)
  llmProvider?: LLMProvider; // Preferred LLM provider
  llmFallbackToRules: boolean; // Fall back to rules if LLM fails
}
/** One behavioral pattern detected during reflection. */
export interface PatternObservation {
  /** Human-readable description of the pattern. */
  observation: string;
  /** How many times the pattern was observed. */
  frequency: number;
  /** Whether the pattern is desirable, undesirable, or neutral. */
  sentiment: 'positive' | 'negative' | 'neutral';
  /** Sample memory contents (or tags) supporting the observation. */
  evidence: string[];
}
/** One actionable improvement derived from detected patterns. */
export interface ImprovementSuggestion {
  /** Area of concern, e.g. task management or memory hygiene. */
  area: string;
  /** Concrete suggestion text. */
  suggestion: string;
  /** High-priority suggestions are persisted as memories by reflect(). */
  priority: 'high' | 'medium' | 'low';
}
/** Output of a single reflection cycle. */
export interface ReflectionResult {
  /** Patterns detected in the agent's memories. */
  patterns: PatternObservation[];
  /** Improvement suggestions derived from the patterns. */
  improvements: ImprovementSuggestion[];
  /** Identity-change proposals (only when allowSoulModification is on). */
  identityProposals: IdentityChangeProposal[];
  /** Count of new memories written during this cycle. */
  newMemories: number;
  /** ISO-8601 completion time of the cycle. */
  timestamp: string;
}
// === Default Config ===
/**
 * Default ReflectionEngine configuration.
 * SOUL.md modification is off by default; LLM reflection is on with a
 * rule-based fallback. `llmProvider` is left unset (adapter default).
 */
export const DEFAULT_REFLECTION_CONFIG: ReflectionConfig = {
  triggerAfterConversations: 5,
  triggerAfterHours: 24,
  allowSoulModification: false,
  requireApproval: true,
  useLLM: true, // Enable LLM-powered deep reflection (Phase 4)
  llmFallbackToRules: true,
};
// === Storage ===
// localStorage keys for trigger state and reflection history.
const REFLECTION_STORAGE_KEY = 'zclaw-reflection-state';
const REFLECTION_HISTORY_KEY = 'zclaw-reflection-history';
/** Persistent trigger-tracking state (serialized to localStorage). */
interface ReflectionState {
  /** Conversations recorded since the last completed reflection. */
  conversationsSinceReflection: number;
  /** ISO timestamp of the last reflection, or null if never reflected. */
  lastReflectionTime: string | null;
  /** Agent the last reflection ran for, or null if never reflected. */
  lastReflectionAgentId: string | null;
}
// === Reflection Engine ===
/**
 * Agent self-improvement engine.
 *
 * Tracks conversation counts/elapsed time to decide when to reflect,
 * then analyzes the agent's memories (via LLM when available, otherwise
 * rule-based heuristics), persists insights back as memories, and can
 * propose identity changes. Trigger state and a bounded history are
 * serialized to localStorage.
 */
export class ReflectionEngine {
  private config: ReflectionConfig;
  private state: ReflectionState;
  private history: ReflectionResult[] = []; // bounded; see reflect()
  private llmAdapter: LLMServiceAdapter | null = null;
  constructor(config?: Partial<ReflectionConfig>) {
    this.config = { ...DEFAULT_REFLECTION_CONFIG, ...config };
    this.state = this.loadState();
    this.loadHistory();
    // Initialize LLM adapter if configured
    if (this.config.useLLM) {
      try {
        this.llmAdapter = getLLMAdapter();
      } catch (error) {
        // Adapter stays null; reflect() will fall back to rule-based analysis.
        console.warn('[ReflectionEngine] Failed to initialize LLM adapter:', error);
      }
    }
  }
  // === Trigger Management ===
  /**
   * Call after each conversation to track when reflection should trigger.
   * Persists the updated counter immediately.
   */
  recordConversation(): void {
    this.state.conversationsSinceReflection++;
    this.saveState();
  }
  /**
   * Check if it's time for reflection.
   * True when either the conversation-count or elapsed-hours threshold is
   * met; a never-reflected agent triggers after a hard-coded 3 conversations.
   */
  shouldReflect(): boolean {
    // Conversation count trigger
    if (this.state.conversationsSinceReflection >= this.config.triggerAfterConversations) {
      return true;
    }
    // Time-based trigger
    if (this.state.lastReflectionTime) {
      const elapsed = Date.now() - new Date(this.state.lastReflectionTime).getTime();
      const hoursSince = elapsed / (1000 * 60 * 60);
      if (hoursSince >= this.config.triggerAfterHours) {
        return true;
      }
    } else {
      // Never reflected before, trigger after initial conversations
      return this.state.conversationsSinceReflection >= 3;
    }
    return false;
  }
  /**
   * Execute a reflection cycle for the given agent.
   *
   * Order of operations: autonomy gate (unless skipped) -> LLM reflection
   * (when enabled/available) -> rule-based fallback. A failed autonomy
   * check returns an empty result rather than throwing.
   */
  async reflect(agentId: string, options?: { forceLLM?: boolean; skipAutonomyCheck?: boolean }): Promise<ReflectionResult> {
    console.log(`[Reflection] Starting reflection for agent: ${agentId}`);
    // Autonomy check - verify if reflection is allowed
    if (!options?.skipAutonomyCheck) {
      const { canProceed, decision } = canAutoExecute('reflection_run', 5);
      if (!canProceed) {
        console.log(`[Reflection] Autonomy check failed: ${decision.reason}`);
        // Return empty result instead of throwing
        return {
          patterns: [],
          improvements: [],
          identityProposals: [],
          newMemories: 0,
          timestamp: new Date().toISOString(),
        };
      }
      console.log(`[Reflection] Autonomy check passed: ${decision.reason}`);
    }
    // Try LLM-powered reflection if enabled
    if ((this.config.useLLM || options?.forceLLM) && this.llmAdapter?.isAvailable()) {
      try {
        console.log('[Reflection] Using LLM-powered deep reflection');
        return await this.llmReflectImpl(agentId);
      } catch (error) {
        console.error('[Reflection] LLM reflection failed:', error);
        if (!this.config.llmFallbackToRules) {
          throw error;
        }
        console.log('[Reflection] Falling back to rule-based analysis');
      }
    }
    // Rule-based reflection (original implementation)
    return this.ruleBasedReflect(agentId);
  }
  /**
   * LLM-powered deep reflection implementation.
   * Uses semantic analysis for pattern detection and improvement suggestions.
   * Persists patterns seen >= 2 times and high-priority improvements as
   * 'lesson' memories, then updates trigger state and history.
   */
  private async llmReflectImpl(agentId: string): Promise<ReflectionResult> {
    const memoryMgr = getMemoryManager();
    const identityMgr = getAgentIdentityManager();
    // 1. Gather context for LLM analysis
    const allMemories = await memoryMgr.getAll(agentId, { limit: 100 });
    const context = this.buildReflectionContext(agentId, allMemories);
    // 2. Call LLM for deep reflection
    const llmResponse = await llmReflect(context, this.llmAdapter!);
    // 3. Parse LLM response
    const { patterns, improvements } = this.parseLLMResponse(llmResponse);
    // 4. Propose identity changes if patterns warrant it
    const identityProposals: IdentityChangeProposal[] = [];
    if (this.config.allowSoulModification) {
      const proposals = this.proposeIdentityChanges(agentId, patterns, identityMgr);
      identityProposals.push(...proposals);
    }
    // 5. Save reflection insights as memories
    let newMemories = 0;
    for (const pattern of patterns.filter(p => p.frequency >= 2)) {
      await memoryMgr.save({
        agentId,
        content: `[LLM反思] ${pattern.observation} (出现${pattern.frequency}次, ${pattern.sentiment === 'positive' ? '正面' : pattern.sentiment === 'negative' ? '负面' : '中性'})`,
        type: 'lesson',
        importance: pattern.sentiment === 'negative' ? 8 : 5,
        source: 'llm-reflection',
        tags: ['reflection', 'pattern', 'llm'],
      });
      newMemories++;
    }
    for (const improvement of improvements.filter(i => i.priority === 'high')) {
      await memoryMgr.save({
        agentId,
        content: `[LLM建议] [${improvement.area}] ${improvement.suggestion}`,
        type: 'lesson',
        importance: 7,
        source: 'llm-reflection',
        tags: ['reflection', 'improvement', 'llm'],
      });
      newMemories++;
    }
    // 6. Build result
    const result: ReflectionResult = {
      patterns,
      improvements,
      identityProposals,
      newMemories,
      timestamp: new Date().toISOString(),
    };
    // 7. Update state and history
    this.state.conversationsSinceReflection = 0;
    this.state.lastReflectionTime = result.timestamp;
    this.state.lastReflectionAgentId = agentId;
    this.saveState();
    this.history.push(result);
    // Trim history: once it exceeds 20 entries, keep only the latest 10.
    if (this.history.length > 20) {
      this.history = this.history.slice(-10);
    }
    this.saveHistory();
    console.log(
      `[Reflection] LLM complete: ${patterns.length} patterns, ${improvements.length} improvements, ` +
      `${identityProposals.length} proposals, ${newMemories} memories saved`
    );
    return result;
  }
  /**
   * Build context string for LLM reflection.
   * Summarizes up to 50 recent memories, the type distribution, and the
   * last 3 reflection outcomes into a prompt fragment.
   */
  private buildReflectionContext(agentId: string, memories: MemoryEntry[]): string {
    const memorySummary = memories.slice(0, 50).map(m =>
      `[${m.type}] ${m.content} (重要性: ${m.importance}, 访问: ${m.accessCount}次)`
    ).join('\n');
    const typeStats = new Map<string, number>();
    for (const m of memories) {
      typeStats.set(m.type, (typeStats.get(m.type) || 0) + 1);
    }
    const recentHistory = this.history.slice(-3).map(h =>
      `上次反思(${h.timestamp}): ${h.patterns.length}个模式, ${h.improvements.length}个建议`
    ).join('\n');
    return `
Agent ID: ${agentId}
记忆总数: ${memories.length}
记忆类型分布: ${[...typeStats.entries()].map(([k, v]) => `${k}:${v}`).join(', ')}
最近记忆:
${memorySummary}
历史反思:
${recentHistory || '无'}
`;
  }
  /**
   * Parse LLM response into structured reflection data.
   * Extracts the first JSON object found in the text; falls back to a
   * generic pattern when parsing fails, and guarantees at least one
   * pattern in the output.
   */
  private parseLLMResponse(response: string): {
    patterns: PatternObservation[];
    improvements: ImprovementSuggestion[];
  } {
    const patterns: PatternObservation[] = [];
    const improvements: ImprovementSuggestion[] = [];
    try {
      // Try to extract JSON from response
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        const parsed = JSON.parse(jsonMatch[0]);
        if (Array.isArray(parsed.patterns)) {
          for (const p of parsed.patterns) {
            patterns.push({
              // NOTE(review): `p.observation || p.observation` — the second
              // operand is a dead duplicate; likely a different fallback
              // field was intended. Confirm against the LLM prompt schema.
              observation: p.observation || p.observation || '未知模式',
              frequency: p.frequency || 1,
              sentiment: p.sentiment || 'neutral',
              evidence: Array.isArray(p.evidence) ? p.evidence : [],
            });
          }
        }
        if (Array.isArray(parsed.improvements)) {
          for (const i of parsed.improvements) {
            improvements.push({
              area: i.area || '通用',
              // NOTE(review): `i.suggestion || i.suggestion` — same dead
              // duplicate operand as above; confirm the intended fallback.
              suggestion: i.suggestion || i.suggestion || '',
              priority: i.priority || 'medium',
            });
          }
        }
      }
    } catch (error) {
      console.warn('[Reflection] Failed to parse LLM response as JSON:', error);
      // Fallback: extract text patterns
      if (response.includes('模式') || response.includes('pattern')) {
        patterns.push({
          observation: 'LLM 分析完成,但未能解析结构化数据',
          frequency: 1,
          sentiment: 'neutral',
          evidence: [response.slice(0, 200)],
        });
      }
    }
    // Ensure we have at least some output
    if (patterns.length === 0) {
      patterns.push({
        observation: 'LLM 反思完成,未检测到显著模式',
        frequency: 1,
        sentiment: 'neutral',
        evidence: [],
      });
    }
    return { patterns, improvements };
  }
  /**
   * Rule-based reflection (original implementation).
   * Same persistence/state flow as llmReflectImpl but with heuristic
   * pattern detection; only patterns seen >= 3 times are persisted.
   */
  private async ruleBasedReflect(agentId: string): Promise<ReflectionResult> {
    const memoryMgr = getMemoryManager();
    const identityMgr = getAgentIdentityManager();
    // 1. Analyze memory patterns
    const allMemories = await memoryMgr.getAll(agentId, { limit: 100 });
    const patterns = this.analyzePatterns(allMemories);
    // 2. Generate improvement suggestions
    const improvements = this.generateImprovements(patterns, allMemories);
    // 3. Propose identity changes if patterns warrant it
    const identityProposals: IdentityChangeProposal[] = [];
    if (this.config.allowSoulModification) {
      const proposals = this.proposeIdentityChanges(agentId, patterns, identityMgr);
      identityProposals.push(...proposals);
    }
    // 4. Save reflection insights as memories
    let newMemories = 0;
    for (const pattern of patterns.filter(p => p.frequency >= 3)) {
      await memoryMgr.save({
        agentId,
        content: `反思观察: ${pattern.observation} (出现${pattern.frequency}次, ${pattern.sentiment === 'positive' ? '正面' : pattern.sentiment === 'negative' ? '负面' : '中性'})`,
        type: 'lesson',
        importance: pattern.sentiment === 'negative' ? 8 : 5,
        source: 'reflection',
        tags: ['reflection', 'pattern'],
      });
      newMemories++;
    }
    for (const improvement of improvements.filter(i => i.priority === 'high')) {
      await memoryMgr.save({
        agentId,
        content: `改进方向: [${improvement.area}] ${improvement.suggestion}`,
        type: 'lesson',
        importance: 7,
        source: 'reflection',
        tags: ['reflection', 'improvement'],
      });
      newMemories++;
    }
    // 5. Build result
    const result: ReflectionResult = {
      patterns,
      improvements,
      identityProposals,
      newMemories,
      timestamp: new Date().toISOString(),
    };
    // 6. Update state
    this.state.conversationsSinceReflection = 0;
    this.state.lastReflectionTime = result.timestamp;
    this.state.lastReflectionAgentId = agentId;
    this.saveState();
    // 7. Store in history
    this.history.push(result);
    // Trim history: once it exceeds 20 entries, keep only the latest 10.
    if (this.history.length > 20) {
      this.history = this.history.slice(-10);
    }
    this.saveHistory();
    console.log(
      `[Reflection] Complete: ${patterns.length} patterns, ${improvements.length} improvements, ` +
      `${identityProposals.length} proposals, ${newMemories} memories saved`
    );
    return result;
  }
  // === Pattern Analysis ===
  /**
   * Heuristic pattern detection over the agent's memories: type-count
   * thresholds, high-access/high-importance clusters, low-importance
   * accumulation, and recurring tags.
   */
  private analyzePatterns(memories: MemoryEntry[]): PatternObservation[] {
    const patterns: PatternObservation[] = [];
    // Analyze memory type distribution
    const typeCounts = new Map<string, number>();
    for (const m of memories) {
      typeCounts.set(m.type, (typeCounts.get(m.type) || 0) + 1);
    }
    // Pattern: Too many tasks accumulating
    const taskCount = typeCounts.get('task') || 0;
    if (taskCount >= 5) {
      patterns.push({
        observation: `积累了 ${taskCount} 个待办任务,可能存在任务管理不善`,
        frequency: taskCount,
        sentiment: 'negative',
        evidence: memories.filter(m => m.type === 'task').slice(0, 3).map(m => m.content),
      });
    }
    // Pattern: Strong preference accumulation
    const prefCount = typeCounts.get('preference') || 0;
    if (prefCount >= 5) {
      patterns.push({
        observation: `已记录 ${prefCount} 个用户偏好,对用户习惯有较好理解`,
        frequency: prefCount,
        sentiment: 'positive',
        evidence: memories.filter(m => m.type === 'preference').slice(0, 3).map(m => m.content),
      });
    }
    // Pattern: Many lessons learned
    const lessonCount = typeCounts.get('lesson') || 0;
    if (lessonCount >= 5) {
      patterns.push({
        observation: `积累了 ${lessonCount} 条经验教训,知识库在成长`,
        frequency: lessonCount,
        sentiment: 'positive',
        evidence: memories.filter(m => m.type === 'lesson').slice(0, 3).map(m => m.content),
      });
    }
    // Pattern: High-importance items being accessed frequently
    const highAccess = memories.filter(m => m.accessCount >= 5 && m.importance >= 7);
    if (highAccess.length >= 3) {
      patterns.push({
        observation: `${highAccess.length} 条高频访问的重要记忆,核心知识正在形成`,
        frequency: highAccess.length,
        sentiment: 'positive',
        evidence: highAccess.slice(0, 3).map(m => m.content),
      });
    }
    // Pattern: Low-importance memories accumulating
    const lowImportance = memories.filter(m => m.importance <= 3);
    if (lowImportance.length > 20) {
      patterns.push({
        observation: `${lowImportance.length} 条低重要性记忆,建议清理`,
        frequency: lowImportance.length,
        sentiment: 'neutral',
        evidence: [],
      });
    }
    // Pattern: Tag analysis - recurring topics
    const tagCounts = new Map<string, number>();
    for (const m of memories) {
      for (const tag of m.tags) {
        if (tag !== 'auto-extracted') {
          tagCounts.set(tag, (tagCounts.get(tag) || 0) + 1);
        }
      }
    }
    const frequentTags = [...tagCounts.entries()]
      .filter(([, count]) => count >= 5)
      .sort((a, b) => b[1] - a[1]);
    if (frequentTags.length > 0) {
      patterns.push({
        observation: `反复出现的主题: ${frequentTags.slice(0, 5).map(([tag, count]) => `${tag}(${count}次)`).join(', ')}`,
        frequency: frequentTags[0][1],
        sentiment: 'neutral',
        evidence: frequentTags.map(([tag]) => tag),
      });
    }
    return patterns;
  }
  // === Improvement Suggestions ===
  /**
   * Derive improvement suggestions from detected patterns (matched by
   * substring of the observation text) and from raw memory counts.
   */
  private generateImprovements(
    patterns: PatternObservation[],
    memories: MemoryEntry[]
  ): ImprovementSuggestion[] {
    const improvements: ImprovementSuggestion[] = [];
    // Suggestion: Clear pending tasks
    const taskPattern = patterns.find(p => p.observation.includes('待办任务'));
    if (taskPattern) {
      improvements.push({
        area: '任务管理',
        suggestion: '清理已完成的任务记忆,对长期未处理的任务降低重要性或标记为已取消',
        priority: 'high',
      });
    }
    // Suggestion: Prune low-importance memories
    const lowPattern = patterns.find(p => p.observation.includes('低重要性'));
    if (lowPattern) {
      improvements.push({
        area: '记忆管理',
        suggestion: '执行记忆清理移除30天以上未访问且重要性低于3的记忆',
        priority: 'medium',
      });
    }
    // Suggestion: User profile enrichment
    const prefCount = memories.filter(m => m.type === 'preference').length;
    if (prefCount < 3) {
      improvements.push({
        area: '用户理解',
        suggestion: '主动在对话中了解用户偏好(沟通风格、技术栈、工作习惯),丰富用户画像',
        priority: 'medium',
      });
    }
    // Suggestion: Knowledge consolidation
    const factCount = memories.filter(m => m.type === 'fact').length;
    if (factCount > 20) {
      improvements.push({
        area: '知识整合',
        suggestion: '合并相似的事实记忆,提高检索效率。可将相关事实整合为结构化的项目/用户档案',
        priority: 'low',
      });
    }
    return improvements;
  }
  // === Identity Change Proposals ===
  /**
   * Propose an instructions update when >= 2 negative patterns were found.
   *
   * NOTE(review): the proposal appends a new "自我反思改进" section to the
   * current instructions each time it fires — repeated reflections could
   * accumulate duplicate sections. Confirm whether proposeChange/approval
   * flow deduplicates downstream.
   */
  private proposeIdentityChanges(
    agentId: string,
    patterns: PatternObservation[],
    identityMgr: ReturnType<typeof getAgentIdentityManager>
  ): IdentityChangeProposal[] {
    const proposals: IdentityChangeProposal[] = [];
    // If many negative patterns, propose instruction update
    const negativePatterns = patterns.filter(p => p.sentiment === 'negative');
    if (negativePatterns.length >= 2) {
      const identity = identityMgr.getIdentity(agentId);
      const additions = negativePatterns.map(p =>
        `- 注意: ${p.observation}`
      ).join('\n');
      const proposal = identityMgr.proposeChange(
        agentId,
        'instructions',
        identity.instructions + `\n\n## 自我反思改进\n${additions}`,
        `基于 ${negativePatterns.length} 个负面模式观察,建议在指令中增加自我改进提醒`
      );
      if (proposal) {
        proposals.push(proposal);
      }
    }
    return proposals;
  }
  // === History ===
  /** Return the most recent `limit` reflection results (oldest first). */
  getHistory(limit: number = 10): ReflectionResult[] {
    return this.history.slice(-limit);
  }
  /** Return the latest reflection result, or null if none exist. */
  getLastResult(): ReflectionResult | null {
    return this.history.length > 0 ? this.history[this.history.length - 1] : null;
  }
  // === Config ===
  /** Return a shallow copy of the current configuration. */
  getConfig(): ReflectionConfig {
    return { ...this.config };
  }
  /** Merge partial updates into the configuration (not persisted). */
  updateConfig(updates: Partial<ReflectionConfig>): void {
    this.config = { ...this.config, ...updates };
  }
  /** Return a shallow copy of the trigger-tracking state. */
  getState(): ReflectionState {
    return { ...this.state };
  }
  // === Persistence ===
  // localStorage access below deliberately swallows errors (quota,
  // privacy mode, unavailable storage) and falls back to defaults.
  private loadState(): ReflectionState {
    try {
      const raw = localStorage.getItem(REFLECTION_STORAGE_KEY);
      if (raw) return JSON.parse(raw);
    } catch { /* silent */ }
    return {
      conversationsSinceReflection: 0,
      lastReflectionTime: null,
      lastReflectionAgentId: null,
    };
  }
  private saveState(): void {
    try {
      localStorage.setItem(REFLECTION_STORAGE_KEY, JSON.stringify(this.state));
    } catch { /* silent */ }
  }
  private loadHistory(): void {
    try {
      const raw = localStorage.getItem(REFLECTION_HISTORY_KEY);
      if (raw) this.history = JSON.parse(raw);
    } catch {
      this.history = [];
    }
  }
  private saveHistory(): void {
    // Only the latest 10 results are persisted, matching the in-memory trim.
    try {
      localStorage.setItem(REFLECTION_HISTORY_KEY, JSON.stringify(this.history.slice(-10)));
    } catch { /* silent */ }
  }
}
// === Singleton ===
/** Lazily-created, process-wide ReflectionEngine instance. */
let _instance: ReflectionEngine | null = null;

/**
 * Return the shared ReflectionEngine, creating it on first use.
 * `config` only takes effect on the call that creates the instance;
 * later calls ignore it.
 */
export function getReflectionEngine(config?: Partial<ReflectionConfig>): ReflectionEngine {
  _instance ??= new ReflectionEngine(config);
  return _instance;
}

/** Drop the shared instance so the next getReflectionEngine() builds a fresh one. */
export function resetReflectionEngine(): void {
  _instance = null;
}

View File

@@ -191,3 +191,17 @@ export function getCategories(skills: UISkillInfo[]): string[] {
}
return Array.from(categories);
}
// === Aliases for backward compatibility ===
/**
 * Alias for UISkillInfo for backward compatibility.
 * Kept so existing imports keep compiling; new code should use UISkillInfo.
 */
export type SkillDisplay = UISkillInfo;
/**
 * Alias for adaptSkills for catalog adaptation.
 * Thin pass-through kept for backward compatibility; new code should call
 * adaptSkills directly.
 */
export function adaptSkillsCatalog(skills: ConfigSkillInfo[]): UISkillInfo[] {
  return adaptSkills(skills);
}

View File

@@ -11,7 +11,7 @@
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.5.2
*/
import { getMemoryManager } from './agent-memory';
import { intelligenceClient } from './intelligence-client';
import { canAutoExecute } from './autonomy-manager';
// === Types ===
@@ -295,8 +295,9 @@ export class SkillDiscoveryEngine {
// 3. Check memory patterns for recurring needs
try {
const memories = await getMemoryManager().search(skill.name, {
const memories = await intelligenceClient.memory.search({
agentId,
query: skill.name,
limit: 5,
minImportance: 3,
});

View File

@@ -1,11 +1,8 @@
import { create } from 'zustand';
import { persist } from 'zustand/middleware';
import { getGatewayClient, AgentStreamDelta } from '../lib/gateway-client';
import { getMemoryManager } from '../lib/agent-memory';
import { getAgentIdentityManager } from '../lib/agent-identity';
import { intelligenceClient } from '../lib/intelligence-client';
import { getMemoryExtractor } from '../lib/memory-extractor';
import { getContextCompactor } from '../lib/context-compactor';
import { getReflectionEngine } from '../lib/reflection-engine';
import { getAgentSwarm } from '../lib/agent-swarm';
import { getSkillDiscovery } from '../lib/skill-discovery';
@@ -300,21 +297,26 @@ export const useChatStore = create<ChatState>()(
// Check context compaction threshold before adding new message
try {
const compactor = getContextCompactor();
const check = compactor.checkThreshold(get().messages.map(m => ({ role: m.role, content: m.content })));
if (check.shouldCompact) {
console.log(`[Chat] Context compaction triggered (${check.urgency}): ${check.currentTokens} tokens`);
const result = await compactor.compact(
get().messages.map(m => ({ role: m.role, content: m.content, id: m.id, timestamp: m.timestamp })),
const messages = get().messages.map(m => ({ role: m.role, content: m.content }));
const check = await intelligenceClient.compactor.checkThreshold(messages);
if (check.should_compact) {
console.log(`[Chat] Context compaction triggered (${check.urgency}): ${check.current_tokens} tokens`);
const result = await intelligenceClient.compactor.compact(
get().messages.map(m => ({
role: m.role,
content: m.content,
id: m.id,
timestamp: m.timestamp instanceof Date ? m.timestamp.toISOString() : m.timestamp
})),
agentId,
get().currentConversationId ?? undefined
);
// Replace messages with compacted version
const compactedMsgs: Message[] = result.compactedMessages.map((m, i) => ({
const compactedMsgs: Message[] = result.compacted_messages.map((m, i) => ({
id: m.id || `compacted_${i}_${Date.now()}`,
role: m.role as Message['role'],
content: m.content,
timestamp: m.timestamp || new Date(),
timestamp: m.timestamp ? new Date(m.timestamp) : new Date(),
}));
set({ messages: compactedMsgs });
}
@@ -325,17 +327,16 @@ export const useChatStore = create<ChatState>()(
// Build memory-enhanced content
let enhancedContent = content;
try {
const memoryMgr = getMemoryManager();
const identityMgr = getAgentIdentityManager();
const relevantMemories = await memoryMgr.search(content, {
const relevantMemories = await intelligenceClient.memory.search({
agentId,
query: content,
limit: 8,
minImportance: 3,
});
const memoryContext = relevantMemories.length > 0
? `\n\n## 相关记忆\n${relevantMemories.map(m => `- [${m.type}] ${m.content}`).join('\n')}`
: '';
const systemPrompt = identityMgr.buildSystemPrompt(agentId, memoryContext);
const systemPrompt = await intelligenceClient.identity.buildPrompt(agentId, memoryContext);
if (systemPrompt) {
enhancedContent = `<context>\n${systemPrompt}\n</context>\n\n${content}`;
}
@@ -426,13 +427,16 @@ export const useChatStore = create<ChatState>()(
console.warn('[Chat] Memory extraction failed:', err)
);
// Track conversation for reflection trigger
const reflectionEngine = getReflectionEngine();
reflectionEngine.recordConversation();
if (reflectionEngine.shouldReflect()) {
reflectionEngine.reflect(agentId).catch(err =>
console.warn('[Chat] Reflection failed:', err)
);
}
intelligenceClient.reflection.recordConversation().catch(err =>
console.warn('[Chat] Recording conversation failed:', err)
);
intelligenceClient.reflection.shouldReflect().then(shouldReflect => {
if (shouldReflect) {
intelligenceClient.reflection.reflect(agentId, []).catch(err =>
console.warn('[Chat] Reflection failed:', err)
);
}
});
},
onError: (error: string) => {
set((state) => ({

View File

@@ -136,6 +136,7 @@ export interface ConfigStateSlice {
modelsError: string | null;
error: string | null;
client: ConfigStoreClient | null;
isLoading: boolean;
}
// === Store Actions Slice ===
@@ -208,6 +209,7 @@ export const useConfigStore = create<ConfigStateSlice & ConfigActionsSlice>((set
modelsError: null,
error: null,
client: null,
isLoading: false,
// Client Injection
setConfigStoreClient: (client: ConfigStoreClient) => {

View File

@@ -24,8 +24,6 @@ import {
type LocalGatewayStatus,
} from '../lib/tauri-gateway';
import {
performHealthCheck,
createHealthCheckScheduler,
type HealthCheckResult,
type HealthStatus,
} from '../lib/health-check';
@@ -165,6 +163,8 @@ export const useConnectionStore = create<ConnectionStore>((set, get) => {
localGateway: getUnsupportedLocalGatewayStatus(),
localGatewayBusy: false,
isLoading: false,
healthStatus: 'unknown',
healthCheckResult: null,
client,
// === Actions ===

View File

@@ -6,7 +6,11 @@
import { create } from 'zustand';
import { persist } from 'zustand/middleware';
import { getMemoryManager, type MemoryEntry, type MemoryType } from '../lib/agent-memory';
import {
intelligenceClient,
type MemoryEntry,
type MemoryType,
} from '../lib/intelligence-client';
export type { MemoryType };
@@ -184,8 +188,10 @@ export const useMemoryGraphStore = create<MemoryGraphStore>()(
set({ isLoading: true, error: null });
try {
const mgr = getMemoryManager();
const memories = await mgr.getAll(agentId, { limit: 200 });
const memories = await intelligenceClient.memory.search({
agentId,
limit: 200,
});
const nodes = memories.map((m, i) => memoryToNode(m, i, memories.length));
const edges = findRelatedMemories(memories);