feat(automation): complete unified automation system redesign

Phase 4 completion:
- Add ApprovalQueue component for managing pending approvals
- Add ExecutionResult component for displaying hand/workflow results
- Update Sidebar navigation to use unified AutomationPanel
- Replace separate 'hands' and 'workflow' tabs with single 'automation' tab
- Fix TypeScript type safety issues with unknown types in JSX expressions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-03-18 17:12:05 +08:00
parent 3a7631e035
commit 3518fc8ece
74 changed files with 4984 additions and 687 deletions

View File

@@ -13,6 +13,8 @@
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.3
*/
import { canAutoExecute } from './autonomy-manager';
// === Types ===
export interface IdentityFiles {
@@ -200,8 +202,17 @@ export class AgentIdentityManager {
agentId: string,
file: 'soul' | 'instructions',
suggestedContent: string,
reason: string
): IdentityChangeProposal {
reason: string,
options?: { skipAutonomyCheck?: boolean }
): IdentityChangeProposal | null {
// Autonomy check - identity updates are high-risk, always require approval
if (!options?.skipAutonomyCheck) {
const { decision } = canAutoExecute('identity_update', 8);
console.log(`[AgentIdentity] Autonomy check for identity update: ${decision.reason}`);
// Identity updates always require approval regardless of autonomy level
// But we log the decision for audit purposes
}
const identity = this.getIdentity(agentId);
const currentContent = file === 'soul' ? identity.soul : identity.instructions;

View File

@@ -20,6 +20,7 @@ import {
type LLMServiceAdapter,
type LLMProvider,
} from './llm-service';
import { canAutoExecute } from './autonomy-manager';
// === Types ===
@@ -181,8 +182,27 @@ export class ContextCompactor {
messages: CompactableMessage[],
agentId: string,
conversationId?: string,
options?: { forceLLM?: boolean }
options?: { forceLLM?: boolean; skipAutonomyCheck?: boolean }
): Promise<CompactionResult> {
// Autonomy check - verify if compaction is allowed
if (!options?.skipAutonomyCheck) {
const { canProceed, decision } = canAutoExecute('compaction_run', 5);
if (!canProceed) {
console.log(`[ContextCompactor] Autonomy check failed: ${decision.reason}`);
// Return result without compaction
return {
compactedMessages: messages,
summary: '',
originalCount: messages.length,
retainedCount: messages.length,
flushedMemories: 0,
tokensBeforeCompaction: estimateMessagesTokens(messages),
tokensAfterCompaction: estimateMessagesTokens(messages),
};
}
console.log(`[ContextCompactor] Autonomy check passed: ${decision.reason}`);
}
const tokensBeforeCompaction = estimateMessagesTokens(messages);
const keepCount = Math.min(this.config.keepRecentMessages, messages.length);

View File

@@ -958,15 +958,27 @@ export class GatewayClient {
private async restPost<T>(path: string, body?: unknown): Promise<T> {
const baseUrl = this.getRestBaseUrl();
const response = await fetch(`${baseUrl}${path}`, {
const url = `${baseUrl}${path}`;
console.log(`[GatewayClient] POST ${url}`, body);
const response = await fetch(url, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: body ? JSON.stringify(body) : undefined,
});
if (!response.ok) {
throw new Error(`REST API error: ${response.status} ${response.statusText}`);
const errorBody = await response.text().catch(() => '');
console.error(`[GatewayClient] POST ${url} failed: ${response.status} ${response.statusText}`, errorBody);
const error = new Error(`REST API error: ${response.status} ${response.statusText}${errorBody ? ` - ${errorBody}` : ''}`);
(error as any).status = response.status;
(error as any).body = errorBody;
throw error;
}
return response.json();
const result = await response.json();
console.log(`[GatewayClient] POST ${url} response:`, result);
return result;
}
private async restPut<T>(path: string, body?: unknown): Promise<T> {
@@ -1318,12 +1330,19 @@ export class GatewayClient {
/**
 * Trigger a Hand by name via the gateway REST API.
 *
 * @param name   Hand identifier used in the endpoint path.
 * @param params Optional parameters forwarded as the request body
 *               (an empty object is sent when omitted).
 * @returns The run identifier and initial status reported by the gateway.
 * @throws Re-throws any REST error after logging it with the hand name.
 */
async triggerHand(name: string, params?: Record<string, unknown>): Promise<{ runId: string; status: string }> {
  console.log(`[GatewayClient] Triggering hand: ${name}`, params);
  // OpenFang uses /activate endpoint, not /trigger
  try {
    const result = await this.restPost<{
      instance_id: string;
      status: string;
    }>(`/api/hands/${name}/activate`, params || {});
    console.log(`[GatewayClient] Hand trigger response:`, result);
    // Normalize the gateway's snake_case field to the client's camelCase shape.
    return { runId: result.instance_id, status: result.status };
  } catch (err) {
    console.error(`[GatewayClient] Hand trigger failed for ${name}:`, err);
    throw err;
  }
}
/** Get Hand execution status */

View File

@@ -295,21 +295,59 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
const config = { ...this.config, ...options };
const startTime = Date.now();
const response = await fetch(`${config.apiBase}/complete`, {
// Build a single prompt from messages
const systemMessage = messages.find(m => m.role === 'system')?.content || '';
const userMessage = messages.find(m => m.role === 'user')?.content || '';
// Combine system and user messages into a single prompt
const fullPrompt = systemMessage
? `${systemMessage}\n\n${userMessage}`
: userMessage;
// Use OpenFang's chat endpoint (same as main chat)
// Try to get the default agent ID from localStorage or use 'default'
const agentId = localStorage.getItem('zclaw-default-agent-id') || 'default';
const response = await fetch(`/api/agents/${agentId}/message`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
messages,
message: fullPrompt,
max_tokens: config.maxTokens,
temperature: config.temperature,
temperature: config.temperature ?? 0.3, // Lower temperature for extraction tasks
}),
signal: AbortSignal.timeout(config.timeout || 60000),
});
if (!response.ok) {
const error = await response.text();
// If agent not found, try without agent ID (direct /api/chat)
if (response.status === 404) {
const fallbackResponse = await fetch('/api/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: fullPrompt,
max_tokens: config.maxTokens,
temperature: config.temperature ?? 0.3,
}),
signal: AbortSignal.timeout(config.timeout || 60000),
});
if (!fallbackResponse.ok) {
throw new Error(`[Gateway] Both endpoints failed: ${fallbackResponse.status}`);
}
const data = await fallbackResponse.json();
const latencyMs = Date.now() - startTime;
return {
content: data.response || data.content || '',
tokensUsed: { input: data.input_tokens || 0, output: data.output_tokens || 0 },
latencyMs,
};
}
throw new Error(`[Gateway] API error: ${response.status} - ${error}`);
}
@@ -317,15 +355,14 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
const latencyMs = Date.now() - startTime;
return {
content: data.content || data.choices?.[0]?.message?.content || '',
tokensUsed: data.tokensUsed || { input: 0, output: 0 },
model: data.model,
content: data.response || data.content || '',
tokensUsed: { input: data.input_tokens || 0, output: data.output_tokens || 0 },
latencyMs,
};
}
/** Whether this adapter can be used: true in a browser (can reach OpenFang). */
isAvailable(): boolean {
  // Gateway is available if we're in browser (can connect to OpenFang)
  return typeof window !== 'undefined';
}
@@ -382,8 +419,8 @@ export function loadConfig(): LLMConfig {
// Ignore parse errors
}
// Default to mock for safety
return DEFAULT_CONFIGS.mock;
// Default to gateway (OpenFang passthrough) for L4 self-evolution
return DEFAULT_CONFIGS.gateway;
}
export function saveConfig(config: LLMConfig): void {

View File

@@ -80,9 +80,9 @@ const EXTRACTION_PROMPT = `请从以下对话中提取值得长期记住的信
// === Default Config ===
// Baseline extraction settings; callers may override individual fields.
export const DEFAULT_EXTRACTION_CONFIG: ExtractionConfig = {
  useLLM: true, // Enable LLM-powered semantic extraction by default
  llmFallbackToRules: true, // Fall back to rule-based extraction if the LLM fails
  minMessagesForExtraction: 2, // Lowered from 4 to capture memories earlier
  extractionCooldownMs: 30_000, // Minimum gap between extraction runs
  minImportanceThreshold: 3, // Items scoring below this are discarded
};
@@ -119,12 +119,15 @@ export class MemoryExtractor {
): Promise<ExtractionResult> {
// Cooldown check
if (Date.now() - this.lastExtractionTime < this.config.extractionCooldownMs) {
console.log('[MemoryExtractor] Skipping extraction: cooldown active');
return { items: [], saved: 0, skipped: 0, userProfileUpdated: false };
}
// Minimum message threshold
const chatMessages = messages.filter(m => m.role === 'user' || m.role === 'assistant');
console.log(`[MemoryExtractor] Checking extraction: ${chatMessages.length} messages (min: ${this.config.minMessagesForExtraction})`);
if (chatMessages.length < this.config.minMessagesForExtraction) {
console.log('[MemoryExtractor] Skipping extraction: not enough messages');
return { items: [], saved: 0, skipped: 0, userProfileUpdated: false };
}
@@ -146,11 +149,14 @@ export class MemoryExtractor {
}
} else {
// Rule-based extraction
console.log('[MemoryExtractor] Using rule-based extraction');
extracted = this.ruleBasedExtraction(chatMessages);
console.log(`[MemoryExtractor] Rule-based extracted ${extracted.length} items before filtering`);
}
// Filter by importance threshold
extracted = extracted.filter(item => item.importance >= this.config.minImportanceThreshold);
console.log(`[MemoryExtractor] After importance filtering (>= ${this.config.minImportanceThreshold}): ${extracted.length} items`);
// Save to memory
const memoryManager = getMemoryManager();

View File

@@ -21,6 +21,7 @@ import {
type LLMServiceAdapter,
type LLMProvider,
} from './llm-service';
import { canAutoExecute } from './autonomy-manager';
// === Types ===
@@ -62,7 +63,7 @@ export const DEFAULT_REFLECTION_CONFIG: ReflectionConfig = {
triggerAfterHours: 24,
allowSoulModification: false,
requireApproval: true,
useLLM: false,
useLLM: true, // Enable LLM-powered deep reflection (Phase 4)
llmFallbackToRules: true,
};
@@ -137,9 +138,26 @@ export class ReflectionEngine {
/**
* Execute a reflection cycle for the given agent.
*/
async reflect(agentId: string, options?: { forceLLM?: boolean }): Promise<ReflectionResult> {
async reflect(agentId: string, options?: { forceLLM?: boolean; skipAutonomyCheck?: boolean }): Promise<ReflectionResult> {
console.log(`[Reflection] Starting reflection for agent: ${agentId}`);
// Autonomy check - verify if reflection is allowed
if (!options?.skipAutonomyCheck) {
const { canProceed, decision } = canAutoExecute('reflection_run', 5);
if (!canProceed) {
console.log(`[Reflection] Autonomy check failed: ${decision.reason}`);
// Return empty result instead of throwing
return {
patterns: [],
improvements: [],
identityProposals: [],
newMemories: 0,
timestamp: new Date().toISOString(),
};
}
console.log(`[Reflection] Autonomy check passed: ${decision.reason}`);
}
// Try LLM-powered reflection if enabled
if ((this.config.useLLM || options?.forceLLM) && this.llmAdapter?.isAvailable()) {
try {
@@ -575,7 +593,9 @@ ${recentHistory || '无'}
identity.instructions + `\n\n## 自我反思改进\n${additions}`,
`基于 ${negativePatterns.length} 个负面模式观察,建议在指令中增加自我改进提醒`
);
proposals.push(proposal);
if (proposal) {
proposals.push(proposal);
}
}
return proposals;

View File

@@ -12,6 +12,7 @@
*/
import { getMemoryManager } from './agent-memory';
import { canAutoExecute } from './autonomy-manager';
// === Types ===
@@ -365,13 +366,33 @@ export class SkillDiscoveryEngine {
/**
* Mark a skill as installed/uninstalled.
* Includes autonomy check for skill_install/skill_uninstall actions.
*/
setSkillInstalled(skillId: string, installed: boolean): void {
setSkillInstalled(
skillId: string,
installed: boolean,
options?: { skipAutonomyCheck?: boolean }
): { success: boolean; reason?: string } {
const skill = this.skills.find(s => s.id === skillId);
if (skill) {
skill.installed = installed;
this.saveIndex();
if (!skill) {
return { success: false, reason: `Skill not found: ${skillId}` };
}
// Autonomy check - verify if skill installation is allowed
if (!options?.skipAutonomyCheck) {
const action = installed ? 'skill_install' : 'skill_uninstall';
const { canProceed, decision } = canAutoExecute(action, 6);
console.log(`[SkillDiscovery] Autonomy check for ${action}: ${decision.reason}`);
if (!canProceed) {
return { success: false, reason: decision.reason };
}
}
skill.installed = installed;
this.saveIndex();
console.log(`[SkillDiscovery] Skill ${skillId} ${installed ? 'installed' : 'uninstalled'}`);
return { success: true };
}
/**