feat(automation): complete unified automation system redesign

Phase 4 completion:
- Add ApprovalQueue component for managing pending approvals
- Add ExecutionResult component for displaying hand/workflow results
- Update Sidebar navigation to use unified AutomationPanel
- Replace separate 'hands' and 'workflow' tabs with single 'automation' tab
- Fix TypeScript type safety issues with unknown types in JSX expressions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Authored by: iven
2026-03-18 17:12:05 +08:00
parent 3a7631e035
commit 3518fc8ece
74 changed files with 4984 additions and 687 deletions

View File

@@ -295,21 +295,59 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
const config = { ...this.config, ...options };
const startTime = Date.now();
const response = await fetch(`${config.apiBase}/complete`, {
// Build a single prompt from messages
const systemMessage = messages.find(m => m.role === 'system')?.content || '';
const userMessage = messages.find(m => m.role === 'user')?.content || '';
// Combine system and user messages into a single prompt
const fullPrompt = systemMessage
? `${systemMessage}\n\n${userMessage}`
: userMessage;
// Use OpenFang's chat endpoint (same as main chat)
// Try to get the default agent ID from localStorage or use 'default'
const agentId = localStorage.getItem('zclaw-default-agent-id') || 'default';
const response = await fetch(`/api/agents/${agentId}/message`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
messages,
message: fullPrompt,
max_tokens: config.maxTokens,
temperature: config.temperature,
temperature: config.temperature ?? 0.3, // Lower temperature for extraction tasks
}),
signal: AbortSignal.timeout(config.timeout || 60000),
});
if (!response.ok) {
const error = await response.text();
// If agent not found, try without agent ID (direct /api/chat)
if (response.status === 404) {
const fallbackResponse = await fetch('/api/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: fullPrompt,
max_tokens: config.maxTokens,
temperature: config.temperature ?? 0.3,
}),
signal: AbortSignal.timeout(config.timeout || 60000),
});
if (!fallbackResponse.ok) {
throw new Error(`[Gateway] Both endpoints failed: ${fallbackResponse.status}`);
}
const data = await fallbackResponse.json();
const latencyMs = Date.now() - startTime;
return {
content: data.response || data.content || '',
tokensUsed: { input: data.input_tokens || 0, output: data.output_tokens || 0 },
latencyMs,
};
}
throw new Error(`[Gateway] API error: ${response.status} - ${error}`);
}
@@ -317,15 +355,14 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
const latencyMs = Date.now() - startTime;
return {
content: data.content || data.choices?.[0]?.message?.content || '',
tokensUsed: data.tokensUsed || { input: 0, output: 0 },
model: data.model,
content: data.response || data.content || '',
tokensUsed: { input: data.input_tokens || 0, output: data.output_tokens || 0 },
latencyMs,
};
}
isAvailable(): boolean {
// Gateway is available if we're connected to OpenFang
// Gateway is available if we're in browser (can connect to OpenFang)
return typeof window !== 'undefined';
}
@@ -382,8 +419,8 @@ export function loadConfig(): LLMConfig {
// Ignore parse errors
}
// Default to mock for safety
return DEFAULT_CONFIGS.mock;
// Default to gateway (OpenFang passthrough) for L4 self-evolution
return DEFAULT_CONFIGS.gateway;
}
export function saveConfig(config: LLMConfig): void {