feat: 新增技能编排引擎和工作流构建器组件
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

refactor: 统一Hands系统常量到单个源文件
refactor: 更新Hands中文名称和描述

fix: 修复技能市场在连接状态变化时重新加载
fix: 修复身份变更提案的错误处理逻辑

docs: 更新多个功能文档的验证状态和实现位置
docs: 更新Hands系统文档

test: 添加测试文件验证工作区路径
This commit is contained in:
iven
2026-03-25 08:27:25 +08:00
parent 9c781f5f2a
commit aa6a9cbd84
110 changed files with 12384 additions and 1337 deletions

View File

@@ -7,6 +7,7 @@
*/
import { tomlUtils, TomlParseError } from './toml-utils';
import { DEFAULT_MODEL_ID, DEFAULT_PROVIDER } from '../constants/models';
import type {
OpenFangConfig,
ConfigValidationResult,
@@ -74,12 +75,12 @@ const DEFAULT_CONFIG: Partial<OpenFangConfig> = {
agent: {
defaults: {
workspace: '~/.openfang/workspace',
default_model: 'gpt-4',
default_model: DEFAULT_MODEL_ID,
},
},
llm: {
default_provider: 'openai',
default_model: 'gpt-4',
default_provider: DEFAULT_PROVIDER,
default_model: DEFAULT_MODEL_ID,
},
};

View File

@@ -66,11 +66,11 @@ export interface MemorySearchOptions {
}
export interface MemoryStats {
total_memories: number;
total_entries: number;
by_type: Record<string, number>;
by_agent: Record<string, number>;
oldest_memory: string | null;
newest_memory: string | null;
oldest_entry: string | null;
newest_entry: string | null;
storage_size_bytes: number;
}

View File

@@ -185,11 +185,11 @@ export function toBackendSearchOptions(options: MemorySearchOptions): BackendSea
*/
export function toFrontendStats(backend: BackendMemoryStats): MemoryStats {
return {
totalEntries: backend.total_memories,
totalEntries: backend.total_entries,
byType: backend.by_type,
byAgent: backend.by_agent,
oldestEntry: backend.oldest_memory,
newestEntry: backend.newest_memory,
oldestEntry: backend.oldest_entry,
newestEntry: backend.newest_entry,
storageSizeBytes: backend.storage_size_bytes ?? 0,
};
}
@@ -325,13 +325,22 @@ const fallbackMemory = {
new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()
);
// Estimate storage size from serialized data
let storageSizeBytes = 0;
try {
const serialized = JSON.stringify(store.memories);
storageSizeBytes = new Blob([serialized]).size;
} catch {
// Ignore serialization errors
}
return {
totalEntries: store.memories.length,
byType,
byAgent,
oldestEntry: sorted[0]?.createdAt ?? null,
newestEntry: sorted[sorted.length - 1]?.createdAt ?? null,
storageSizeBytes: 0, // localStorage-based fallback doesn't track storage size
storageSizeBytes,
};
},
@@ -994,10 +1003,10 @@ export const intelligenceClient = {
): Promise<void> => {
if (isTauriEnv()) {
await invoke('heartbeat_update_memory_stats', {
agentId,
taskCount,
totalEntries,
storageSizeBytes,
agent_id: agentId,
task_count: taskCount,
total_entries: totalEntries,
storage_size_bytes: storageSizeBytes,
});
}
// Fallback: store in localStorage for non-Tauri environment
@@ -1012,7 +1021,10 @@ export const intelligenceClient = {
recordCorrection: async (agentId: string, correctionType: string): Promise<void> => {
if (isTauriEnv()) {
await invoke('heartbeat_record_correction', { agentId, correctionType });
await invoke('heartbeat_record_correction', {
agent_id: agentId,
correction_type: correctionType,
});
}
// Fallback: store in localStorage for non-Tauri environment
const key = `zclaw-corrections-${agentId}`;
@@ -1021,6 +1033,16 @@ export const intelligenceClient = {
counters[correctionType] = (counters[correctionType] || 0) + 1;
localStorage.setItem(key, JSON.stringify(counters));
},
recordInteraction: async (agentId: string): Promise<void> => {
if (isTauriEnv()) {
await invoke('heartbeat_record_interaction', {
agent_id: agentId,
});
}
// Fallback: store in localStorage for non-Tauri environment
localStorage.setItem(`zclaw-last-interaction-${agentId}`, new Date().toISOString());
},
},
compactor: {

View File

@@ -87,6 +87,12 @@ export interface StreamEventToolEnd {
output: unknown;
}
export interface StreamEventIterationStart {
type: 'iteration_start';
iteration: number;
maxIterations: number;
}
export interface StreamEventComplete {
type: 'complete';
inputTokens: number;
@@ -102,6 +108,7 @@ export type StreamChatEvent =
| StreamEventDelta
| StreamEventToolStart
| StreamEventToolEnd
| StreamEventIterationStart
| StreamEventComplete
| StreamEventError;
@@ -424,6 +431,7 @@ export class KernelClient {
break;
case 'tool_start':
console.log('[KernelClient] Tool started:', streamEvent.name, streamEvent.input);
if (callbacks.onTool) {
callbacks.onTool(
streamEvent.name,
@@ -434,6 +442,7 @@ export class KernelClient {
break;
case 'tool_end':
console.log('[KernelClient] Tool ended:', streamEvent.name, streamEvent.output);
if (callbacks.onTool) {
callbacks.onTool(
streamEvent.name,
@@ -443,7 +452,13 @@ export class KernelClient {
}
break;
case 'iteration_start':
console.log('[KernelClient] Iteration started:', streamEvent.iteration, '/', streamEvent.maxIterations);
// Don't need to notify user about iterations
break;
case 'complete':
console.log('[KernelClient] Stream complete:', streamEvent.inputTokens, streamEvent.outputTokens);
callbacks.onComplete(streamEvent.inputTokens, streamEvent.outputTokens);
// Clean up listener
if (unlisten) {
@@ -453,6 +468,7 @@ export class KernelClient {
break;
case 'error':
console.error('[KernelClient] Stream error:', streamEvent.message);
callbacks.onError(streamEvent.message);
// Clean up listener
if (unlisten) {
@@ -539,6 +555,236 @@ export class KernelClient {
};
}
// === Hands API ===
/**
 * List all available hands.
 *
 * Thin wrapper over the Tauri `hand_list` command. Most fields are optional
 * because the backend payload shape is not guaranteed from this file —
 * NOTE(review): confirm the actual `hand_list` response schema against the
 * Rust command definition.
 *
 * @returns An object with a `hands` array; empty when the backend returns
 *          a null/undefined payload.
 */
async listHands(): Promise<{
  hands: {
    id?: string;
    name: string;
    description?: string;
    status?: string;
    requirements_met?: boolean;
    category?: string;
    icon?: string;
    tool_count?: number;
    tools?: string[];
    metric_count?: number;
    metrics?: string[];
  }[]
}> {
  const hands = await invoke<Array<{
    id?: string;
    name: string;
    description?: string;
    status?: string;
    requirements_met?: boolean;
    category?: string;
    icon?: string;
    tool_count?: number;
    tools?: string[];
    metric_count?: number;
    metrics?: string[];
  }>>('hand_list');
  // Normalize a null/undefined backend payload to an empty list.
  return { hands: hands || [] };
}
/**
 * Get hand details by name.
 *
 * Wrapper over the Tauri `hand_get` command; the backend response is passed
 * through untouched. The `requirements` entries carry both `met` and
 * `satisfied` variants — presumably different backend versions use different
 * keys; callers should check both. TODO(review): confirm against the backend.
 *
 * @param name - Hand name to look up
 */
async getHand(name: string): Promise<{
  id?: string;
  name?: string;
  description?: string;
  status?: string;
  requirements_met?: boolean;
  category?: string;
  icon?: string;
  provider?: string;
  model?: string;
  requirements?: { description?: string; name?: string; met?: boolean; satisfied?: boolean; details?: string; hint?: string }[];
  tools?: string[];
  metrics?: string[];
  config?: Record<string, unknown>;
  tool_count?: number;
  metric_count?: number;
}> {
  return invoke('hand_get', { name });
}
/**
 * Trigger/execute a hand.
 *
 * Calls the Tauri `hand_execute` command and normalizes the backend's
 * snake_case `instance_id` into the frontend's `runId`.
 *
 * NOTE(review): this command receives `id: name` while the other hand APIs
 * in this class pass `handName: name` — confirm the backend really expects
 * `id` here.
 *
 * @param name   - Hand name (sent as the `id` argument)
 * @param params - Optional execution input; defaults to an empty object
 */
async triggerHand(name: string, params?: Record<string, unknown>): Promise<{ runId: string; status: string }> {
  const result = await invoke<{ instance_id: string; status: string }>('hand_execute', {
    id: name,
    input: params || {},
  });
  return { runId: result.instance_id, status: result.status };
}
/**
 * Get the status of a single hand run.
 *
 * @param name  - Hand name (sent as `handName`)
 * @param runId - Run identifier returned by {@link triggerHand}
 */
async getHandStatus(name: string, runId: string): Promise<{ status: string; result?: unknown }> {
  return invoke('hand_run_status', { handName: name, runId });
}
/**
 * Approve or reject a pending hand execution.
 *
 * @param name     - Hand name (sent as `handName`)
 * @param runId    - Run awaiting approval
 * @param approved - true to approve, false to reject
 * @param reason   - Optional human-readable justification
 */
async approveHand(name: string, runId: string, approved: boolean, reason?: string): Promise<{ status: string }> {
  return invoke('hand_approve', { handName: name, runId, approved, reason });
}
/**
 * Cancel an in-flight hand execution.
 *
 * @param name  - Hand name (sent as `handName`)
 * @param runId - Run to cancel
 */
async cancelHand(name: string, runId: string): Promise<{ status: string }> {
  return invoke('hand_cancel', { handName: name, runId });
}
/**
 * List hand runs (execution history).
 *
 * Best-effort: the `hand_run_list` backend command may not exist yet, so any
 * invocation error is deliberately swallowed and an empty history is
 * returned instead of propagating the failure. Run entries expose both
 * camelCase and snake_case timestamp/id variants because the backend wire
 * format is not pinned down here — callers should check both spellings.
 *
 * @param name - Hand name (sent as `handName`)
 * @param opts - Optional pagination (`limit`, `offset`), spread into the call
 */
async listHandRuns(name: string, opts?: { limit?: number; offset?: number }): Promise<{
  runs: {
    runId?: string;
    run_id?: string;
    id?: string;
    status?: string;
    startedAt?: string;
    started_at?: string;
    completedAt?: string;
    completed_at?: string;
    result?: unknown;
    error?: string;
  }[]
}> {
  // Hand run history API may not exist yet, return empty array
  try {
    return await invoke('hand_run_list', { handName: name, ...opts });
  } catch {
    return { runs: [] };
  }
}
// === Skills API ===
/**
 * List all discovered skills.
 *
 * Wrapper over the Tauri `skill_list` command. The inline element type is
 * duplicated between the return annotation and the `invoke` generic (and
 * again in {@link refreshSkills}) — a shared interface would remove the
 * repetition, but that is a file-level refactor.
 *
 * @returns `{ skills }`; empty array when the backend returns nothing.
 */
async listSkills(): Promise<{
  skills: {
    id: string;
    name: string;
    description: string;
    version: string;
    capabilities: string[];
    tags: string[];
    mode: string;
    enabled: boolean;
    triggers: string[];
    category?: string;
  }[]
}> {
  const skills = await invoke<Array<{
    id: string;
    name: string;
    description: string;
    version: string;
    capabilities: string[];
    tags: string[];
    mode: string;
    enabled: boolean;
    triggers: string[];
    category?: string;
  }>>('skill_list');
  // Normalize a null/undefined backend payload to an empty list.
  return { skills: skills || [] };
}
/**
 * Re-scan skills via the backend `skill_refresh` command.
 *
 * @param skillDir - Optional directory to scan; `null` is sent when omitted,
 *                   which presumably tells the backend to use its default
 *                   directory — TODO(review): confirm backend semantics.
 * @returns `{ skills }`; empty array when the backend returns nothing.
 */
async refreshSkills(skillDir?: string): Promise<{
  skills: {
    id: string;
    name: string;
    description: string;
    version: string;
    capabilities: string[];
    tags: string[];
    mode: string;
    enabled: boolean;
    triggers: string[];
    category?: string;
  }[]
}> {
  const skills = await invoke<Array<{
    id: string;
    name: string;
    description: string;
    version: string;
    capabilities: string[];
    tags: string[];
    mode: string;
    enabled: boolean;
    triggers: string[];
    category?: string;
  }>>('skill_refresh', { skillDir: skillDir || null });
  // Normalize a null/undefined backend payload to an empty list.
  return { skills: skills || [] };
}
/**
 * Execute a skill by id.
 *
 * Wrapper over the Tauri `skill_execute` command. The `context` argument is
 * always sent as an empty object here — presumably the backend fills in
 * execution context itself; TODO(review): confirm, or plumb real context
 * through if the command requires it.
 *
 * @param id    - Skill identifier
 * @param input - Optional skill input; defaults to an empty object
 */
async executeSkill(id: string, input?: Record<string, unknown>): Promise<{
  success: boolean;
  output?: unknown;
  error?: string;
  durationMs?: number;
}> {
  return invoke('skill_execute', {
    id,
    context: {},
    input: input || {},
  });
}
// === Triggers API (stubs for compatibility) ===
/** Stub: trigger listing is not implemented; always resolves to an empty list. */
async listTriggers(): Promise<{ triggers?: { id: string; type: string; enabled: boolean }[] }> {
  return { triggers: [] };
}
/** Stub: trigger lookup is not implemented; always resolves to null. */
async getTrigger(_id: string): Promise<{ id: string; type: string; enabled: boolean } | null> {
  return null;
}
/** Stub: trigger creation is not implemented; silently resolves to null. */
async createTrigger(_trigger: { type: string; name?: string; enabled?: boolean; config?: Record<string, unknown>; handName?: string; workflowId?: string }): Promise<{ id?: string } | null> {
  return null;
}
/** Stub: trigger updates are not implemented. @throws Error always. */
async updateTrigger(_id: string, _updates: { name?: string; enabled?: boolean; config?: Record<string, unknown>; handName?: string; workflowId?: string }): Promise<{ id: string }> {
  throw new Error('Triggers not implemented');
}
/** Stub: trigger deletion is not implemented. @throws Error always. */
async deleteTrigger(_id: string): Promise<{ status: string }> {
  throw new Error('Triggers not implemented');
}
// === Approvals API (stubs for compatibility) ===
/** Stub: approvals listing is not implemented; always resolves to an empty list. */
async listApprovals(_status?: string): Promise<{ approvals?: unknown[] }> {
  return { approvals: [] };
}
/** Stub: approval responses are not implemented. @throws Error always. */
async respondToApproval(_approvalId: string, _approved: boolean, _reason?: string): Promise<{ status: string }> {
  throw new Error('Approvals not implemented');
}
/**
* REST API compatibility methods
*/

View File

@@ -14,6 +14,8 @@
* Part of ZCLAW L4 Self-Evolution capability.
*/
import { DEFAULT_MODEL_ID, DEFAULT_OPENAI_BASE_URL } from '../constants/models';
// === Types ===
export type LLMProvider = 'openai' | 'volcengine' | 'gateway' | 'mock';
@@ -54,8 +56,8 @@ export interface LLMServiceAdapter {
const DEFAULT_CONFIGS: Record<LLMProvider, LLMConfig> = {
openai: {
provider: 'openai',
model: 'gpt-4o-mini',
apiBase: 'https://api.openai.com/v1',
model: DEFAULT_MODEL_ID,
apiBase: DEFAULT_OPENAI_BASE_URL,
maxTokens: 2000,
temperature: 0.7,
timeout: 30000,

View File

@@ -65,14 +65,22 @@ function extractTriggers(triggers?: ConfigSkillInfo['triggers']): string[] {
}
/**
* Extract capabilities from actions
* Extract capabilities from actions or capabilities field
*/
function extractCapabilities(actions?: ConfigSkillInfo['actions']): string[] {
if (!actions) return [];
function extractCapabilities(skill: ConfigSkillInfo): string[] {
// Prefer explicit capabilities field if available
if (skill.capabilities && skill.capabilities.length > 0) {
return skill.capabilities;
}
return actions
.map(a => a.type)
.filter((t): t is string => Boolean(t));
// Fall back to extracting from actions
if (skill.actions) {
return skill.actions
.map(a => a.type)
.filter((t): t is string => Boolean(t));
}
return [];
}
/**
@@ -112,7 +120,7 @@ export function adaptSkillInfo(skill: ConfigSkillInfo): UISkillInfo {
name: skill.name,
description: skill.description || '',
triggers: extractTriggers(skill.triggers),
capabilities: extractCapabilities(skill.actions),
capabilities: extractCapabilities(skill),
toolDeps: extractToolDeps(skill.actions),
installed: skill.enabled ?? false,
category: inferCategory(skill),

View File

@@ -6,7 +6,7 @@
* - Recommend skills based on recent conversation patterns
* - Manage skill installation lifecycle (with user approval)
*
* Scans the local `skills/` directory for SKILL.md manifests and indexes them.
* Dynamically loads skills from the backend Kernel's SkillRegistry.
*
* Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.5.2
*/
@@ -26,6 +26,20 @@ export interface SkillInfo {
installed: boolean;
category?: string;
path?: string;
version?: string;
mode?: string;
}
/** Backend skill response format */
interface BackendSkillInfo {
id: string;
name: string;
description: string;
version: string;
capabilities: string[];
tags: string[];
mode: string;
enabled: boolean;
}
export interface SkillSuggestion {
@@ -51,151 +65,89 @@ export interface ConversationContext {
const SKILL_INDEX_KEY = 'zclaw-skill-index';
const SKILL_SUGGESTIONS_KEY = 'zclaw-skill-suggestions';
// === Built-in Skill Registry ===
/**
* Pre-indexed skills from the skills/ directory.
* In production, this would be dynamically scanned from SKILL.md files.
* For Phase 4, we maintain a static registry that can be refreshed.
*/
const BUILT_IN_SKILLS: SkillInfo[] = [
{
id: 'code-review',
name: 'Code Review',
description: '审查代码、分析代码质量、提供改进建议',
triggers: ['审查代码', '代码审查', 'code review', 'PR review', '检查代码'],
capabilities: ['代码质量分析', '架构评估', '安全审计', '最佳实践检查'],
toolDeps: ['read', 'grep', 'glob'],
installed: true,
category: 'development',
},
{
id: 'frontend-developer',
name: 'Frontend Developer',
description: '前端开发专家,擅长 React/Vue/CSS/TypeScript',
triggers: ['前端开发', '页面开发', 'UI开发', 'React', 'Vue', 'CSS'],
capabilities: ['组件开发', '样式调整', '性能优化', '响应式设计'],
toolDeps: ['read', 'write', 'shell'],
installed: true,
category: 'development',
},
{
id: 'backend-architect',
name: 'Backend Architect',
description: '后端架构设计、API设计、数据库建模',
triggers: ['后端架构', 'API设计', '数据库设计', '系统架构', '微服务'],
capabilities: ['架构设计', 'API规范', '数据库建模', '性能优化'],
toolDeps: ['read', 'write', 'shell'],
installed: true,
category: 'development',
},
{
id: 'security-engineer',
name: 'Security Engineer',
description: '安全工程师,负责安全审计、漏洞检测、合规检查',
triggers: ['安全审计', '漏洞检测', '安全检查', 'security', '渗透测试'],
capabilities: ['漏洞扫描', '合规检查', '安全加固', '威胁建模'],
toolDeps: ['read', 'grep', 'shell'],
installed: true,
category: 'security',
},
{
id: 'data-analysis',
name: 'Data Analysis',
description: '数据分析、可视化、报告生成',
triggers: ['数据分析', '数据可视化', '报表', '统计', 'analytics'],
capabilities: ['数据清洗', '统计分析', '可视化图表', '报告生成'],
toolDeps: ['read', 'write', 'shell'],
installed: true,
category: 'analytics',
},
{
id: 'chinese-writing',
name: 'Chinese Writing',
description: '中文写作、文案创作、内容优化',
triggers: ['写文章', '文案', '写作', '中文创作', '内容优化'],
capabilities: ['文案创作', '文章润色', '标题优化', 'SEO写作'],
toolDeps: ['read', 'write'],
installed: true,
category: 'content',
},
{
id: 'devops-automator',
name: 'DevOps Automator',
description: 'CI/CD、Docker、K8s、自动化部署',
triggers: ['DevOps', 'CI/CD', 'Docker', '部署', '自动化', 'K8s'],
capabilities: ['CI/CD配置', '容器化', '自动化部署', '监控告警'],
toolDeps: ['shell', 'read', 'write'],
installed: true,
category: 'ops',
},
{
id: 'senior-pm',
name: 'Senior PM',
description: '项目管理、需求分析、迭代规划',
triggers: ['项目管理', '需求分析', '迭代规划', '产品设计', 'PRD'],
capabilities: ['需求拆解', '迭代排期', '风险评估', '文档撰写'],
toolDeps: ['read', 'write'],
installed: true,
category: 'management',
},
{
id: 'git',
name: 'Git Operations',
description: 'Git 版本控制操作、分支管理、冲突解决',
triggers: ['git', '版本控制', '分支', '合并', 'commit', 'merge'],
capabilities: ['分支管理', '冲突解决', 'rebase', 'cherry-pick'],
toolDeps: ['shell'],
installed: true,
category: 'development',
},
{
id: 'api-tester',
name: 'API Tester',
description: 'API 测试、接口调试、自动化测试脚本',
triggers: ['API测试', '接口测试', '接口调试', 'Postman', 'curl'],
capabilities: ['接口调试', '自动化测试', '性能测试', '断言验证'],
toolDeps: ['shell', 'read', 'write'],
installed: true,
category: 'testing',
},
{
id: 'finance-tracker',
name: 'Finance Tracker',
description: '财务追踪、预算管理、报表分析',
triggers: ['财务', '预算', '记账', '报销', '财务报表'],
capabilities: ['收支分析', '预算规划', '报表生成', '趋势预测'],
toolDeps: ['read', 'write'],
installed: true,
category: 'business',
},
{
id: 'social-media-strategist',
name: 'Social Media Strategist',
description: '社交媒体运营策略、内容规划、数据分析',
triggers: ['社交媒体', '运营', '小红书', '抖音', '微博', '内容运营'],
capabilities: ['内容策划', '发布排期', '数据分析', '竞品监控'],
toolDeps: ['read', 'write'],
installed: true,
category: 'marketing',
},
];
// === Skill Discovery Engine ===
export class SkillDiscoveryEngine {
private skills: SkillInfo[] = [];
private suggestionHistory: SkillSuggestion[] = [];
private loadedFromBackend: boolean = false;
constructor() {
this.loadIndex();
this.loadSuggestions();
if (this.skills.length === 0) {
this.skills = [...BUILT_IN_SKILLS];
// Try to load from backend, fallback to cache
this.loadFromBackend();
}
/**
 * Load skills from backend Tauri command.
 * Falls back to cached skills if backend is unavailable.
 *
 * Invoked fire-and-forget (not awaited), so callers may briefly see the
 * cached skill set before this resolves. On success the fetched skills
 * replace the in-memory list and are persisted via saveIndex(); on failure
 * whatever loadIndex() cached remains in effect.
 */
private async loadFromBackend(): Promise<void> {
  try {
    // Dynamic import to avoid bundling issues in non-Tauri environments
    const { invoke } = await import('@tauri-apps/api/core');
    const backendSkills = await invoke<BackendSkillInfo[]>('skill_list');
    // Convert backend format to frontend format.
    // NOTE(review): `this.convertFromBackend` is passed unbound to map();
    // that is only safe while convertFromBackend never reads `this`.
    this.skills = backendSkills.map(this.convertFromBackend);
    this.loadedFromBackend = true;
    this.saveIndex();
    console.log(`[SkillDiscovery] Loaded ${this.skills.length} skills from backend`);
  } catch (error) {
    console.warn('[SkillDiscovery] Failed to load skills from backend:', error);
    // Keep using cached skills (loaded in loadIndex)
    this.loadedFromBackend = false;
  }
}
/**
 * Convert backend skill format to frontend format.
 *
 * Deliberately `this`-free so it can be passed unbound to Array.map (see
 * loadFromBackend/refresh) — keep it that way if editing.
 *
 * @param backend - Raw skill record from the `skill_list`/`skill_refresh` commands
 * @returns The UI-facing SkillInfo equivalent
 */
private convertFromBackend(backend: BackendSkillInfo): SkillInfo {
  return {
    id: backend.id,
    name: backend.name,
    description: backend.description,
    version: backend.version,
    triggers: backend.tags, // Use tags as triggers
    capabilities: backend.capabilities,
    mode: backend.mode,
    toolDeps: [], // Backend doesn't have this field
    installed: backend.enabled,
    category: backend.tags[0] || 'general', // first tag doubles as category
  };
}
/**
 * Refresh skills from backend.
 * Optionally specify a custom directory to scan.
 *
 * Unlike loadFromBackend(), failures here are rethrown so the caller (e.g. a
 * UI refresh button) can surface the error.
 *
 * @param skillDir - Optional directory for the backend to scan
 * @returns Number of skills after the refresh
 * @throws Re-throws any invoke/import failure
 */
async refresh(skillDir?: string): Promise<number> {
  try {
    const { invoke } = await import('@tauri-apps/api/core');
    const backendSkills = await invoke<BackendSkillInfo[]>('skill_refresh', {
      skillDir
    });
    // Unbound map callback — safe because convertFromBackend never reads `this`.
    this.skills = backendSkills.map(this.convertFromBackend);
    this.loadedFromBackend = true;
    this.saveIndex();
    console.log(`[SkillDiscovery] Refreshed ${this.skills.length} skills`);
    return this.skills.length;
  } catch (error) {
    console.error('[SkillDiscovery] Failed to refresh skills:', error);
    throw error;
  }
}
/**
 * Check if skills were loaded from backend.
 *
 * @returns true only after a successful loadFromBackend()/refresh(); false
 *          while still serving the cached/local skill index.
 */
isLoadedFromBackend(): boolean {
  return this.loadedFromBackend;
}
// === Search ===
/**

View File

@@ -0,0 +1,11 @@
/**
 * Workflow Builder Library
 *
 * Provides types, converters, and utilities for building visual workflow editors.
 *
 * Barrel module: re-exports everything from `./types` (node/canvas/pipeline
 * type definitions) and `./yaml-converter` (canvas <-> Pipeline YAML
 * conversion), so consumers can import from the package root.
 */
export * from './types';
export * from './yaml-converter';
// Re-export commonly used types from @xyflow/react
export type { Node, Edge, Connection } from '@xyflow/react';

View File

@@ -0,0 +1,329 @@
/**
* Workflow Builder Types
*
* Core types for the visual workflow builder that creates Pipeline DSL
* configurations through drag-and-drop node composition.
*/
import type { Node, Edge } from '@xyflow/react';
// =============================================================================
// Node Types
// =============================================================================
export type WorkflowNodeType =
| 'input'
| 'llm'
| 'skill'
| 'hand'
| 'orchestration'
| 'condition'
| 'parallel'
| 'loop'
| 'export'
| 'http'
| 'setVar'
| 'delay';
// =============================================================================
// Node Data Types
// =============================================================================
// Base node data that satisfies Record<string, unknown>
export interface BaseNodeData extends Record<string, unknown> {
label: string;
description?: string;
}
export interface InputNodeData extends BaseNodeData {
type: 'input';
/** Input variable name */
variableName: string;
/** Default value for testing */
defaultValue?: unknown;
/** JSON schema for validation */
schema?: Record<string, unknown>;
}
export interface LlmNodeData extends BaseNodeData {
type: 'llm';
/** Template path or inline prompt */
template: string;
/** Whether template is a file path */
isTemplateFile: boolean;
/** Model override */
model?: string;
/** Temperature override */
temperature?: number;
/** Max tokens override */
maxTokens?: number;
/** JSON mode for structured output */
jsonMode: boolean;
}
export interface SkillNodeData extends BaseNodeData {
type: 'skill';
/** Skill ID to execute */
skillId: string;
/** Skill name for display */
skillName?: string;
/** Input variable mappings */
inputMappings: Record<string, string>;
}
export interface HandNodeData extends BaseNodeData {
type: 'hand';
/** Hand ID */
handId: string;
/** Hand name for display */
handName?: string;
/** Action to perform */
action: string;
/** Action parameters */
params: Record<string, string>;
}
export interface OrchestrationNodeData extends BaseNodeData {
type: 'orchestration';
/** Graph ID reference */
graphId?: string;
/** Inline graph definition */
graph?: Record<string, unknown>;
/** Input mappings */
inputMappings: Record<string, string>;
}
export interface ConditionNodeData extends BaseNodeData {
type: 'condition';
/** Condition expression */
condition: string;
/** Branch definitions */
branches: ConditionBranch[];
/** Has default branch */
hasDefault: boolean;
}
export interface ConditionBranch {
/** Condition expression for this branch */
when: string;
/** Label for display */
label: string;
}
export interface ParallelNodeData extends BaseNodeData {
type: 'parallel';
/** Expression to iterate over */
each: string;
/** Max concurrent workers */
maxWorkers: number;
}
export interface LoopNodeData extends BaseNodeData {
type: 'loop';
/** Expression to iterate over */
each: string;
/** Variable name for current item */
itemVar: string;
/** Variable name for index */
indexVar: string;
}
export interface ExportNodeData extends BaseNodeData {
type: 'export';
/** Export formats */
formats: ExportFormat[];
/** Output directory */
outputDir?: string;
}
export interface HttpNodeData extends BaseNodeData {
type: 'http';
/** URL */
url: string;
/** HTTP method */
method: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH';
/** Headers */
headers: Record<string, string>;
/** Request body expression */
body?: string;
}
export interface SetVarNodeData extends BaseNodeData {
type: 'setVar';
/** Variable name */
variableName: string;
/** Value expression */
value: string;
}
export interface DelayNodeData extends BaseNodeData {
type: 'delay';
/** Delay in milliseconds */
ms: number;
}
export type WorkflowNodeData =
| InputNodeData
| LlmNodeData
| SkillNodeData
| HandNodeData
| OrchestrationNodeData
| ConditionNodeData
| ParallelNodeData
| LoopNodeData
| ExportNodeData
| HttpNodeData
| SetVarNodeData
| DelayNodeData;
// =============================================================================
// Canvas Types
// =============================================================================
// Use Record<string, unknown> as base to satisfy React Flow constraints
// The actual data will be one of the WorkflowNodeData union types
export type WorkflowNode = Node<Record<string, unknown>, string>;
export type WorkflowEdge = Edge;
export interface WorkflowCanvas {
/** Unique canvas ID */
id: string;
/** Canvas name */
name: string;
/** Canvas description */
description?: string;
/** Category for organization */
category?: string;
/** Nodes in the canvas */
nodes: WorkflowNode[];
/** Edges connecting nodes */
edges: WorkflowEdge[];
/** Viewport state */
viewport: {
x: number;
y: number;
zoom: number;
};
/** Canvas metadata */
metadata: WorkflowMetadata;
}
export interface WorkflowMetadata {
/** Created timestamp */
createdAt: string;
/** Updated timestamp */
updatedAt: string;
/** Author */
author?: string;
/** Tags for search */
tags: string[];
/** Version */
version: string;
}
// =============================================================================
// Export Types
// =============================================================================
export type ExportFormat = 'pptx' | 'html' | 'pdf' | 'markdown' | 'json';
// =============================================================================
// Palette Types
// =============================================================================
export interface NodePaletteItem {
type: WorkflowNodeType;
label: string;
description: string;
icon: string;
category: NodeCategory;
defaultData: Partial<WorkflowNodeData>;
}
export type NodeCategory =
| 'input'
| 'ai'
| 'action'
| 'control'
| 'output';
// =============================================================================
// Conversion Types
// =============================================================================
export interface PipelineYaml {
apiVersion: 'zclaw/v1';
kind: 'Pipeline';
metadata: {
name: string;
description?: string;
tags?: string[];
};
spec: {
input?: Record<string, unknown>;
steps: PipelineStepYaml[];
output?: Record<string, string>;
};
}
export interface PipelineStepYaml {
id: string;
name?: string;
action: Record<string, unknown>;
when?: string;
}
// =============================================================================
// Validation Types
// =============================================================================
export interface ValidationError {
nodeId: string;
field?: string;
message: string;
severity: 'error' | 'warning';
}
export interface ValidationResult {
valid: boolean;
errors: ValidationError[];
warnings: ValidationError[];
}
// =============================================================================
// Template Types
// =============================================================================
export interface WorkflowTemplate {
id: string;
name: string;
description: string;
category: string;
thumbnail?: string;
canvas: WorkflowCanvas;
}
// =============================================================================
// Store Types
// =============================================================================
export interface WorkflowBuilderState {
/** Current canvas */
canvas: WorkflowCanvas | null;
/** All saved workflows */
workflows: WorkflowCanvas[];
/** Selected node ID */
selectedNodeId: string | null;
/** Is dragging from palette */
isDragging: boolean;
/** Is canvas dirty (unsaved changes) */
isDirty: boolean;
/** Validation result */
validation: ValidationResult | null;
/** Templates */
templates: WorkflowTemplate[];
/** Available skills for palette */
availableSkills: Array<{ id: string; name: string; description: string }>;
/** Available hands for palette */
availableHands: Array<{ id: string; name: string; actions: string[] }>;
}

View File

@@ -0,0 +1,803 @@
/**
* YAML Converter for Workflow Builder
*
* Bidirectional conversion between WorkflowCanvas (visual representation)
* and Pipeline YAML (execution format).
*/
import * as yaml from 'js-yaml';
import type { Edge } from '@xyflow/react';
import dagre from '@dagrejs/dagre';
import type {
WorkflowCanvas,
WorkflowNode,
WorkflowNodeData,
InputNodeData,
LlmNodeData,
SkillNodeData,
HandNodeData,
ConditionNodeData,
ParallelNodeData,
ExportNodeData,
PipelineYaml,
PipelineStepYaml,
ValidationError,
ValidationResult,
} from './types';
// =============================================================================
// Canvas to YAML Conversion
// =============================================================================
/**
 * Serialize a WorkflowCanvas into a Pipeline YAML document string.
 *
 * Inputs are gathered from 'input' nodes, steps from the remaining nodes in
 * topological order, and outputs from 'export' nodes.
 *
 * @param canvas - The visual canvas to convert
 * @returns YAML text in the `zclaw/v1` Pipeline format
 */
export function canvasToYaml(canvas: WorkflowCanvas): string {
  const { nodes, edges } = canvas;
  const pipeline: PipelineYaml = {
    apiVersion: 'zclaw/v1',
    kind: 'Pipeline',
    metadata: {
      name: canvas.name,
      description: canvas.description,
      tags: canvas.metadata.tags,
    },
    spec: {
      input: extractInputs(nodes),
      steps: nodesToSteps(nodes, edges),
      output: extractOutputs(nodes),
    },
  };
  // lineWidth: -1 disables wrapping; sortKeys: false preserves insertion order.
  const dumpOptions = { indent: 2, lineWidth: -1, noRefs: true, sortKeys: false };
  return yaml.dump(pipeline, dumpOptions);
}
/**
 * Collect input variable definitions from all 'input' nodes on the canvas.
 *
 * Each input node contributes one entry keyed by its variableName, holding
 * the node's defaultValue (or null when no default is set).
 *
 * @returns The input map, or undefined when the canvas has no input nodes.
 */
function extractInputs(nodes: WorkflowNode[]): Record<string, unknown> | undefined {
  const inputNodes = nodes.filter(node => node.data.type === 'input');
  if (inputNodes.length === 0) {
    return undefined;
  }
  const inputs: Record<string, unknown> = {};
  for (const node of inputNodes) {
    const { variableName, defaultValue } = node.data as InputNodeData;
    inputs[variableName] = defaultValue ?? null;
  }
  return inputs;
}
/**
 * Derive pipeline output mappings from 'export' nodes.
 *
 * Each export node produces one entry `<nodeId>_export` referencing that
 * step's output via a `${steps.<id>.output}` expression.
 *
 * @returns The output map, or undefined when there are no export nodes.
 */
function extractOutputs(nodes: WorkflowNode[]): Record<string, string> | undefined {
  const entries = nodes
    .filter(node => node.data.type === 'export')
    .map(node => [`${node.id}_export`, `\${steps.${node.id}.output}`] as const);
  return entries.length > 0 ? Object.fromEntries(entries) : undefined;
}
/**
 * Convert nodes and edges into an ordered list of pipeline steps.
 *
 * Nodes are topologically sorted so each step appears after its
 * dependencies. 'input' nodes and unknown node types yield no step.
 */
function nodesToSteps(nodes: WorkflowNode[], edges: Edge[]): PipelineStepYaml[] {
  const ordered = topologicalSort(nodes, edges);
  const steps: PipelineStepYaml[] = [];
  for (const node of ordered) {
    if (node.data.type === 'input') continue; // input nodes only declare variables
    const step = nodeToStep(node);
    if (step !== null) {
      steps.push(step);
    }
  }
  return steps;
}
/**
 * Convert a single node to a pipeline step.
 *
 * Maps each visual node type onto its Pipeline DSL action record. Flow-control
 * nodes (condition/parallel/loop) emit placeholder `then`/`step` bodies that
 * are expected to be filled in from connected child nodes later.
 *
 * @param node - The canvas node to convert
 * @returns The step, or null for 'input' nodes and unknown node types
 */
function nodeToStep(node: WorkflowNode): PipelineStepYaml | null {
  const data = node.data;
  const label = data.label as string | undefined;
  // Common step skeleton; the switch below fills in `action`.
  const base: PipelineStepYaml = {
    id: node.id,
    name: label,
    action: {},
  };
  const nodeType = data.type as string;
  switch (nodeType) {
    case 'llm': {
      const llmData = data as LlmNodeData;
      base.action = {
        llm_generate: {
          template: llmData.template,
          // Input map is derived from the `${...}` references found in the
          // template text itself (see mapExpressionsToObject).
          input: mapExpressionsToObject(llmData.template),
          model: llmData.model,
          temperature: llmData.temperature,
          max_tokens: llmData.maxTokens,
          json_mode: llmData.jsonMode,
        },
      };
      break;
    }
    case 'skill': {
      const skillData = data as SkillNodeData;
      base.action = {
        skill: {
          skill_id: skillData.skillId,
          input: skillData.inputMappings,
        },
      };
      break;
    }
    case 'hand': {
      const handData = data as HandNodeData;
      base.action = {
        hand: {
          hand_id: handData.handId,
          hand_action: handData.action,
          params: handData.params,
        },
      };
      break;
    }
    case 'orchestration': {
      // Either a graph reference (graphId) or an inline graph may be set.
      const orchData = data as { graphId?: string; graph?: Record<string, unknown>; inputMappings?: Record<string, string> };
      base.action = {
        skill_orchestration: {
          graph_id: orchData.graphId,
          graph: orchData.graph,
          input: orchData.inputMappings,
        },
      };
      break;
    }
    case 'condition': {
      const condData = data as ConditionNodeData;
      base.action = {
        condition: {
          condition: condData.condition,
          branches: condData.branches.map((b: { when: string }) => ({
            when: b.when,
            then: { /* Will be filled by connected nodes */ },
          })),
        },
      };
      break;
    }
    case 'parallel': {
      const parData = data as ParallelNodeData;
      base.action = {
        parallel: {
          each: parData.each,
          step: { /* Will be filled by child nodes */ },
          max_workers: parData.maxWorkers,
        },
      };
      break;
    }
    case 'loop': {
      const loopData = data as { each: string; itemVar: string; indexVar: string };
      base.action = {
        loop: {
          each: loopData.each,
          item_var: loopData.itemVar,
          index_var: loopData.indexVar,
          step: { /* Will be filled by child nodes */ },
        },
      };
      break;
    }
    case 'export': {
      const exportData = data as ExportNodeData;
      base.action = {
        file_export: {
          formats: exportData.formats,
          // Literal `${steps.<id>.input}` expression, resolved at run time.
          input: `\${steps.${node.id}.input}`,
          output_dir: exportData.outputDir,
        },
      };
      break;
    }
    case 'http': {
      const httpData = data as { url: string; method: string; headers: Record<string, string>; body?: string };
      base.action = {
        http_request: {
          url: httpData.url,
          method: httpData.method,
          headers: httpData.headers,
          body: httpData.body,
        },
      };
      break;
    }
    case 'setVar': {
      const varData = data as { variableName: string; value: string };
      base.action = {
        set_var: {
          name: varData.variableName,
          value: varData.value,
        },
      };
      break;
    }
    case 'delay': {
      const delayData = data as { ms: number };
      base.action = {
        delay: {
          ms: delayData.ms,
        },
      };
      break;
    }
    case 'input':
      // Input nodes don't become steps
      return null;
    default:
      console.warn(`Unknown node type: ${nodeType}`);
      return null;
  }
  return base;
}
/**
 * Topologically sort workflow nodes (Kahn's algorithm) so that every node
 * appears after all of its predecessors in the edge graph.
 *
 * Nodes that sit on a cycle never reach in-degree zero and are therefore
 * omitted from the result; cycles are reported separately by
 * `validateCanvas` via `hasCycle`.
 *
 * @param nodes - Nodes to order.
 * @param edges - Directed edges (source → target) between those nodes.
 * @returns Nodes in dependency order; cycle members are dropped.
 */
function topologicalSort(nodes: WorkflowNode[], edges: Edge[]): WorkflowNode[] {
  const nodeMap = new Map(nodes.map(n => [n.id, n]));
  const inDegree = new Map<string, number>();
  const adjacency = new Map<string, string[]>();
  // Initialize every known node with no incoming edges and no neighbors.
  for (const node of nodes) {
    inDegree.set(node.id, 0);
    adjacency.set(node.id, []);
  }
  // Build adjacency lists and incoming-edge counts.
  for (const edge of edges) {
    const current = adjacency.get(edge.source) || [];
    current.push(edge.target);
    adjacency.set(edge.source, current);
    inDegree.set(edge.target, (inDegree.get(edge.target) || 0) + 1);
  }
  // Kahn's algorithm: repeatedly emit nodes whose remaining in-degree is zero.
  const queue: string[] = [];
  const result: WorkflowNode[] = [];
  for (const [nodeId, degree] of inDegree) {
    if (degree === 0) {
      queue.push(nodeId);
    }
  }
  // Walk the queue with an index pointer instead of Array.shift():
  // shift() is O(n) per call, which made the loop accidentally O(n²).
  let head = 0;
  while (head < queue.length) {
    const nodeId = queue[head++];
    const node = nodeMap.get(nodeId);
    if (node) {
      result.push(node);
    }
    const neighbors = adjacency.get(nodeId) || [];
    for (const neighbor of neighbors) {
      const newDegree = (inDegree.get(neighbor) || 0) - 1;
      inDegree.set(neighbor, newDegree);
      if (newDegree === 0) {
        queue.push(neighbor);
      }
    }
  }
  return result;
}
/**
 * Collect `${...}` variable references found in a template string.
 *
 * Each dotted expression with at least two segments (e.g. `${steps.a.output}`)
 * is keyed by its final segment; single-segment references are ignored.
 * If two expressions share a final segment, the later one wins.
 *
 * @param template - Template text to scan.
 * @returns Map of final path segment → full `${...}` reference.
 */
function mapExpressionsToObject(template: string): Record<string, string> {
  const mappings: Record<string, string> = {};
  for (const found of template.matchAll(/\$\{([^}]+)\}/g)) {
    const segments = found[1].split('.');
    if (segments.length < 2) {
      continue;
    }
    mappings[segments[segments.length - 1]] = found[0];
  }
  return mappings;
}
// =============================================================================
// YAML to Canvas Conversion
// =============================================================================
/**
 * Parse a Pipeline YAML document into a WorkflowCanvas.
 *
 * Declared pipeline inputs become `input` nodes in a vertical column;
 * each recognized step becomes one workflow node. Final positions come
 * from `applyDagreLayout`, so the initial coordinates are placeholders.
 *
 * NOTE(review): step connections are not reconstructed here, so imported
 * workflows currently have an empty edge list — confirm whether sequential
 * edges between consecutive steps should be derived.
 *
 * @param yamlString - Raw pipeline YAML text.
 * @returns A new canvas with freshly generated id and metadata timestamps.
 */
export function yamlToCanvas(yamlString: string): WorkflowCanvas {
  const pipeline = yaml.load(yamlString) as PipelineYaml;
  const nodes: WorkflowNode[] = [];
  const edges: Edge[] = [];

  // One input node per declared pipeline input, stacked 100px apart.
  if (pipeline.spec.input) {
    Object.entries(pipeline.spec.input).forEach(([varName, defaultValue], index) => {
      nodes.push({
        id: `input_${varName}`,
        type: 'input',
        position: { x: 50, y: 50 + index * 100 },
        data: {
          type: 'input',
          label: varName,
          variableName: varName,
          defaultValue,
        },
      });
    });
  }

  // Convert each step; unknown action types yield null and are skipped,
  // so only successfully converted steps advance the vertical offset.
  if (pipeline.spec.steps) {
    let placed = 0;
    for (const step of pipeline.spec.steps) {
      const converted = stepToNode(step, 300, 50 + placed * 150);
      if (converted) {
        nodes.push(converted);
        placed += 1;
      }
    }
  }

  const layoutedNodes = applyDagreLayout(nodes, edges);

  return {
    id: `workflow_${Date.now()}`,
    name: pipeline.metadata?.name || 'Imported Workflow',
    description: pipeline.metadata?.description,
    category: 'imported',
    nodes: layoutedNodes,
    edges,
    viewport: { x: 0, y: 0, zoom: 1 },
    metadata: {
      createdAt: new Date().toISOString(),
      updatedAt: new Date().toISOString(),
      tags: pipeline.metadata?.tags || [],
      version: '1.0.0',
    },
  };
}
/**
 * Convert one pipeline step into a canvas workflow node.
 *
 * The first key of the step's action map selects the node type. Missing
 * optional fields fall back to sensible defaults; unrecognized action
 * types are logged and dropped (`null`).
 *
 * @param step - Parsed pipeline step (id, optional name, action map).
 * @param x - Initial x coordinate (normally replaced by auto-layout).
 * @param y - Initial y coordinate (normally replaced by auto-layout).
 * @returns The equivalent workflow node, or `null` for unknown actions.
 */
function stepToNode(step: PipelineStepYaml, x: number, y: number): WorkflowNode | null {
  const action = step.action;
  const actionType = Object.keys(action)[0];
  const actionData = action[actionType];
  const position = { x, y };
  const label = step.name || step.id;

  switch (actionType) {
    case 'llm_generate': {
      const a = actionData as {
        template?: string;
        model?: string;
        temperature?: number;
        max_tokens?: number;
        json_mode?: boolean;
      };
      return {
        id: step.id,
        type: 'llm',
        position,
        data: {
          type: 'llm',
          label,
          template: a.template || '',
          isTemplateFile: false,
          model: a.model,
          temperature: a.temperature,
          maxTokens: a.max_tokens,
          jsonMode: a.json_mode || false,
        } as WorkflowNodeData,
      };
    }
    case 'skill': {
      const a = actionData as { skill_id?: string; input?: Record<string, string> };
      return {
        id: step.id,
        type: 'skill',
        position,
        data: {
          type: 'skill',
          label,
          skillId: a.skill_id || '',
          inputMappings: a.input || {},
        } as WorkflowNodeData,
      };
    }
    case 'hand': {
      const a = actionData as {
        hand_id?: string;
        hand_action?: string;
        params?: Record<string, string>;
      };
      return {
        id: step.id,
        type: 'hand',
        position,
        data: {
          type: 'hand',
          label,
          handId: a.hand_id || '',
          action: a.hand_action || '',
          params: a.params || {},
        } as WorkflowNodeData,
      };
    }
    case 'skill_orchestration': {
      const a = actionData as {
        graph_id?: string;
        graph?: Record<string, unknown>;
        input?: Record<string, string>;
      };
      return {
        id: step.id,
        type: 'orchestration',
        position,
        data: {
          type: 'orchestration',
          label,
          graphId: a.graph_id,
          graph: a.graph,
          inputMappings: a.input || {},
        } as WorkflowNodeData,
      };
    }
    case 'condition': {
      const a = actionData as { condition?: string; branches?: Array<{ when: string }> };
      return {
        id: step.id,
        type: 'condition',
        position,
        data: {
          type: 'condition',
          label,
          condition: a.condition || '',
          // Branch labels are truncated previews of the `when` expression.
          branches: (a.branches || []).map(b => ({
            when: b.when,
            label: b.when.slice(0, 20),
          })),
          hasDefault: true,
        } as WorkflowNodeData,
      };
    }
    case 'parallel': {
      const a = actionData as { each?: string; max_workers?: number };
      return {
        id: step.id,
        type: 'parallel',
        position,
        data: {
          type: 'parallel',
          label,
          each: a.each || '',
          maxWorkers: a.max_workers || 4,
        } as WorkflowNodeData,
      };
    }
    case 'file_export': {
      const a = actionData as { formats?: string[]; output_dir?: string };
      return {
        id: step.id,
        type: 'export',
        position,
        data: {
          type: 'export',
          label,
          formats: a.formats || [],
          outputDir: a.output_dir,
        } as WorkflowNodeData,
      };
    }
    case 'http_request': {
      const a = actionData as {
        url?: string;
        method?: string;
        headers?: Record<string, string>;
        body?: string;
      };
      return {
        id: step.id,
        type: 'http',
        position,
        data: {
          type: 'http',
          label,
          url: a.url || '',
          method: (a.method || 'GET') as 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH',
          headers: a.headers || {},
          body: a.body,
        } as WorkflowNodeData,
      };
    }
    case 'set_var': {
      const a = actionData as { name?: string; value?: string };
      return {
        id: step.id,
        type: 'setVar',
        position,
        data: {
          type: 'setVar',
          label,
          variableName: a.name || '',
          value: a.value || '',
        } as WorkflowNodeData,
      };
    }
    case 'delay': {
      const a = actionData as { ms?: number };
      return {
        id: step.id,
        type: 'delay',
        position,
        data: {
          type: 'delay',
          label,
          ms: a.ms || 0,
        } as WorkflowNodeData,
      };
    }
    default:
      console.warn(`Unknown action type: ${actionType}`);
      return null;
  }
}
// =============================================================================
// Layout Utilities
// =============================================================================
/**
 * Lay out workflow nodes left-to-right with dagre.
 *
 * Dagre reports node centers, so positions are shifted by half the node
 * size to get the top-left corner React Flow expects. Nodes dagre does
 * not know about are returned unchanged.
 *
 * @param nodes - Nodes to position (not mutated; copies are returned).
 * @param edges - Edges used to rank the nodes.
 * @returns New node objects with updated positions.
 */
export function applyDagreLayout(nodes: WorkflowNode[], edges: Edge[]): WorkflowNode[] {
  const NODE_WIDTH = 250;
  const NODE_HEIGHT = 100;

  const graph = new dagre.graphlib.Graph();
  graph.setDefaultEdgeLabel(() => ({}));
  graph.setGraph({
    rankdir: 'LR',
    nodesep: 100,
    ranksep: 150,
    marginx: 50,
    marginy: 50,
  });

  nodes.forEach(node => graph.setNode(node.id, { width: NODE_WIDTH, height: NODE_HEIGHT }));
  edges.forEach(edge => graph.setEdge(edge.source, edge.target));

  dagre.layout(graph);

  return nodes.map(node => {
    const placed = graph.node(node.id);
    if (!placed) {
      return node;
    }
    return {
      ...node,
      position: {
        x: placed.x - placed.width / 2,
        y: placed.y - placed.height / 2,
      },
    };
  });
}
// =============================================================================
// Validation
// =============================================================================
/**
 * Validate a workflow canvas before export/execution.
 *
 * Errors block export (empty canvas, per-node field errors, cycles);
 * warnings are advisory (no input nodes, disconnected nodes). An empty
 * canvas short-circuits: no further checks run.
 *
 * @param canvas - Canvas to validate.
 * @returns Aggregated result; `valid` is true iff there are no errors.
 */
export function validateCanvas(canvas: WorkflowCanvas): ValidationResult {
  const errors: ValidationError[] = [];
  const warnings: ValidationError[] = [];

  // An empty canvas is unconditionally invalid — nothing else to check.
  if (canvas.nodes.length === 0) {
    errors.push({
      nodeId: 'canvas',
      message: 'Workflow is empty',
      severity: 'error',
    });
    return { valid: false, errors, warnings };
  }

  if (!canvas.nodes.some(n => n.data.type === 'input')) {
    warnings.push({
      nodeId: 'canvas',
      message: 'No input nodes defined',
      severity: 'warning',
    });
  }

  // Warn about nodes with no edges at all (input nodes are exempt, and a
  // single-node canvas is trivially "connected").
  const connectedNodeIds = new Set<string>(
    canvas.edges.flatMap(e => [e.source, e.target]),
  );
  const multiNode = canvas.nodes.length > 1;
  for (const node of canvas.nodes) {
    if (multiNode && node.data.type !== 'input' && !connectedNodeIds.has(node.id)) {
      warnings.push({
        nodeId: node.id,
        message: `Node "${node.data.label}" is not connected`,
        severity: 'warning',
      });
    }
  }

  // Per-node field validation.
  for (const node of canvas.nodes) {
    errors.push(...validateNode(node));
  }

  if (hasCycle(canvas.nodes, canvas.edges)) {
    errors.push({
      nodeId: 'canvas',
      message: 'Workflow contains a cycle',
      severity: 'error',
    });
  }

  return {
    valid: errors.length === 0,
    errors,
    warnings,
  };
}
/**
 * Validate the required fields of a single workflow node.
 *
 * Only node types with mandatory fields are checked; every other type
 * passes unconditionally. All reported issues have severity 'error'.
 *
 * @param node - Node to check.
 * @returns One error per missing required field (empty when valid).
 */
function validateNode(node: WorkflowNode): ValidationError[] {
  const issues: ValidationError[] = [];
  // Record an error when a required field is empty/falsy.
  const report = (value: unknown, field: string, message: string): void => {
    if (!value) {
      issues.push({ nodeId: node.id, field, message, severity: 'error' });
    }
  };

  const data = node.data;
  switch (data.type) {
    case 'llm':
      report(data.template, 'template', 'Template is required');
      break;
    case 'skill':
      report(data.skillId, 'skillId', 'Skill ID is required');
      break;
    case 'hand':
      report(data.handId, 'handId', 'Hand ID is required');
      report(data.action, 'action', 'Action is required');
      break;
    case 'http':
      report(data.url, 'url', 'URL is required');
      break;
    case 'input':
      report(data.variableName, 'variableName', 'Variable name is required');
      break;
  }

  return issues;
}
/**
 * Detect whether the directed node/edge graph contains a cycle.
 *
 * Depth-first search with tri-state coloring: 0 = unvisited,
 * 1 = on the current DFS path, 2 = fully explored. Reaching a node in
 * state 1 means a back edge, i.e. a cycle (self-loops included).
 *
 * @param nodes - Graph nodes; traversal starts from each unvisited node.
 * @param edges - Directed edges (source → target).
 * @returns True when any cycle is reachable from the given nodes.
 */
function hasCycle(nodes: WorkflowNode[], edges: Edge[]): boolean {
  const state = new Map<string, number>();
  const outgoing = new Map<string, string[]>();

  for (const node of nodes) {
    outgoing.set(node.id, []);
  }
  for (const edge of edges) {
    const list = outgoing.get(edge.source);
    if (list) {
      list.push(edge.target);
    } else {
      // Edge from an id not in `nodes`: still record it so traversal
      // through that id (as a target of some node) follows its edges.
      outgoing.set(edge.source, [edge.target]);
    }
  }

  const walk = (id: string): boolean => {
    state.set(id, 1);
    for (const next of outgoing.get(id) ?? []) {
      const s = state.get(next) ?? 0;
      if (s === 1) {
        return true; // back edge → cycle
      }
      if (s === 0 && walk(next)) {
        return true;
      }
    }
    state.set(id, 2);
    return false;
  };

  return nodes.some(node => (state.get(node.id) ?? 0) === 0 && walk(node.id));
}