feat(kernel): add multi-skill orchestration bridge + true parallel execution
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

- Kernel orchestration bridge: execute_orchestration, auto_compose_skills,
  validate_orchestration methods on Kernel struct
- True parallel execution: replace sequential for-loop with tokio::JoinSet
  for concurrent node execution within parallel groups
- Tauri commands: orchestration_execute (auto-compose or pre-defined graph),
  orchestration_validate (dry-run validation)
- Full type conversions: OrchestrationRequest/Response with camelCase serde

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-04-04 09:18:26 +08:00
parent 1399054547
commit f4ed1b33e0
6 changed files with 422 additions and 37 deletions

View File

@@ -7,6 +7,7 @@ mod skills;
mod hands;
mod triggers;
mod approvals;
mod orchestration;
#[cfg(feature = "multi-agent")]
mod a2a;

View File

@@ -0,0 +1,71 @@
//! Kernel-level orchestration methods
//!
//! Bridges the zclaw-skills orchestration engine to the Kernel,
//! providing high-level orchestration execution via Tauri commands.
use std::collections::HashMap;
use serde_json::Value;
use zclaw_types::Result;
use super::Kernel;
use zclaw_skills::orchestration::{OrchestrationPlanner, SkillGraphExecutor};
impl Kernel {
/// Execute a skill orchestration graph with the given inputs.
///
/// This creates a `DefaultExecutor` backed by the kernel's `SkillRegistry`,
/// builds an execution plan, and runs the graph with true parallel execution
/// within each parallel group.
pub async fn execute_orchestration(
&self,
graph: &zclaw_skills::orchestration::SkillGraph,
inputs: HashMap<String, Value>,
context: &zclaw_skills::SkillContext,
) -> Result<zclaw_skills::orchestration::OrchestrationResult> {
let executor = zclaw_skills::orchestration::DefaultExecutor::new(
self.skills.clone(),
);
executor.execute(graph, inputs, context).await
}
/// Execute an orchestration with progress tracking.
///
/// The `progress_fn` callback is invoked at each stage of execution,
/// allowing the frontend to display real-time progress.
pub async fn execute_orchestration_with_progress<F>(
&self,
graph: &zclaw_skills::orchestration::SkillGraph,
inputs: HashMap<String, Value>,
context: &zclaw_skills::SkillContext,
progress_fn: F,
) -> Result<zclaw_skills::orchestration::OrchestrationResult>
where
F: Fn(zclaw_skills::orchestration::OrchestrationProgress) + Send + Sync,
{
let executor = zclaw_skills::orchestration::DefaultExecutor::new(
self.skills.clone(),
);
executor.execute_with_progress(graph, inputs, context, progress_fn).await
}
/// Auto-compose a skill graph from a list of skill IDs.
///
/// Analyzes input/output schemas to infer data flow edges between skills.
pub async fn auto_compose_skills(
&self,
skill_ids: &[zclaw_types::SkillId],
) -> Result<zclaw_skills::orchestration::SkillGraph> {
let planner = zclaw_skills::orchestration::DefaultPlanner::new();
planner.auto_compose(skill_ids, &self.skills).await
}
/// Validate a skill graph without executing it.
pub async fn validate_orchestration(
&self,
graph: &zclaw_skills::orchestration::SkillGraph,
) -> Vec<zclaw_skills::orchestration::ValidationError> {
zclaw_skills::orchestration::validate_graph(graph, &self.skills).await
}
}

View File

@@ -17,6 +17,12 @@ use super::{
planner::OrchestrationPlanner,
};
/// Wrapper to make NodeResult Send for JoinSet
struct ParallelNodeResult {
node_id: String,
result: NodeResult,
}
/// Skill graph executor trait
#[async_trait::async_trait]
pub trait SkillGraphExecutor: Send + Sync {
@@ -76,7 +82,8 @@ impl DefaultExecutor {
cancellations.get(graph_id).copied().unwrap_or(false)
}
/// Execute a single node
/// Execute a single node (used by pipeline orchestration action driver)
#[allow(dead_code)]
async fn execute_node(
&self,
node: &super::SkillNode,
@@ -201,7 +208,7 @@ impl SkillGraphExecutor for DefaultExecutor {
let mut node_results: HashMap<String, NodeResult> = HashMap::new();
let mut progress = OrchestrationProgress::new(&graph.id, graph.nodes.len());
// Execute parallel groups
// Execute parallel groups sequentially, but nodes within each group in parallel
for group in &plan.parallel_groups {
if self.is_cancelled(&graph.id).await {
return Ok(OrchestrationResult {
@@ -213,40 +220,72 @@ impl SkillGraphExecutor for DefaultExecutor {
});
}
// Execute nodes in parallel within the group
progress.status = format!("Executing group with {} nodes", group.len());
progress_fn(progress.clone());
// Execute all nodes in the group concurrently using JoinSet
let mut join_set = tokio::task::JoinSet::new();
for node_id in group {
if let Some(node) = graph.nodes.iter().find(|n| &n.id == node_id) {
progress.current_node = Some(node_id.clone());
progress_fn(progress.clone());
let node = node.clone();
let node_id = node_id.clone();
let orch_ctx = orch_context.clone();
let skill_ctx = context.clone();
let registry = self.registry.clone();
let result = self.execute_node(node, &orch_context, context).await
.unwrap_or_else(|e| NodeResult {
node_id: node_id.clone(),
success: false,
output: Value::Null,
error: Some(e.to_string()),
duration_ms: 0,
retries: 0,
skipped: false,
});
node_results.insert(node_id.clone(), result);
join_set.spawn(async move {
let input = orch_ctx.resolve_node_input(&node);
let start = Instant::now();
let result = registry.execute(&node.skill_id, &skill_ctx, input).await;
let nr = match result {
Ok(sr) if sr.success => NodeResult {
node_id: node_id.clone(),
success: true,
output: sr.output,
error: None,
duration_ms: start.elapsed().as_millis() as u64,
retries: 0,
skipped: false,
},
Ok(sr) => NodeResult {
node_id: node_id.clone(),
success: false,
output: Value::Null,
error: sr.error,
duration_ms: start.elapsed().as_millis() as u64,
retries: 0,
skipped: false,
},
Err(e) => NodeResult {
node_id: node_id.clone(),
success: false,
output: Value::Null,
error: Some(e.to_string()),
duration_ms: start.elapsed().as_millis() as u64,
retries: 0,
skipped: false,
},
};
ParallelNodeResult { node_id, result: nr }
});
}
}
// Update context with node outputs
for node_id in group {
if let Some(result) = node_results.get(node_id) {
if result.success {
orch_context.set_node_output(node_id, result.output.clone());
progress.completed_nodes.push(node_id.clone());
} else {
progress.failed_nodes.push(node_id.clone());
// Handle error based on strategy
match graph.on_error {
ErrorStrategy::Stop => {
// Clone error before moving node_results
// Collect results as tasks complete
while let Some(join_result) = join_set.join_next().await {
match join_result {
Ok(parallel_result) => {
let ParallelNodeResult { node_id, result } = parallel_result;
if result.success {
orch_context.set_node_output(&node_id, result.output.clone());
progress.completed_nodes.push(node_id.clone());
} else {
progress.failed_nodes.push(node_id.clone());
if matches!(graph.on_error, ErrorStrategy::Stop) {
let error = result.error.clone();
node_results.insert(node_id, result);
join_set.abort_all();
return Ok(OrchestrationResult {
success: false,
output: Value::Null,
@@ -255,27 +294,24 @@ impl SkillGraphExecutor for DefaultExecutor {
error,
});
}
ErrorStrategy::Continue => {
// Continue to next group
}
ErrorStrategy::Retry => {
// Already handled in execute_node
}
}
node_results.insert(node_id, result);
}
Err(e) => {
tracing::warn!("[Orchestration] Task panicked: {}", e);
}
}
}
// Update progress
progress.progress_percent = ((progress.completed_nodes.len() + progress.failed_nodes.len())
* 100 / graph.nodes.len()) as u8;
* 100 / graph.nodes.len().max(1)) as u8;
progress.status = format!("Completed group with {} nodes", group.len());
progress_fn(progress.clone());
}
// Build final output
let output = orch_context.build_output(&graph.output_mapping);
let success = progress.failed_nodes.is_empty();
Ok(OrchestrationResult {