refactor(crates): kernel/generation module split + DeerFlow optimizations + middleware + dead code cleanup

- Split zclaw-kernel/kernel.rs (1486 lines) into 9 domain modules
- Split zclaw-kernel/generation.rs (1080 lines) into 3 modules
- Add DeerFlow-inspired middleware: DanglingTool, SubagentLimit, ToolError, ToolOutputGuard
- Add PromptBuilder for structured system prompt assembly
- Add FactStore (zclaw-memory) for persistent fact extraction
- Add task builtin tool for agent task management
- Driver improvements: Anthropic/OpenAI extended thinking, Gemini safety settings
- Replace `let _ = …` error-discarding with proper `log::warn!` logging across SaaS handlers
- Remove unused dependency (url) from zclaw-hands
This commit is contained in:
iven
2026-04-03 00:28:03 +08:00
parent 0a04b260a4
commit 52bdafa633
55 changed files with 4130 additions and 1959 deletions

View File

@@ -311,7 +311,7 @@ impl KernelConfig {
}
/// Find the config file path.
fn find_config_path() -> Option<PathBuf> {
pub fn find_config_path() -> Option<PathBuf> {
// 1. Environment variable override
if let Ok(path) = std::env::var("ZCLAW_CONFIG") {
return Some(PathBuf::from(path));

View File

@@ -755,6 +755,7 @@ mod tests {
order: 0,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -563,6 +563,7 @@ mod tests {
order: 1,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -601,6 +601,7 @@ mod tests {
order: 0,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -0,0 +1,345 @@
//! Agent Profile Generation for Interactive Classroom
//!
//! Generates multi-agent classroom roles (Teacher, Assistant, Students)
//! with distinct personas, avatars, and action permissions.
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Agent role in the classroom.
///
/// `Default` is derived, with [`AgentRole::Teacher`] as the default variant
/// (replaces the previous hand-written `impl Default`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AgentRole {
    /// Leads the session; the default role.
    #[default]
    Teacher,
    /// Supports the teacher (code examples, follow-ups).
    Assistant,
    /// Classroom participant.
    Student,
}

impl std::fmt::Display for AgentRole {
    /// Renders the role as its lowercase wire name ("teacher" / "assistant" /
    /// "student"), matching the serde `snake_case` representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            AgentRole::Teacher => "teacher",
            AgentRole::Assistant => "assistant",
            AgentRole::Student => "student",
        };
        f.write_str(name)
    }
}
/// Agent profile for classroom participants.
///
/// Serialized with camelCase keys for the frontend (e.g. `allowedActions`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AgentProfile {
    /// Unique ID for this agent (generators produce `agent_<role>_<uuid>`)
    pub id: String,
    /// Display name (e.g., "陈老师", "小助手", "张伟")
    pub name: String,
    /// Role type
    pub role: AgentRole,
    /// Persona description (system prompt for this agent)
    pub persona: String,
    /// Avatar emoji or URL
    pub avatar: String,
    /// Theme color (hex, e.g. "#4F46E5")
    pub color: String,
    /// Actions this agent is allowed to perform (e.g. "speech", "whiteboard_draw")
    pub allowed_actions: Vec<String>,
    /// Speaking priority (higher = speaks first in multi-agent)
    pub priority: u8,
}
/// Request for generating agent profiles.
///
/// Used by [`generate_agent_profiles`]; unset optional fields fall back to
/// 5 agents and the "zh-CN" locale.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentProfileRequest {
    /// Topic for context-aware persona generation
    pub topic: String,
    /// Teaching style hint ("lecture", "discussion", "pbl", "socratic")
    pub style: String,
    /// Difficulty level hint (e.g. "beginner", "intermediate")
    pub level: String,
    /// Total agent count (default: 5; teacher + assistant + students)
    pub agent_count: Option<usize>,
    /// Language code (default: "zh-CN"; any "zh*" selects Chinese personas)
    pub language: Option<String>,
}
impl Default for AgentProfileRequest {
fn default() -> Self {
Self {
topic: String::new(),
style: "lecture".to_string(),
level: "intermediate".to_string(),
agent_count: None,
language: Some("zh-CN".to_string()),
}
}
}
/// Generate agent profiles for a classroom session.
///
/// Returns hardcoded defaults that match the OpenMAIC experience.
/// Always produces one teacher, one assistant, and at least one student, so
/// the result never contains fewer than three agents.
/// Future: optionally use LLM for dynamic persona generation.
pub fn generate_agent_profiles(request: &AgentProfileRequest) -> Vec<AgentProfile> {
    // Two slots are reserved for teacher + assistant; the rest are students.
    let student_count = request.agent_count.unwrap_or(5).saturating_sub(2).max(1);
    // A missing language defaults to Chinese; any "zh*" code selects Chinese.
    let chinese = request
        .language
        .as_deref()
        .map_or(true, |lang| lang.starts_with("zh"));
    if chinese {
        generate_chinese_profiles(&request.topic, &request.style, student_count)
    } else {
        generate_english_profiles(&request.topic, &request.style, student_count)
    }
}
/// Build the Chinese-language classroom cast: one teacher, one assistant, and
/// `student_count` students cycled from three fixed personality templates.
///
/// `style` selects the teacher's persona prefix; unknown styles fall back to
/// the generic "经验丰富的" description.
fn generate_chinese_profiles(topic: &str, style: &str, student_count: usize) -> Vec<AgentProfile> {
    let style_desc = match style {
        "discussion" => "善于引导讨论的",
        "pbl" => "注重项目实践的",
        "socratic" => "擅长提问式教学的",
        _ => "经验丰富的",
    };
    let mut agents = Vec::with_capacity(student_count + 2);
    // Teacher
    agents.push(AgentProfile {
        id: format!("agent_teacher_{}", Uuid::new_v4()),
        name: "陈老师".to_string(),
        role: AgentRole::Teacher,
        persona: format!(
            "你是一位{}教师,正在教授「{}」这个主题。你的教学风格清晰有条理,\
             善于使用生活中的比喻和类比帮助学生理解抽象概念。你注重核心原理的透彻理解,\
             会用通俗易懂的语言解释复杂概念。",
            style_desc, topic
        ),
        avatar: "👩‍🏫".to_string(),
        color: "#4F46E5".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
            "slideshow_control".into(),
            "quiz_create".into(),
        ],
        priority: 10,
    });
    // Assistant
    agents.push(AgentProfile {
        id: format!("agent_assistant_{}", Uuid::new_v4()),
        name: "小助手".to_string(),
        role: AgentRole::Assistant,
        persona: format!(
            "你是一位耐心的助教,正在协助教授「{}」。你擅长用代码示例和图表辅助讲解,\
             善于回答学生问题补充老师遗漏的知识点。你说话简洁明了喜欢用emoji点缀语气。",
            topic
        ),
        avatar: "🤝".to_string(),
        color: "#10B981".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
        ],
        priority: 7,
    });
    // Students — 3 distinct personalities, reused round-robin beyond that
    let student_templates = [
        (
            "李思",
            "你是一个好奇且活跃的学生,正在学习「{topic}」。你有一定编程基础,但概念理解上容易混淆。\
             你经常问'为什么'和'如果...呢'这类深入问题,喜欢和老师互动。",
            "🤔",
            "#EF4444",
        ),
        (
            "王明",
            "你是一个认真笔记的学生,正在学习「{topic}」。你学习态度端正,善于总结和归纳要点。\
             你经常复述和确认自己的理解,喜欢有条理的讲解方式。",
            "📝",
            "#F59E0B",
        ),
        (
            "张伟",
            "你是一个思维跳跃的学生,正在学习「{topic}」。你经常联想到其他概念和实际应用场景,\
             善于举一反三但有时会跑题。你喜欢动手实践和探索。",
            "💡",
            "#8B5CF6",
        ),
    ];
    for i in 0..student_count {
        let (name, persona_tmpl, avatar, color) = &student_templates[i % student_templates.len()];
        agents.push(AgentProfile {
            id: format!("agent_student_{}_{}", i + 1, Uuid::new_v4()),
            name: name.to_string(),
            role: AgentRole::Student,
            persona: persona_tmpl.replace("{topic}", topic),
            avatar: avatar.to_string(),
            color: color.to_string(),
            allowed_actions: vec!["speech".into(), "ask_question".into()],
            // Earlier students speak first; clamp the index so the subtraction
            // can never underflow (the old `(5 - i as u8).max(1)` panicked in
            // debug / wrapped in release once i >= 6, i.e. > 7 total agents).
            priority: (5 - i.min(4)) as u8,
        });
    }
    agents
}
/// Build the English-language classroom cast: one teacher, one assistant, and
/// `student_count` students cycled from three fixed personality templates.
///
/// `style` selects the teacher's persona prefix; unknown styles fall back to
/// the generic "experienced" description.
fn generate_english_profiles(topic: &str, style: &str, student_count: usize) -> Vec<AgentProfile> {
    let style_desc = match style {
        "discussion" => "discussion-oriented",
        "pbl" => "project-based",
        "socratic" => "Socratic method",
        _ => "experienced",
    };
    let mut agents = Vec::with_capacity(student_count + 2);
    // Teacher
    agents.push(AgentProfile {
        id: format!("agent_teacher_{}", Uuid::new_v4()),
        name: "Prof. Chen".to_string(),
        role: AgentRole::Teacher,
        persona: format!(
            "You are a {} instructor teaching 「{}」. Your teaching style is clear and organized, \
             skilled at using metaphors and analogies to explain complex concepts in accessible language. \
             You focus on thorough understanding of core principles.",
            style_desc, topic
        ),
        avatar: "👩‍🏫".to_string(),
        color: "#4F46E5".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
            "slideshow_control".into(),
            "quiz_create".into(),
        ],
        priority: 10,
    });
    // Assistant
    agents.push(AgentProfile {
        id: format!("agent_assistant_{}", Uuid::new_v4()),
        name: "TA Alex".to_string(),
        role: AgentRole::Assistant,
        persona: format!(
            "You are a patient teaching assistant helping with 「{}」. \
             You provide code examples, diagrams, and fill in gaps. You are concise and friendly.",
            topic
        ),
        avatar: "🤝".to_string(),
        color: "#10B981".to_string(),
        allowed_actions: vec!["speech".into(), "whiteboard_draw".into()],
        priority: 7,
    });
    // Students — 3 distinct personalities, reused round-robin beyond that
    let student_templates = [
        (
            "Sam",
            "A curious and active student learning 「{topic}」. Has some programming background \
             but gets confused on concepts. Often asks 'why?' and 'what if?'",
            "🤔",
            "#EF4444",
        ),
        (
            "Jordan",
            "A diligent note-taking student learning 「{topic}」. Methodical learner, \
             good at summarizing key points. Prefers structured explanations.",
            "📝",
            "#F59E0B",
        ),
        (
            "Alex",
            "A creative thinker learning 「{topic}」. Connects concepts to real-world applications. \
             Good at lateral thinking but sometimes goes off-topic.",
            "💡",
            "#8B5CF6",
        ),
    ];
    for i in 0..student_count {
        let (name, persona_tmpl, avatar, color) = &student_templates[i % student_templates.len()];
        agents.push(AgentProfile {
            id: format!("agent_student_{}_{}", i + 1, Uuid::new_v4()),
            name: name.to_string(),
            role: AgentRole::Student,
            persona: persona_tmpl.replace("{topic}", topic),
            avatar: avatar.to_string(),
            color: color.to_string(),
            allowed_actions: vec!["speech".into(), "ask_question".into()],
            // Earlier students speak first; clamp the index so the subtraction
            // can never underflow (the old `(5 - i as u8).max(1)` panicked in
            // debug / wrapped in release once i >= 6, i.e. > 7 total agents).
            priority: (5 - i.min(4)) as u8,
        });
    }
    agents
}
#[cfg(test)]
mod tests {
    use super::*;

    // Full Chinese roster: fixed roles/names and strictly decreasing priority.
    #[test]
    fn test_generate_chinese_profiles() {
        let req = AgentProfileRequest {
            topic: "Rust 所有权".to_string(),
            style: "lecture".to_string(),
            level: "intermediate".to_string(),
            agent_count: Some(5),
            language: Some("zh-CN".to_string()),
        };
        let agents = generate_agent_profiles(&req);
        assert_eq!(agents.len(), 5);
        assert_eq!(agents[0].role, AgentRole::Teacher);
        assert!(agents[0].name.contains("陈老师"));
        // Topic must be interpolated into the teacher persona.
        assert!(agents[0].persona.contains("Rust 所有权"));
        assert_eq!(agents[1].role, AgentRole::Assistant);
        assert!(agents[1].name.contains("小助手"));
        assert_eq!(agents[2].role, AgentRole::Student);
        assert_eq!(agents[3].role, AgentRole::Student);
        assert_eq!(agents[4].role, AgentRole::Student);
        // Priority ordering
        assert!(agents[0].priority > agents[1].priority);
        assert!(agents[1].priority > agents[2].priority);
    }

    // English roster: non-"zh" language selects English personas, and the
    // style hint ("discussion") shows up in the teacher persona.
    #[test]
    fn test_generate_english_profiles() {
        let req = AgentProfileRequest {
            topic: "Python Basics".to_string(),
            style: "discussion".to_string(),
            level: "beginner".to_string(),
            agent_count: Some(4),
            language: Some("en-US".to_string()),
        };
        let agents = generate_agent_profiles(&req);
        assert_eq!(agents.len(), 4); // 1 teacher + 1 assistant + 2 students
        assert_eq!(agents[0].role, AgentRole::Teacher);
        assert!(agents[0].persona.contains("discussion-oriented"));
    }

    // Display output must match the serde snake_case wire names.
    #[test]
    fn test_agent_role_display() {
        assert_eq!(format!("{}", AgentRole::Teacher), "teacher");
        assert_eq!(format!("{}", AgentRole::Assistant), "assistant");
        assert_eq!(format!("{}", AgentRole::Student), "student");
    }

    // Default request leaves topic empty and agent_count unset.
    #[test]
    fn test_default_request() {
        let req = AgentProfileRequest::default();
        assert!(req.topic.is_empty());
        assert_eq!(req.agent_count, None);
    }
}

View File

@@ -0,0 +1,337 @@
//! Classroom Multi-Agent Chat
//!
//! Handles multi-agent conversation within the classroom context.
//! A single LLM call generates responses from multiple agent perspectives.
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use super::agents::AgentProfile;
/// A single chat message in the classroom.
///
/// Serialized with camelCase keys for the frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatMessage {
    /// Unique message ID (constructors produce `msg_<uuid>`)
    pub id: String,
    /// Agent profile ID of the sender ("user" for human messages)
    pub agent_id: String,
    /// Display name of the sender
    pub agent_name: String,
    /// Avatar of the sender
    pub agent_avatar: String,
    /// Message content
    pub content: String,
    /// Unix timestamp (milliseconds)
    pub timestamp: i64,
    /// Role of the sender ("user", "teacher", "assistant", "student")
    pub role: String,
    /// Theme color of the sender (hex)
    pub color: String,
}
/// Chat state for a classroom session.
///
/// `Default` yields an empty, inactive chat.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatState {
    /// All chat messages, oldest first
    pub messages: Vec<ClassroomChatMessage>,
    /// Whether chat is active
    pub active: bool,
}
/// Request for generating a chat response.
///
/// Consumed by [`build_chat_prompt`] to assemble the coordinator prompt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassroomChatRequest {
    /// Classroom ID
    pub classroom_id: String,
    /// User's message
    pub user_message: String,
    /// Available agents (personas are embedded in the prompt)
    pub agents: Vec<AgentProfile>,
    /// Current scene context (optional, for contextual responses)
    pub scene_context: Option<String>,
    /// Chat history for context (empty renders as "No previous messages.")
    pub history: Vec<ClassroomChatMessage>,
}
/// Response from multi-agent chat generation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatResponse {
    /// Agent responses (may be 1-3 agents responding)
    pub responses: Vec<ClassroomChatMessage>,
}
impl ClassroomChatMessage {
    /// Shared constructor: stamps a fresh `msg_<uuid>` id and the current
    /// wall-clock time, filling in the given sender identity.
    fn stamped(
        agent_id: String,
        agent_name: String,
        agent_avatar: String,
        role: String,
        color: String,
        content: &str,
    ) -> Self {
        Self {
            id: format!("msg_{}", Uuid::new_v4()),
            agent_id,
            agent_name,
            agent_avatar,
            content: content.to_string(),
            timestamp: current_timestamp_millis(),
            role,
            color,
        }
    }

    /// Create a user message
    pub fn user_message(content: &str) -> Self {
        Self::stamped(
            "user".to_string(),
            "You".to_string(),
            "👤".to_string(),
            "user".to_string(),
            "#6B7280".to_string(),
            content,
        )
    }

    /// Create an agent message
    pub fn agent_message(agent: &AgentProfile, content: &str) -> Self {
        Self::stamped(
            agent.id.clone(),
            agent.name.clone(),
            agent.avatar.clone(),
            agent.role.to_string(),
            agent.color.clone(),
            content,
        )
    }
}
/// Build the LLM prompt for multi-agent chat response generation.
///
/// This function constructs a prompt that instructs the LLM to generate
/// responses from multiple agent perspectives in a structured JSON format
/// (an array of `{agentName, content}` objects).
pub fn build_chat_prompt(request: &ClassroomChatRequest) -> String {
    // One markdown bullet per agent: name, role, persona.
    let mut agent_lines = Vec::with_capacity(request.agents.len());
    for agent in &request.agents {
        agent_lines.push(format!("- **{}** ({}): {}", agent.name, agent.role, agent.persona));
    }
    // Transcript lines, or a placeholder when the history is empty.
    let history_text = if request.history.is_empty() {
        String::from("No previous messages.")
    } else {
        let lines: Vec<String> = request
            .history
            .iter()
            .map(|msg| format!("**{}**: {}", msg.agent_name, msg.content))
            .collect();
        lines.join("\n")
    };
    // Optional scene context; empty string when absent.
    let scene_hint = match request.scene_context.as_deref() {
        Some(ctx) => format!("\n当前场景上下文:{}", ctx),
        None => String::new(),
    };
    format!(
        r#"你是一个课堂多智能体讨论的协调器。根据学生的问题选择1-3个合适的角色来回复。
## 可用角色
{agents}
## 对话历史
{history}
{scene_hint}
## 学生最新问题
{question}
## 回复规则
1. 选择最合适的1-3个角色来回复
2. 老师角色应该给出权威、清晰的解释
3. 助教角色可以补充代码示例或图表说明
4. 学生角色可以表达理解、提出追问或分享自己的理解
5. 每个角色的回复应该符合其个性设定
6. 回复应该自然、有教育意义
## 输出格式
你必须返回合法的JSON数组每个元素包含
```json
[
{{
"agentName": "角色名",
"content": "回复内容"
}}
]
```
只返回JSON数组不要包含其他文字。"#,
        agents = agent_lines.join("\n"),
        history = history_text,
        scene_hint = scene_hint,
        question = request.user_message,
    )
}
/// Parse multi-agent responses from LLM output.
///
/// Extracts agent messages from the LLM's JSON response (an array of
/// `{agentName, content}` objects, optionally fenced in a ```json block).
/// Entries whose `agentName` matches no known agent are silently dropped.
/// Falls back to a single teacher response (or the first agent when no
/// teacher exists) if parsing yields no messages; returns an empty vec when
/// `agents` is empty.
pub fn parse_chat_responses(
    llm_output: &str,
    agents: &[AgentProfile],
) -> Vec<ClassroomChatMessage> {
    // Try to extract JSON from the response
    let json_text = extract_json_array(llm_output);
    // Try parsing as JSON array
    if let Ok(parsed) = serde_json::from_str::<Vec<serde_json::Value>>(&json_text) {
        let mut messages = Vec::new();
        for item in &parsed {
            if let (Some(name), Some(content)) = (
                item.get("agentName").and_then(|v| v.as_str()),
                item.get("content").and_then(|v| v.as_str()),
            ) {
                // Find matching agent; unknown names are dropped
                if let Some(agent) = agents.iter().find(|a| a.name == name) {
                    messages.push(ClassroomChatMessage::agent_message(agent, content));
                }
            }
        }
        if !messages.is_empty() {
            return messages;
        }
    }
    // Fallback: attribute the raw LLM output to the teacher, or the first
    // agent when no teacher exists. Previously only the teacher branch ran
    // the output through clean_fallback_response while the first-agent branch
    // used the raw text; both now clean consistently.
    let fallback = clean_fallback_response(llm_output);
    let speaker = agents
        .iter()
        .find(|a| a.role == super::agents::AgentRole::Teacher)
        .or_else(|| agents.first());
    match speaker {
        Some(agent) => vec![ClassroomChatMessage::agent_message(agent, &fallback)],
        None => vec![],
    }
}
/// Extract JSON array from text (handles markdown code blocks).
///
/// Resolution order: a fenced ```json block (contents trimmed), then the
/// widest `[...]` span, then the input unchanged.
fn extract_json_array(text: &str) -> String {
    // Prefer a fenced ```json code block when one is present and closed.
    if let Some(fence_start) = text.find("```json") {
        let after_fence = &text[fence_start + 7..];
        if let Some(fence_end) = after_fence.find("```") {
            return after_fence[..fence_end].trim().to_string();
        }
    }
    // Otherwise take the widest bracketed span, if any.
    match (text.find('['), text.rfind(']')) {
        (Some(open), Some(close)) if close > open => text[open..=close].to_string(),
        _ => text.to_string(),
    }
}
/// Clean up fallback response (remove JSON artifacts if present).
///
/// When the text looks like a JSON array/object, tries to pull out the first
/// element's "content" string; otherwise returns the trimmed input.
fn clean_fallback_response(text: &str) -> String {
    let trimmed = text.trim();
    let looks_like_json = trimmed.starts_with('[') || trimmed.starts_with('{');
    if looks_like_json {
        if let Ok(values) = serde_json::from_str::<Vec<serde_json::Value>>(trimmed) {
            let content = values
                .first()
                .and_then(|item| item.get("content"))
                .and_then(|value| value.as_str());
            if let Some(content) = content {
                return content.to_string();
            }
        }
    }
    trimmed.to_string()
}
/// Current Unix time in milliseconds.
///
/// Returns 0 instead of panicking when the system clock reads before the
/// Unix epoch (the previous `unwrap()` would abort the process on a
/// misconfigured clock).
fn current_timestamp_millis() -> i64 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_millis() as i64)
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::generation::agents::{AgentProfile, AgentRole};

    // Minimal two-agent fixture: one teacher, one student.
    fn test_agents() -> Vec<AgentProfile> {
        vec![
            AgentProfile {
                id: "t1".into(),
                name: "陈老师".into(),
                role: AgentRole::Teacher,
                persona: "Test teacher".into(),
                avatar: "👩‍🏫".into(),
                color: "#4F46E5".into(),
                allowed_actions: vec![],
                priority: 10,
            },
            AgentProfile {
                id: "s1".into(),
                name: "李思".into(),
                role: AgentRole::Student,
                persona: "Curious student".into(),
                avatar: "🤔".into(),
                color: "#EF4444".into(),
                allowed_actions: vec![],
                priority: 5,
            },
        ]
    }

    // Well-formed fenced JSON: one message per entry, mapped by agent name.
    #[test]
    fn test_parse_chat_responses_valid_json() {
        let agents = test_agents();
        let llm_output = r#"```json
[
{"agentName": "陈老师", "content": "好问题!让我来解释一下..."},
{"agentName": "李思", "content": "原来如此,那如果..."}
]
```"#;
        let messages = parse_chat_responses(llm_output, &agents);
        assert_eq!(messages.len(), 2);
        assert_eq!(messages[0].agent_name, "陈老师");
        assert_eq!(messages[1].agent_name, "李思");
    }

    // Non-JSON output: whole text attributed to the teacher.
    #[test]
    fn test_parse_chat_responses_fallback() {
        let agents = test_agents();
        let llm_output = "这是一个关于Rust的好问题。所有权意味着每个值只有一个主人。";
        let messages = parse_chat_responses(llm_output, &agents);
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0].agent_name, "陈老师"); // Falls back to teacher
    }

    // Prompt must embed agent names, the question, and the scene context.
    #[test]
    fn test_build_chat_prompt() {
        let agents = test_agents();
        let request = ClassroomChatRequest {
            classroom_id: "test".into(),
            user_message: "什么是所有权?".into(),
            agents,
            scene_context: Some("Rust 所有权核心规则".into()),
            history: vec![],
        };
        let prompt = build_chat_prompt(&request);
        assert!(prompt.contains("陈老师"));
        assert!(prompt.contains("什么是所有权?"));
        assert!(prompt.contains("Rust 所有权核心规则"));
    }

    // User constructor stamps the fixed "user"/"You" identity.
    #[test]
    fn test_user_message() {
        let msg = ClassroomChatMessage::user_message("Hello");
        assert_eq!(msg.agent_name, "You");
        assert_eq!(msg.role, "user");
    }

    // Agent constructor copies identity fields from the profile.
    #[test]
    fn test_agent_message() {
        let agent = &test_agents()[0];
        let msg = ClassroomChatMessage::agent_message(agent, "Test");
        assert_eq!(msg.agent_name, "陈老师");
        assert_eq!(msg.role, "teacher");
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,268 @@
//! A2A (Agent-to-Agent) messaging
//!
//! All items in this module are gated by the `multi-agent` feature flag.
#[cfg(feature = "multi-agent")]
use zclaw_types::{AgentId, Capability, Event, Result};
#[cfg(feature = "multi-agent")]
use zclaw_protocols::{A2aAgentProfile, A2aCapability, A2aEnvelope, A2aMessageType, A2aRecipient};
#[cfg(feature = "multi-agent")]
use super::Kernel;
#[cfg(feature = "multi-agent")]
impl Kernel {
    // ============================================================
    // A2A (Agent-to-Agent) Messaging
    // ============================================================

    /// Derive an A2A agent profile from an AgentConfig.
    ///
    /// Each configured tool is advertised as an A2A capability (no schemas,
    /// no approval requirement); the agent is registered as a "worker" with a
    /// mid-range priority of 5.
    pub(super) fn agent_config_to_a2a_profile(config: &zclaw_types::AgentConfig) -> A2aAgentProfile {
        let caps: Vec<A2aCapability> = config.tools.iter().map(|tool_name| {
            A2aCapability {
                name: tool_name.clone(),
                description: format!("Tool: {}", tool_name),
                input_schema: None,
                output_schema: None,
                requires_approval: false,
                version: "1.0.0".to_string(),
                tags: vec![],
            }
        }).collect();
        A2aAgentProfile {
            id: config.id,
            name: config.name.clone(),
            description: config.description.clone().unwrap_or_default(),
            capabilities: caps,
            protocols: vec!["a2a".to_string()],
            role: "worker".to_string(),
            priority: 5,
            metadata: std::collections::HashMap::new(),
            groups: vec![],
            last_seen: 0,
        }
    }

    /// Check if an agent is authorized to send messages to a target.
    ///
    /// An `AgentMessage` capability with pattern "*" allows any target;
    /// otherwise the target id string must start with the pattern. Agents
    /// with no registered capabilities are denied by default.
    pub(super) fn check_a2a_permission(&self, from: &AgentId, to: &AgentId) -> Result<()> {
        let caps = self.capabilities.get(from);
        match caps {
            Some(cap_set) => {
                let has_permission = cap_set.capabilities.iter().any(|cap| {
                    match cap {
                        Capability::AgentMessage { pattern } => {
                            pattern == "*" || to.to_string().starts_with(pattern)
                        }
                        _ => false,
                    }
                });
                if !has_permission {
                    return Err(zclaw_types::ZclawError::PermissionDenied(
                        format!("Agent {} does not have AgentMessage capability for {}", from, to)
                    ));
                }
                Ok(())
            }
            None => {
                // No capabilities registered — deny by default
                Err(zclaw_types::ZclawError::PermissionDenied(
                    format!("Agent {} has no capabilities registered", from)
                ))
            }
        }
    }

    /// Send a direct A2A message from one agent to another.
    ///
    /// Both agents must be registered and `from` must hold an `AgentMessage`
    /// capability matching `to`. Defaults to `Notification` when no message
    /// type is given. Emits `A2aMessageSent` on success.
    pub async fn a2a_send(
        &self,
        from: &AgentId,
        to: &AgentId,
        payload: serde_json::Value,
        message_type: Option<A2aMessageType>,
    ) -> Result<()> {
        // Validate sender exists
        self.registry.get(from)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("Sender agent not found: {}", from)
            ))?;
        // Validate receiver exists (registry presence only; run state is not checked)
        self.registry.get(to)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("Target agent not found: {}", to)
            ))?;
        // Check capability permission
        self.check_a2a_permission(from, to)?;
        // Build and route envelope
        let envelope = A2aEnvelope::new(
            *from,
            A2aRecipient::Direct { agent_id: *to },
            message_type.unwrap_or(A2aMessageType::Notification),
            payload,
        );
        self.a2a_router.route(envelope).await?;
        // Emit event
        self.events.publish(Event::A2aMessageSent {
            from: *from,
            to: format!("{}", to),
            message_type: "direct".to_string(),
        });
        Ok(())
    }

    /// Broadcast a message from one agent to all other agents.
    ///
    /// Only the sender's existence is validated; no capability check is
    /// applied to broadcasts (routing decides delivery).
    pub async fn a2a_broadcast(
        &self,
        from: &AgentId,
        payload: serde_json::Value,
    ) -> Result<()> {
        // Validate sender exists
        self.registry.get(from)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("Sender agent not found: {}", from)
            ))?;
        let envelope = A2aEnvelope::new(
            *from,
            A2aRecipient::Broadcast,
            A2aMessageType::Notification,
            payload,
        );
        self.a2a_router.route(envelope).await?;
        self.events.publish(Event::A2aMessageSent {
            from: *from,
            to: "broadcast".to_string(),
            message_type: "broadcast".to_string(),
        });
        Ok(())
    }

    /// Discover agents that have a specific capability.
    pub async fn a2a_discover(&self, capability: &str) -> Result<Vec<A2aAgentProfile>> {
        let result = self.a2a_router.discover(capability).await?;
        // NOTE(review): the event carries a freshly generated AgentId rather
        // than the ids of the discovered agents — confirm the intended
        // payload of A2aAgentDiscovered.
        self.events.publish(Event::A2aAgentDiscovered {
            agent_id: AgentId::new(),
            capabilities: vec![capability.to_string()],
        });
        Ok(result)
    }

    /// Try to receive a pending A2A message for an agent (non-blocking).
    ///
    /// Returns `Ok(None)` when the inbox is empty; errors only when the agent
    /// has no inbox at all. Emits `A2aMessageReceived` for delivered messages.
    pub async fn a2a_receive(&self, agent_id: &AgentId) -> Result<Option<A2aEnvelope>> {
        let inbox = self.a2a_inboxes.get(agent_id)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("No A2A inbox for agent: {}", agent_id)
            ))?;
        let mut inbox = inbox.lock().await;
        match inbox.try_recv() {
            Ok(envelope) => {
                self.events.publish(Event::A2aMessageReceived {
                    from: envelope.from,
                    to: format!("{}", agent_id),
                    message_type: "direct".to_string(),
                });
                Ok(Some(envelope))
            }
            Err(_) => Ok(None),
        }
    }

    /// Delegate a task to another agent and wait for a response with timeout.
    ///
    /// Sends a `Task` envelope and blocks (up to `timeout_ms`) on the
    /// sender's inbox for a `Response` whose `reply_to` matches the sent
    /// envelope id. Unrelated messages received while waiting are set aside
    /// and re-queued afterwards so they are not lost.
    pub async fn a2a_delegate_task(
        &self,
        from: &AgentId,
        to: &AgentId,
        task_description: String,
        timeout_ms: u64,
    ) -> Result<serde_json::Value> {
        // Validate both agents exist
        self.registry.get(from)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("Sender agent not found: {}", from)
            ))?;
        self.registry.get(to)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("Target agent not found: {}", to)
            ))?;
        // Check capability permission
        self.check_a2a_permission(from, to)?;
        // Send task request
        let task_id = uuid::Uuid::new_v4().to_string();
        let envelope = A2aEnvelope::new(
            *from,
            A2aRecipient::Direct { agent_id: *to },
            A2aMessageType::Task,
            serde_json::json!({
                "task_id": task_id,
                "description": task_description,
            }),
        ).with_conversation(task_id.clone());
        let envelope_id = envelope.id.clone();
        self.a2a_router.route(envelope).await?;
        self.events.publish(Event::A2aMessageSent {
            from: *from,
            to: format!("{}", to),
            message_type: "task".to_string(),
        });
        // Wait for the matching response with a deadline.
        //
        // BUGFIX: the previous implementation re-queued non-matching messages
        // back into the inbox *inside* the receive loop. Because AgentInbox
        // always drains its pending queue before polling the channel, a
        // single non-matching message caused a hot spin that starved the real
        // response until the timeout fired. Non-matching messages are now set
        // aside locally and re-queued once the wait finishes — including on
        // timeout, so they are never lost.
        let deadline = tokio::time::Instant::now()
            + tokio::time::Duration::from_millis(timeout_ms);
        let inbox_entry = self.a2a_inboxes.get(from)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("No A2A inbox for agent: {}", from)
            ))?;
        let mut inbox = inbox_entry.lock().await;
        let mut deferred: Vec<A2aEnvelope> = Vec::new();
        let outcome = loop {
            match tokio::time::timeout_at(deadline, inbox.recv()).await {
                Ok(Some(msg)) => {
                    // Check if this is the response to our task
                    if msg.message_type == A2aMessageType::Response
                        && msg.reply_to.as_deref() == Some(&envelope_id) {
                        break Ok(msg.payload);
                    }
                    // Not our response — defer it for later re-queuing
                    tracing::debug!("Deferring non-matching A2A message: {}", msg.id);
                    deferred.push(msg);
                }
                Ok(None) => {
                    break Err(zclaw_types::ZclawError::Internal(
                        "A2A inbox channel closed".to_string()
                    ));
                }
                Err(_) => {
                    break Err(zclaw_types::ZclawError::Timeout(
                        format!("A2A task delegation timed out after {}ms", timeout_ms)
                    ));
                }
            }
        };
        // Put unrelated messages back so they can be processed later.
        for msg in deferred {
            inbox.requeue(msg);
        }
        outcome
    }

    /// Get all online agents via A2A profiles.
    pub async fn a2a_get_online_agents(&self) -> Result<Vec<A2aAgentProfile>> {
        Ok(self.a2a_router.list_profiles().await)
    }
}

View File

@@ -0,0 +1,138 @@
//! Adapter types bridging runtime interfaces
use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
use serde_json::Value;
use zclaw_runtime::{LlmDriver, tool::SkillExecutor};
use zclaw_skills::{SkillRegistry, LlmCompleter};
use zclaw_types::Result;
/// Adapter that bridges `zclaw_runtime::LlmDriver` -> `zclaw_skills::LlmCompleter`
pub(crate) struct LlmDriverAdapter {
    // Underlying driver that performs the actual completion call.
    pub(crate) driver: Arc<dyn LlmDriver>,
    // Token budget forwarded on every completion request.
    pub(crate) max_tokens: u32,
    // Sampling temperature forwarded on every completion request.
    pub(crate) temperature: f32,
}
impl LlmCompleter for LlmDriverAdapter {
    /// Run a single-turn completion: wraps `prompt` as one user message,
    /// forwards it to the underlying driver with the adapter's token/
    /// temperature settings, and concatenates all returned text blocks
    /// (non-text blocks are ignored).
    ///
    /// Driver errors are stringified, as the `LlmCompleter` contract uses
    /// `Result<String, String>`.
    fn complete(
        &self,
        prompt: &str,
    ) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>> {
        let prompt = prompt.to_string();
        Box::pin(async move {
            let request = zclaw_runtime::CompletionRequest {
                messages: vec![zclaw_types::Message::user(prompt)],
                max_tokens: Some(self.max_tokens),
                temperature: Some(self.temperature),
                ..Default::default()
            };
            // The future is already tied to `&self` ('_ lifetime) because it
            // reads max_tokens/temperature, so cloning the Arc driver up
            // front bought nothing — call through the borrowed field instead.
            let response = self.driver.complete(request).await
                .map_err(|e| format!("LLM completion error: {}", e))?;
            // Extract and concatenate text from content blocks directly into
            // a String (no intermediate Vec + join).
            let text: String = response.content.iter()
                .filter_map(|block| match block {
                    zclaw_runtime::ContentBlock::Text { text } => Some(text.as_str()),
                    _ => None,
                })
                .collect();
            Ok(text)
        })
    }
}
/// Skill executor implementation for Kernel.
///
/// Bridges the skill registry to the runtime's `SkillExecutor` trait and
/// supplies skills with an LLM completer backed by the kernel driver.
pub struct KernelSkillExecutor {
    // Registry holding all skill manifests and implementations.
    pub(crate) skills: Arc<SkillRegistry>,
    // Completer handed to skills that need LLM access.
    pub(crate) llm: Arc<dyn LlmCompleter>,
}

impl KernelSkillExecutor {
    /// Build an executor over `skills`, wrapping `driver` in an
    /// [`LlmDriverAdapter`] with fixed defaults (4096 max tokens,
    /// temperature 0.7).
    pub fn new(skills: Arc<SkillRegistry>, driver: Arc<dyn LlmDriver>) -> Self {
        let adapter = LlmDriverAdapter {
            driver,
            max_tokens: 4096,
            temperature: 0.7,
        };
        let llm: Arc<dyn zclaw_skills::LlmCompleter> = Arc::new(adapter);
        Self { skills, llm }
    }
}
#[async_trait]
impl SkillExecutor for KernelSkillExecutor {
    /// Execute a skill by id within a fresh context carrying the agent and
    /// session identifiers plus the shared LLM completer.
    async fn execute_skill(
        &self,
        skill_id: &str,
        agent_id: &str,
        session_id: &str,
        input: Value,
    ) -> Result<Value> {
        let ctx = zclaw_skills::SkillContext {
            agent_id: agent_id.to_owned(),
            session_id: session_id.to_owned(),
            llm: Some(self.llm.clone()),
            ..Default::default()
        };
        let outcome = self
            .skills
            .execute(&zclaw_types::SkillId::new(skill_id), &ctx, input)
            .await?;
        Ok(outcome.output)
    }

    /// Full manifest details for a single skill, if registered.
    fn get_skill_detail(&self, skill_id: &str) -> Option<zclaw_runtime::tool::SkillDetail> {
        let manifests = self.skills.manifests_snapshot();
        manifests
            .get(&zclaw_types::SkillId::new(skill_id))
            .map(|m| zclaw_runtime::tool::SkillDetail {
                id: m.id.as_str().to_string(),
                name: m.name.clone(),
                description: m.description.clone(),
                category: m.category.clone(),
                input_schema: m.input_schema.clone(),
                triggers: m.triggers.clone(),
                capabilities: m.capabilities.clone(),
            })
    }

    /// Lightweight index of all *enabled* skills.
    fn list_skill_index(&self) -> Vec<zclaw_runtime::tool::SkillIndexEntry> {
        self.skills
            .manifests_snapshot()
            .values()
            .filter(|m| m.enabled)
            .map(|m| zclaw_runtime::tool::SkillIndexEntry {
                id: m.id.as_str().to_string(),
                description: m.description.clone(),
                triggers: m.triggers.clone(),
            })
            .collect()
    }
}
/// Inbox wrapper for A2A message receivers that supports re-queuing
/// non-matching messages instead of dropping them.
///
/// The `pending` buffer is always drained before the underlying channel is
/// polled, so re-queued envelopes are delivered ahead of new ones.
#[cfg(feature = "multi-agent")]
pub(crate) struct AgentInbox {
    pub(crate) rx: tokio::sync::mpsc::Receiver<zclaw_protocols::A2aEnvelope>,
    pub(crate) pending: std::collections::VecDeque<zclaw_protocols::A2aEnvelope>,
}

#[cfg(feature = "multi-agent")]
impl AgentInbox {
    /// Wrap a channel receiver with an empty re-queue buffer.
    pub(crate) fn new(rx: tokio::sync::mpsc::Receiver<zclaw_protocols::A2aEnvelope>) -> Self {
        Self {
            rx,
            pending: std::collections::VecDeque::new(),
        }
    }

    /// Non-blocking receive: re-queued messages first, then the channel.
    pub(crate) fn try_recv(
        &mut self,
    ) -> std::result::Result<zclaw_protocols::A2aEnvelope, tokio::sync::mpsc::error::TryRecvError> {
        match self.pending.pop_front() {
            Some(envelope) => Ok(envelope),
            None => self.rx.try_recv(),
        }
    }

    /// Awaiting receive: re-queued messages first, then the channel.
    pub(crate) async fn recv(&mut self) -> Option<zclaw_protocols::A2aEnvelope> {
        match self.pending.pop_front() {
            Some(envelope) => Some(envelope),
            None => self.rx.recv().await,
        }
    }

    /// Put an envelope back; it is returned by the next receive call.
    pub(crate) fn requeue(&mut self, envelope: zclaw_protocols::A2aEnvelope) {
        self.pending.push_back(envelope);
    }
}

View File

@@ -0,0 +1,113 @@
//! Agent CRUD operations
use zclaw_types::{AgentConfig, AgentId, AgentInfo, Event, Result};
#[cfg(feature = "multi-agent")]
use std::sync::Arc;
#[cfg(feature = "multi-agent")]
use tokio::sync::Mutex;
#[cfg(feature = "multi-agent")]
use super::adapters::AgentInbox;
use super::Kernel;
impl Kernel {
    /// Spawn a new agent.
    ///
    /// Order matters: capabilities are validated first, then the config is
    /// persisted to memory, the agent is registered with the A2A router
    /// (before the config is moved), and finally registered in the registry.
    /// Emits `AgentSpawned` and returns the agent's id.
    pub async fn spawn_agent(&self, config: AgentConfig) -> Result<AgentId> {
        let id = config.id;
        // Validate capabilities
        self.capabilities.validate(&config.capabilities)?;
        // Register in memory
        self.memory.save_agent(&config).await?;
        // Register with A2A router for multi-agent messaging (before config is moved)
        #[cfg(feature = "multi-agent")]
        {
            let profile = Self::agent_config_to_a2a_profile(&config);
            let rx = self.a2a_router.register_agent(profile).await;
            self.a2a_inboxes.insert(id, Arc::new(Mutex::new(AgentInbox::new(rx))));
        }
        // Register in registry (consumes config)
        let name = config.name.clone();
        self.registry.register(config);
        // Emit event
        self.events.publish(Event::AgentSpawned {
            agent_id: id,
            name,
        });
        Ok(id)
    }

    /// Kill an agent.
    ///
    /// Removes it from registry, persistent memory, and (when the feature is
    /// on) the A2A router and inbox map, then emits `AgentTerminated` with
    /// reason "killed". Not an error if the agent was not registered.
    pub async fn kill_agent(&self, id: &AgentId) -> Result<()> {
        // Remove from registry
        self.registry.unregister(id);
        // Remove from memory
        self.memory.delete_agent(id).await?;
        // Unregister from A2A router
        #[cfg(feature = "multi-agent")]
        {
            self.a2a_router.unregister_agent(id).await;
            self.a2a_inboxes.remove(id);
        }
        // Emit event
        self.events.publish(Event::AgentTerminated {
            agent_id: *id,
            reason: "killed".to_string(),
        });
        Ok(())
    }

    /// Update an existing agent's configuration.
    ///
    /// Fails with `NotFound` when the agent is not registered; otherwise
    /// validates capabilities, persists the new config, updates the registry
    /// entry, and emits `AgentConfigUpdated`.
    pub async fn update_agent(&self, config: AgentConfig) -> Result<()> {
        let id = config.id;
        // Validate the agent exists
        if self.registry.get(&id).is_none() {
            return Err(zclaw_types::ZclawError::NotFound(
                format!("Agent not found: {}", id)
            ));
        }
        // Validate capabilities
        self.capabilities.validate(&config.capabilities)?;
        // Save updated config to memory
        self.memory.save_agent(&config).await?;
        // Update in registry (preserves state and message count)
        self.registry.update(config.clone());
        // Emit event
        self.events.publish(Event::AgentConfigUpdated {
            agent_id: id,
            name: config.name.clone(),
        });
        Ok(())
    }

    /// List all agents.
    pub fn list_agents(&self) -> Vec<AgentInfo> {
        self.registry.list()
    }

    /// Get agent info.
    pub fn get_agent(&self, id: &AgentId) -> Option<AgentInfo> {
        self.registry.get_info(id)
    }

    /// Get agent config (for export).
    pub fn get_agent_config(&self, id: &AgentId) -> Option<AgentConfig> {
        self.registry.get(id)
    }
}

View File

@@ -0,0 +1,155 @@
//! Approval management
use std::sync::Arc;
use serde_json::Value;
use zclaw_types::{Result, HandRun, HandRunId, HandRunStatus, TriggerSource};
use zclaw_hands::HandContext;
use super::Kernel;
impl Kernel {
// ============================================================
// Approval Management
// ============================================================
/// List pending approvals
///
/// Snapshots the approval queue and returns clones of every entry
/// whose status is still `"pending"`.
pub async fn list_approvals(&self) -> Vec<super::ApprovalEntry> {
    let guard = self.pending_approvals.lock().await;
    let mut pending = Vec::new();
    for entry in guard.iter() {
        if entry.status == "pending" {
            pending.push(entry.clone());
        }
    }
    pending
}
/// Get a single approval by ID (any status, not just pending)
///
/// Returns None if no approval with the given ID exists.
pub async fn get_approval(&self, id: &str) -> Option<super::ApprovalEntry> {
    let guard = self.pending_approvals.lock().await;
    for entry in guard.iter() {
        if entry.id == id {
            return Some(entry.clone());
        }
    }
    None
}
/// Create a pending approval (called when a needs_approval hand is triggered)
///
/// Allocates a fresh UUID for the entry, appends it to the shared
/// approval queue, and hands a clone back to the caller.
pub async fn create_approval(&self, hand_id: String, input: serde_json::Value) -> super::ApprovalEntry {
    let entry = super::ApprovalEntry {
        id: uuid::Uuid::new_v4().to_string(),
        hand_id,
        status: String::from("pending"),
        created_at: chrono::Utc::now(),
        input,
        reject_reason: None,
    };
    self.pending_approvals.lock().await.push(entry.clone());
    entry
}
/// Respond to an approval
///
/// Marks the pending entry as `"approved"` or `"rejected"`. On approval
/// the associated hand is executed on a background task with full
/// `HandRun` persistence (Pending → Running → Completed/Failed), and the
/// approval's status is updated again once execution finishes
/// (`"completed"` / `"failed"`).
///
/// # Errors
/// `NotFound` when no entry with `id` is currently `"pending"`.
pub async fn respond_to_approval(
    &self,
    id: &str,
    approved: bool,
    reason: Option<String>,
) -> Result<()> {
    let mut approvals = self.pending_approvals.lock().await;
    let entry = approvals.iter_mut().find(|a| a.id == id && a.status == "pending")
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id)))?;
    entry.status = if approved { "approved".to_string() } else { "rejected".to_string() };
    // `reason` is stored in reject_reason regardless of `approved`.
    if let Some(r) = reason {
        entry.reject_reason = Some(r);
    }
    if approved {
        let hand_id = entry.hand_id.clone();
        let input = entry.input.clone();
        drop(approvals); // Release lock before async hand execution
        // Execute the hand in background with HandRun tracking.
        // Clone all Arc handles so the spawned task is 'static.
        let hands = self.hands.clone();
        let approvals = self.pending_approvals.clone();
        let memory = self.memory.clone();
        let running_hand_runs = self.running_hand_runs.clone();
        let id_owned = id.to_string();
        tokio::spawn(async move {
            // Create HandRun record for tracking
            let run_id = HandRunId::new();
            let now = chrono::Utc::now().to_rfc3339();
            let mut run = HandRun {
                id: run_id,
                hand_name: hand_id.clone(),
                trigger_source: TriggerSource::Manual,
                params: input.clone(),
                status: HandRunStatus::Pending,
                result: None,
                error: None,
                duration_ms: None,
                created_at: now.clone(),
                started_at: None,
                completed_at: None,
            };
            // Persistence failures are logged but do not abort execution.
            let _ = memory.save_hand_run(&run).await.map_err(|e| {
                tracing::warn!("[Approval] Failed to save hand run: {}", e);
            });
            run.status = HandRunStatus::Running;
            run.started_at = Some(chrono::Utc::now().to_rfc3339());
            let _ = memory.update_hand_run(&run).await.map_err(|e| {
                tracing::warn!("[Approval] Failed to update hand run (running): {}", e);
            });
            // Register cancellation flag so cancel_hand_run can find this run.
            // NOTE(review): unlike execute_hand, this task never reads the
            // flag after execution, so a cancel request during an approved
            // run is recorded but the run still finishes as
            // Completed/Failed — confirm whether that is intended.
            let cancel_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
            running_hand_runs.insert(run.id, cancel_flag.clone());
            let context = HandContext::default();
            let start = std::time::Instant::now();
            let result = hands.execute(&hand_id, &context, input).await;
            let duration = start.elapsed();
            // Remove from running map
            running_hand_runs.remove(&run.id);
            // Update HandRun with result
            let completed_at = chrono::Utc::now().to_rfc3339();
            match &result {
                Ok(res) => {
                    run.status = HandRunStatus::Completed;
                    run.result = Some(res.output.clone());
                    run.error = res.error.clone();
                }
                Err(e) => {
                    run.status = HandRunStatus::Failed;
                    run.error = Some(e.to_string());
                }
            }
            run.duration_ms = Some(duration.as_millis() as u64);
            run.completed_at = Some(completed_at);
            let _ = memory.update_hand_run(&run).await.map_err(|e| {
                tracing::warn!("[Approval] Failed to update hand run (completed): {}", e);
            });
            // Update approval status based on execution result; on failure
            // the error text is folded into the entry's input object.
            let mut approvals = approvals.lock().await;
            if let Some(entry) = approvals.iter_mut().find(|a| a.id == id_owned) {
                match result {
                    Ok(_) => entry.status = "completed".to_string(),
                    Err(e) => {
                        entry.status = "failed".to_string();
                        if let Some(obj) = entry.input.as_object_mut() {
                            obj.insert("error".to_string(), Value::String(format!("{}", e)));
                        }
                    }
                }
            }
        });
    }
    Ok(())
}
/// Cancel a pending approval
///
/// Flips a still-pending entry to `"cancelled"`; entries in any other
/// state are treated as not found.
pub async fn cancel_approval(&self, id: &str) -> Result<()> {
    let mut approvals = self.pending_approvals.lock().await;
    match approvals.iter_mut().find(|a| a.id == id && a.status == "pending") {
        Some(entry) => {
            entry.status = "cancelled".to_string();
            Ok(())
        }
        None => Err(zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id))),
    }
}
}

View File

@@ -0,0 +1,209 @@
//! Hand execution and run tracking
use std::sync::Arc;
use zclaw_types::{Result, HandRun, HandRunId, HandRunStatus, HandRunFilter, TriggerSource};
use zclaw_hands::{HandContext, HandResult};
use super::Kernel;
impl Kernel {
/// Get the hands registry
///
/// Borrow of the shared `HandRegistry`; clone the `Arc` if ownership
/// is needed.
pub fn hands(&self) -> &Arc<zclaw_hands::HandRegistry> {
    &self.hands
}
/// List all registered hands
pub async fn list_hands(&self) -> Vec<zclaw_hands::HandConfig> {
    self.hands.list().await
}
/// Execute a hand with the given input, tracking the run
///
/// Convenience wrapper that records the run as manually triggered.
/// Returns the hand's result together with the `HandRunId` of the
/// persisted run record so callers can poll its status later.
///
/// # Errors
/// Propagates memory-store errors, hand execution errors, and an
/// `Internal("Hand execution cancelled")` error when the run was
/// cancelled mid-flight.
pub async fn execute_hand(
    &self,
    hand_id: &str,
    input: serde_json::Value,
) -> Result<(HandResult, HandRunId)> {
    // Delegate to the canonical implementation; this method previously
    // duplicated the full Pending→Running→Completed/Failed/Cancelled
    // state machine verbatim.
    self.execute_hand_with_source(hand_id, input, TriggerSource::Manual).await
}
/// Execute a hand with a specific trigger source (for scheduled/event triggers)
///
/// Drives the full run lifecycle: persists a `Pending` record,
/// transitions it to `Running`, registers a cancellation flag in
/// `running_hand_runs`, executes the hand, then persists the terminal
/// state (`Completed`, `Failed`, or `Cancelled`) with duration and
/// timestamps.
///
/// # Errors
/// * Memory-store errors from any persistence step.
/// * `Internal("Hand execution cancelled")` when `cancel_hand_run`
///   flipped the flag while the hand was executing.
/// * The hand's own error on failure (after the run is recorded as
///   `Failed`).
pub async fn execute_hand_with_source(
    &self,
    hand_id: &str,
    input: serde_json::Value,
    trigger_source: TriggerSource,
) -> Result<(HandResult, HandRunId)> {
    let run_id = HandRunId::new();
    let now = chrono::Utc::now().to_rfc3339();
    let mut run = HandRun {
        id: run_id,
        hand_name: hand_id.to_string(),
        trigger_source,
        params: input.clone(),
        status: HandRunStatus::Pending,
        result: None,
        error: None,
        duration_ms: None,
        created_at: now,
        started_at: None,
        completed_at: None,
    };
    self.memory.save_hand_run(&run).await?;
    run.status = HandRunStatus::Running;
    run.started_at = Some(chrono::Utc::now().to_rfc3339());
    self.memory.update_hand_run(&run).await?;
    // Register the flag BEFORE executing so cancel_hand_run can see the run.
    let cancel_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
    self.running_hand_runs.insert(run_id, cancel_flag.clone());
    let context = HandContext::default();
    let start = std::time::Instant::now();
    let hand_result = self.hands.execute(hand_id, &context, input).await;
    let duration = start.elapsed();
    // Check if cancelled during execution — the hand has already run to
    // completion here; cancellation only affects the recorded status.
    if cancel_flag.load(std::sync::atomic::Ordering::Relaxed) {
        run.status = HandRunStatus::Cancelled;
        run.completed_at = Some(chrono::Utc::now().to_rfc3339());
        run.duration_ms = Some(duration.as_millis() as u64);
        self.memory.update_hand_run(&run).await?;
        self.running_hand_runs.remove(&run_id);
        return Err(zclaw_types::ZclawError::Internal("Hand execution cancelled".to_string()));
    }
    self.running_hand_runs.remove(&run_id);
    let completed_at = chrono::Utc::now().to_rfc3339();
    match &hand_result {
        Ok(res) => {
            run.status = HandRunStatus::Completed;
            run.result = Some(res.output.clone());
            // A hand can complete while still reporting a soft error.
            run.error = res.error.clone();
        }
        Err(e) => {
            run.status = HandRunStatus::Failed;
            run.error = Some(e.to_string());
        }
    }
    run.duration_ms = Some(duration.as_millis() as u64);
    run.completed_at = Some(completed_at);
    self.memory.update_hand_run(&run).await?;
    hand_result.map(|res| (res, run_id))
}
// ============================================================
// Hand Run Tracking
// ============================================================
/// Get a hand run by ID
///
/// Thin delegation to the memory store; `Ok(None)` when no such run exists.
pub async fn get_hand_run(&self, id: &HandRunId) -> Result<Option<HandRun>> {
    self.memory.get_hand_run(id).await
}
/// List hand runs with filter
pub async fn list_hand_runs(&self, filter: &HandRunFilter) -> Result<Vec<HandRun>> {
    self.memory.list_hand_runs(filter).await
}
/// Count hand runs matching filter
pub async fn count_hand_runs(&self, filter: &HandRunFilter) -> Result<u32> {
    self.memory.count_hand_runs(filter).await
}
/// Cancel a running hand execution
///
/// In-flight runs are cancelled cooperatively via an atomic flag; runs
/// that are still `Pending` in storage are marked `Cancelled` directly.
pub async fn cancel_hand_run(&self, id: &HandRunId) -> Result<()> {
    // Fast path: the run is currently executing — flip its flag and let
    // the executor persist the Cancelled status when it observes it.
    if let Some((_, flag)) = self.running_hand_runs.remove(id) {
        flag.store(true, std::sync::atomic::Ordering::Relaxed);
        return Ok(());
    }
    // Slow path: not running. Only a still-Pending record can be cancelled.
    match self.memory.get_hand_run(id).await? {
        Some(run) if run.status == HandRunStatus::Pending => {
            let mut updated = run;
            updated.status = HandRunStatus::Cancelled;
            updated.completed_at = Some(chrono::Utc::now().to_rfc3339());
            self.memory.update_hand_run(&updated).await?;
            Ok(())
        }
        Some(run) => Err(zclaw_types::ZclawError::InvalidInput(
            format!("Cannot cancel hand run {} with status {}", id, run.status)
        )),
        None => Err(zclaw_types::ZclawError::NotFound(
            format!("Hand run {} not found", id)
        )),
    }
}
}

View File

@@ -0,0 +1,314 @@
//! Message sending (non-streaming, streaming, system prompt building)
use tokio::sync::mpsc;
use zclaw_types::{AgentId, Result};
/// Chat mode configuration passed from the frontend.
/// Controls thinking, reasoning, and plan mode behavior.
#[derive(Debug, Clone)]
pub struct ChatModeConfig {
    // Enables extended thinking on the agent loop when Some(true);
    // None/Some(false) leaves it off.
    pub thinking_enabled: Option<bool>,
    // Reasoning effort hint, forwarded verbatim to the agent loop.
    // Presumably provider-specific values like "low"/"medium"/"high" —
    // TODO confirm the accepted set against AgentLoop::with_reasoning_effort.
    pub reasoning_effort: Option<String>,
    // Enables plan mode on the agent loop when Some(true).
    pub plan_mode: Option<bool>,
}
use zclaw_runtime::{AgentLoop, tool::builtin::PathValidator};
use super::Kernel;
use super::super::MessageResponse;
impl Kernel {
/// Send a message to an agent
///
/// Non-streaming convenience wrapper: same as
/// [`Self::send_message_with_chat_mode`] with no chat-mode overrides.
pub async fn send_message(
    &self,
    agent_id: &AgentId,
    message: String,
) -> Result<MessageResponse> {
    self.send_message_with_chat_mode(agent_id, message, None).await
}
/// Send a message to an agent with optional chat mode configuration
///
/// Non-streaming turn: looks up the agent, creates a session, assembles
/// an `AgentLoop` from the kernel's current model settings (agent-level
/// max_tokens / temperature / compaction overrides win), injects
/// workspace path validation, middleware, chat-mode flags and the
/// skill-aware system prompt, then runs one full loop and returns the
/// final response with token usage.
///
/// # Errors
/// `NotFound` when the agent is unknown; session, memory, and loop
/// errors are propagated.
pub async fn send_message_with_chat_mode(
    &self,
    agent_id: &AgentId,
    message: String,
    chat_mode: Option<ChatModeConfig>,
) -> Result<MessageResponse> {
    let agent_config = self.registry.get(agent_id)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
    // Create or get session
    // NOTE(review): this path always creates a new session — unlike the
    // streaming path there is no session-id reuse; confirm intended.
    let session_id = self.memory.create_session(agent_id).await?;
    // Always use Kernel's current model configuration
    // This ensures user's "模型与 API" settings are respected
    let model = self.config.model().to_string();
    // Create agent loop with model configuration
    let tools = self.create_tool_registry();
    let mut loop_runner = AgentLoop::new(
        *agent_id,
        self.driver.clone(),
        tools,
        self.memory.clone(),
    )
    .with_model(&model)
    .with_skill_executor(self.skill_executor.clone())
    .with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
    .with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
    .with_compaction_threshold(
        agent_config.compaction_threshold
            .map(|t| t as usize)
            .unwrap_or_else(|| self.config.compaction_threshold()),
    );
    // Set path validator from agent's workspace directory (if configured)
    if let Some(ref workspace) = agent_config.workspace {
        let path_validator = PathValidator::new().with_workspace(workspace.clone());
        tracing::info!(
            "[Kernel] Setting path_validator with workspace: {} for agent {}",
            workspace.display(),
            agent_id
        );
        loop_runner = loop_runner.with_path_validator(path_validator);
    }
    // Inject middleware chain if available
    if let Some(chain) = self.create_middleware_chain() {
        loop_runner = loop_runner.with_middleware_chain(chain);
    }
    // Apply chat mode configuration (thinking/reasoning/plan mode)
    if let Some(ref mode) = chat_mode {
        if mode.thinking_enabled.unwrap_or(false) {
            loop_runner = loop_runner.with_thinking_enabled(true);
        }
        if let Some(ref effort) = mode.reasoning_effort {
            loop_runner = loop_runner.with_reasoning_effort(effort.clone());
        }
        if mode.plan_mode.unwrap_or(false) {
            loop_runner = loop_runner.with_plan_mode(true);
        }
    }
    // Build system prompt with skill information injected
    let system_prompt = self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await;
    let loop_runner = loop_runner.with_system_prompt(&system_prompt);
    // Run the loop
    let result = loop_runner.run(session_id, message).await?;
    // Track message count
    self.registry.increment_message_count(agent_id);
    Ok(MessageResponse {
        content: result.response,
        input_tokens: result.input_tokens,
        output_tokens: result.output_tokens,
    })
}
/// Send a message with streaming
///
/// Streaming convenience wrapper: same as
/// [`Self::send_message_stream_with_prompt`] with no prompt override,
/// no session reuse, and no chat-mode overrides.
pub async fn send_message_stream(
    &self,
    agent_id: &AgentId,
    message: String,
) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
    self.send_message_stream_with_prompt(agent_id, message, None, None, None).await
}
/// Send a message with streaming, optional system prompt, optional session reuse,
/// and optional chat mode configuration (thinking/reasoning/plan mode).
///
/// Mirrors [`Self::send_message_with_chat_mode`] but returns a channel
/// of `LoopEvent`s instead of a final response, honours a caller-supplied
/// session id (so conversation history persists across turns), and allows
/// the system prompt to be overridden entirely.
///
/// # Errors
/// `NotFound` when the agent is unknown; session and loop-startup errors
/// are propagated.
pub async fn send_message_stream_with_prompt(
    &self,
    agent_id: &AgentId,
    message: String,
    system_prompt_override: Option<String>,
    session_id_override: Option<zclaw_types::SessionId>,
    chat_mode: Option<ChatModeConfig>,
) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
    let agent_config = self.registry.get(agent_id)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
    // Reuse existing session or create new one
    let session_id = match session_id_override {
        Some(id) => {
            // Use get_or_create to ensure the frontend's session ID is persisted.
            // This is the critical bridge: without it, the kernel generates a
            // different UUID each turn, so conversation history is never found.
            tracing::debug!("Reusing frontend session ID: {}", id);
            self.memory.get_or_create_session(&id, agent_id).await?
        }
        None => self.memory.create_session(agent_id).await?,
    };
    // Always use Kernel's current model configuration
    // This ensures user's "模型与 API" settings are respected
    let model = self.config.model().to_string();
    // Create agent loop with model configuration
    let tools = self.create_tool_registry();
    let mut loop_runner = AgentLoop::new(
        *agent_id,
        self.driver.clone(),
        tools,
        self.memory.clone(),
    )
    .with_model(&model)
    .with_skill_executor(self.skill_executor.clone())
    .with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
    .with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
    .with_compaction_threshold(
        agent_config.compaction_threshold
            .map(|t| t as usize)
            .unwrap_or_else(|| self.config.compaction_threshold()),
    );
    // Set path validator from agent's workspace directory (if configured)
    // This enables file_read / file_write tools to access the workspace
    if let Some(ref workspace) = agent_config.workspace {
        let path_validator = PathValidator::new().with_workspace(workspace.clone());
        tracing::info!(
            "[Kernel] Setting path_validator with workspace: {} for agent {}",
            workspace.display(),
            agent_id
        );
        loop_runner = loop_runner.with_path_validator(path_validator);
    }
    // Inject middleware chain if available
    if let Some(chain) = self.create_middleware_chain() {
        loop_runner = loop_runner.with_middleware_chain(chain);
    }
    // Apply chat mode configuration (thinking/reasoning/plan mode from frontend)
    if let Some(ref mode) = chat_mode {
        if mode.thinking_enabled.unwrap_or(false) {
            loop_runner = loop_runner.with_thinking_enabled(true);
        }
        if let Some(ref effort) = mode.reasoning_effort {
            loop_runner = loop_runner.with_reasoning_effort(effort.clone());
        }
        if mode.plan_mode.unwrap_or(false) {
            loop_runner = loop_runner.with_plan_mode(true);
        }
    }
    // Use external prompt if provided, otherwise build default
    let system_prompt = match system_prompt_override {
        Some(prompt) => prompt,
        None => self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await,
    };
    let loop_runner = loop_runner.with_system_prompt(&system_prompt);
    // Run with streaming. The count is incremented before the loop
    // actually runs — events still stream after this method returns.
    self.registry.increment_message_count(agent_id);
    loop_runner.run_streaming(session_id, message).await
}
/// Build a system prompt with skill information injected
///
/// Starts from `base_prompt` (or a generic assistant prompt when absent)
/// and, when any skills are registered, appends a categorized skill
/// catalog plus guidance telling the model to invoke `execute_skill`
/// autonomously based on user intent.
pub(super) async fn build_system_prompt_with_skills(&self, base_prompt: Option<&String>) -> String {
    // Get skill list asynchronously
    let skills = self.skills.list().await;
    // `.cloned()` replaces `.map(|p| p.clone())` (clippy::map_clone)
    let mut prompt = base_prompt
        .cloned()
        .unwrap_or_else(|| "You are a helpful AI assistant.".to_string());
    // Inject skill information with categories
    if !skills.is_empty() {
        prompt.push_str("\n\n## Available Skills\n\n");
        prompt.push_str("You have access to specialized skills. Analyze user intent and autonomously call `execute_skill` with the appropriate skill_id.\n\n");
        // Group skills by category based on their ID patterns
        let categories = self.categorize_skills(&skills);
        for (category, category_skills) in categories {
            prompt.push_str(&format!("### {}\n", category));
            for skill in category_skills {
                prompt.push_str(&format!(
                    "- **{}**: {}",
                    skill.id.as_str(),
                    skill.description
                ));
                prompt.push('\n');
            }
            prompt.push('\n');
        }
        prompt.push_str("### When to use skills:\n");
        prompt.push_str("- **IMPORTANT**: You should autonomously decide when to use skills based on your understanding of the user's intent.\n");
        prompt.push_str("- Do not wait for explicit skill names - recognize the need and act.\n");
        prompt.push_str("- Match user's request to the most appropriate skill's domain.\n");
        prompt.push_str("- If multiple skills could apply, choose the most specialized one.\n\n");
        prompt.push_str("### Example:\n");
        prompt.push_str("User: \"分析腾讯财报\" → Intent: Financial analysis → Call: execute_skill(\"finance-tracker\", {...})\n");
    }
    prompt
}
/// Categorize skills into logical groups
///
/// Priority:
/// 1. Use skill's `category` field if defined in SKILL.md
/// 2. Fall back to pattern matching for backward compatibility
///
/// Returns `(category_name, skills)` pairs sorted by a fixed display
/// order; unknown categories (including explicit ones from SKILL.md)
/// sort last alongside "其他".
pub(super) fn categorize_skills<'a>(&self, skills: &'a [zclaw_skills::SkillManifest]) -> Vec<(String, Vec<&'a zclaw_skills::SkillManifest>)> {
    let mut categories: std::collections::HashMap<String, Vec<&zclaw_skills::SkillManifest>> = std::collections::HashMap::new();
    // Fallback category patterns for skills without explicit category.
    // Matching is substring-based (see below), so a skill id merely
    // *containing* one of these strings is enough to be grouped.
    let fallback_patterns = [
        ("开发工程", vec!["senior-developer", "frontend-developer", "backend-architect", "ai-engineer", "devops-automator", "rapid-prototyper", "lsp-index-engineer"]),
        ("测试质量", vec!["api-tester", "evidence-collector", "reality-checker", "performance-benchmarker", "test-results-analyzer", "accessibility-auditor", "code-review"]),
        ("安全合规", vec!["security-engineer", "legal-compliance-checker", "agentic-identity-trust"]),
        ("数据分析", vec!["analytics-reporter", "finance-tracker", "data-analysis", "sales-data-extraction-agent", "data-consolidation-agent", "report-distribution-agent"]),
        ("项目管理", vec!["senior-pm", "project-shepherd", "sprint-prioritizer", "experiment-tracker", "feedback-synthesizer", "trend-researcher", "agents-orchestrator"]),
        ("设计UX", vec!["ui-designer", "ux-architect", "ux-researcher", "visual-storyteller", "image-prompt-engineer", "whimsy-injector", "brand-guardian"]),
        ("内容营销", vec!["content-creator", "chinese-writing", "executive-summary-generator", "social-media-strategist"]),
        ("社交平台", vec!["twitter-engager", "instagram-curator", "tiktok-strategist", "reddit-community-builder", "zhihu-strategist", "xiaohongshu-specialist", "wechat-official-account", "growth-hacker", "app-store-optimizer"]),
        ("运营支持", vec!["studio-operations", "studio-producer", "support-responder", "workflow-optimizer", "infrastructure-maintainer", "tool-evaluator"]),
        ("XR/空间计算", vec!["visionos-spatial-engineer", "macos-spatial-metal-engineer", "xr-immersive-developer", "xr-interface-architect", "xr-cockpit-interaction-specialist", "terminal-integration-specialist"]),
        ("基础工具", vec!["web-search", "file-operations", "shell-command", "git", "translation", "feishu-docs"]),
    ];
    // Categorize each skill
    for skill in skills {
        // Priority 1: Use skill's explicit category
        if let Some(ref category) = skill.category {
            if !category.is_empty() {
                categories.entry(category.clone()).or_default().push(skill);
                continue;
            }
        }
        // Priority 2: Fallback to pattern matching
        let skill_id = skill.id.as_str();
        let mut categorized = false;
        for (category, patterns) in &fallback_patterns {
            // NOTE: `contains(p)` already covers exact equality, so the
            // `*p == skill_id` arm is redundant but harmless.
            if patterns.iter().any(|p| skill_id.contains(p) || *p == skill_id) {
                categories.entry(category.to_string()).or_default().push(skill);
                categorized = true;
                break;
            }
        }
        // Put uncategorized skills in "其他"
        if !categorized {
            categories.entry("其他".to_string()).or_default().push(skill);
        }
    }
    // Convert to ordered vector
    let mut result: Vec<(String, Vec<_>)> = categories.into_iter().collect();
    result.sort_by(|a, b| {
        // Sort by predefined order; anything unknown maps to index 99
        // and therefore sorts last (ties keep sort_by's stable order).
        let order = ["开发工程", "测试质量", "安全合规", "数据分析", "项目管理", "设计UX", "内容营销", "社交平台", "运营支持", "XR/空间计算", "基础工具", "其他"];
        let a_idx = order.iter().position(|&x| x == a.0).unwrap_or(99);
        let b_idx = order.iter().position(|&x| x == b.0).unwrap_or(99);
        a_idx.cmp(&b_idx)
    });
    result
}
}

View File

@@ -0,0 +1,345 @@
//! Kernel - central coordinator
mod adapters;
mod agents;
mod messaging;
mod skills;
mod hands;
mod triggers;
mod approvals;
#[cfg(feature = "multi-agent")]
mod a2a;
use std::sync::Arc;
use tokio::sync::{broadcast, Mutex};
use zclaw_types::{Event, Result};
#[cfg(feature = "multi-agent")]
use zclaw_types::AgentId;
#[cfg(feature = "multi-agent")]
use zclaw_protocols::A2aRouter;
use crate::registry::AgentRegistry;
use crate::capabilities::CapabilityManager;
use crate::events::EventBus;
use crate::config::KernelConfig;
use zclaw_memory::MemoryStore;
use zclaw_runtime::{LlmDriver, ToolRegistry, tool::SkillExecutor};
use zclaw_skills::SkillRegistry;
use zclaw_hands::{HandRegistry, hands::{BrowserHand, SlideshowHand, SpeechHand, QuizHand, WhiteboardHand, ResearcherHand, CollectorHand, ClipHand, TwitterHand, quiz::LlmQuizGenerator}};
pub use adapters::KernelSkillExecutor;
pub use messaging::ChatModeConfig;
/// The ZCLAW Kernel
///
/// Central coordinator owning every subsystem: agent registry,
/// capability validation, event bus, persistent memory, the LLM driver,
/// skill/hand registries, trigger management, the approval queue, and
/// (behind the `multi-agent` feature) the A2A router.
pub struct Kernel {
    /// Boot-time configuration (model, token limits, directories).
    config: KernelConfig,
    /// In-memory registry of live agents (restored from memory at boot).
    registry: AgentRegistry,
    /// Validates agent capability declarations.
    capabilities: CapabilityManager,
    /// Broadcast bus for kernel lifecycle/agent events.
    events: EventBus,
    /// Persistent store for agents, sessions, and hand runs.
    memory: Arc<MemoryStore>,
    /// Primary LLM driver used by agent loops and quiz generation.
    driver: Arc<dyn LlmDriver>,
    /// LLM completion adapter exposed to the skill system.
    llm_completer: Arc<dyn zclaw_skills::LlmCompleter>,
    /// Registry of discovered skills (scanned from `skills_dir`).
    skills: Arc<SkillRegistry>,
    /// Executes skills on behalf of agent loops.
    skill_executor: Arc<KernelSkillExecutor>,
    /// Registry of built-in and user hands.
    hands: Arc<HandRegistry>,
    /// Manages hand trigger registration and dispatch.
    trigger_manager: crate::trigger_manager::TriggerManager,
    /// Queue of approvals for hands requiring human sign-off.
    pending_approvals: Arc<Mutex<Vec<ApprovalEntry>>>,
    /// Running hand runs that can be cancelled (run_id -> cancelled flag)
    running_hand_runs: Arc<dashmap::DashMap<zclaw_types::HandRunId, Arc<std::sync::atomic::AtomicBool>>>,
    /// Shared memory storage backend for Growth system
    viking: Arc<zclaw_runtime::VikingAdapter>,
    /// Optional LLM driver for memory extraction (set by Tauri desktop layer)
    extraction_driver: Option<Arc<dyn zclaw_runtime::LlmDriverForExtraction>>,
    /// A2A router for inter-agent messaging (gated by multi-agent feature)
    #[cfg(feature = "multi-agent")]
    a2a_router: Arc<A2aRouter>,
    /// Per-agent A2A inbox receivers (supports re-queuing non-matching messages)
    #[cfg(feature = "multi-agent")]
    a2a_inboxes: Arc<dashmap::DashMap<AgentId, Arc<Mutex<adapters::AgentInbox>>>>,
}
impl Kernel {
/// Boot the kernel with the given configuration
///
/// Initialization order: memory store → LLM driver → core subsystems →
/// skill registry (scanning `skills_dir` when set) → built-in hands →
/// skill executor / LLM completer → trigger manager → Growth storage →
/// persisted-agent restore → optional A2A router.
///
/// The Growth `VikingAdapter` starts as an in-memory stub; the desktop
/// layer is expected to swap in persistent storage via
/// [`Self::set_viking`] afterwards.
///
/// # Errors
/// Propagates memory-store, driver-creation, skill-scan, and
/// agent-restore failures.
pub async fn boot(config: KernelConfig) -> Result<Self> {
    // Initialize memory store
    let memory = Arc::new(MemoryStore::new(&config.database_url).await?);
    // Initialize driver based on config
    let driver = config.create_driver()?;
    // Initialize subsystems
    let registry = AgentRegistry::new();
    let capabilities = CapabilityManager::new();
    let events = EventBus::new();
    // Initialize skill registry
    let skills = Arc::new(SkillRegistry::new());
    // Scan skills directory if configured
    if let Some(ref skills_dir) = config.skills_dir {
        if skills_dir.exists() {
            skills.add_skill_dir(skills_dir.clone()).await?;
        }
    }
    // Initialize hand registry with built-in hands
    let hands = Arc::new(HandRegistry::new());
    // Quiz generation reuses the main driver with the configured model.
    let quiz_model = config.model().to_string();
    let quiz_generator = Arc::new(LlmQuizGenerator::new(driver.clone(), quiz_model));
    hands.register(Arc::new(BrowserHand::new())).await;
    hands.register(Arc::new(SlideshowHand::new())).await;
    hands.register(Arc::new(SpeechHand::new())).await;
    hands.register(Arc::new(QuizHand::with_generator(quiz_generator))).await;
    hands.register(Arc::new(WhiteboardHand::new())).await;
    hands.register(Arc::new(ResearcherHand::new())).await;
    hands.register(Arc::new(CollectorHand::new())).await;
    hands.register(Arc::new(ClipHand::new())).await;
    hands.register(Arc::new(TwitterHand::new())).await;
    // Create skill executor
    let skill_executor = Arc::new(KernelSkillExecutor::new(skills.clone(), driver.clone()));
    // Create LLM completer for skill system (shared with skill_executor)
    let llm_completer: Arc<dyn zclaw_skills::LlmCompleter> =
        Arc::new(adapters::LlmDriverAdapter {
            driver: driver.clone(),
            max_tokens: config.max_tokens(),
            temperature: config.temperature(),
        });
    // Initialize trigger manager
    let trigger_manager = crate::trigger_manager::TriggerManager::new(hands.clone());
    // Initialize Growth system — shared VikingAdapter for memory storage
    let viking = Arc::new(zclaw_runtime::VikingAdapter::in_memory());
    // Restore persisted agents
    let persisted = memory.list_agents().await?;
    for agent in persisted {
        registry.register(agent);
    }
    // Initialize A2A router for multi-agent support
    #[cfg(feature = "multi-agent")]
    let a2a_router = {
        // The router itself is addressed by a fresh kernel-level agent id.
        let kernel_agent_id = AgentId::new();
        Arc::new(A2aRouter::new(kernel_agent_id))
    };
    Ok(Self {
        config,
        registry,
        capabilities,
        events,
        memory,
        driver,
        llm_completer,
        skills,
        skill_executor,
        hands,
        trigger_manager,
        pending_approvals: Arc::new(Mutex::new(Vec::new())),
        running_hand_runs: Arc::new(dashmap::DashMap::new()),
        viking,
        extraction_driver: None,
        #[cfg(feature = "multi-agent")]
        a2a_router,
        #[cfg(feature = "multi-agent")]
        a2a_inboxes: Arc::new(dashmap::DashMap::new()),
    })
}
/// Create a tool registry with built-in tools
///
/// Builds a fresh registry per call: all built-in tools plus a
/// `TaskTool` wired to the kernel's driver, memory, and current model
/// for sub-agent delegation.
pub(crate) fn create_tool_registry(&self) -> ToolRegistry {
    let mut tools = ToolRegistry::new();
    zclaw_runtime::tool::builtin::register_builtin_tools(&mut tools);
    // Register TaskTool with driver and memory for sub-agent delegation
    let task_tool = zclaw_runtime::tool::builtin::TaskTool::new(
        self.driver.clone(),
        self.memory.clone(),
        self.config.model(),
    );
    tools.register(Box::new(task_tool));
    tools
}
/// Create the middleware chain for the agent loop.
///
/// When middleware is configured, cross-cutting concerns (compaction, loop guard,
/// token calibration, etc.) are delegated to the chain. When no middleware is
/// registered, the legacy inline path in `AgentLoop` is used instead.
pub(crate) fn create_middleware_chain(&self) -> Option<zclaw_runtime::middleware::MiddlewareChain> {
let mut chain = zclaw_runtime::middleware::MiddlewareChain::new();
// Growth integration — shared VikingAdapter for memory middleware & compaction
let mut growth = zclaw_runtime::GrowthIntegration::new(self.viking.clone());
if let Some(ref driver) = self.extraction_driver {
growth = growth.with_llm_driver(driver.clone());
}
// Compaction middleware — only register when threshold > 0
let threshold = self.config.compaction_threshold();
if threshold > 0 {
use std::sync::Arc;
let mut growth_for_compaction = zclaw_runtime::GrowthIntegration::new(self.viking.clone());
if let Some(ref driver) = self.extraction_driver {
growth_for_compaction = growth_for_compaction.with_llm_driver(driver.clone());
}
let mw = zclaw_runtime::middleware::compaction::CompactionMiddleware::new(
threshold,
zclaw_runtime::CompactionConfig::default(),
Some(self.driver.clone()),
Some(growth_for_compaction),
);
chain.register(Arc::new(mw));
}
// Memory middleware — auto-extract memories after conversations
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::memory::MemoryMiddleware::new(growth);
chain.register(Arc::new(mw));
}
// Loop guard middleware
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::loop_guard::LoopGuardMiddleware::with_defaults();
chain.register(Arc::new(mw));
}
// Token calibration middleware
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::token_calibration::TokenCalibrationMiddleware::new();
chain.register(Arc::new(mw));
}
// Skill index middleware — inject lightweight index instead of full descriptions
{
use std::sync::Arc;
let entries = self.skill_executor.list_skill_index();
if !entries.is_empty() {
let mw = zclaw_runtime::middleware::skill_index::SkillIndexMiddleware::new(entries);
chain.register(Arc::new(mw));
}
}
// Title middleware — auto-generate conversation titles after first exchange
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::title::TitleMiddleware::new();
chain.register(Arc::new(mw));
}
// Dangling tool repair — patch missing tool results before LLM calls
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::dangling_tool::DanglingToolMiddleware::new();
chain.register(Arc::new(mw));
}
// Tool error middleware — format tool errors for LLM recovery
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::tool_error::ToolErrorMiddleware::new();
chain.register(Arc::new(mw));
}
// Tool output guard — post-execution output sanitization checks
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::tool_output_guard::ToolOutputGuardMiddleware::new();
chain.register(Arc::new(mw));
}
// Guardrail middleware — safety rules for tool calls
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::guardrail::GuardrailMiddleware::new(true)
.with_builtin_rules();
chain.register(Arc::new(mw));
}
// Sub-agent limit — cap concurrent sub-agent spawning
{
use std::sync::Arc;
let mw = zclaw_runtime::middleware::subagent_limit::SubagentLimitMiddleware::new();
chain.register(Arc::new(mw));
}
// Only return Some if we actually registered middleware
if chain.is_empty() {
None
} else {
tracing::info!("[Kernel] Middleware chain created with {} middlewares", chain.len());
Some(chain)
}
}
/// Subscribe to events
pub fn subscribe(&self) -> broadcast::Receiver<Event> {
self.events.subscribe()
}
/// Shutdown the kernel
pub async fn shutdown(&self) -> Result<()> {
self.events.publish(Event::KernelShutdown);
Ok(())
}
/// Get the kernel configuration
pub fn config(&self) -> &KernelConfig {
&self.config
}
/// Get the LLM driver
pub fn driver(&self) -> Arc<dyn LlmDriver> {
self.driver.clone()
}
/// Replace the default in-memory VikingAdapter with a persistent one.
///
/// Called by the Tauri desktop layer after `Kernel::boot()` to bridge
/// the kernel's Growth system to the same SqliteStorage used by
/// viking_commands and intelligence_hooks.
pub fn set_viking(&mut self, viking: Arc<zclaw_runtime::VikingAdapter>) {
tracing::info!("[Kernel] Replacing in-memory VikingAdapter with persistent storage");
self.viking = viking;
}
/// Handle to the shared VikingAdapter.
///
/// Returns a new `Arc` reference to the same adapter instance (refcount
/// bump only; no data is copied).
pub fn viking(&self) -> Arc<zclaw_runtime::VikingAdapter> {
    Arc::clone(&self.viking)
}
/// Set the LLM extraction driver for the Growth system.
///
/// `MemoryMiddleware` needs this driver to distill memories from
/// conversations via LLM analysis; when it is absent, extraction is
/// silently skipped.
pub fn set_extraction_driver(&mut self, driver: Arc<dyn zclaw_runtime::LlmDriverForExtraction>) {
    self.extraction_driver = Some(driver);
    tracing::info!("[Kernel] Extraction driver configured for Growth system");
}
}
/// A human-approval request tracked by the kernel.
///
/// NOTE(review): field semantics below are inferred from names — confirm
/// against the approval workflow that produces these entries.
#[derive(Debug, Clone)]
pub struct ApprovalEntry {
// Unique id of this approval request.
pub id: String,
// Id of the hand whose action awaits approval — TODO confirm.
pub hand_id: String,
// Current state (e.g. pending/approved/rejected — presumably); stringly-typed.
pub status: String,
// When the request was created (UTC, per the `chrono::Utc` type).
pub created_at: chrono::DateTime<chrono::Utc>,
// The JSON input the approval concerns.
pub input: serde_json::Value,
// Reason supplied when the request was rejected, if any.
pub reject_reason: Option<String>,
}
/// Response from sending a message
#[derive(Debug, Clone)]
pub struct MessageResponse {
// The assistant's reply text.
pub content: String,
// Prompt-side token count — presumably as reported by the LLM driver.
pub input_tokens: u32,
// Completion-side token count — presumably as reported by the LLM driver.
pub output_tokens: u32,
}

View File

@@ -0,0 +1,79 @@
//! Skills management methods
use std::sync::Arc;
use zclaw_types::Result;
use super::Kernel;
impl Kernel {
/// Get the skills registry
pub fn skills(&self) -> &Arc<zclaw_skills::SkillRegistry> {
&self.skills
}
/// List all discovered skills
pub async fn list_skills(&self) -> Vec<zclaw_skills::SkillManifest> {
self.skills.list().await
}
/// Refresh skills from a directory
pub async fn refresh_skills(&self, dir: Option<std::path::PathBuf>) -> Result<()> {
if let Some(path) = dir {
self.skills.add_skill_dir(path).await?;
} else if let Some(ref skills_dir) = self.config.skills_dir {
self.skills.add_skill_dir(skills_dir.clone()).await?;
}
Ok(())
}
/// Get the configured skills directory
pub fn skills_dir(&self) -> Option<&std::path::PathBuf> {
self.config.skills_dir.as_ref()
}
/// Create a new skill in the skills directory
pub async fn create_skill(&self, manifest: zclaw_skills::SkillManifest) -> Result<()> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.create_skill(skills_dir, manifest).await
}
/// Update an existing skill
pub async fn update_skill(
&self,
id: &zclaw_types::SkillId,
manifest: zclaw_skills::SkillManifest,
) -> Result<zclaw_skills::SkillManifest> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.update_skill(skills_dir, id, manifest).await
}
/// Delete a skill
pub async fn delete_skill(&self, id: &zclaw_types::SkillId) -> Result<()> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.delete_skill(skills_dir, id).await
}
/// Execute a skill with the given ID and input
pub async fn execute_skill(
&self,
id: &str,
context: zclaw_skills::SkillContext,
input: serde_json::Value,
) -> Result<zclaw_skills::SkillResult> {
// Inject LLM completer into context for PromptOnly skills
let mut ctx = context;
if ctx.llm.is_none() {
ctx.llm = Some(self.llm_completer.clone());
}
self.skills.execute(&zclaw_types::SkillId::new(id), &ctx, input).await
}
}

View File

@@ -0,0 +1,52 @@
//! Trigger CRUD operations
use zclaw_types::Result;
use super::Kernel;
impl Kernel {
    // ------------------------------------------------------------
    // Trigger management: CRUD + execution, all delegated to the
    // kernel's TriggerManager.
    // ------------------------------------------------------------

    /// Return every registered trigger.
    pub async fn list_triggers(&self) -> Vec<crate::trigger_manager::TriggerEntry> {
        let mgr = &self.trigger_manager;
        mgr.list_triggers().await
    }

    /// Look up a single trigger by id, if it exists.
    pub async fn get_trigger(&self, id: &str) -> Option<crate::trigger_manager::TriggerEntry> {
        let mgr = &self.trigger_manager;
        mgr.get_trigger(id).await
    }

    /// Register a new trigger from the given configuration.
    pub async fn create_trigger(
        &self,
        config: zclaw_hands::TriggerConfig,
    ) -> Result<crate::trigger_manager::TriggerEntry> {
        let mgr = &self.trigger_manager;
        mgr.create_trigger(config).await
    }

    /// Apply a partial update to an existing trigger.
    pub async fn update_trigger(
        &self,
        id: &str,
        updates: crate::trigger_manager::TriggerUpdateRequest,
    ) -> Result<crate::trigger_manager::TriggerEntry> {
        let mgr = &self.trigger_manager;
        mgr.update_trigger(id, updates).await
    }

    /// Remove a trigger by id.
    pub async fn delete_trigger(&self, id: &str) -> Result<()> {
        let mgr = &self.trigger_manager;
        mgr.delete_trigger(id).await
    }

    /// Fire a trigger by id with the given JSON input.
    pub async fn execute_trigger(
        &self,
        id: &str,
        input: serde_json::Value,
    ) -> Result<zclaw_hands::TriggerResult> {
        let mgr = &self.trigger_manager;
        mgr.execute_trigger(id, input).await
    }
}