feat: add skill orchestration engine and workflow builder components

refactor: consolidate Hands system constants into a single source file
refactor: update the Chinese names and descriptions for Hands

fix: skill marketplace reloading when connection state changes
fix: error handling logic for identity change proposals

docs: update verification status and implementation locations across multiple feature docs
docs: update Hands system documentation

test: add test file to verify workspace paths
commit aa6a9cbd84 (parent 9c781f5f2a)
Author: iven
Date: 2026-03-25 08:27:25 +08:00
110 changed files with 12384 additions and 1337 deletions


@@ -94,78 +94,110 @@ impl AgentLoop {
}
/// Run the agent loop with a single message
/// Implements complete agent loop: LLM → Tool Call → Tool Result → LLM → Final Response
pub async fn run(&self, session_id: SessionId, input: String) -> Result<AgentLoopResult> {
// Add user message to session
let user_message = Message::user(input);
self.memory.append_message(&session_id, &user_message).await?;
// Get all messages for context
let messages = self.memory.get_messages(&session_id).await?;
let mut messages = self.memory.get_messages(&session_id).await?;
// Build completion request with configured model
let request = CompletionRequest {
model: self.model.clone(),
system: self.system_prompt.clone(),
messages,
tools: self.tools.definitions(),
max_tokens: Some(self.max_tokens),
temperature: Some(self.temperature),
stop: Vec::new(),
stream: false,
};
let max_iterations = 10;
let mut iterations = 0;
let mut total_input_tokens = 0u32;
let mut total_output_tokens = 0u32;
// Call LLM
let response = self.driver.complete(request).await?;
// Create tool context
let tool_context = self.create_tool_context(session_id.clone());
// Process response and execute tools
let mut response_parts = Vec::new();
let mut tool_results = Vec::new();
for block in &response.content {
match block {
ContentBlock::Text { text } => {
response_parts.push(text.clone());
}
ContentBlock::Thinking { thinking } => {
response_parts.push(format!("[思考] {}", thinking));
}
ContentBlock::ToolUse { id, name, input } => {
// Execute the tool
let tool_result = match self.execute_tool(name, input.clone(), &tool_context).await {
Ok(result) => {
response_parts.push(format!("[工具执行成功] {}", name));
result
}
Err(e) => {
response_parts.push(format!("[工具执行失败] {}: {}", name, e));
serde_json::json!({ "error": e.to_string() })
}
};
tool_results.push((id.clone(), name.clone(), tool_result));
}
loop {
iterations += 1;
if iterations > max_iterations {
// Save the state before returning
let error_msg = "达到最大迭代次数,请简化请求";
self.memory.append_message(&session_id, &Message::assistant(error_msg)).await?;
return Ok(AgentLoopResult {
response: error_msg.to_string(),
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations,
});
}
// Build completion request
let request = CompletionRequest {
model: self.model.clone(),
system: self.system_prompt.clone(),
messages: messages.clone(),
tools: self.tools.definitions(),
max_tokens: Some(self.max_tokens),
temperature: Some(self.temperature),
stop: Vec::new(),
stream: false,
};
// Call LLM
let response = self.driver.complete(request).await?;
total_input_tokens += response.input_tokens;
total_output_tokens += response.output_tokens;
// Extract tool calls from response
let tool_calls: Vec<(String, String, serde_json::Value)> = response.content.iter()
.filter_map(|block| match block {
ContentBlock::ToolUse { id, name, input } => Some((id.clone(), name.clone(), input.clone())),
_ => None,
})
.collect();
// If no tool calls, we have the final response
if tool_calls.is_empty() {
// Extract text content
let text = response.content.iter()
.filter_map(|block| match block {
ContentBlock::Text { text } => Some(text.clone()),
ContentBlock::Thinking { thinking } => Some(format!("[思考] {}", thinking)),
_ => None,
})
.collect::<Vec<_>>()
.join("\n");
// Save final assistant message
self.memory.append_message(&session_id, &Message::assistant(&text)).await?;
return Ok(AgentLoopResult {
response: text,
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations,
});
}
// There are tool calls - add assistant message with tool calls to history
for (id, name, input) in &tool_calls {
messages.push(Message::tool_use(id, zclaw_types::ToolId::new(name), input.clone()));
}
// Create tool context and execute all tools
let tool_context = self.create_tool_context(session_id.clone());
for (id, name, input) in tool_calls {
let tool_result = match self.execute_tool(&name, input, &tool_context).await {
Ok(result) => result,
Err(e) => serde_json::json!({ "error": e.to_string() }),
};
// Add tool result to messages
messages.push(Message::tool_result(
id,
zclaw_types::ToolId::new(&name),
tool_result,
false, // is_error - we include errors in the result itself
));
}
// Continue the loop - LLM will process tool results and generate final response
}
// If there were tool calls, we might need to continue the conversation
// For now, just include tool results in the response
for (id, name, result) in tool_results {
response_parts.push(format!("[工具结果 {}]: {}", name, serde_json::to_string(&result).unwrap_or_default()));
}
let response_text = response_parts.join("\n");
Ok(AgentLoopResult {
response: response_text,
input_tokens: response.input_tokens,
output_tokens: response.output_tokens,
iterations: 1,
})
}
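A minimal caller sketch for the non-streaming path, assuming an already-constructed `AgentLoop`; the names `agent_loop` and `session_id` are illustrative and not part of this diff:

// Hypothetical usage of run(); construction of AgentLoop and the error type are elided.
let result = agent_loop
    .run(session_id, "Summarize the workspace layout".to_string())
    .await?;
println!(
    "{} ({} in / {} out tokens, {} iterations)",
    result.response, result.input_tokens, result.output_tokens, result.iterations
);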
/// Run the agent loop with streaming
/// Implements complete agent loop with multi-turn tool calling support
pub async fn run_streaming(
&self,
session_id: SessionId,
@@ -180,18 +212,6 @@ impl AgentLoop {
// Get all messages for context
let messages = self.memory.get_messages(&session_id).await?;
// Build completion request
let request = CompletionRequest {
model: self.model.clone(),
system: self.system_prompt.clone(),
messages,
tools: self.tools.definitions(),
max_tokens: Some(self.max_tokens),
temperature: Some(self.temperature),
stop: Vec::new(),
stream: true,
};
// Clone necessary data for the async task
let session_id_clone = session_id.clone();
let memory = self.memory.clone();
@@ -199,116 +219,170 @@ impl AgentLoop {
let tools = self.tools.clone();
let skill_executor = self.skill_executor.clone();
let agent_id = self.agent_id.clone();
let system_prompt = self.system_prompt.clone();
let model = self.model.clone();
let max_tokens = self.max_tokens;
let temperature = self.temperature;
tokio::spawn(async move {
let mut full_response = String::new();
let mut input_tokens = 0u32;
let mut output_tokens = 0u32;
let mut pending_tool_calls: Vec<(String, String, serde_json::Value)> = Vec::new();
let mut messages = messages;
let max_iterations = 10;
let mut iteration = 0;
let mut total_input_tokens = 0u32;
let mut total_output_tokens = 0u32;
let mut stream = driver.stream(request);
while let Some(chunk_result) = stream.next().await {
match chunk_result {
Ok(chunk) => {
// Track response and tokens
match &chunk {
StreamChunk::TextDelta { delta } => {
full_response.push_str(delta);
let _ = tx.send(LoopEvent::Delta(delta.clone())).await;
}
StreamChunk::ThinkingDelta { delta } => {
let _ = tx.send(LoopEvent::Delta(format!("[思考] {}", delta))).await;
}
StreamChunk::ToolUseStart { id, name } => {
pending_tool_calls.push((id.clone(), name.clone(), serde_json::Value::Null));
let _ = tx.send(LoopEvent::ToolStart {
name: name.clone(),
input: serde_json::Value::Null,
}).await;
}
StreamChunk::ToolUseDelta { id, delta } => {
// Update the pending tool call's input
if let Some(tool) = pending_tool_calls.iter_mut().find(|(tid, _, _)| tid == id) {
// For simplicity, just store the delta as the input
// In a real implementation, you'd accumulate and parse JSON
tool.2 = serde_json::Value::String(delta.clone());
}
let _ = tx.send(LoopEvent::Delta(format!("[工具参数] {}", delta))).await;
}
StreamChunk::ToolUseEnd { id, input } => {
// Update the tool call with final input
if let Some(tool) = pending_tool_calls.iter_mut().find(|(tid, _, _)| tid == id) {
tool.2 = input.clone();
}
}
StreamChunk::Complete { input_tokens: it, output_tokens: ot, .. } => {
input_tokens = *it;
output_tokens = *ot;
}
StreamChunk::Error { message } => {
let _ = tx.send(LoopEvent::Error(message.clone())).await;
}
}
}
Err(e) => {
let _ = tx.send(LoopEvent::Error(e.to_string())).await;
}
'outer: loop {
iteration += 1;
if iteration > max_iterations {
let _ = tx.send(LoopEvent::Error("达到最大迭代次数".to_string())).await;
break;
}
}
// Execute pending tool calls
for (_id, name, input) in pending_tool_calls {
// Create tool context
let tool_context = ToolContext {
agent_id: agent_id.clone(),
working_directory: None,
session_id: Some(session_id_clone.to_string()),
skill_executor: skill_executor.clone(),
// Notify iteration start
let _ = tx.send(LoopEvent::IterationStart {
iteration,
max_iterations,
}).await;
// Build completion request
let request = CompletionRequest {
model: model.clone(),
system: system_prompt.clone(),
messages: messages.clone(),
tools: tools.definitions(),
max_tokens: Some(max_tokens),
temperature: Some(temperature),
stop: Vec::new(),
stream: true,
};
// Execute the tool
let result = if let Some(tool) = tools.get(&name) {
match tool.execute(input.clone(), &tool_context).await {
Ok(output) => {
let _ = tx.send(LoopEvent::ToolEnd {
name: name.clone(),
output: output.clone(),
}).await;
output
let mut stream = driver.stream(request);
let mut pending_tool_calls: Vec<(String, String, serde_json::Value)> = Vec::new();
let mut iteration_text = String::new();
// Process stream chunks
tracing::debug!("[AgentLoop] Starting to process stream chunks");
while let Some(chunk_result) = stream.next().await {
match chunk_result {
Ok(chunk) => {
match &chunk {
StreamChunk::TextDelta { delta } => {
iteration_text.push_str(delta);
let _ = tx.send(LoopEvent::Delta(delta.clone())).await;
}
StreamChunk::ThinkingDelta { delta } => {
let _ = tx.send(LoopEvent::Delta(format!("[思考] {}", delta))).await;
}
StreamChunk::ToolUseStart { id, name } => {
tracing::debug!("[AgentLoop] ToolUseStart: id={}, name={}", id, name);
pending_tool_calls.push((id.clone(), name.clone(), serde_json::Value::Null));
}
StreamChunk::ToolUseDelta { id, delta } => {
// Accumulate tool input delta (internal processing, not sent to user)
if let Some(tool) = pending_tool_calls.iter_mut().find(|(tid, _, _)| tid == id) {
// Try to accumulate JSON string
match &mut tool.2 {
serde_json::Value::String(s) => s.push_str(delta),
serde_json::Value::Null => tool.2 = serde_json::Value::String(delta.clone()),
_ => {}
}
}
}
StreamChunk::ToolUseEnd { id, input } => {
tracing::debug!("[AgentLoop] ToolUseEnd: id={}, input={:?}", id, input);
// Update with final parsed input and emit ToolStart event
if let Some(tool) = pending_tool_calls.iter_mut().find(|(tid, _, _)| tid == id) {
tool.2 = input.clone();
let _ = tx.send(LoopEvent::ToolStart { name: tool.1.clone(), input: input.clone() }).await;
}
}
StreamChunk::Complete { input_tokens: it, output_tokens: ot, .. } => {
tracing::debug!("[AgentLoop] Stream complete: input_tokens={}, output_tokens={}", it, ot);
total_input_tokens += *it;
total_output_tokens += *ot;
}
StreamChunk::Error { message } => {
tracing::error!("[AgentLoop] Stream error: {}", message);
let _ = tx.send(LoopEvent::Error(message.clone())).await;
}
}
}
Err(e) => {
let error_output: serde_json::Value = serde_json::json!({ "error": e.to_string() });
let _ = tx.send(LoopEvent::ToolEnd {
name: name.clone(),
output: error_output.clone(),
}).await;
error_output
tracing::error!("[AgentLoop] Chunk error: {}", e);
let _ = tx.send(LoopEvent::Error(e.to_string())).await;
}
}
} else {
let error_output: serde_json::Value = serde_json::json!({ "error": format!("Unknown tool: {}", name) });
let _ = tx.send(LoopEvent::ToolEnd {
name: name.clone(),
output: error_output.clone(),
}).await;
error_output
};
}
tracing::debug!("[AgentLoop] Stream ended, pending_tool_calls count: {}", pending_tool_calls.len());
full_response.push_str(&format!("\n[工具 {} 结果]: {}", name, serde_json::to_string(&result).unwrap_or_default()));
// If no tool calls, we have the final response
if pending_tool_calls.is_empty() {
tracing::debug!("[AgentLoop] No tool calls, returning final response");
// Save final assistant message
let _ = memory.append_message(&session_id_clone, &Message::assistant(&iteration_text)).await;
let _ = tx.send(LoopEvent::Complete(AgentLoopResult {
response: iteration_text,
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations: iteration,
})).await;
break 'outer;
}
tracing::debug!("[AgentLoop] Processing {} tool calls", pending_tool_calls.len());
// There are tool calls - add to message history
for (id, name, input) in &pending_tool_calls {
tracing::debug!("[AgentLoop] Adding tool_use to history: id={}, name={}, input={:?}", id, name, input);
messages.push(Message::tool_use(id, zclaw_types::ToolId::new(name), input.clone()));
}
// Execute tools
for (id, name, input) in pending_tool_calls {
tracing::debug!("[AgentLoop] Executing tool: name={}, input={:?}", name, input);
let tool_context = ToolContext {
agent_id: agent_id.clone(),
working_directory: None,
session_id: Some(session_id_clone.to_string()),
skill_executor: skill_executor.clone(),
};
let (result, is_error) = if let Some(tool) = tools.get(&name) {
tracing::debug!("[AgentLoop] Tool '{}' found, executing...", name);
match tool.execute(input.clone(), &tool_context).await {
Ok(output) => {
tracing::debug!("[AgentLoop] Tool '{}' executed successfully: {:?}", name, output);
let _ = tx.send(LoopEvent::ToolEnd { name: name.clone(), output: output.clone() }).await;
(output, false)
}
Err(e) => {
tracing::error!("[AgentLoop] Tool '{}' execution failed: {}", name, e);
let error_output = serde_json::json!({ "error": e.to_string() });
let _ = tx.send(LoopEvent::ToolEnd { name: name.clone(), output: error_output.clone() }).await;
(error_output, true)
}
}
} else {
tracing::error!("[AgentLoop] Tool '{}' not found in registry", name);
let error_output = serde_json::json!({ "error": format!("Unknown tool: {}", name) });
let _ = tx.send(LoopEvent::ToolEnd { name: name.clone(), output: error_output.clone() }).await;
(error_output, true)
};
// Add tool result to message history
tracing::debug!("[AgentLoop] Adding tool_result to history: id={}, name={}, is_error={}", id, name, is_error);
messages.push(Message::tool_result(
id,
zclaw_types::ToolId::new(&name),
result,
is_error,
));
}
tracing::debug!("[AgentLoop] Continuing to next iteration for LLM to process tool results");
// Continue loop - next iteration will call LLM with tool results
}
// Save assistant message to memory
let assistant_message = Message::assistant(full_response.clone());
let _ = memory.append_message(&session_id_clone, &assistant_message).await;
// Send completion event
let _ = tx.send(LoopEvent::Complete(AgentLoopResult {
response: full_response,
input_tokens,
output_tokens,
iterations: 1,
})).await;
});
Ok(rx)
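A sketch of consuming the streaming path, assuming `run_streaming` takes a session id and input string like `run`, and that `rx` is a `tokio::sync::mpsc::Receiver<LoopEvent>` (consistent with the `tx.send(...).await` calls above; the exact channel type sits outside this hunk):

// Hypothetical consumer; agent_loop and session_id are illustrative.
let mut rx = agent_loop.run_streaming(session_id, "input".to_string()).await?;
while let Some(event) = rx.recv().await {
    match event {
        LoopEvent::Delta(text) => print!("{text}"),
        LoopEvent::Complete(result) => println!("\n[done after {} iterations]", result.iterations),
        LoopEvent::Error(message) => eprintln!("[error] {message}"),
        _ => {} // ToolStart / ToolEnd / IterationStart: see the rendering helper below
    }
}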
@@ -327,9 +401,16 @@ pub struct AgentLoopResult {
/// Events emitted during streaming
#[derive(Debug, Clone)]
pub enum LoopEvent {
/// Text delta from LLM
Delta(String),
/// Tool execution started
ToolStart { name: String, input: serde_json::Value },
/// Tool execution completed
ToolEnd { name: String, output: serde_json::Value },
/// New iteration started (multi-turn tool calling)
IterationStart { iteration: usize, max_iterations: usize },
/// Loop completed with final result
Complete(AgentLoopResult),
/// Error occurred
Error(String),
}
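For completeness, an illustrative helper (not part of this commit) that renders each `LoopEvent` variant as a display line, showing how the new `IterationStart` variant can surface multi-turn progress in a front end:

// Hypothetical rendering helper; the formatting choices are assumptions.
fn render_event(event: &LoopEvent) -> String {
    match event {
        LoopEvent::Delta(text) => text.clone(),
        LoopEvent::ToolStart { name, input } => format!("[tool start] {name}: {input}"),
        LoopEvent::ToolEnd { name, output } => format!("[tool end] {name}: {output}"),
        LoopEvent::IterationStart { iteration, max_iterations } =>
            format!("[iteration {iteration}/{max_iterations}]"),
        LoopEvent::Complete(result) => format!("[complete after {} iterations]", result.iterations),
        LoopEvent::Error(message) => format!("[error] {message}"),
    }
}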