refactor(crates): kernel/generation module split + DeerFlow optimizations + middleware + dead code cleanup

- Split zclaw-kernel/kernel.rs (1486 lines) into 9 domain modules
- Split zclaw-kernel/generation.rs (1080 lines) into 3 modules
- Add DeerFlow-inspired middleware: DanglingTool, SubagentLimit, ToolError, ToolOutputGuard
- Add PromptBuilder for structured system prompt assembly
- Add FactStore (zclaw-memory) for persistent fact extraction
- Add task builtin tool for agent task management
- Driver improvements: Anthropic/OpenAI extended thinking, Gemini safety settings
- Replace silent `let _ =` discards with `log::warn!` logging across SaaS handlers (see the sketch below)
- Remove unused dependency (url) from zclaw-hands
iven
2026-04-03 00:28:03 +08:00
parent 0a04b260a4
commit 52bdafa633
55 changed files with 4130 additions and 1959 deletions
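As a point of reference for the `let _ =` bullet above: a minimal sketch of the logging pattern, assuming the log crate. The handler and helper names are illustrative, not the actual SaaS code.

    use log::warn;

    // Hypothetical fallible helper, for illustration only.
    fn notify_webhook(url: &str) -> std::io::Result<()> {
        println!("POST {}", url);
        Ok(())
    }

    fn handle_event(url: &str) {
        // Before: `let _ = notify_webhook(url);` silently dropped the error.
        // After: failures are surfaced in the log instead of vanishing.
        if let Err(e) = notify_webhook(url) {
            warn!("webhook notification failed for {}: {}", url, e);
        }
    }

    fn main() {
        env_logger::init(); // any log backend works; env_logger assumed here
        handle_event("https://example.invalid/hook");
    }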


@@ -130,8 +130,8 @@ impl LlmDriver for OpenAiDriver {
let api_key = self.api_key.expose_secret().to_string();
Box::pin(stream! {
println!("[OpenAI:stream] POST to {}/chat/completions", base_url);
println!("[OpenAI:stream] Request model={}, stream={}", stream_request.model, stream_request.stream);
tracing::debug!("[OpenAI:stream] POST to {}/chat/completions", base_url);
tracing::debug!("[OpenAI:stream] Request model={}, stream={}", stream_request.model, stream_request.stream);
let response = match self.client
.post(format!("{}/chat/completions", base_url))
.header("Authorization", format!("Bearer {}", api_key))
@@ -142,11 +142,11 @@ impl LlmDriver for OpenAiDriver {
.await
{
Ok(r) => {
println!("[OpenAI:stream] Response status: {}, content-type: {:?}", r.status(), r.headers().get("content-type"));
tracing::debug!("[OpenAI:stream] Response status: {}, content-type: {:?}", r.status(), r.headers().get("content-type"));
r
},
Err(e) => {
println!("[OpenAI:stream] HTTP request FAILED: {:?}", e);
tracing::debug!("[OpenAI:stream] HTTP request FAILED: {:?}", e);
yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
return;
}
@@ -155,7 +155,7 @@ impl LlmDriver for OpenAiDriver {
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
println!("[OpenAI:stream] API error {}: {}", status, &body[..body.len().min(500)]);
tracing::debug!("[OpenAI:stream] API error {}: {}", status, &body[..body.len().min(500)]);
yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
return;
}
@@ -170,7 +170,7 @@ impl LlmDriver for OpenAiDriver {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
println!("[OpenAI:stream] Byte stream error: {:?}", e);
tracing::debug!("[OpenAI:stream] Byte stream error: {:?}", e);
yield Err(ZclawError::LlmError(format!("Stream error: {}", e)));
continue;
}
@@ -180,7 +180,7 @@ impl LlmDriver for OpenAiDriver {
let text = String::from_utf8_lossy(&chunk);
// Log first 500 bytes of raw data for debugging SSE format
if raw_bytes_total <= 600 {
println!("[OpenAI:stream] RAW chunk ({} bytes): {:?}", text.len(), &text[..text.len().min(500)]);
tracing::debug!("[OpenAI:stream] RAW chunk ({} bytes): {:?}", text.len(), &text[..text.len().min(500)]);
}
for line in text.lines() {
let trimmed = line.trim();
@@ -198,10 +198,10 @@ impl LlmDriver for OpenAiDriver {
if let Some(data) = data {
sse_event_count += 1;
if sse_event_count <= 3 || data == "[DONE]" {
println!("[OpenAI:stream] SSE #{}: {}", sse_event_count, &data[..data.len().min(300)]);
tracing::debug!("[OpenAI:stream] SSE #{}: {}", sse_event_count, &data[..data.len().min(300)]);
}
if data == "[DONE]" {
println!("[OpenAI:stream] Received [DONE], total SSE events: {}, raw bytes: {}", sse_event_count, raw_bytes_total);
tracing::debug!("[OpenAI:stream] Received [DONE], total SSE events: {}, raw bytes: {}", sse_event_count, raw_bytes_total);
// Emit ToolUseEnd for all accumulated tool calls (skip invalid ones with empty name)
for (id, (name, args)) in &accumulated_tool_calls {
@@ -319,7 +319,7 @@ impl LlmDriver for OpenAiDriver {
}
}
}
println!("[OpenAI:stream] Byte stream ended. Total: {} SSE events, {} raw bytes", sse_event_count, raw_bytes_total);
tracing::debug!("[OpenAI:stream] Byte stream ended. Total: {} SSE events, {} raw bytes", sse_event_count, raw_bytes_total);
})
}
}
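One practical effect of the println! to tracing::debug! migration above: tracing::debug! output is gated by the subscriber's level filter, so the per-chunk SSE logging stays silent unless a caller opts in. A minimal sketch, assuming tracing-subscriber with its env-filter feature; the crate name used in the example filter is illustrative.

    use tracing_subscriber::EnvFilter;

    fn main() {
        // Honour RUST_LOG if set, otherwise default to "info" so debug-level
        // lines like the ones above are suppressed unless explicitly enabled
        // (e.g. RUST_LOG=debug, or RUST_LOG=my_driver_crate=debug).
        tracing_subscriber::fmt()
            .with_env_filter(
                EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
            )
            .init();

        tracing::debug!("hidden at the default level");
        tracing::info!("visible at the default level");
    }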
@@ -496,6 +496,7 @@ impl OpenAiDriver {
stop: if request.stop.is_empty() { None } else { Some(request.stop.clone()) },
stream: request.stream,
tools: if tools.is_empty() { None } else { Some(tools) },
+reasoning_effort: request.reasoning_effort.clone(),
};
// Pre-send payload size validation
@@ -581,8 +582,8 @@ impl OpenAiDriver {
let has_reasoning = c.message.reasoning_content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
let blocks = if has_tool_calls {
-// Tool calls take priority
-let tool_calls = c.message.tool_calls.as_ref().unwrap();
+// Tool calls take priority — safe to unwrap after has_tool_calls check
+let tool_calls = c.message.tool_calls.as_ref().cloned().unwrap_or_default();
tracing::debug!("[OpenAiDriver:convert_response] Using tool_calls: {} calls", tool_calls.len());
tool_calls.iter().map(|tc| ContentBlock::ToolUse {
id: tc.id.clone(),
@@ -590,15 +591,15 @@ impl OpenAiDriver {
input: serde_json::from_str(&tc.function.arguments).unwrap_or(serde_json::Value::Null),
}).collect()
} else if has_content {
-// Non-empty content
-let text = c.message.content.as_ref().unwrap();
+// Non-empty content — safe to unwrap after has_content check
+let text = c.message.content.as_deref().unwrap_or("");
tracing::debug!("[OpenAiDriver:convert_response] Using text content: {} chars", text.len());
-vec![ContentBlock::Text { text: text.clone() }]
+vec![ContentBlock::Text { text: text.to_string() }]
} else if has_reasoning {
// Content empty but reasoning_content present (Kimi, Qwen, DeepSeek)
-let reasoning = c.message.reasoning_content.as_ref().unwrap();
+let reasoning = c.message.reasoning_content.as_deref().unwrap_or("");
tracing::debug!("[OpenAiDriver:convert_response] Using reasoning_content: {} chars", reasoning.len());
-vec![ContentBlock::Text { text: reasoning.clone() }]
+vec![ContentBlock::Text { text: reasoning.to_string() }]
} else {
// No content or tool_calls
tracing::debug!("[OpenAiDriver:convert_response] No content or tool_calls, using empty text");
@@ -771,6 +772,8 @@ struct OpenAiRequest {
stream: bool,
#[serde(skip_serializing_if = "Option::is_none")]
tools: Option<Vec<OpenAiTool>>,
+#[serde(skip_serializing_if = "Option::is_none")]
+reasoning_effort: Option<String>,
}
#[derive(Serialize)]
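On the new reasoning_effort field above: with serde's skip_serializing_if = "Option::is_none", the key is omitted from the request JSON entirely when the option is None, so providers that do not understand the parameter never see it. A minimal sketch of that behaviour; the struct and model strings are illustrative, not the real OpenAiRequest.

    use serde::Serialize;

    #[derive(Serialize)]
    struct RequestSketch {
        model: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        reasoning_effort: Option<String>,
    }

    fn main() {
        let plain = RequestSketch { model: "some-model".into(), reasoning_effort: None };
        let reasoning = RequestSketch { model: "some-reasoning-model".into(), reasoning_effort: Some("high".into()) };

        // Prints {"model":"some-model"} (no reasoning_effort key at all).
        println!("{}", serde_json::to_string(&plain).unwrap());
        // Prints {"model":"some-reasoning-model","reasoning_effort":"high"}.
        println!("{}", serde_json::to_string(&reasoning).unwrap());
    }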
@@ -833,7 +836,7 @@ struct OpenAiResponse {
usage: Option<OpenAiUsage>,
}
-#[derive(Deserialize, Default)]
+#[derive(Deserialize, Default, Clone)]
struct OpenAiChoice {
#[serde(default)]
message: OpenAiResponseMessage,
@@ -841,7 +844,7 @@ struct OpenAiChoice {
finish_reason: Option<String>,
}
-#[derive(Deserialize, Default)]
+#[derive(Deserialize, Default, Clone)]
struct OpenAiResponseMessage {
#[serde(default)]
content: Option<String>,
@@ -851,7 +854,7 @@ struct OpenAiResponseMessage {
tool_calls: Option<Vec<OpenAiToolCallResponse>>,
}
-#[derive(Deserialize, Default)]
+#[derive(Deserialize, Default, Clone)]
struct OpenAiToolCallResponse {
#[serde(default)]
id: String,
@@ -859,7 +862,7 @@ struct OpenAiToolCallResponse {
function: FunctionCallResponse,
}
-#[derive(Deserialize, Default)]
+#[derive(Deserialize, Default, Clone)]
struct FunctionCallResponse {
#[serde(default)]
name: String,