diff --git a/crates/zclaw-runtime/src/driver/openai.rs b/crates/zclaw-runtime/src/driver/openai.rs
index b9d7562..84d5b53 100644
--- a/crates/zclaw-runtime/src/driver/openai.rs
+++ b/crates/zclaw-runtime/src/driver/openai.rs
@@ -25,6 +25,8 @@ impl OpenAiDriver {
             client: Client::builder()
                 .user_agent(crate::USER_AGENT)
                 .http1_only()
+                .timeout(std::time::Duration::from_secs(120)) // 2 minute request timeout
+                .connect_timeout(std::time::Duration::from_secs(30)) // 30 second connect timeout
                 .build()
                 .unwrap_or_else(|_| Client::new()),
             api_key,
@@ -37,6 +39,8 @@ impl OpenAiDriver {
             client: Client::builder()
                 .user_agent(crate::USER_AGENT)
                 .http1_only()
+                .timeout(std::time::Duration::from_secs(120)) // 2 minute request timeout
+                .connect_timeout(std::time::Duration::from_secs(30)) // 30 second connect timeout
                 .build()
                 .unwrap_or_else(|_| Client::new()),
             api_key,
@@ -94,23 +98,54 @@ impl LlmDriver for OpenAiDriver {
         &self,
         request: CompletionRequest,
     ) -> Pin<Box<dyn Stream<Item = Result<StreamChunk, ZclawError>> + Send + '_>> {
+        // Check whether we should fall back to non-streaming mode for tool calls.
+        // Some providers don't support streaming together with tools:
+        // - Alibaba DashScope: "tools暂时无法与stream=True同时使用"
+        //   ("tools cannot currently be used together with stream=True")
+        // - Zhipu GLM: may have a similar limitation
+        let has_tools = !request.tools.is_empty();
+        let needs_non_streaming = self.base_url.contains("dashscope") ||
+            self.base_url.contains("aliyuncs") ||
+            self.base_url.contains("bigmodel.cn");
+
+        eprintln!("[OpenAiDriver:stream] base_url={}, has_tools={}, needs_non_streaming={}",
+            self.base_url, has_tools, needs_non_streaming);
+
+        if has_tools && needs_non_streaming {
+            eprintln!("[OpenAiDriver:stream] Provider detected that may not support streaming with tools, using non-streaming mode. URL: {}", self.base_url);
+            // Use non-streaming mode and convert the result into a stream
+            return self.stream_from_complete(request);
+        }
+
         let mut stream_request = self.build_api_request(&request);
         stream_request.stream = true;
 
+        // Debug: log the request details
+        let url = format!("{}/chat/completions", self.base_url);
+        let request_body = serde_json::to_string(&stream_request).unwrap_or_default();
+        tracing::debug!("[OpenAiDriver:stream] Sending request to: {}", url);
+        tracing::debug!("[OpenAiDriver:stream] Request body length: {} bytes", request_body.len());
+        tracing::trace!("[OpenAiDriver:stream] Request body: {}", request_body);
+
        let base_url = self.base_url.clone();
         let api_key = self.api_key.expose_secret().to_string();
 
         Box::pin(stream! {
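+            // The response body is consumed as server-sent events below: each JSON
+            // chunk arrives on a "data: " line and the stream ends with "data: [DONE]".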
+            tracing::debug!("[OpenAiDriver:stream] Starting HTTP request...");
             let response = match self.client
                 .post(format!("{}/chat/completions", base_url))
                 .header("Authorization", format!("Bearer {}", api_key))
                 .header("Content-Type", "application/json")
+                .timeout(std::time::Duration::from_secs(120)) // 2 minute timeout
                 .json(&stream_request)
                 .send()
                 .await
             {
-                Ok(r) => r,
+                Ok(r) => {
+                    tracing::debug!("[OpenAiDriver:stream] Got response, status: {}", r.status());
+                    r
+                },
                 Err(e) => {
+                    tracing::error!("[OpenAiDriver:stream] HTTP request failed: {:?}", e);
                     yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
                     return;
                 }
@@ -124,6 +159,8 @@ impl LlmDriver for OpenAiDriver {
             }
 
             let mut byte_stream = response.bytes_stream();
+            let mut accumulated_tool_calls: std::collections::HashMap<String, (String, String)> = std::collections::HashMap::new();
+            let mut current_tool_id: Option<String> = None;
 
             while let Some(chunk_result) = byte_stream.next().await {
                 let chunk = match chunk_result {
@@ -138,6 +175,31 @@ impl LlmDriver for OpenAiDriver {
                 for line in text.lines() {
                     if let Some(data) = line.strip_prefix("data: ") {
                         if data == "[DONE]" {
+                            tracing::debug!("[OpenAI] Stream done, accumulated_tool_calls: {:?}", accumulated_tool_calls.len());
+
+                            // Emit ToolUseEnd for all accumulated tool calls (skip invalid ones with an empty name)
+                            for (id, (name, args)) in &accumulated_tool_calls {
+                                // Skip tool calls with an empty name - they are invalid
+                                if name.is_empty() {
+                                    tracing::warn!("[OpenAI] Skipping invalid tool call with empty name: id={}", id);
+                                    continue;
+                                }
+                                tracing::debug!("[OpenAI] Emitting ToolUseEnd: id={}, name={}, args={}", id, name, args);
+                                // Ensure the parsed args value is always a valid JSON object
+                                let parsed_args: serde_json::Value = if args.is_empty() {
+                                    serde_json::json!({})
+                                } else {
+                                    serde_json::from_str(args).unwrap_or_else(|e| {
+                                        tracing::warn!("[OpenAI] Failed to parse tool args '{}': {}, using empty object", args, e);
+                                        serde_json::json!({})
+                                    })
+                                };
+                                yield Ok(StreamChunk::ToolUseEnd {
+                                    id: id.clone(),
+                                    input: parsed_args,
+                                });
+                            }
+
                             yield Ok(StreamChunk::Complete {
                                 input_tokens: 0,
                                 output_tokens: 0,
@@ -150,17 +212,65 @@ impl LlmDriver for OpenAiDriver {
                             Ok(resp) => {
                                 if let Some(choice) = resp.choices.first() {
                                     let delta = &choice.delta;
+
+                                    // Handle text content
                                     if let Some(content) = &delta.content {
-                                        yield Ok(StreamChunk::TextDelta { delta: content.clone() });
+                                        if !content.is_empty() {
+                                            yield Ok(StreamChunk::TextDelta { delta: content.clone() });
+                                        }
                                     }
+
+                                    // Handle tool calls
                                     if let Some(tool_calls) = &delta.tool_calls {
+                                        tracing::trace!("[OpenAI] Received tool_calls delta: {:?}", tool_calls);
                                         for tc in tool_calls {
+                                            // Tool call start - carries the id and name
+                                            if let Some(id) = &tc.id {
+                                                // Get the function name if available
+                                                let name = tc.function.as_ref()
+                                                    .and_then(|f| f.name.clone())
+                                                    .unwrap_or_default();
+
+                                                // Only emit ToolUseStart once we have a valid tool name
+                                                if !name.is_empty() {
+                                                    tracing::debug!("[OpenAI] ToolUseStart: id={}, name={}", id, name);
+                                                    current_tool_id = Some(id.clone());
+                                                    accumulated_tool_calls.insert(id.clone(), (name.clone(), String::new()));
+                                                    yield Ok(StreamChunk::ToolUseStart {
+                                                        id: id.clone(),
+                                                        name,
+                                                    });
+                                                } else {
+                                                    tracing::debug!("[OpenAI] Tool call with empty name, waiting for name delta: id={}", id);
+                                                    // Still track the tool call, but don't emit it yet
+                                                    current_tool_id = Some(id.clone());
+                                                    accumulated_tool_calls.insert(id.clone(), (String::new(), String::new()));
+                                                }
+                                            }
+
+                                            // Tool call delta - carries the arguments
                                             if let Some(function) = &tc.function {
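+                                                // Argument deltas may arrive without an id field;
+                                                // current_tool_id (set when the call started) lets
+                                                // them be attributed to the right tool call.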
+                                                tracing::trace!("[OpenAI] Function delta: name={:?}, arguments={:?}", function.name, function.arguments);
                                                 if let Some(args) = &function.arguments {
+                                                    tracing::debug!("[OpenAI] ToolUseDelta: args={}", args);
+                                                    // Find the tool by id, or fall back to the current one
+                                                    let tool_id = tc.id.as_ref()
+                                                        .or(current_tool_id.as_ref())
+                                                        .cloned()
+                                                        .unwrap_or_default();
+
                                                     yield Ok(StreamChunk::ToolUseDelta {
-                                                        id: tc.id.clone().unwrap_or_default(),
+                                                        id: tool_id.clone(),
                                                         delta: args.clone(),
                                                     });
+
+                                                    // Accumulate the arguments
+                                                    if let Some(entry) = accumulated_tool_calls.get_mut(&tool_id) {
+                                                        tracing::debug!("[OpenAI] Accumulating args for tool {}: '{}' -> '{}'", tool_id, args, entry.1);
+                                                        entry.1.push_str(args);
+                                                    } else {
+                                                        tracing::warn!("[OpenAI] No entry found for tool_id '{}' to accumulate args", tool_id);
+                                                    }
                                                 }
                                             }
                                         }
@@ -168,7 +278,7 @@ impl LlmDriver for OpenAiDriver {
                             }
                         }
                         Err(e) => {
-                            tracing::warn!("Failed to parse OpenAI SSE: {}", e);
+                            tracing::warn!("[OpenAI] Failed to parse SSE: {}, data: {}", e, data);
                         }
                     }
                 }
@@ -212,19 +322,27 @@ impl OpenAiDriver {
                 content: Some(content.clone()),
                 tool_calls: None,
             }),
-            zclaw_types::Message::ToolUse { id, tool, input } => Some(OpenAiMessage {
-                role: "assistant".to_string(),
-                content: None,
-                tool_calls: Some(vec![OpenAiToolCall {
-                    id: id.clone(),
-                    r#type: "function".to_string(),
-                    function: FunctionCall {
-                        name: tool.to_string(),
-                        arguments: serde_json::to_string(input).unwrap_or_default(),
-                    },
-                }]),
-            }),
-            zclaw_types::Message::ToolResult { tool_call_id, output, is_error, .. } => Some(OpenAiMessage {
+            zclaw_types::Message::ToolUse { id, tool, input } => {
+                // Ensure arguments is always a valid JSON object, never null or invalid
+                let args = if input.is_null() {
+                    "{}".to_string()
+                } else {
+                    serde_json::to_string(input).unwrap_or_else(|_| "{}".to_string())
+                };
+                Some(OpenAiMessage {
+                    role: "assistant".to_string(),
+                    content: None,
+                    tool_calls: Some(vec![OpenAiToolCall {
+                        id: id.clone(),
+                        r#type: "function".to_string(),
+                        function: FunctionCall {
+                            name: tool.to_string(),
+                            arguments: args,
+                        },
+                    }]),
+                })
+            }
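+            // Tool results go back to the API as role "tool" messages; errors are
+            // surfaced in-band with an "Error:" prefix so the model can react to them.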
+            zclaw_types::Message::ToolResult { tool_call_id: _, output, is_error, .. } => Some(OpenAiMessage {
                 role: "tool".to_string(),
                 content: Some(if *is_error {
                     format!("Error: {}", output)
@@ -272,17 +390,32 @@ impl OpenAiDriver {
     fn convert_response(&self, api_response: OpenAiResponse, model: String) -> CompletionResponse {
         let choice = api_response.choices.first();
 
+        tracing::debug!("[OpenAiDriver:convert_response] Processing response: {} choices, first choice: {:?}",
+            api_response.choices.len(),
+            choice.map(|c| format!("content={:?}, tool_calls={:?}, finish_reason={:?}",
+                c.message.content, c.message.tool_calls.as_ref().map(|tc| tc.len()), c.finish_reason)));
+
         let (content, stop_reason) = match choice {
             Some(c) => {
-                let blocks = if let Some(text) = &c.message.content {
-                    vec![ContentBlock::Text { text: text.clone() }]
-                } else if let Some(tool_calls) = &c.message.tool_calls {
+                // Priority: tool_calls > non-empty content > empty content.
+                // This matters because some providers return empty content alongside tool_calls.
+                let has_tool_calls = c.message.tool_calls.as_ref().map(|tc| !tc.is_empty()).unwrap_or(false);
+                let has_content = c.message.content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
+
+                let blocks = if has_tool_calls {
+                    // Tool calls take priority
+                    let tool_calls = c.message.tool_calls.as_ref().unwrap();
+                    tracing::debug!("[OpenAiDriver:convert_response] Using tool_calls: {} calls", tool_calls.len());
                     tool_calls.iter().map(|tc| ContentBlock::ToolUse {
                         id: tc.id.clone(),
                         name: tc.function.name.clone(),
                         input: serde_json::from_str(&tc.function.arguments).unwrap_or(serde_json::Value::Null),
                     }).collect()
+                } else if has_content {
+                    // Non-empty text content
+                    let text = c.message.content.as_ref().unwrap();
+                    tracing::debug!("[OpenAiDriver:convert_response] Using text content: {} chars", text.len());
+                    vec![ContentBlock::Text { text: text.clone() }]
                 } else {
+                    // Neither content nor tool_calls
+                    tracing::debug!("[OpenAiDriver:convert_response] No content or tool_calls, using empty text");
                     vec![ContentBlock::Text { text: String::new() }]
                 };
 
@@ -295,7 +428,10 @@ impl OpenAiDriver {
                 (blocks, stop)
             }
-            None => (vec![ContentBlock::Text { text: String::new() }], StopReason::EndTurn),
+            None => {
+                tracing::debug!("[OpenAiDriver:convert_response] No choices in response");
+                (vec![ContentBlock::Text { text: String::new() }], StopReason::EndTurn)
+            }
         };
 
         let (input_tokens, output_tokens) = api_response.usage
@@ -310,6 +446,119 @@ impl OpenAiDriver {
             stop_reason,
         }
     }
+
+    /// Convert a non-streaming completion into a stream, for providers that
+    /// don't support streaming together with tools
+    fn stream_from_complete(&self, request: CompletionRequest) -> Pin<Box<dyn Stream<Item = Result<StreamChunk, ZclawError>> + Send + '_>> {
+        // Build a non-streaming request
+        let mut complete_request = self.build_api_request(&request);
+        complete_request.stream = false;
+
+        // Capture values before entering the stream
+        let base_url = self.base_url.clone();
+        let api_key = self.api_key.expose_secret().to_string();
+        let model = request.model.clone();
+
+        eprintln!("[OpenAiDriver:stream_from_complete] Starting non-streaming request to: {}/chat/completions", base_url);
+
+        Box::pin(stream! {
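+            // One blocking /chat/completions call; the parsed response is then
+            // replayed below as synthetic Start/Delta/End/Complete stream chunks.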
+            let url = format!("{}/chat/completions", base_url);
+            eprintln!("[OpenAiDriver:stream_from_complete] Sending non-streaming request to: {}", url);
+
+            let response = match self.client
+                .post(&url)
+                .header("Authorization", format!("Bearer {}", api_key))
+                .header("Content-Type", "application/json")
+                .timeout(std::time::Duration::from_secs(120))
+                .json(&complete_request)
+                .send()
+                .await
+            {
+                Ok(r) => r,
+                Err(e) => {
+                    yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
+                    return;
+                }
+            };
+
+            if !response.status().is_success() {
+                let status = response.status();
+                let body = response.text().await.unwrap_or_default();
+                yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
+                return;
+            }
+
+            let api_response: OpenAiResponse = match response.json().await {
+                Ok(r) => r,
+                Err(e) => {
+                    eprintln!("[OpenAiDriver:stream_from_complete] Failed to parse response: {}", e);
+                    yield Err(ZclawError::LlmError(format!("Failed to parse response: {}", e)));
+                    return;
+                }
+            };
+
+            eprintln!("[OpenAiDriver:stream_from_complete] Got response with {} choices", api_response.choices.len());
+            if let Some(choice) = api_response.choices.first() {
+                eprintln!("[OpenAiDriver:stream_from_complete] First choice: content={:?}, tool_calls={:?}, finish_reason={:?}",
+                    // Truncate on char boundaries for logging: slicing with &c[..100]
+                    // would panic if byte 100 fell inside a multi-byte character
+                    choice.message.content.as_ref().map(|c| c.chars().take(100).collect::<String>()),
+                    choice.message.tool_calls.as_ref().map(|tc| tc.len()),
+                    choice.finish_reason);
+            }
+
+            // Convert the response into stream chunks
+            let completion = self.convert_response(api_response, model.clone());
+            eprintln!("[OpenAiDriver:stream_from_complete] Converted to {} content blocks, stop_reason: {:?}", completion.content.len(), completion.stop_reason);
+
+            // Emit content blocks as stream chunks
+            for block in &completion.content {
+                eprintln!("[OpenAiDriver:stream_from_complete] Emitting block: {:?}", block);
+                match block {
+                    ContentBlock::Text { text } => {
+                        if !text.is_empty() {
+                            eprintln!("[OpenAiDriver:stream_from_complete] Emitting TextDelta: {} chars", text.len());
+                            yield Ok(StreamChunk::TextDelta { delta: text.clone() });
+                        }
+                    }
+                    ContentBlock::Thinking { thinking } => {
+                        yield Ok(StreamChunk::ThinkingDelta { delta: thinking.clone() });
+                    }
+                    ContentBlock::ToolUse { id, name, input } => {
+                        eprintln!("[OpenAiDriver:stream_from_complete] Emitting ToolUse: id={}, name={}", id, name);
+                        // Emit tool use start
+                        yield Ok(StreamChunk::ToolUseStart {
+                            id: id.clone(),
+                            name: name.clone(),
+                        });
+                        // Emit tool use delta with the arguments
+                        if !input.is_null() {
+                            let args_str = serde_json::to_string(input).unwrap_or_default();
+                            yield Ok(StreamChunk::ToolUseDelta {
+                                id: id.clone(),
+                                delta: args_str,
+                            });
+                        }
+                        // Emit tool use end
+                        yield Ok(StreamChunk::ToolUseEnd {
+                            id: id.clone(),
+                            input: input.clone(),
+                        });
+                    }
+                }
+            }
+
+            // Emit completion
+            yield Ok(StreamChunk::Complete {
+                input_tokens: completion.input_tokens,
+                output_tokens: completion.output_tokens,
+                stop_reason: match completion.stop_reason {
+                    StopReason::EndTurn => "end_turn",
+                    StopReason::MaxTokens => "max_tokens",
+                    StopReason::ToolUse => "tool_use",
+                    StopReason::StopSequence => "stop",
+                    StopReason::Error => "error",
+                }.to_string(),
+            });
+        })
+    }
 }
 
 // OpenAI API types
@@ -460,6 +709,8 @@ struct OpenAiToolCallDelta {
 
 #[derive(Debug, Deserialize)]
 struct OpenAiFunctionDelta {
+    #[serde(default)]
+    name: Option<String>,
     #[serde(default)]
     arguments: Option<String>,
 }
diff --git a/docs/knowledge-base/troubleshooting.md b/docs/knowledge-base/troubleshooting.md
index a46e9a4..b7f4ae3 100644
--- a/docs/knowledge-base/troubleshooting.md
+++ b/docs/knowledge-base/troubleshooting.md
@@ -1058,6 +1058,153 @@ cargo fix --lib -p zclaw-protocols --allow-dirty
 
 **Note**: `dead_code` warnings (unused fields and methods) do not block compilation and can be kept for future use.
 
+### 9.5 Alibaba Cloud Bailian Coding Plan tool calls fail with 400 errors
+
+**Symptoms**:
+- Plain conversation works, but any request that needs a skill/tool call returns a 400 error
+- The API returns `function.arguments must be in JSON format`
+- Or the response is empty even though `output_tokens` is reported
+
+**Root cause**: several problems stacked together
+
+1. **Streaming mode does not support tool calls**: a limitation of the Alibaba Cloud Bailian (DashScope) Coding Plan API:
+   > "tools暂时无法与stream=True同时使用" ("tools cannot currently be used together with stream=True")
+   - With `stream: true` and `tools` enabled at the same time, the API misbehaves
+   - Tool-call arguments are not transmitted correctly
+
+2. **Wrong priority when parsing responses**: `convert_response` preferred the `content` field even when it was an empty string
+   - When the API returned `content: Some("")` together with `tool_calls: [...]`
+   - The code wrongly picked the empty content, producing an empty response
+
+3. **Broken JSON serialization of ToolUse messages**: when `input` is `Null`
+   - `serde_json::to_string(input)` produces the string `"null"`
+   - The API requires `"{}"` (an empty object)
+
+**Analysis**:
+
+The complete tool-call flow:
+```
+user message → LLM decides to call a tool → returns tool_calls → tool executes → result returned → LLM produces the final response
+```
+
+With the Bailian API, because streaming and tools are incompatible:
+```
+stream=true + tools → API misbehaves → tool_call arguments are lost → empty tool names / duplicate calls
+```
+
+**Fixes**:
+
+1. **Detect incompatible providers and fall back to non-streaming mode** (`openai.rs:stream`):
+
+```rust
+fn stream(&self, request: CompletionRequest) -> Pin<Box<dyn Stream<Item = Result<StreamChunk, ZclawError>> + Send + '_>> {
+    let has_tools = !request.tools.is_empty();
+    let needs_non_streaming = self.base_url.contains("dashscope") ||
+        self.base_url.contains("aliyuncs") ||
+        self.base_url.contains("bigmodel.cn");
+
+    if has_tools && needs_non_streaming {
+        eprintln!("[OpenAiDriver:stream] Provider detected that may not support streaming with tools, using non-streaming mode");
+        return self.stream_from_complete(request); // fall back to non-streaming mode
+    }
+    // ... normal streaming logic
+}
+```
+
+2. **Implement `stream_from_complete`**: call the non-streaming API, then emulate streaming output (outline):
+
+```rust
+fn stream_from_complete(&self, request: CompletionRequest) -> Pin<Box<dyn Stream<Item = Result<StreamChunk, ZclawError>> + Send + '_>> {
+    let mut complete_request = self.build_api_request(&request);
+    complete_request.stream = false; // force non-streaming
+
+    Box::pin(stream! {
+        // 1. Send the non-streaming request
+        let response = client.execute(request).await?;
+
+        // 2. Parse the response
+        let api_response: OpenAiResponse = response.json().await?;
+
+        // 3. Convert tool calls into streaming events
+        for tool_call in tool_calls {
+            yield Ok(StreamChunk::ToolUseStart { id, name });
+            yield Ok(StreamChunk::ToolUseDelta { id, delta });
+            yield Ok(StreamChunk::ToolUseEnd { id, input });
+        }
+
+        // 4. Text content
+        yield Ok(StreamChunk::TextDelta { delta: content });
+
+        // 5. Done
+        yield Ok(StreamChunk::Complete { ... });
+    })
+}
+```
+
+3. **Fix the response-parsing priority** (`convert_response`):
+
+```rust
+let (content, stop_reason) = match choice {
+    Some(c) => {
+        let has_tool_calls = c.message.tool_calls.as_ref().map(|tc| !tc.is_empty()).unwrap_or(false);
+        let has_content = c.message.content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
+
+        let blocks = if has_tool_calls {
+            // ✅ tool calls take priority over empty content
+            tool_calls.iter().map(|tc| ContentBlock::ToolUse {
+                id: tc.id.clone(),
+                name: tc.function.name.clone(),
+                input: serde_json::from_str(&tc.function.arguments).unwrap_or(Value::Null),
+            }).collect()
+        } else if has_content {
+            // non-empty text content
+            vec![ContentBlock::Text { text: c.message.content.as_ref().unwrap().clone() }]
+        } else {
+            vec![ContentBlock::Text { text: String::new() }]
+        };
+        // ...
+    }
+};
+```
+
+4. **Fix the JSON serialization of ToolUse messages**:
+
+```rust
+zclaw_types::Message::ToolUse { id, tool, input } => {
+    let args = if input.is_null() {
+        "{}".to_string() // ✅ Null becomes an empty object
+    } else {
+        serde_json::to_string(input).unwrap_or_else(|_| "{}".to_string())
+    };
+    // ...
+}
+```
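+
+The fix in item 4 is easy to pin down with a small test. A minimal sketch (the standalone `to_openai_args` helper is hypothetical; in the real code the conversion is inlined in the match arm above):
+
+```rust
+fn to_openai_args(input: &serde_json::Value) -> String {
+    if input.is_null() {
+        "{}".to_string()
+    } else {
+        serde_json::to_string(input).unwrap_or_else(|_| "{}".to_string())
+    }
+}
+
+#[test]
+fn null_input_serializes_as_empty_object() {
+    // Null must become "{}" - the API rejects the literal string "null"
+    assert_eq!(to_openai_args(&serde_json::Value::Null), "{}");
+    // Real objects pass through unchanged
+    assert_eq!(to_openai_args(&serde_json::json!({"a": 1})), r#"{"a":1}"#);
+}
+```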
+
+**Affected scope**:
+- `crates/zclaw-runtime/src/driver/openai.rs` - the OpenAI-compatible driver
+
+**Providers with known compatibility issues**:
+
+| Provider | Base URL pattern | Issue |
+|----------|------------------|-------|
+| Alibaba Cloud Bailian | `dashscope.aliyuncs.com` | streaming + tools incompatible |
+| Alibaba Cloud Bailian Coding Plan | `coding.dashscope.aliyuncs.com` | streaming + tools incompatible |
+| Zhipu GLM | `bigmodel.cn` | may have the same issue |
+
+**Verifying the fix**:
+1. Configure the Bailian Coding Plan API (`https://coding.dashscope.aliyuncs.com/v1`)
+2. Send a message that needs a skill, e.g. "查询腾讯财报" ("look up Tencent's financial reports")
+3. The log should show: `[OpenAiDriver:stream] Provider detected that may not support streaming with tools`
+4. The tool should execute correctly, with complete arguments
+
+**Sample debug log**:
+```
+[OpenAiDriver:stream] base_url=https://coding.dashscope.aliyuncs.com/v1, has_tools=true, needs_non_streaming=true
+[OpenAiDriver:stream] Provider detected that may not support streaming with tools, using non-streaming mode
+[OpenAiDriver] Non-streaming response received, tool_calls=1
+[AgentLoop] ToolUseEnd: id=call_xxx, input={"skill_id":"finance-tracker","input":{...}}
+```
+
 ---
 
 ## 10. Skill System Issues
@@ -1163,6 +1310,90 @@ triggers:
 2. Send "查询腾讯财报" ("look up Tencent's financial reports")
 3. The agent should call the `execute_skill` tool with `skill_id: "finance-tracker"`
 
+### 10.2 `skills_dir: None` completely disables the skill system
+
+**Symptoms**:
+- The agent never calls any skill and always replies with plain text
+- `skills.list()` returns an empty list
+- The system prompt contains no skill information at all
+
+**Root cause**: `skills_dir` is hard-coded to `None` in `KernelConfig::from_provider()`
+
+**Problem code** (`crates/zclaw-kernel/src/config.rs:337`):
+```rust
+// ❌ wrong - from_provider() hard-codes None
+pub fn from_provider(
+    provider: &str,
+    api_key: &str,
+    model: &str,
+    base_url: Option<&str>,
+    api_protocol: &str,
+) -> Self {
+    let llm = match provider {
+        // ... provider matching logic
+    };
+
+    Self {
+        database_url: default_database_url(),
+        llm,
+        skills_dir: None, // ← hard-coded! skills are never loaded
+    }
+}
+```
+
+**Impact analysis**:
+
+Tauri builds the config with `from_provider()` when initializing the Kernel:
+```
+kernel_init → KernelConfig::from_provider() → skills_dir: None
+    → Kernel::boot() → skills_dir missing, scan skipped
+    → skills.list() returns an empty list
+    → no skill information in the system prompt
+    → the LLM never learns that the execute_skill tool exists
+```
+
+**Fix**:
+```rust
+// ✅ correct - use the default skills directory
+Self {
+    database_url: default_database_url(),
+    llm,
+    skills_dir: default_skills_dir(), // use the ./skills directory
+}
+```
+
+**Fix code** (`config.rs:161-165`):
+```rust
+fn default_skills_dir() -> Option<PathBuf> {
+    std::env::current_dir()
+        .ok()
+        .map(|cwd| cwd.join("skills"))
+}
+```
+
+**Related files**:
+- `crates/zclaw-kernel/src/config.rs:337` - location of the fix
+- `crates/zclaw-kernel/src/kernel.rs:79-83` - skill-directory scanning logic
+
+**Verifying the fix**:
+1. Start the app and watch the terminal log
+2. You should see `[Kernel] Scanning skills directory: ./skills`
+3. Send "查询腾讯财报" ("look up Tencent's financial reports")
+4. The agent should call `execute_skill("finance-tracker", {...})`
+
+**Known limitation**:
+`default_skills_dir()` depends on `current_dir()`, so it can break when the process is started from a different working directory. A more robust option is to resolve against the executable's directory:
+
+```rust
+// suggested improvement
+fn default_skills_dir() -> Option<PathBuf> {
+    std::env::current_exe()
+        .ok()
+        .and_then(|exe| exe.parent().map(|p| p.join("skills")))
+        .or_else(|| std::env::current_dir().ok().map(|cwd| cwd.join("skills")))
+}
+```
+
 ---
 
 ## 11. Related Documents
@@ -1177,6 +1408,8 @@
 
 | Date | Change |
 |------|--------|
+| 2026-03-24 | Added section 9.5: Alibaba Cloud Bailian Coding Plan tool calls fail with 400 errors - streaming+tools incompatibility, response-parsing priority, JSON serialization |
+| 2026-03-24 | Added section 10.2: `skills_dir: None` completely disables the skill system - from_provider() hard-coded value |
 | 2026-03-24 | Added section 10.1: agent fails to invoke the appropriate skill - inject the skill list and triggers field into the system prompt |
 | 2026-03-24 | Added section 9.4: self-evolution system startup errors - DateTime type mismatch and unused-import warnings |
 | 2026-03-23 | Added section 9.3: old model still used after changing the model config - agent config taking precedence over kernel config |