fix(growth,skills,kernel): Phase 0 地基修复 — 经验积累覆盖 + Skill 工具调用
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

Bug 1: ExperienceStore store_experience() 相同 pain_pattern 因确定性 URI
直接覆盖,新 Experience reuse_count=0 重置已有积累。修复为先检查 URI
是否已存在,若存在则合并(保留原 id/created_at,reuse_count+1)。

Bug 2: PromptOnlySkill::execute() 只做纯文本 complete(),75 个 Skill
的 tools 字段是装饰性的。修复为扩展 LlmCompleter 支持 complete_with_tools,
SkillContext 新增 tool_definitions,KernelSkillExecutor 从 ToolRegistry
解析 manifest 声明的工具定义传入 LLM function calling。
This commit is contained in:
iven
2026-04-21 01:12:35 +08:00
parent f89b2263d1
commit c1dea6e07a
7 changed files with 237 additions and 15 deletions

View File

@@ -3,7 +3,7 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::pin::Pin;
use zclaw_types::{SkillId, Result};
use zclaw_types::{SkillId, ToolDefinition, Result};
/// Type-erased LLM completion interface.
///
@@ -15,6 +15,43 @@ pub trait LlmCompleter: Send + Sync {
&self,
prompt: &str,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>>;
/// Complete a prompt with tool definitions available to the LLM.
///
/// The LLM may return text, tool calls, or both. Tool calls are returned
/// in the `tool_calls` field for the caller to execute or relay.
///
/// Default implementation falls back to plain `complete()`. Tool
/// definitions cannot be expressed through `complete()` and are dropped,
/// but the system prompt is preserved by prepending it to the user
/// prompt so its instructions are not silently lost.
fn complete_with_tools(
    &self,
    prompt: &str,
    system_prompt: Option<&str>,
    _tools: Vec<ToolDefinition>,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<SkillCompletion, String>> + Send + '_>> {
    // Merge the system prompt into the text prompt; the plain `complete()`
    // path has no separate system channel.
    let merged = match system_prompt {
        Some(sys) => format!("{sys}\n\n{prompt}"),
        None => prompt.to_string(),
    };
    Box::pin(async move {
        self.complete(&merged)
            .await
            .map(|text| SkillCompletion { text, tool_calls: vec![] })
    })
}
}
/// Result of an LLM completion that may include tool calls.
///
/// Produced by `LlmCompleter::complete_with_tools`. When the model issued
/// no tool calls, `tool_calls` is empty and `text` carries the whole reply.
/// Derives `Serialize`/`Deserialize`, so field names/order are part of the
/// wire format — do not reorder or rename fields casually.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillCompletion {
    /// The text portion of the LLM response.
    pub text: String,
    /// Tool calls the LLM requested, if any (empty when none).
    pub tool_calls: Vec<SkillToolCall>,
}
/// A single tool call returned by the LLM during skill execution.
///
/// Carried in `SkillCompletion::tool_calls` for the caller to execute or
/// relay. Serde-serialized, so field names/order are part of the wire format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillToolCall {
    /// Unique call ID (used to correlate the tool result with this request).
    pub id: String,
    /// Name of the tool to invoke.
    pub name: String,
    /// Input arguments for the tool, as raw JSON produced by the model.
    /// NOTE(review): presumably validated against the tool's schema by the
    /// executor — confirm at the call site.
    pub input: Value,
}
/// Skill manifest definition
@@ -97,6 +134,9 @@ pub struct SkillContext {
pub file_access_allowed: bool,
/// Optional LLM completer for skills that need AI generation (e.g. PromptOnly)
pub llm: Option<std::sync::Arc<dyn LlmCompleter>>,
/// Tool definitions resolved from the skill manifest's `tools` field.
/// Populated by the kernel when creating the context.
pub tool_definitions: Vec<ToolDefinition>,
}
impl std::fmt::Debug for SkillContext {
@@ -109,6 +149,7 @@ impl std::fmt::Debug for SkillContext {
.field("network_allowed", &self.network_allowed)
.field("file_access_allowed", &self.file_access_allowed)
.field("llm", &self.llm.as_ref().map(|_| "Arc<dyn LlmCompleter>"))
.field("tool_definitions", &self.tool_definitions.len())
.finish()
}
}
@@ -124,6 +165,7 @@ impl Default for SkillContext {
network_allowed: false,
file_access_allowed: false,
llm: None,
tool_definitions: Vec::new(),
}
}
}