fix(growth,skills,kernel): Phase 0 地基修复 — 经验积累覆盖 + Skill 工具调用
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

Bug 1: ExperienceStore store_experience() 相同 pain_pattern 因确定性 URI
直接覆盖,新 Experience reuse_count=0 重置已有积累。修复为先检查 URI
是否已存在,若存在则合并(保留原 id/created_at,reuse_count+1)。

Bug 2: PromptOnlySkill::execute() 只做纯文本 complete(),75 个 Skill
的 tools 字段是装饰性的。修复为扩展 LlmCompleter 支持 complete_with_tools,
SkillContext 新增 tool_definitions,KernelSkillExecutor 从 ToolRegistry
解析 manifest 声明的工具定义传入 LLM function calling。
This commit is contained in:
iven
2026-04-21 01:12:35 +08:00
parent f89b2263d1
commit c1dea6e07a
7 changed files with 237 additions and 15 deletions

View File

@@ -7,7 +7,7 @@ use std::time::Instant;
use tracing::warn;
use zclaw_types::Result;
use super::{Skill, SkillContext, SkillManifest, SkillResult};
use super::{Skill, SkillCompletion, SkillContext, SkillManifest, SkillResult};
/// Returns the platform-appropriate Python binary name.
/// On Windows, the standard installer provides `python.exe`, not `python3.exe`.
@@ -39,6 +39,17 @@ impl PromptOnlySkill {
prompt
}
fn completion_to_result(&self, completion: SkillCompletion) -> SkillResult {
if completion.tool_calls.is_empty() {
return SkillResult::success(Value::String(completion.text));
}
// Include both text and tool calls so the caller can relay them.
SkillResult::success(serde_json::json!({
"text": completion.text,
"tool_calls": completion.tool_calls,
}))
}
}
#[async_trait]
@@ -50,13 +61,25 @@ impl Skill for PromptOnlySkill {
async fn execute(&self, context: &SkillContext, input: Value) -> Result<SkillResult> {
let prompt = self.format_prompt(&input);
// If an LLM completer is available, generate an AI response
if let Some(completer) = &context.llm {
// If tool definitions are available and the manifest declares tools,
// use tool-augmented completion so the LLM can invoke tools.
if !context.tool_definitions.is_empty() && !self.manifest.tools.is_empty() {
match completer.complete_with_tools(&prompt, None, context.tool_definitions.clone()).await {
Ok(completion) => {
return Ok(self.completion_to_result(completion));
}
Err(e) => {
warn!("[PromptOnlySkill] Tool completion failed: {}, falling back", e);
}
}
}
// Plain completion (no tools or fallback)
match completer.complete(&prompt).await {
Ok(response) => return Ok(SkillResult::success(Value::String(response))),
Err(e) => {
warn!("[PromptOnlySkill] LLM completion failed: {}, falling back to raw prompt", e);
// Fall through to return raw prompt
}
}
}

View File

@@ -3,7 +3,7 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::pin::Pin;
use zclaw_types::{SkillId, Result};
use zclaw_types::{SkillId, ToolDefinition, Result};
/// Type-erased LLM completion interface.
///
@@ -15,6 +15,43 @@ pub trait LlmCompleter: Send + Sync {
&self,
prompt: &str,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>>;
/// Complete a prompt with tool definitions available to the LLM.
///
/// The LLM may return text, tool calls, or both. Tool calls are returned
/// in the `tool_calls` field for the caller to execute or relay.
/// Default implementation falls back to plain `complete()`, so existing
/// implementors that only provide `complete()` keep working unchanged.
fn complete_with_tools(
    &self,
    prompt: &str,
    _system_prompt: Option<&str>,
    _tools: Vec<ToolDefinition>,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<SkillCompletion, String>> + Send + '_>> {
    // Own the prompt so the boxed future does not borrow the caller's &str.
    let owned_prompt = prompt.to_string();
    Box::pin(async move {
        let text = self.complete(&owned_prompt).await?;
        Ok(SkillCompletion { text, tool_calls: Vec::new() })
    })
}
}
/// Result of an LLM completion that may include tool calls.
///
/// Produced by [`LlmCompleter::complete_with_tools`]; a plain `complete()`
/// fallback yields an empty `tool_calls` vector.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillCompletion {
    /// The text portion of the LLM response.
    pub text: String,
    /// Tool calls the LLM requested, if any. Empty when the model responded
    /// with text only.
    pub tool_calls: Vec<SkillToolCall>,
}
/// A single tool call returned by the LLM during skill execution.
///
/// Mirrors the function-calling payload of the underlying LLM provider so
/// the caller can execute the tool and correlate the result via `id`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillToolCall {
    /// Unique call ID, used to match a tool result back to this request.
    pub id: String,
    /// Name of the tool to invoke.
    pub name: String,
    /// Input arguments for the tool, as provider-supplied JSON.
    pub input: Value,
}
/// Skill manifest definition
@@ -97,6 +134,9 @@ pub struct SkillContext {
pub file_access_allowed: bool,
/// Optional LLM completer for skills that need AI generation (e.g. PromptOnly)
pub llm: Option<std::sync::Arc<dyn LlmCompleter>>,
/// Tool definitions resolved from the skill manifest's `tools` field.
/// Populated by the kernel when creating the context.
pub tool_definitions: Vec<ToolDefinition>,
}
impl std::fmt::Debug for SkillContext {
@@ -109,6 +149,7 @@ impl std::fmt::Debug for SkillContext {
.field("network_allowed", &self.network_allowed)
.field("file_access_allowed", &self.file_access_allowed)
.field("llm", &self.llm.as_ref().map(|_| "Arc<dyn LlmCompleter>"))
.field("tool_definitions", &self.tool_definitions.len())
.finish()
}
}
@@ -124,6 +165,7 @@ impl Default for SkillContext {
network_allowed: false,
file_access_allowed: false,
llm: None,
tool_definitions: Vec::new(),
}
}
}