fix(growth,skills,kernel): Phase 0 地基修复 — 经验积累覆盖 + Skill 工具调用
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

Bug 1: ExperienceStore store_experience() 相同 pain_pattern 因确定性 URI
直接覆盖,新 Experience reuse_count=0 重置已有积累。修复为先检查 URI
是否已存在,若存在则合并(保留原 id/created_at,reuse_count+1)。

Bug 2: PromptOnlySkill::execute() 只做纯文本 complete(),75 个 Skill
的 tools 字段是装饰性的。修复为扩展 LlmCompleter 支持 complete_with_tools,
SkillContext 新增 tool_definitions,KernelSkillExecutor 从 ToolRegistry
解析 manifest 声明的工具定义传入 LLM function calling。
This commit is contained in:
iven
2026-04-21 01:12:35 +08:00
parent f89b2263d1
commit c1dea6e07a
7 changed files with 237 additions and 15 deletions

View File

@@ -118,10 +118,40 @@ impl ExperienceStore {
&self.viking
}
/// Store (or overwrite) an experience. The URI is derived from
/// `agent_id + pain_pattern`, ensuring one experience per pattern.
/// Store an experience, merging with existing if the same pain pattern
/// already exists for this agent. Reuse-count is preserved and incremented
/// rather than reset to zero on re-extraction.
pub async fn store_experience(&self, exp: &Experience) -> zclaw_types::Result<()> {
    // URI is deterministic over agent_id + pain_pattern, so a repeated
    // pattern maps onto the same stored entry.
    let uri = exp.uri();
    // If an experience with this URI already exists, merge instead of overwrite.
    if let Some(existing_entry) = self.viking.get(&uri).await? {
        // A corrupt/undeserialisable entry falls through to a plain write,
        // which self-heals the record with the fresh experience.
        if let Ok(existing) = serde_json::from_str::<Experience>(&existing_entry.content) {
            // `existing` is owned locally, so identity/fallback fields are
            // moved out rather than cloned (avoids redundant allocations).
            let merged = Experience {
                // Identity and provenance come from the original record.
                id: existing.id,
                created_at: existing.created_at,
                // Re-extraction of the same pattern counts as one reuse.
                reuse_count: existing.reuse_count + 1,
                updated_at: Utc::now(),
                // New data takes precedence for content fields.
                pain_pattern: exp.pain_pattern.clone(),
                agent_id: exp.agent_id.clone(),
                context: exp.context.clone(),
                solution_steps: exp.solution_steps.clone(),
                outcome: exp.outcome.clone(),
                // Optional metadata: prefer the new value, keep the old as fallback.
                industry_context: exp.industry_context.clone().or(existing.industry_context),
                source_trigger: exp.source_trigger.clone().or(existing.source_trigger),
                tool_used: exp.tool_used.clone().or(existing.tool_used),
            };
            return self.write_entry(&uri, &merged).await;
        }
    }
    self.write_entry(&uri, exp).await
}
/// Low-level write: serialises the experience into a MemoryEntry and
/// persists it through the VikingAdapter.
async fn write_entry(&self, uri: &str, exp: &Experience) -> zclaw_types::Result<()> {
let content = serde_json::to_string(exp)?;
let mut keywords = vec![exp.pain_pattern.clone()];
keywords.extend(exp.solution_steps.iter().take(3).cloned());
@@ -133,7 +163,7 @@ impl ExperienceStore {
}
let entry = MemoryEntry {
uri,
uri: uri.to_string(),
memory_type: MemoryType::Experience,
content,
keywords,
@@ -197,7 +227,7 @@ impl ExperienceStore {
let mut updated = exp.clone();
updated.reuse_count += 1;
updated.updated_at = Utc::now();
if let Err(e) = self.store_experience(&updated).await {
if let Err(e) = self.write_entry(&exp.uri(), &updated).await {
warn!("[ExperienceStore] Failed to increment reuse for {}: {}", exp.id, e);
}
}
@@ -289,7 +319,7 @@ mod tests {
}
#[tokio::test]
async fn test_store_overwrites_same_pattern() {
async fn test_store_merges_same_pattern() {
let viking = Arc::new(VikingAdapter::in_memory());
let store = ExperienceStore::new(viking);
@@ -303,13 +333,19 @@ mod tests {
"agent-1", "packaging", "v2 updated",
vec!["new step".into()], "better",
);
// Force same URI by reusing the ID logic — same pattern → same URI.
// Same pattern → same URI → should merge, not overwrite.
store.store_experience(&exp_v2).await.unwrap();
let found = store.find_by_agent("agent-1").await.unwrap();
// Should be overwritten, not duplicated (same URI).
// Should be merged into one entry, not duplicated.
assert_eq!(found.len(), 1);
// Content fields updated to v2.
assert_eq!(found[0].context, "v2 updated");
assert_eq!(found[0].solution_steps[0], "new step");
// Reuse count incremented (was 0, now 1).
assert_eq!(found[0].reuse_count, 1);
// Original ID and created_at preserved.
assert_eq!(found[0].id, exp_v1.id);
}
#[tokio::test]
@@ -376,4 +412,26 @@ mod tests {
assert_eq!(found_a.len(), 1);
assert_eq!(found_a[0].pain_pattern, "packaging");
}
#[tokio::test]
async fn test_reuse_count_accumulates_across_repeated_patterns() {
    let store = ExperienceStore::new(Arc::new(VikingAdapter::in_memory()));
    // Re-store the same pain pattern four times, as four conversations would.
    for version in 0..4 {
        let exp = Experience::new(
            "agent-1",
            "logistics delay",
            &format!("context v{}", version),
            vec![format!("step {}", version)],
            &format!("outcome {}", version),
        );
        store.store_experience(&exp).await.unwrap();
    }
    let found = store.find_by_agent("agent-1").await.unwrap();
    // All four writes collapsed into a single merged entry.
    assert_eq!(found.len(), 1);
    // reuse_count starts at 0 on first store, then bumps on each re-store.
    assert_eq!(found[0].reuse_count, 3);
    // Content fields reflect the latest version.
    assert_eq!(found[0].context, "context v3");
}
}

View File

@@ -6,9 +6,9 @@ use async_trait::async_trait;
use serde_json::{json, Value};
use zclaw_runtime::{LlmDriver, tool::{SkillExecutor, HandExecutor}};
use zclaw_skills::{SkillRegistry, LlmCompleter};
use zclaw_skills::{SkillRegistry, LlmCompleter, SkillCompletion, SkillToolCall};
use zclaw_hands::HandRegistry;
use zclaw_types::{AgentId, Result};
use zclaw_types::{AgentId, Result, ToolDefinition};
/// Adapter that bridges `zclaw_runtime::LlmDriver` -> `zclaw_skills::LlmCompleter`
pub(crate) struct LlmDriverAdapter {
@@ -44,18 +44,111 @@ impl LlmCompleter for LlmDriverAdapter {
Ok(text)
})
}
/// Tool-augmented completion: forwards the declared tool definitions to the
/// underlying `LlmDriver` and splits the response into free text and tool
/// calls for the skill layer to relay.
fn complete_with_tools(
    &self,
    prompt: &str,
    system_prompt: Option<&str>,
    tools: Vec<ToolDefinition>,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<SkillCompletion, String>> + Send + '_>> {
    // Own everything the future captures so it does not borrow the caller's &str.
    let driver = self.driver.clone();
    let prompt = prompt.to_string();
    let system = system_prompt.map(|s| s.to_string());
    let max_tokens = self.max_tokens;
    let temperature = self.temperature;
    Box::pin(async move {
        let request = zclaw_runtime::CompletionRequest {
            // Empty model name — presumably the driver substitutes its
            // configured default, mirroring the plain complete() path.
            // NOTE(review): confirm against LlmDriver::complete.
            model: String::new(),
            system,
            // Single-turn request carrying only the rendered skill prompt.
            messages: vec![zclaw_types::Message::user(prompt)],
            tools,
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            stop: Vec::new(),
            stream: false,
            thinking_enabled: false,
            reasoning_effort: None,
            plan_mode: false,
        };
        let response = driver
            .complete(request)
            .await
            .map_err(|e| format!("LLM completion error: {}", e))?;
        // Partition response content into text fragments and tool invocations.
        let mut text_parts = Vec::new();
        let mut tool_calls = Vec::new();
        for block in &response.content {
            match block {
                zclaw_runtime::ContentBlock::Text { text } => text_parts.push(text.clone()),
                zclaw_runtime::ContentBlock::ToolUse { id, name, input } => {
                    tool_calls.push(SkillToolCall {
                        id: id.clone(),
                        name: name.clone(),
                        input: input.clone(),
                    });
                }
                // Other block kinds carry no skill-visible payload.
                _ => {}
            }
        }
        Ok(SkillCompletion {
            text: text_parts.join(""),
            tool_calls,
        })
    })
}
}
/// Skill executor implementation for Kernel
pub struct KernelSkillExecutor {
    /// Registry of loaded skills; execution is delegated to it.
    pub(crate) skills: Arc<SkillRegistry>,
    /// Type-erased LLM completer injected into each SkillContext.
    pub(crate) llm: Arc<dyn LlmCompleter>,
    /// Shared tool registry, updated before each skill execution from the
    /// agent loop's freshly-built registry. Uses std::sync because reads
    /// happen from async code but writes are brief and infrequent.
    pub(crate) tool_registry: std::sync::RwLock<Option<zclaw_runtime::ToolRegistry>>,
}
impl KernelSkillExecutor {
pub fn new(skills: Arc<SkillRegistry>, driver: Arc<dyn LlmDriver>) -> Self {
let llm: Arc<dyn zclaw_skills::LlmCompleter> = Arc::new(LlmDriverAdapter { driver, max_tokens: 4096, temperature: 0.7 });
Self { skills, llm }
let llm: Arc<dyn LlmCompleter> = Arc::new(LlmDriverAdapter { driver, max_tokens: 4096, temperature: 0.7 });
Self { skills, llm, tool_registry: std::sync::RwLock::new(None) }
}
/// Update the tool registry snapshot. Called by the kernel before each
/// agent loop iteration so skill execution sees the latest tool set.
///
/// A poisoned lock is recovered (`PoisonError::into_inner`) rather than
/// skipped: silently dropping the update would leave skills executing
/// against a stale tool set for the rest of the process lifetime.
pub fn set_tool_registry(&self, registry: zclaw_runtime::ToolRegistry) {
    let mut guard = self
        .tool_registry
        .write()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    *guard = Some(registry);
}
/// Resolve the tool definitions declared by a skill manifest against
/// the currently active tool registry.
///
/// Yields an empty list when the skill is unknown, declares no tools,
/// the registry lock is poisoned, or no registry snapshot has been set.
fn resolve_tool_definitions(&self, skill_id: &str) -> Vec<ToolDefinition> {
    let manifests = self.skills.manifests_snapshot();
    // Guard: the skill must exist and declare at least one tool.
    let declared = match manifests.get(&zclaw_types::SkillId::new(skill_id)) {
        Some(manifest) if !manifest.tools.is_empty() => &manifest.tools,
        _ => return Vec::new(),
    };
    // A poisoned or not-yet-populated registry degrades to "no tools"
    // rather than failing the skill execution.
    match self.tool_registry.read() {
        Ok(guard) => guard
            .as_ref()
            .map(|registry| {
                // Keep only definitions for tools the manifest declares.
                registry
                    .definitions()
                    .into_iter()
                    .filter(|def| declared.iter().any(|t| t == &def.name))
                    .collect()
            })
            .unwrap_or_default(),
        Err(_) => Vec::new(),
    }
}
}
@@ -68,10 +161,12 @@ impl SkillExecutor for KernelSkillExecutor {
session_id: &str,
input: Value,
) -> Result<Value> {
let tool_definitions = self.resolve_tool_definitions(skill_id);
let context = zclaw_skills::SkillContext {
agent_id: agent_id.to_string(),
session_id: session_id.to_string(),
llm: Some(self.llm.clone()),
tool_definitions,
..Default::default()
};
let result = self.skills.execute(&zclaw_types::SkillId::new(skill_id), &context, input).await?;

View File

@@ -56,6 +56,7 @@ impl Kernel {
// Create agent loop with model configuration
let subagent_enabled = chat_mode.as_ref().and_then(|m| m.subagent_enabled).unwrap_or(false);
let tools = self.create_tool_registry(subagent_enabled);
self.skill_executor.set_tool_registry(tools.clone());
let mut loop_runner = AgentLoop::new(
*agent_id,
self.driver.clone(),
@@ -169,6 +170,7 @@ impl Kernel {
// Create agent loop with model configuration
let subagent_enabled = chat_mode.as_ref().and_then(|m| m.subagent_enabled).unwrap_or(false);
let tools = self.create_tool_registry(subagent_enabled);
self.skill_executor.set_tool_registry(tools.clone());
let mut loop_runner = AgentLoop::new(
*agent_id,
self.driver.clone(),

View File

@@ -7,7 +7,7 @@ use std::time::Instant;
use tracing::warn;
use zclaw_types::Result;
use super::{Skill, SkillContext, SkillManifest, SkillResult};
use super::{Skill, SkillCompletion, SkillContext, SkillManifest, SkillResult};
/// Returns the platform-appropriate Python binary name.
/// On Windows, the standard installer provides `python.exe`, not `python3.exe`.
@@ -39,6 +39,17 @@ impl PromptOnlySkill {
prompt
}
fn completion_to_result(&self, completion: SkillCompletion) -> SkillResult {
if completion.tool_calls.is_empty() {
return SkillResult::success(Value::String(completion.text));
}
// Include both text and tool calls so the caller can relay them.
SkillResult::success(serde_json::json!({
"text": completion.text,
"tool_calls": completion.tool_calls,
}))
}
}
#[async_trait]
@@ -50,13 +61,25 @@ impl Skill for PromptOnlySkill {
async fn execute(&self, context: &SkillContext, input: Value) -> Result<SkillResult> {
let prompt = self.format_prompt(&input);
// If an LLM completer is available, generate an AI response
if let Some(completer) = &context.llm {
// If tool definitions are available and the manifest declares tools,
// use tool-augmented completion so the LLM can invoke tools.
if !context.tool_definitions.is_empty() && !self.manifest.tools.is_empty() {
match completer.complete_with_tools(&prompt, None, context.tool_definitions.clone()).await {
Ok(completion) => {
return Ok(self.completion_to_result(completion));
}
Err(e) => {
warn!("[PromptOnlySkill] Tool completion failed: {}, falling back", e);
}
}
}
// Plain completion (no tools or fallback)
match completer.complete(&prompt).await {
Ok(response) => return Ok(SkillResult::success(Value::String(response))),
Err(e) => {
warn!("[PromptOnlySkill] LLM completion failed: {}, falling back to raw prompt", e);
// Fall through to return raw prompt
}
}
}

View File

@@ -3,7 +3,7 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::pin::Pin;
use zclaw_types::{SkillId, Result};
use zclaw_types::{SkillId, ToolDefinition, Result};
/// Type-erased LLM completion interface.
///
@@ -15,6 +15,43 @@ pub trait LlmCompleter: Send + Sync {
&self,
prompt: &str,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>>;
/// Complete a prompt with tool definitions available to the LLM.
///
/// The LLM may return text, tool calls, or both. Tool calls are returned
/// in the `tool_calls` field for the caller to execute or relay.
/// Default implementation falls back to plain `complete()`.
fn complete_with_tools(
    &self,
    prompt: &str,
    _system_prompt: Option<&str>,
    _tools: Vec<ToolDefinition>,
) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<SkillCompletion, String>> + Send + '_>> {
    // Own the prompt so the boxed future does not borrow the caller's &str.
    let prompt = prompt.to_string();
    Box::pin(async move {
        // System prompt and tools are ignored by this fallback; implementors
        // that support function calling should override this method.
        self.complete(&prompt).await.map(|text| SkillCompletion { text, tool_calls: vec![] })
    })
}
}
/// Result of an LLM completion that may include tool calls.
///
/// Serializable so the completion can be embedded directly in a skill
/// result payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillCompletion {
    /// The text portion of the LLM response.
    pub text: String,
    /// Tool calls the LLM requested, if any. Empty when the model replied
    /// with text only.
    pub tool_calls: Vec<SkillToolCall>,
}
/// A single tool call returned by the LLM during skill execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillToolCall {
    /// Unique call ID, as assigned by the LLM provider.
    pub id: String,
    /// Name of the tool to invoke.
    pub name: String,
    /// Input arguments for the tool, as raw JSON.
    pub input: Value,
}
/// Skill manifest definition
@@ -97,6 +134,9 @@ pub struct SkillContext {
pub file_access_allowed: bool,
/// Optional LLM completer for skills that need AI generation (e.g. PromptOnly)
pub llm: Option<std::sync::Arc<dyn LlmCompleter>>,
/// Tool definitions resolved from the skill manifest's `tools` field.
/// Populated by the kernel when creating the context.
pub tool_definitions: Vec<ToolDefinition>,
}
impl std::fmt::Debug for SkillContext {
@@ -109,6 +149,7 @@ impl std::fmt::Debug for SkillContext {
.field("network_allowed", &self.network_allowed)
.field("file_access_allowed", &self.file_access_allowed)
.field("llm", &self.llm.as_ref().map(|_| "Arc<dyn LlmCompleter>"))
.field("tool_definitions", &self.tool_definitions.len())
.finish()
}
}
@@ -124,6 +165,7 @@ impl Default for SkillContext {
network_allowed: false,
file_access_allowed: false,
llm: None,
tool_definitions: Vec::new(),
}
}
}

View File

@@ -241,6 +241,7 @@ pub async fn orchestration_execute(
network_allowed: true,
file_access_allowed: true,
llm: None,
tool_definitions: Vec::new(),
};
// Execute orchestration

View File

@@ -277,6 +277,7 @@ impl From<SkillContext> for zclaw_skills::SkillContext {
network_allowed: true,
file_access_allowed: true,
llm: None, // Injected by Kernel.execute_skill()
tool_definitions: Vec::new(),
}
}
}