Files
zclaw_openfang/crates/zclaw-skills/src/skill.rs
iven c1dea6e07a
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
fix(growth,skills,kernel): Phase 0 地基修复 — 经验积累覆盖 + Skill 工具调用
Bug 1: ExperienceStore store_experience() 相同 pain_pattern 因确定性 URI
直接覆盖,新 Experience reuse_count=0 重置已有积累。修复为先检查 URI
是否已存在,若存在则合并(保留原 id/created_at,reuse_count+1)。

Bug 2: PromptOnlySkill::execute() 只做纯文本 complete(),75 个 Skill
的 tools 字段是装饰性的。修复为扩展 LlmCompleter 支持 complete_with_tools,
SkillContext 新增 tool_definitions,KernelSkillExecutor 从 ToolRegistry
解析 manifest 声明的工具定义传入 LLM function calling。
2026-04-21 01:12:35 +08:00

230 lines
7.2 KiB
Rust

//! Skill definition and types
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::pin::Pin;
use zclaw_types::{SkillId, ToolDefinition, Result};
/// Type-erased LLM completion interface.
///
/// Defined here (in zclaw-skills) to avoid a circular dependency on zclaw-runtime.
/// Implementations live in zclaw-kernel where both crates are available.
/// Type-erased LLM completion interface.
///
/// Defined here (in zclaw-skills) to avoid a circular dependency on zclaw-runtime.
/// Implementations live in zclaw-kernel where both crates are available.
pub trait LlmCompleter: Send + Sync {
    /// Complete a simple prompt → response (no system prompt, no tools).
    ///
    /// Returns the completion text, or a `String` describing the failure.
    fn complete(
        &self,
        prompt: &str,
    ) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>>;

    /// Complete a prompt with tool definitions available to the LLM.
    ///
    /// The LLM may return text, tool calls, or both. Tool calls are returned
    /// in the `tool_calls` field for the caller to execute or relay.
    /// The default implementation ignores the system prompt and tools and
    /// falls back to plain `complete()`, so it never produces tool calls.
    fn complete_with_tools(
        &self,
        prompt: &str,
        _system_prompt: Option<&str>,
        _tools: Vec<ToolDefinition>,
    ) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<SkillCompletion, String>> + Send + '_>> {
        // The boxed future may only borrow `self` ('_), so take an owned
        // copy of the prompt before moving into the async block.
        let owned_prompt = prompt.to_owned();
        Box::pin(async move {
            let text = self.complete(&owned_prompt).await?;
            Ok(SkillCompletion { text, tool_calls: Vec::new() })
        })
    }
}
/// Result of an LLM completion that may include tool calls.
///
/// Produced by [`LlmCompleter::complete_with_tools`]; the text portion and
/// the tool-call list are both present in a single response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillCompletion {
/// The text portion of the LLM response.
pub text: String,
/// Tool calls the LLM requested, if any. Empty when the response was
/// text-only (the default `complete_with_tools` fallback never fills this).
pub tool_calls: Vec<SkillToolCall>,
}
/// A single tool call returned by the LLM during skill execution.
///
/// The caller is expected to execute the named tool with `input` or relay
/// the call onward (see [`SkillCompletion::tool_calls`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillToolCall {
/// Unique call ID.
/// NOTE(review): presumably the provider-assigned id used to pair tool
/// results with their originating calls — confirm against the kernel executor.
pub id: String,
/// Name of the tool to invoke.
pub name: String,
/// Input arguments for the tool.
pub input: Value,
}
/// Skill manifest definition
///
/// Deserialized from a skill's manifest file. Most fields are optional via
/// `#[serde(default)]`; only `id`, `name`, `description`, `version` and
/// `mode` are required in the serialized form.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillManifest {
/// Unique skill identifier
pub id: SkillId,
/// Human-readable name
pub name: String,
/// Skill description
pub description: String,
/// Skill version
pub version: String,
/// Skill author
#[serde(default)]
pub author: Option<String>,
/// Execution mode
pub mode: SkillMode,
/// Required capabilities
#[serde(default)]
pub capabilities: Vec<String>,
/// Input schema (JSON Schema)
#[serde(default)]
pub input_schema: Option<Value>,
/// Output schema (JSON Schema)
#[serde(default)]
pub output_schema: Option<Value>,
/// Tags for categorization
#[serde(default)]
pub tags: Vec<String>,
/// Category for skill grouping, e.g. "开发工程" (development engineering)
/// or "数据分析" (data analysis).
/// If not specified, will be auto-detected from skill ID
#[serde(default)]
pub category: Option<String>,
/// Trigger words for skill activation
#[serde(default)]
pub triggers: Vec<String>,
/// Required tools for skill execution (e.g., "bash", "web_search").
/// NOTE(review): resolved into `SkillContext::tool_definitions` by the
/// kernel per the commit description — confirm against KernelSkillExecutor.
#[serde(default)]
pub tools: Vec<String>,
/// Whether the skill is enabled
/// (defaults to `true` when absent, via `default_enabled`).
#[serde(default = "default_enabled")]
pub enabled: bool,
}
/// Serde default for `SkillManifest::enabled`: skills are enabled unless
/// the manifest explicitly says otherwise.
fn default_enabled() -> bool {
    true
}
/// Skill execution mode
///
/// Serialized in snake_case (e.g. `prompt_only`, `python`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum SkillMode {
/// Prompt-only skill (no code execution)
PromptOnly,
/// Python script execution
Python,
/// Shell command execution
Shell,
/// WebAssembly execution (requires 'wasm' feature flag, falls back to PromptOnly otherwise)
Wasm,
/// Native Rust execution (not yet implemented, falls back to PromptOnly)
Native,
}
/// Skill execution context
///
/// Carries identity, sandbox limits, and optional LLM/tool wiring for a
/// single skill execution. Built by the kernel; see `Default` for the
/// locked-down baseline. Not `Debug`-derivable because `llm` is a trait
/// object — a manual `Debug` impl exists below.
#[derive(Clone)]
pub struct SkillContext {
/// Agent ID executing the skill
pub agent_id: String,
/// Session ID for the execution
pub session_id: String,
/// Working directory for execution
pub working_dir: Option<std::path::PathBuf>,
/// Environment variables
/// NOTE(review): omitted from the Debug impl — presumably because it may
/// carry secrets; confirm.
pub env: std::collections::HashMap<String, String>,
/// Timeout in seconds
pub timeout_secs: u64,
/// Whether to allow network access
pub network_allowed: bool,
/// Whether to allow file system access
pub file_access_allowed: bool,
/// Optional LLM completer for skills that need AI generation (e.g. PromptOnly)
pub llm: Option<std::sync::Arc<dyn LlmCompleter>>,
/// Tool definitions resolved from the skill manifest's `tools` field.
/// Populated by the kernel when creating the context.
pub tool_definitions: Vec<ToolDefinition>,
}
impl std::fmt::Debug for SkillContext {
    /// Manual `Debug`: `llm` is a non-`Debug` trait object and is rendered
    /// as a fixed placeholder; `tool_definitions` is summarized by count.
    /// NOTE(review): `env` is intentionally left out of the output —
    /// presumably to avoid leaking secrets; confirm.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let llm_marker = self.llm.as_ref().map(|_| "Arc<dyn LlmCompleter>");
        let mut builder = f.debug_struct("SkillContext");
        builder
            .field("agent_id", &self.agent_id)
            .field("session_id", &self.session_id)
            .field("working_dir", &self.working_dir)
            .field("timeout_secs", &self.timeout_secs)
            .field("network_allowed", &self.network_allowed)
            .field("file_access_allowed", &self.file_access_allowed)
            .field("llm", &llm_marker)
            .field("tool_definitions", &self.tool_definitions.len());
        builder.finish()
    }
}
impl Default for SkillContext {
fn default() -> Self {
Self {
agent_id: String::new(),
session_id: String::new(),
working_dir: None,
env: std::collections::HashMap::new(),
timeout_secs: 60,
network_allowed: false,
file_access_allowed: false,
llm: None,
tool_definitions: Vec::new(),
}
}
}
/// Skill execution result
///
/// Returned by [`Skill::execute`]. `error` is populated only when
/// `success` is false; timing and token accounting are optional.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillResult {
    /// Whether execution succeeded
    pub success: bool,
    /// Output data
    pub output: Value,
    /// Error message if failed
    #[serde(default)]
    pub error: Option<String>,
    /// Execution duration in milliseconds
    #[serde(default)]
    pub duration_ms: Option<u64>,
    /// Token usage if LLM was used
    // FIX: the `#[serde(default)]` attribute had been fused into the doc
    // comment text above this field, leaving the field without the attribute
    // its sibling optional fields carry. Restored as a real attribute.
    #[serde(default)]
    pub tokens_used: Option<u32>,
}
impl SkillResult {
pub fn success(output: Value) -> Self {
Self {
success: true,
output,
error: None,
duration_ms: None,
tokens_used: None,
}
}
pub fn error(message: impl Into<String>) -> Self {
Self {
success: false,
output: Value::Null,
error: Some(message.into()),
duration_ms: None,
tokens_used: None,
}
}
}
/// Skill definition with execution logic
#[async_trait::async_trait]
pub trait Skill: Send + Sync {
/// Get the skill manifest
fn manifest(&self) -> &SkillManifest;
/// Execute the skill with given input
async fn execute(&self, context: &SkillContext, input: Value) -> Result<SkillResult>;
/// Validate input against schema
fn validate_input(&self, input: &Value) -> Result<()> {
// Basic validation - can be overridden
if input.is_null() {
return Err(zclaw_types::ZclawError::InvalidInput("Input cannot be null".into()));
}
Ok(())
}
}