Files
zclaw_openfang/crates/zclaw-skills/src/skill.rs
iven 5fdf96c3f5 chore: 提交所有工作进度 — SaaS 后端增强、Admin UI、桌面端集成
包含大量 SaaS 平台改进、Admin 管理后台更新、桌面端集成完善、
文档同步、测试文件重构等内容。为 QA 测试准备干净工作树。
2026-03-29 10:46:41 +08:00

185 lines
5.5 KiB
Rust

//! Skill definition and types
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::pin::Pin;
use zclaw_types::{SkillId, Result};
/// Type-erased LLM completion interface.
///
/// Defined here (in zclaw-skills) to avoid a circular dependency on zclaw-runtime.
/// Implementations live in zclaw-kernel where both crates are available.
pub trait LlmCompleter: Send + Sync {
    /// Complete a simple prompt → response (no system prompt, no tools).
    ///
    /// Returns a boxed future so the trait stays object-safe (it is used as
    /// `Arc<dyn LlmCompleter>` in `SkillContext`); errors are plain `String`s
    /// to keep this crate free of provider-specific error types.
    fn complete(
        &self,
        prompt: &str,
    ) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>>;
}
/// Skill manifest definition
///
/// Declarative description of a skill, deserialized from a manifest document.
/// Fields marked `#[serde(default)]` are optional in the source document;
/// `id`, `name`, `description`, `version`, and `mode` are required.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillManifest {
    /// Unique skill identifier
    pub id: SkillId,
    /// Human-readable name
    pub name: String,
    /// Skill description
    pub description: String,
    /// Skill version
    pub version: String,
    /// Skill author (optional in the manifest)
    #[serde(default)]
    pub author: Option<String>,
    /// Execution mode
    pub mode: SkillMode,
    /// Required capabilities
    #[serde(default)]
    pub capabilities: Vec<String>,
    /// Input schema (JSON Schema)
    #[serde(default)]
    pub input_schema: Option<Value>,
    /// Output schema (JSON Schema)
    #[serde(default)]
    pub output_schema: Option<Value>,
    /// Tags for categorization
    #[serde(default)]
    pub tags: Vec<String>,
    /// Category for skill grouping — e.g. "开发工程" (development engineering)
    /// or "数据分析" (data analysis).
    /// If not specified, will be auto-detected from skill ID
    #[serde(default)]
    pub category: Option<String>,
    /// Trigger words for skill activation
    #[serde(default)]
    pub triggers: Vec<String>,
    /// Whether the skill is enabled (defaults to `true` when absent)
    #[serde(default = "default_enabled")]
    pub enabled: bool,
}
/// Serde default for `SkillManifest::enabled`: a skill is considered
/// enabled unless the manifest explicitly disables it.
fn default_enabled() -> bool {
    true
}
/// Skill execution mode
///
/// Serialized in `snake_case` (e.g. `prompt_only`, `wasm`) to match the
/// manifest document format.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum SkillMode {
    /// Prompt-only skill (no code execution)
    PromptOnly,
    /// Python script execution
    Python,
    /// Shell command execution
    Shell,
    /// WebAssembly execution (requires 'wasm' feature flag, falls back to PromptOnly otherwise)
    Wasm,
    /// Native Rust execution (not yet implemented, falls back to PromptOnly)
    Native,
}
/// Skill execution context
///
/// Per-invocation environment and sandbox policy handed to a skill when it
/// runs. `Clone` is derived; the optional LLM handle is shared via `Arc`,
/// so clones reference the same completer.
#[derive(Clone)]
pub struct SkillContext {
    /// Agent ID executing the skill
    pub agent_id: String,
    /// Session ID for the execution
    pub session_id: String,
    /// Working directory for execution (`None` = executor's default)
    pub working_dir: Option<std::path::PathBuf>,
    /// Environment variables for the execution
    pub env: std::collections::HashMap<String, String>,
    /// Timeout in seconds
    pub timeout_secs: u64,
    /// Whether to allow network access
    pub network_allowed: bool,
    /// Whether to allow file system access
    pub file_access_allowed: bool,
    /// Optional LLM completer for skills that need AI generation (e.g. PromptOnly)
    pub llm: Option<std::sync::Arc<dyn LlmCompleter>>,
}
impl std::fmt::Debug for SkillContext {
    /// Manual `Debug`: `dyn LlmCompleter` has no `Debug` impl, so `llm` is
    /// rendered as a placeholder string. The `env` map is not printed —
    /// NOTE(review): presumably to keep environment variables (which may hold
    /// secrets) out of logs; confirm intent. `finish_non_exhaustive()` emits
    /// a trailing `..` so readers can see that fields were omitted, instead
    /// of `finish()` which implies a complete listing.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SkillContext")
            .field("agent_id", &self.agent_id)
            .field("session_id", &self.session_id)
            .field("working_dir", &self.working_dir)
            .field("timeout_secs", &self.timeout_secs)
            .field("network_allowed", &self.network_allowed)
            .field("file_access_allowed", &self.file_access_allowed)
            .field("llm", &self.llm.as_ref().map(|_| "Arc<dyn LlmCompleter>"))
            .finish_non_exhaustive()
    }
}
impl Default for SkillContext {
fn default() -> Self {
Self {
agent_id: String::new(),
session_id: String::new(),
working_dir: None,
env: std::collections::HashMap::new(),
timeout_secs: 60,
network_allowed: false,
file_access_allowed: false,
llm: None,
}
}
}
/// Skill execution result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SkillResult {
    /// Whether execution succeeded
    pub success: bool,
    /// Output data
    pub output: Value,
    /// Error message if failed
    #[serde(default)]
    pub error: Option<String>,
    /// Execution duration in milliseconds
    #[serde(default)]
    pub duration_ms: Option<u64>,
    /// Token usage if an LLM was used
    // Fix: the original source had `#[serde(default)]` fused into the doc
    // comment above ("Token usage if LLM was #[serde(default)]"), which lost
    // the attribute entirely; restored here for consistency with the other
    // optional fields (`error`, `duration_ms`).
    #[serde(default)]
    pub tokens_used: Option<u32>,
}
impl SkillResult {
pub fn success(output: Value) -> Self {
Self {
success: true,
output,
error: None,
duration_ms: None,
tokens_used: None,
}
}
pub fn error(message: impl Into<String>) -> Self {
Self {
success: false,
output: Value::Null,
error: Some(message.into()),
duration_ms: None,
tokens_used: None,
}
}
}
/// Skill definition with execution logic
#[async_trait::async_trait]
pub trait Skill: Send + Sync {
/// Get the skill manifest
fn manifest(&self) -> &SkillManifest;
/// Execute the skill with given input
async fn execute(&self, context: &SkillContext, input: Value) -> Result<SkillResult>;
/// Validate input against schema
fn validate_input(&self, input: &Value) -> Result<()> {
// Basic validation - can be overridden
if input.is_null() {
return Err(zclaw_types::ZclawError::InvalidInput("Input cannot be null".into()));
}
Ok(())
}
}