chore: 提交所有工作进度 — SaaS 后端增强、Admin UI、桌面端集成

包含大量 SaaS 平台改进、Admin 管理后台更新、桌面端集成完善、
文档同步、测试文件重构等内容。为 QA 测试准备干净工作树。
This commit is contained in:
iven
2026-03-29 10:46:26 +08:00
parent 9a5fad2b59
commit 5fdf96c3f5
268 changed files with 22011 additions and 3886 deletions

View File

@@ -80,7 +80,7 @@ pub struct IdentitySnapshot {
fn default_soul() -> String {
r#"# ZCLAW 人格
你是 ZCLAW(小龙虾),一个基于 OpenClaw 定制的中文 AI 助手。
你是 ZCLAW(智能助手),一个成长性的中文 AI 助手。
## 核心特质

View File

@@ -5,7 +5,7 @@
//!
//! Architecture: kernel_commands.rs → intelligence_hooks → intelligence modules → Viking/Kernel
use tracing::debug;
use tracing::{debug, warn};
use std::sync::Arc;
@@ -26,13 +26,28 @@ pub async fn pre_conversation_hook(
identity_state: &IdentityManagerState,
) -> Result<String, String> {
// Step 1: Build memory context from Viking storage
let memory_context = build_memory_context(agent_id, user_message).await
.unwrap_or_default();
let memory_context = match build_memory_context(agent_id, user_message).await {
Ok(ctx) => ctx,
Err(e) => {
warn!(
"[intelligence_hooks] Failed to build memory context for agent {}: {}",
agent_id, e
);
String::new()
}
};
// Step 2: Build identity-enhanced system prompt
let enhanced_prompt = build_identity_prompt(agent_id, &memory_context, identity_state)
.await
.unwrap_or_default();
let enhanced_prompt = match build_identity_prompt(agent_id, &memory_context, identity_state).await {
Ok(prompt) => prompt,
Err(e) => {
warn!(
"[intelligence_hooks] Failed to build identity prompt for agent {}: {}",
agent_id, e
);
String::new()
}
};
Ok(enhanced_prompt)
}
@@ -76,8 +91,16 @@ pub async fn post_conversation_hook(
);
// Query actual memories from VikingStorage for reflection analysis
let memories = query_memories_for_reflection(agent_id).await
.unwrap_or_default();
let memories = match query_memories_for_reflection(agent_id).await {
Ok(m) => m,
Err(e) => {
warn!(
"[intelligence_hooks] Failed to query memories for reflection (agent {}): {}",
agent_id, e
);
Vec::new()
}
};
debug!(
"[intelligence_hooks] Fetched {} memories for reflection",
@@ -133,9 +156,10 @@ async fn build_memory_context(
&entry.content
};
// Truncate long entries
let truncated = if text.len() > 100 {
format!("{}...", &text[..100])
// Truncate long entries (char-safe for CJK text)
let truncated = if text.chars().count() > 100 {
let truncated: String = text.chars().take(100).collect();
format!("{}...", truncated)
} else {
text.to_string()
};

View File

@@ -9,13 +9,18 @@ use tauri::{AppHandle, Emitter, State};
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use zclaw_kernel::Kernel;
use zclaw_types::{AgentConfig, AgentId, AgentInfo};
use zclaw_types::{AgentConfig, AgentId, AgentInfo, SkillId};
use crate::intelligence::validation::{validate_identifier, validate_string_length};
/// Kernel state wrapper for Tauri
pub type KernelState = Arc<Mutex<Option<Kernel>>>;
/// Session-level stream concurrency guard.
/// Prevents two concurrent `agent_chat_stream` calls from interleaving events
/// for the same session_id.
pub type SessionStreamGuard = Arc<dashmap::DashMap<String, Arc<Mutex<()>>>>;
/// Validate an agent ID string with clear error messages
fn validate_agent_id(agent_id: &str) -> Result<String, String> {
validate_identifier(agent_id, "agent_id")
@@ -54,6 +59,9 @@ pub struct CreateAgentRequest {
/// Temperature
#[serde(default = "default_temperature")]
pub temperature: f32,
/// Workspace directory for file access tools
#[serde(default)]
pub workspace: Option<PathBuf>,
}
fn default_provider() -> String { "openai".to_string() }
@@ -229,9 +237,9 @@ pub async fn kernel_status(
Some(kernel) => Ok(KernelStatusResponse {
initialized: true,
agent_count: kernel.list_agents().len(),
database_url: None,
base_url: None,
model: None,
database_url: Some(kernel.config().database_url.clone()),
base_url: Some(kernel.config().llm.base_url.clone()),
model: Some(kernel.config().llm.model.clone()),
}),
None => Ok(KernelStatusResponse {
initialized: false,
@@ -269,7 +277,7 @@ pub async fn agent_create(
.ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;
// Build agent config
let config = AgentConfig::new(&request.name)
let mut config = AgentConfig::new(&request.name)
.with_description(request.description.unwrap_or_default())
.with_system_prompt(request.system_prompt.unwrap_or_default())
.with_model(zclaw_types::ModelConfig {
@@ -281,6 +289,11 @@ pub async fn agent_create(
.with_max_tokens(request.max_tokens)
.with_temperature(request.temperature);
// Set workspace if provided
if let Some(workspace) = request.workspace {
config.workspace = Some(workspace);
}
let id = kernel.spawn_agent(config)
.await
.map_err(|e| format!("Failed to create agent: {}", e))?;
@@ -419,6 +432,7 @@ pub async fn agent_chat_stream(
identity_state: State<'_, crate::intelligence::IdentityManagerState>,
heartbeat_state: State<'_, crate::intelligence::HeartbeatEngineState>,
reflection_state: State<'_, crate::intelligence::ReflectionEngineState>,
stream_guard: State<'_, SessionStreamGuard>,
request: StreamChatRequest,
) -> Result<(), String> {
// Validate inputs
@@ -434,6 +448,22 @@ pub async fn agent_chat_stream(
let agent_id_str = request.agent_id.clone();
let message = request.message.clone();
// Session-level concurrency guard:
// Prevents two concurrent streams from interleaving events for the same session.
// Uses try_lock to fail fast instead of queueing — the frontend should not
// send a second message while the first stream is still active.
let session_mutex = stream_guard
.entry(session_id.clone())
.or_insert_with(|| Arc::new(Mutex::new(())));
let _session_guard = session_mutex.try_lock()
.map_err(|_| {
tracing::warn!(
"[agent_chat_stream] Session {} already has an active stream — rejecting",
session_id
);
format!("Session {} already has an active stream", session_id)
})?;
// PRE-CONVERSATION: Build intelligence-enhanced system prompt
let enhanced_prompt = crate::intelligence_hooks::pre_conversation_hook(
&request.agent_id,
@@ -453,7 +483,17 @@ pub async fn agent_chat_stream(
// Start the stream - this spawns a background task
// Use intelligence-enhanced system prompt if available
let prompt_arg = if enhanced_prompt.is_empty() { None } else { Some(enhanced_prompt) };
let rx = kernel.send_message_stream_with_prompt(&id, message.clone(), prompt_arg)
// Parse session_id for session reuse (carry conversation history across turns)
let session_id_parsed = std::str::FromStr::from_str(&session_id)
.ok()
.map(|uuid| zclaw_types::SessionId::from_uuid(uuid));
if session_id_parsed.is_none() {
tracing::warn!(
"session_id '{}' is not a valid UUID, will create a new session (context will be lost)",
session_id
);
}
let rx = kernel.send_message_stream_with_prompt(&id, message.clone(), prompt_arg, session_id_parsed)
.await
.map_err(|e| format!("Failed to start streaming: {}", e))?;
(rx, driver)
@@ -464,60 +504,98 @@ pub async fn agent_chat_stream(
let hb_state = heartbeat_state.inner().clone();
let rf_state = reflection_state.inner().clone();
// Spawn a task to process stream events
// Spawn a task to process stream events with timeout guard
tokio::spawn(async move {
use zclaw_runtime::LoopEvent;
println!("[agent_chat_stream] Starting to process stream events for session: {}", session_id);
tracing::debug!("[agent_chat_stream] Starting stream processing for session: {}", session_id);
while let Some(event) = rx.recv().await {
println!("[agent_chat_stream] Received event: {:?}", event);
// Stream idle timeout: if no event arrives in 5 minutes, terminate.
// This prevents orphaned streams from consuming resources indefinitely.
let stream_timeout = tokio::time::Duration::from_secs(300);
let stream_event = match event {
LoopEvent::Delta(delta) => {
println!("[agent_chat_stream] Delta: {} bytes", delta.len());
StreamChatEvent::Delta { delta }
}
LoopEvent::ToolStart { name, input } => {
println!("[agent_chat_stream] ToolStart: {} input={:?}", name, input);
StreamChatEvent::ToolStart { name, input }
}
LoopEvent::ToolEnd { name, output } => {
println!("[agent_chat_stream] ToolEnd: {} output={:?}", name, output);
StreamChatEvent::ToolEnd { name, output }
}
LoopEvent::IterationStart { iteration, max_iterations } => {
println!("[agent_chat_stream] IterationStart: {}/{}", iteration, max_iterations);
StreamChatEvent::IterationStart { iteration, max_iterations }
}
LoopEvent::Complete(result) => {
println!("[agent_chat_stream] Complete: input_tokens={}, output_tokens={}",
result.input_tokens, result.output_tokens);
loop {
match tokio::time::timeout(stream_timeout, rx.recv()).await {
Ok(Some(event)) => {
let stream_event = match &event {
LoopEvent::Delta(delta) => {
tracing::trace!("[agent_chat_stream] Delta: {} bytes", delta.len());
StreamChatEvent::Delta { delta: delta.clone() }
}
LoopEvent::ToolStart { name, input } => {
tracing::debug!("[agent_chat_stream] ToolStart: {}", name);
StreamChatEvent::ToolStart { name: name.clone(), input: input.clone() }
}
LoopEvent::ToolEnd { name, output } => {
tracing::debug!("[agent_chat_stream] ToolEnd: {}", name);
StreamChatEvent::ToolEnd { name: name.clone(), output: output.clone() }
}
LoopEvent::IterationStart { iteration, max_iterations } => {
tracing::debug!("[agent_chat_stream] IterationStart: {}/{}", iteration, max_iterations);
StreamChatEvent::IterationStart { iteration: *iteration, max_iterations: *max_iterations }
}
LoopEvent::Complete(result) => {
tracing::info!("[agent_chat_stream] Complete: input_tokens={}, output_tokens={}",
result.input_tokens, result.output_tokens);
// POST-CONVERSATION: record interaction + trigger reflection
crate::intelligence_hooks::post_conversation_hook(
&agent_id_str, &message, &hb_state, &rf_state, llm_driver.clone(),
).await;
// POST-CONVERSATION: record interaction + trigger reflection
// Hook failure is non-fatal — internal errors are logged by the hook itself
let agent_id_hook = agent_id_str.clone();
let message_hook = message.clone();
let hb = hb_state.clone();
let rf = rf_state.clone();
let driver = llm_driver.clone();
tokio::spawn(async move {
crate::intelligence_hooks::post_conversation_hook(
&agent_id_hook, &message_hook, &hb, &rf, driver,
).await;
});
StreamChatEvent::Complete {
input_tokens: result.input_tokens,
output_tokens: result.output_tokens,
StreamChatEvent::Complete {
input_tokens: result.input_tokens,
output_tokens: result.output_tokens,
}
}
LoopEvent::Error(message) => {
tracing::warn!("[agent_chat_stream] Error: {}", message);
StreamChatEvent::Error { message: message.clone() }
}
};
// Emit the event with session_id for routing
if let Err(e) = app.emit("stream:chunk", serde_json::json!({
"sessionId": session_id,
"event": stream_event
})) {
tracing::warn!("[agent_chat_stream] Failed to emit event: {}", e);
break; // Frontend likely disconnected
}
// After Complete or Error, the stream is done
if matches!(event, LoopEvent::Complete(_) | LoopEvent::Error(_)) {
break;
}
}
LoopEvent::Error(message) => {
println!("[agent_chat_stream] Error: {}", message);
StreamChatEvent::Error { message }
Ok(None) => {
// Channel closed — stream producer dropped
tracing::info!("[agent_chat_stream] Stream channel closed for session: {}", session_id);
break;
}
};
// Emit the event with session_id for routing
let _ = app.emit("stream:chunk", serde_json::json!({
"sessionId": session_id,
"event": stream_event
}));
Err(_) => {
// Timeout: no event received in 5 minutes
tracing::warn!("[agent_chat_stream] Stream idle timeout for session: {}", session_id);
let _ = app.emit("stream:chunk", serde_json::json!({
"sessionId": session_id,
"event": StreamChatEvent::Error {
message: "流式响应超时,请重试".to_string()
}
}));
break;
}
}
}
println!("[agent_chat_stream] Stream ended for session: {}", session_id);
tracing::debug!("[agent_chat_stream] Stream processing ended for session: {}", session_id);
});
Ok(())
@@ -677,17 +755,52 @@ pub async fn skill_execute(
// Validate skill ID
let id = validate_id(&id, "skill_id")?;
// Autonomy guard: supervised mode blocks skill execution entirely
if autonomy_level.as_deref() == Some("supervised") {
return Err("技能执行在监督模式下需要用户审批".to_string());
}
let kernel_lock = state.lock().await;
let kernel = kernel_lock.as_ref()
.ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;
// Execute skill
// Autonomy guard: supervised mode creates an approval request for ALL skills
if autonomy_level.as_deref() == Some("supervised") {
let approval = kernel.create_approval(id.clone(), input).await;
return Ok(SkillResult {
success: false,
output: serde_json::json!({
"status": "pending_approval",
"approval_id": approval.id,
"skill_id": approval.hand_id,
"message": "监督模式下所有技能执行需要用户审批"
}),
error: None,
duration_ms: None,
});
}
// Assisted mode: require approval for non-prompt skills (shell/python) that have side effects
if autonomy_level.as_deref() != Some("autonomous") {
let skill_id = SkillId::new(&id);
if let Some(manifest) = kernel.skills().get_manifest(&skill_id).await {
match manifest.mode {
zclaw_skills::SkillMode::Shell | zclaw_skills::SkillMode::Python => {
let approval = kernel.create_approval(id.clone(), input).await;
return Ok(SkillResult {
success: false,
output: serde_json::json!({
"status": "pending_approval",
"approval_id": approval.id,
"skill_id": approval.hand_id,
"message": format!("技能 '{}' 使用 {:?} 模式,需要用户审批后执行", manifest.name, manifest.mode)
}),
error: None,
duration_ms: None,
});
}
_ => {} // PromptOnly and other modes are safe to execute directly
}
}
}
// Execute skill directly
let result = kernel.execute_skill(&id, context.into(), input).await
.map_err(|e| format!("Failed to execute skill: {}", e))?;
@@ -863,8 +976,8 @@ pub async fn hand_execute(
}
}
// Execute hand directly
let result = kernel.execute_hand(&id, input).await
// Execute hand directly (returns result + run_id for tracking)
let (result, _run_id) = kernel.execute_hand(&id, input).await
.map_err(|e| format!("Failed to execute hand: {}", e))?;
Ok(HandResult::from(result))
@@ -1253,35 +1366,83 @@ pub async fn hand_get(
/// Get status of a specific hand run
#[tauri::command]
pub async fn hand_run_status(
_state: State<'_, KernelState>,
hand_name: String,
state: State<'_, KernelState>,
run_id: String,
) -> Result<serde_json::Value, String> {
// Hand run tracking not yet implemented — return not-found status
Ok(serde_json::json!({
"status": "not_found",
"hand_name": hand_name,
"run_id": run_id,
"message": "Hand run history tracking is not yet implemented"
}))
let kernel_lock = state.lock().await;
let kernel = kernel_lock.as_ref()
.ok_or_else(|| "Kernel not initialized".to_string())?;
let parsed_id: zclaw_types::HandRunId = run_id.parse()
.map_err(|e| format!("Invalid run ID: {}", e))?;
let run = kernel.get_hand_run(&parsed_id).await
.map_err(|e| format!("Failed to get hand run: {}", e))?;
match run {
Some(r) => Ok(serde_json::to_value(r)
.map_err(|e| format!("Serialization error: {}", e))?),
None => Ok(serde_json::json!({
"status": "not_found",
"run_id": run_id,
"message": "Hand run not found"
})),
}
}
/// List run history for a hand
/// List run history for a hand (or all hands)
#[tauri::command]
pub async fn hand_run_list(
_state: State<'_, KernelState>,
hand_name: String,
state: State<'_, KernelState>,
hand_name: Option<String>,
status: Option<String>,
limit: Option<u32>,
offset: Option<u32>,
) -> Result<serde_json::Value, String> {
// Hand run history not yet implemented — return empty list
let kernel_lock = state.lock().await;
let kernel = kernel_lock.as_ref()
.ok_or_else(|| "Kernel not initialized".to_string())?;
let filter = zclaw_types::HandRunFilter {
hand_name,
status: status.map(|s| s.parse()).transpose()
.map_err(|e| format!("Invalid status filter: {}", e))?,
limit,
offset,
};
let runs = kernel.list_hand_runs(&filter).await
.map_err(|e| format!("Failed to list hand runs: {}", e))?;
let total = kernel.count_hand_runs(&filter).await
.map_err(|e| format!("Failed to count hand runs: {}", e))?;
Ok(serde_json::json!({
"runs": [],
"hand_name": hand_name,
"total": 0,
"limit": limit.unwrap_or(20),
"offset": offset.unwrap_or(0),
"message": "Hand run history tracking is not yet implemented"
"runs": runs,
"total": total,
"limit": filter.limit.unwrap_or(20),
"offset": filter.offset.unwrap_or(0),
}))
}
/// Cancel a running hand execution.
///
/// Parses the string `run_id` into a typed `HandRunId`, asks the kernel to
/// cancel that run, and echoes a small JSON acknowledgement back to the
/// frontend on success.
#[tauri::command]
pub async fn hand_run_cancel(
    state: State<'_, KernelState>,
    run_id: String,
) -> Result<serde_json::Value, String> {
    // Any hand-run operation requires an initialized kernel.
    let guard = state.lock().await;
    let kernel = match guard.as_ref() {
        Some(k) => k,
        None => return Err("Kernel not initialized".to_string()),
    };

    // The run id arrives as a plain string from the frontend; validate it by
    // parsing into the strongly-typed id before touching the kernel.
    let parsed: zclaw_types::HandRunId = run_id
        .parse()
        .map_err(|e| format!("Invalid run ID: {}", e))?;

    kernel
        .cancel_hand_run(&parsed)
        .await
        .map_err(|e| format!("Failed to cancel hand run: {}", e))?;

    Ok(serde_json::json!({ "status": "cancelled", "run_id": run_id }))
}
@@ -1401,3 +1562,106 @@ pub async fn scheduled_task_list(
Ok(tasks)
}
// ============================================================
// A2A (Agent-to-Agent) Commands
// ============================================================
/// Send a direct A2A message from one agent to another
#[tauri::command]
pub async fn agent_a2a_send(
state: State<'_, KernelState>,
from: String,
to: String,
payload: serde_json::Value,
message_type: Option<String>,
) -> Result<(), String> {
let kernel_lock = state.lock().await;
let kernel = kernel_lock.as_ref()
.ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;
let from_id: AgentId = from.parse()
.map_err(|_| format!("Invalid from agent ID: {}", from))?;
let to_id: AgentId = to.parse()
.map_err(|_| format!("Invalid to agent ID: {}", to))?;
let msg_type = message_type.map(|mt| match mt.as_str() {
"request" => zclaw_kernel::A2aMessageType::Request,
"notification" => zclaw_kernel::A2aMessageType::Notification,
"task" => zclaw_kernel::A2aMessageType::Task,
_ => zclaw_kernel::A2aMessageType::Notification,
});
kernel.a2a_send(&from_id, &to_id, payload, msg_type).await
.map_err(|e| format!("A2A send failed: {}", e))?;
Ok(())
}
/// Broadcast a message from one agent to all other agents.
///
/// Validates the sender id, then fans the payload out through the kernel's
/// A2A bus. Errors from the kernel are flattened into user-facing strings.
#[tauri::command]
pub async fn agent_a2a_broadcast(
    state: State<'_, KernelState>,
    from: String,
    payload: serde_json::Value,
) -> Result<(), String> {
    // Broadcasting requires an initialized kernel.
    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;

    // Sender id is validated by parsing into the strongly-typed AgentId.
    let sender: AgentId = from
        .parse()
        .map_err(|_| format!("Invalid from agent ID: {}", from))?;

    kernel
        .a2a_broadcast(&sender, payload)
        .await
        .map_err(|e| format!("A2A broadcast failed: {}", e))?;

    Ok(())
}
/// Discover agents with a specific capability.
///
/// Returns the matching agent profiles serialized to JSON. A profile that
/// fails to serialize is skipped rather than failing the whole call.
#[tauri::command]
pub async fn agent_a2a_discover(
    state: State<'_, KernelState>,
    capability: String,
) -> Result<Vec<serde_json::Value>, String> {
    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;

    let profiles = kernel
        .a2a_discover(&capability)
        .await
        .map_err(|e| format!("A2A discover failed: {}", e))?;

    // Best-effort serialization: drop profiles that cannot be converted.
    let mut serialized = Vec::with_capacity(profiles.len());
    for profile in &profiles {
        if let Ok(value) = serde_json::to_value(profile) {
            serialized.push(value);
        }
    }
    Ok(serialized)
}
/// Delegate a task to another agent and wait for response.
///
/// Blocks (async) until the delegate answers or the timeout elapses.
/// `timeout_ms` defaults to 30 seconds when not supplied by the caller.
#[tauri::command]
pub async fn agent_a2a_delegate_task(
    state: State<'_, KernelState>,
    from: String,
    to: String,
    task: String,
    timeout_ms: Option<u64>,
) -> Result<serde_json::Value, String> {
    // Default wait budget for a delegated task.
    const DEFAULT_TIMEOUT_MS: u64 = 30_000;

    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;

    // Both endpoints are validated by parsing into typed agent ids.
    let delegator: AgentId = from
        .parse()
        .map_err(|_| format!("Invalid from agent ID: {}", from))?;
    let delegate: AgentId = to
        .parse()
        .map_err(|_| format!("Invalid to agent ID: {}", to))?;

    let timeout = timeout_ms.unwrap_or(DEFAULT_TIMEOUT_MS);
    kernel
        .a2a_delegate_task(&delegator, &delegate, task, timeout)
        .await
        .map_err(|e| format!("A2A task delegation failed: {}", e))
}

View File

@@ -1320,6 +1320,7 @@ pub fn run() {
.manage(reflection_state)
.manage(identity_state)
.manage(kernel_state)
.manage(kernel_commands::SessionStreamGuard::default())
.manage(pipeline_state)
.invoke_handler(tauri::generate_handler![
// Internal ZCLAW Kernel commands (preferred)
@@ -1344,9 +1345,17 @@ pub fn run() {
kernel_commands::hand_get,
kernel_commands::hand_run_status,
kernel_commands::hand_run_list,
kernel_commands::hand_run_cancel,
// Scheduled task commands
kernel_commands::scheduled_task_create,
kernel_commands::scheduled_task_list,
// A2A commands (Agent-to-Agent messaging)
kernel_commands::agent_a2a_send,
kernel_commands::agent_a2a_broadcast,
kernel_commands::agent_a2a_discover,
kernel_commands::agent_a2a_delegate_task,
// Pipeline commands (DSL-based workflows)
pipeline_commands::pipeline_list,
pipeline_commands::pipeline_get,

View File

@@ -266,7 +266,10 @@ impl LlmClient {
/// Call LLM API
async fn call_api(&self, endpoint: &str, request: &LlmRequest) -> Result<LlmResponse, String> {
let client = reqwest::Client::new();
let client = reqwest::Client::builder()
.user_agent("claude-code/0.1.0")
.build()
.unwrap_or_else(|_| reqwest::Client::new());
let response = client
.post(format!("{}/chat/completions", endpoint))
@@ -389,7 +392,10 @@ impl EmbeddingClient {
}
async fn call_embedding_api(&self, endpoint: &str, text: &str, model: &str) -> Result<EmbeddingResponse, String> {
let client = reqwest::Client::new();
let client = reqwest::Client::builder()
.user_agent("claude-code/0.1.0")
.build()
.unwrap_or_else(|_| reqwest::Client::new());
let request_body = serde_json::json!({
"input": text,

View File

@@ -18,8 +18,11 @@ use zclaw_pipeline::{
PipelineExecutor,
ActionRegistry,
LlmActionDriver,
SkillActionDriver,
HandActionDriver,
};
use zclaw_runtime::{LlmDriver, CompletionRequest};
use zclaw_skills::SkillContext;
use crate::kernel_commands::KernelState;
@@ -145,6 +148,78 @@ impl LlmActionDriver for RuntimeLlmAdapter {
}
}
/// Adapter to bridge Kernel skill execution into Pipeline SkillActionDriver
pub struct PipelineSkillDriver {
    // Shared handle to the (possibly uninitialized) kernel.
    kernel_state: KernelState,
}

impl PipelineSkillDriver {
    /// Create a driver backed by the shared kernel state.
    pub fn new(kernel_state: KernelState) -> Self {
        Self { kernel_state }
    }
}

#[async_trait]
impl SkillActionDriver for PipelineSkillDriver {
    async fn execute(
        &self,
        skill_id: &str,
        input: HashMap<String, Value>,
    ) -> Result<Value, String> {
        let guard = self.kernel_state.lock().await;
        let kernel = guard
            .as_ref()
            .ok_or_else(|| "Kernel 未初始化,无法执行技能".to_string())?;

        // Pipeline inputs arrive as a flat map; the kernel expects one JSON object.
        let payload = Value::Object(input.into_iter().collect());

        tracing::debug!("[PipelineSkillDriver] Executing skill: {}", skill_id);
        let outcome = kernel
            .execute_skill(skill_id, SkillContext::default(), payload)
            .await
            .map_err(|e| format!("技能执行失败: {}", e))?;
        Ok(outcome.output)
    }
}
/// Adapter to bridge Kernel hand execution into Pipeline HandActionDriver
pub struct PipelineHandDriver {
    // Shared handle to the (possibly uninitialized) kernel.
    kernel_state: KernelState,
}

impl PipelineHandDriver {
    /// Create a driver backed by the shared kernel state.
    pub fn new(kernel_state: KernelState) -> Self {
        Self { kernel_state }
    }
}

#[async_trait]
impl HandActionDriver for PipelineHandDriver {
    async fn execute(
        &self,
        hand_id: &str,
        action: &str,
        params: HashMap<String, Value>,
    ) -> Result<Value, String> {
        let guard = self.kernel_state.lock().await;
        let kernel = guard
            .as_ref()
            .ok_or_else(|| "Kernel 未初始化,无法执行 Hand".to_string())?;

        // Hand input = caller params plus an explicit "action" key.
        let mut payload = serde_json::Map::new();
        payload.insert("action".to_string(), Value::String(action.to_string()));
        payload.extend(params);

        tracing::debug!("[PipelineHandDriver] Executing hand: {} / {}", hand_id, action);
        let (outcome, _run_id) = kernel
            .execute_hand(hand_id, Value::Object(payload))
            .await
            .map_err(|e| format!("Hand 执行失败: {}", e))?;
        Ok(outcome.output)
    }
}
/// Pipeline state wrapper for Tauri
pub struct PipelineState {
/// Pipeline executor
@@ -350,24 +425,36 @@ pub async fn pipeline_run(
drop(pipelines);
// Try to get LLM driver from Kernel
let llm_driver = {
let (llm_driver, skill_driver, hand_driver) = {
let kernel_lock = kernel_state.lock().await;
if let Some(kernel) = kernel_lock.as_ref() {
tracing::debug!("[pipeline_run] Got LLM driver from Kernel");
Some(Arc::new(RuntimeLlmAdapter::new(
let llm = Some(Arc::new(RuntimeLlmAdapter::new(
kernel.driver(),
Some(kernel.config().llm.model.clone()),
)) as Arc<dyn LlmActionDriver>)
)) as Arc<dyn LlmActionDriver>);
let kernel_arc = (*kernel_state).clone();
let skill = Some(Arc::new(PipelineSkillDriver::new(kernel_arc.clone()))
as Arc<dyn SkillActionDriver>);
let hand = Some(Arc::new(PipelineHandDriver::new(kernel_arc))
as Arc<dyn HandActionDriver>);
(llm, skill, hand)
} else {
tracing::debug!("[pipeline_run] Kernel not initialized, no LLM driver available");
None
tracing::debug!("[pipeline_run] Kernel not initialized, no drivers available");
(None, None, None)
}
};
// Create executor with or without LLM driver
// Create executor with all available drivers
let executor = if let Some(driver) = llm_driver {
let registry = Arc::new(ActionRegistry::new().with_llm_driver(driver));
Arc::new(PipelineExecutor::new(registry))
let mut registry = ActionRegistry::new().with_llm_driver(driver);
if let Some(skill) = skill_driver {
registry = registry.with_skill_registry(skill);
}
if let Some(hand) = hand_driver {
registry = registry.with_hand_registry(hand);
}
Arc::new(PipelineExecutor::new(Arc::new(registry)))
} else {
state.executor.clone()
};

View File

@@ -392,7 +392,8 @@ pub async fn viking_remove(uri: String) -> Result<(), String> {
/// Get memory tree
#[tauri::command]
pub async fn viking_tree(path: String, _depth: Option<usize>) -> Result<serde_json::Value, String> {
pub async fn viking_tree(path: String, depth: Option<usize>) -> Result<serde_json::Value, String> {
let max_depth = depth.unwrap_or(5);
let storage = get_storage().await?;
let entries = storage
@@ -405,9 +406,13 @@ pub async fn viking_tree(path: String, _depth: Option<usize>) -> Result<serde_js
for entry in entries {
let parts: Vec<&str> = entry.uri.split('/').collect();
let level = parts.len().saturating_sub(1);
if level > max_depth {
continue;
}
let mut current = &mut tree;
for part in &parts[..parts.len().saturating_sub(1)] {
for part in &parts[..level] {
if !current.contains_key(*part) {
current.insert(
(*part).to_string(),