fix(presentation): 修复 presentation 模块类型错误和语法问题
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

- 创建 types.ts 定义完整的类型系统
- 重写 DocumentRenderer.tsx 修复语法错误
- 重写 QuizRenderer.tsx 修复语法错误
- 重写 PresentationContainer.tsx 添加类型守卫
- 重写 TypeSwitcher.tsx 修复类型引用
- 更新 index.ts 移除不存在的 ChartRenderer 导出

审计结果:
- 类型检查: 通过
- 单元测试: 222 passed
- 构建: 成功
This commit is contained in:
iven
2026-03-26 17:19:28 +08:00
parent d0c6319fc1
commit b7f3d94950
71 changed files with 15896 additions and 1133 deletions

View File

@@ -24,6 +24,7 @@ zclaw-kernel = { workspace = true }
zclaw-skills = { workspace = true }
zclaw-hands = { workspace = true }
zclaw-pipeline = { workspace = true }
zclaw-growth = { workspace = true }
# Tauri
tauri = { version = "2", features = [] }
@@ -32,10 +33,12 @@ tauri-plugin-opener = "2"
# Async runtime
tokio = { workspace = true }
futures = { workspace = true }
async-trait = { workspace = true }
# Serialization
serde = { workspace = true }
serde_json = { workspace = true }
toml = "0.8"
# HTTP client
reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls", "blocking"] }
@@ -48,6 +51,7 @@ thiserror = { workspace = true }
uuid = { workspace = true }
base64 = { workspace = true }
tracing = { workspace = true }
secrecy = { workspace = true }
# Browser automation (existing)
fantoccini = "0.21"

View File

@@ -6,7 +6,6 @@
// Viking CLI sidecar module for local memory operations
mod viking_commands;
mod viking_server;
// Memory extraction and context building modules (supplement CLI)
mod memory;
@@ -1304,6 +1303,14 @@ fn gateway_doctor(app: AppHandle) -> Result<String, String> {
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
// Initialize Viking storage (async, in background)
let runtime = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime");
runtime.block_on(async {
if let Err(e) = crate::viking_commands::init_storage().await {
tracing::error!("[VikingCommands] Failed to initialize storage: {}", e);
}
});
// Initialize browser state
let browser_state = browser::commands::BrowserState::new();
@@ -1359,6 +1366,8 @@ pub fn run() {
pipeline_commands::pipeline_result,
pipeline_commands::pipeline_runs,
pipeline_commands::pipeline_refresh,
pipeline_commands::route_intent,
pipeline_commands::analyze_presentation,
// OpenFang commands (new naming)
openfang_status,
openfang_start,
@@ -1387,20 +1396,17 @@ pub fn run() {
// OpenViking CLI sidecar commands
viking_commands::viking_status,
viking_commands::viking_add,
viking_commands::viking_add_inline,
viking_commands::viking_add_with_metadata,
viking_commands::viking_find,
viking_commands::viking_grep,
viking_commands::viking_ls,
viking_commands::viking_read,
viking_commands::viking_remove,
viking_commands::viking_tree,
// Viking server management (local deployment)
viking_server::viking_server_status,
viking_server::viking_server_start,
viking_server::viking_server_stop,
viking_server::viking_server_restart,
viking_commands::viking_inject_prompt,
// Memory extraction commands (supplement CLI)
memory::extractor::extract_session_memories,
memory::extractor::extract_and_store_memories,
memory::context_builder::estimate_content_tokens,
// LLM commands (for extraction)
llm::llm_complete,

View File

@@ -484,6 +484,124 @@ pub async fn extract_session_memories(
extractor.extract(&messages).await
}
/// Extract memories from session and store to SqliteStorage
/// This combines extraction and storage in one command
///
/// Returns the extraction result whose summary is annotated with how many
/// entries were actually persisted; storage failures for individual entries
/// are logged but do not fail the command.
#[tauri::command]
pub async fn extract_and_store_memories(
    messages: Vec<ChatMessage>,
    agent_id: String,
    llm_endpoint: Option<String>,
    llm_api_key: Option<String>,
) -> Result<ExtractionResult, String> {
    use zclaw_growth::{MemoryEntry, MemoryType, VikingStorage};

    let started = std::time::Instant::now();

    // Phase 1: run the session extractor, optionally LLM-assisted when
    // both endpoint and API key were supplied.
    let mut extractor = SessionExtractor::new(ExtractionConfig {
        agent_id: agent_id.clone(),
        ..Default::default()
    });
    if let (Some(endpoint), Some(api_key)) = (llm_endpoint, llm_api_key) {
        extractor = extractor.with_llm(endpoint, api_key);
    }
    let extracted = extractor.extract(&messages).await?;

    // Phase 2: persist each extracted memory into SqliteStorage.
    let storage = crate::viking_commands::get_storage()
        .await
        .map_err(|e| format!("Storage not available: {}", e))?;

    let mut stored_count = 0;
    let mut store_errors = Vec::new();
    for memory in &extracted.memories {
        // A single category lookup yields both the zclaw_growth memory type
        // and the category slug used in the memory URI.
        let (memory_type, category_slug) = match memory.category {
            MemoryCategory::UserPreference => (MemoryType::Preference, "preferences"),
            MemoryCategory::UserFact => (MemoryType::Knowledge, "facts"),
            MemoryCategory::AgentLesson => (MemoryType::Experience, "lessons"),
            MemoryCategory::AgentPattern => (MemoryType::Experience, "patterns"),
            MemoryCategory::Task => (MemoryType::Knowledge, "tasks"),
        };

        // Build the entry via the storage crate's builder API.
        let entry = MemoryEntry::new(&agent_id, memory_type, category_slug, memory.content.clone())
            .with_keywords(memory.tags.clone())
            .with_importance(memory.importance);

        if let Err(e) = storage.store(&entry).await {
            store_errors.push(format!("Failed to store {}: {}", memory.category, e));
        } else {
            stored_count += 1;
        }
    }

    let elapsed = started.elapsed().as_millis() as u64;

    // Surface partial storage failures in the log without failing the call.
    if !store_errors.is_empty() {
        tracing::warn!(
            "[extract_and_store] {} memories stored, {} errors: {}",
            stored_count,
            store_errors.len(),
            store_errors.join("; ")
        );
    }
    tracing::info!(
        "[extract_and_store] Extracted {} memories, stored {} in {}ms",
        extracted.memories.len(),
        stored_count,
        elapsed
    );

    // Hand back the extraction result, annotated with the stored count.
    Ok(ExtractionResult {
        memories: extracted.memories,
        summary: format!("{} (Stored: {})", extracted.summary, stored_count),
        tokens_saved: extracted.tokens_saved,
        extraction_time_ms: elapsed,
    })
}
impl std::fmt::Display for MemoryCategory {
    // Render the category as its snake_case wire name (also used in
    // storage error messages).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            MemoryCategory::UserPreference => "user_preference",
            MemoryCategory::UserFact => "user_fact",
            MemoryCategory::AgentLesson => "agent_lesson",
            MemoryCategory::AgentPattern => "agent_pattern",
            MemoryCategory::Task => "task",
        };
        f.write_str(name)
    }
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -9,13 +9,141 @@ use tauri::{AppHandle, Emitter, State};
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use serde_json::Value;
use async_trait::async_trait;
use secrecy::SecretString;
use zclaw_pipeline::{
Pipeline, RunStatus,
parse_pipeline_yaml,
PipelineExecutor,
ActionRegistry,
LlmActionDriver,
};
use zclaw_runtime::{LlmDriver, CompletionRequest};
use crate::kernel_commands::KernelState;
/// Adapter to connect zclaw-runtime LlmDriver to zclaw-pipeline LlmActionDriver
pub struct RuntimeLlmAdapter {
    // Underlying runtime driver that performs the actual completion calls.
    driver: Arc<dyn LlmDriver>,
    // Model name used when a pipeline action does not specify one.
    default_model: String,
}
impl RuntimeLlmAdapter {
    /// Wrap a runtime driver for use by pipeline actions.
    ///
    /// `default_model` falls back to "claude-3-sonnet-20240229" when `None`.
    pub fn new(driver: Arc<dyn LlmDriver>, default_model: Option<String>) -> Self {
        Self {
            driver,
            default_model: default_model.unwrap_or_else(|| "claude-3-sonnet-20240229".to_string()),
        }
    }
}
#[async_trait]
impl LlmActionDriver for RuntimeLlmAdapter {
    /// Render `prompt` with `input` values, run a completion, and return the
    /// result as a string `Value`, or parsed JSON when `json_mode` is set.
    ///
    /// Fixes over the previous version:
    /// - prompt/response previews are truncated on `char` boundaries, so
    ///   multi-byte text (e.g. CJK prompts) can no longer panic the byte
    ///   slice `&prompt[..prompt.len().min(500)]`;
    /// - extracting JSON from a markdown fence guards against a missing
    ///   closing fence (previously `text[start..end]` could panic when
    ///   `rfind("```")` landed on the opening fence, giving `end < start`).
    async fn generate(
        &self,
        prompt: String,
        input: HashMap<String, Value>,
        model: Option<String>,
        temperature: Option<f32>,
        max_tokens: Option<u32>,
        json_mode: bool,
    ) -> Result<Value, String> {
        println!("[DEBUG RuntimeLlmAdapter] generate called with prompt length: {}", prompt.len());
        println!("[DEBUG RuntimeLlmAdapter] input HashMap contents:");
        for (k, v) in &input {
            println!("  {} => {}", k, v);
        }

        // Build user content from prompt and input.
        let user_content = if input.is_empty() {
            println!("[DEBUG RuntimeLlmAdapter] WARNING: input is empty, using raw prompt");
            prompt.clone()
        } else {
            // Inject input values into the prompt.
            // Supported placeholder formats: {{key}}, {{ key }}, ${key}, ${inputs.key}
            let mut rendered = prompt.clone();
            // char-based truncation: byte-slicing can split a UTF-8 sequence.
            let preview: String = prompt.chars().take(500).collect();
            println!("[DEBUG RuntimeLlmAdapter] Original prompt (first 500 chars): {}", preview);
            for (key, value) in &input {
                // Strings are substituted verbatim; other JSON values via
                // their canonical serialization.
                let str_value = match value.as_str() {
                    Some(s) => s.to_string(),
                    None => value.to_string(),
                };
                println!("[DEBUG RuntimeLlmAdapter] Replacing '{}' with '{}'", key, str_value);
                rendered = rendered.replace(&format!("{{{{{key}}}}}"), &str_value); // {{key}}
                rendered = rendered.replace(&format!("{{{{ {key} }}}}"), &str_value); // {{ key }}
                rendered = rendered.replace(&format!("${{{key}}}"), &str_value); // ${key}
                rendered = rendered.replace(&format!("${{inputs.{key}}}"), &str_value); // ${inputs.key}
            }
            let rendered_preview: String = rendered.chars().take(500).collect();
            println!("[DEBUG RuntimeLlmAdapter] Rendered prompt (first 500 chars): {}", rendered_preview);
            rendered
        };

        // Create message using zclaw_types::Message enum
        let messages = vec![zclaw_types::Message::user(user_content)];

        let request = CompletionRequest {
            model: model.unwrap_or_else(|| self.default_model.clone()),
            system: None,
            messages,
            tools: Vec::new(),
            max_tokens,
            temperature,
            stop: Vec::new(),
            stream: false,
        };

        let response = self.driver.complete(request)
            .await
            .map_err(|e| format!("LLM completion failed: {}", e))?;

        // Extract the first text block from the response (non-text blocks
        // such as tool calls are ignored).
        let text = response.content.iter()
            .find_map(|block| match block {
                zclaw_runtime::ContentBlock::Text { text } => Some(text.clone()),
                _ => None,
            })
            .unwrap_or_default();

        // Safe truncation for UTF-8 strings
        let truncated: String = text.chars().take(1000).collect();
        println!("[DEBUG RuntimeLlmAdapter] LLM response text (first 1000 chars): {}", truncated);

        // Parse as JSON if json_mode, otherwise return as string
        if json_mode {
            // LLMs often wrap JSON in markdown code fences; strip them.
            // `find`/`rfind` return byte offsets of ASCII fences, so the
            // slice boundaries below are always valid char boundaries.
            let json_text = if let Some(open) = text.find("```json") {
                let start = open + "```json".len();
                // Guard: rfind can land on the opening fence itself when the
                // closing fence is missing; fall back to end-of-text.
                let end = text.rfind("```").filter(|&e| e >= start).unwrap_or(text.len());
                text[start..end].trim().to_string()
            } else if let Some(open) = text.find("```") {
                let start = open + "```".len();
                let end = text.rfind("```").filter(|&e| e >= start).unwrap_or(text.len());
                text[start..end].trim().to_string()
            } else {
                text.clone()
            };
            // Safe truncation for UTF-8 strings
            let truncated_json: String = json_text.chars().take(500).collect();
            println!("[DEBUG RuntimeLlmAdapter] JSON text to parse (first 500 chars): {}", truncated_json);
            serde_json::from_str(&json_text)
                .map_err(|e| {
                    println!("[DEBUG RuntimeLlmAdapter] JSON parse error: {}", e);
                    format!("Failed to parse LLM response as JSON: {}\nResponse: {}", e, json_text)
                })
        } else {
            Ok(Value::String(text))
        }
    }
}
/// Pipeline state wrapper for Tauri
pub struct PipelineState {
@@ -47,8 +175,10 @@ pub struct PipelineInfo {
pub display_name: String,
/// Description
pub description: String,
/// Category
/// Category (functional classification)
pub category: String,
/// Industry classification (e.g., "internet", "finance", "healthcare")
pub industry: String,
/// Tags
pub tags: Vec<String>,
/// Icon (emoji)
@@ -134,21 +264,28 @@ pub struct PipelineRunResponse {
pub async fn pipeline_list(
state: State<'_, Arc<PipelineState>>,
category: Option<String>,
industry: Option<String>,
) -> Result<Vec<PipelineInfo>, String> {
// Get pipelines directory
let pipelines_dir = get_pipelines_directory()?;
tracing::info!("[pipeline_list] Scanning directory: {:?}", pipelines_dir);
println!("[DEBUG pipeline_list] Scanning directory: {:?}", pipelines_dir);
println!("[DEBUG pipeline_list] Filters - category: {:?}, industry: {:?}", category, industry);
// Scan for pipeline files (returns both info and paths)
let mut pipelines_with_paths: Vec<(PipelineInfo, PathBuf)> = Vec::new();
if pipelines_dir.exists() {
scan_pipelines_with_paths(&pipelines_dir, category.as_deref(), &mut pipelines_with_paths)?;
scan_pipelines_with_paths(&pipelines_dir, category.as_deref(), industry.as_deref(), &mut pipelines_with_paths)?;
} else {
tracing::warn!("[pipeline_list] Pipelines directory does not exist: {:?}", pipelines_dir);
eprintln!("[WARN pipeline_list] Pipelines directory does not exist: {:?}", pipelines_dir);
}
tracing::info!("[pipeline_list] Found {} pipelines", pipelines_with_paths.len());
println!("[DEBUG pipeline_list] Found {} pipelines", pipelines_with_paths.len());
// Debug: log all pipelines with their industry values
for (info, _) in &pipelines_with_paths {
println!("[DEBUG pipeline_list] Pipeline: {} -> category: {}, industry: '{}'", info.id, info.category, info.industry);
}
// Update state
let mut state_pipelines = state.pipelines.write().await;
@@ -188,27 +325,73 @@ pub async fn pipeline_get(
pub async fn pipeline_run(
app: AppHandle,
state: State<'_, Arc<PipelineState>>,
kernel_state: State<'_, KernelState>,
request: RunPipelineRequest,
) -> Result<RunPipelineResponse, String> {
println!("[DEBUG pipeline_run] Received request for pipeline_id: {}", request.pipeline_id);
// Get pipeline
let pipelines = state.pipelines.read().await;
println!("[DEBUG pipeline_run] State has {} pipelines loaded", pipelines.len());
// Debug: list all loaded pipeline IDs
for (id, _) in pipelines.iter() {
println!("[DEBUG pipeline_run] Loaded pipeline: {}", id);
}
let pipeline = pipelines.get(&request.pipeline_id)
.ok_or_else(|| format!("Pipeline not found: {}", request.pipeline_id))?
.ok_or_else(|| {
println!("[ERROR pipeline_run] Pipeline '{}' not found in state. Available: {:?}",
request.pipeline_id,
pipelines.keys().collect::<Vec<_>>());
format!("Pipeline not found: {}", request.pipeline_id)
})?
.clone();
drop(pipelines);
// Clone executor for async task
let executor = state.executor.clone();
// Try to get LLM driver from Kernel
let llm_driver = {
let kernel_lock = kernel_state.lock().await;
if let Some(kernel) = kernel_lock.as_ref() {
println!("[DEBUG pipeline_run] Got LLM driver from Kernel");
Some(Arc::new(RuntimeLlmAdapter::new(
kernel.driver(),
Some(kernel.config().llm.model.clone()),
)) as Arc<dyn LlmActionDriver>)
} else {
println!("[DEBUG pipeline_run] Kernel not initialized, no LLM driver available");
None
}
};
// Create executor with or without LLM driver
let executor = if let Some(driver) = llm_driver {
let registry = Arc::new(ActionRegistry::new().with_llm_driver(driver));
Arc::new(PipelineExecutor::new(registry))
} else {
state.executor.clone()
};
// Generate run ID upfront so we can return it to the caller
let run_id = uuid::Uuid::new_v4().to_string();
let pipeline_id = request.pipeline_id.clone();
let inputs = request.inputs.clone();
// Run pipeline in background
// Clone for async task
let run_id_for_spawn = run_id.clone();
// Run pipeline in background with the known run_id
tokio::spawn(async move {
let result = executor.execute(&pipeline, inputs).await;
println!("[DEBUG pipeline_run] Starting execution with run_id: {}", run_id_for_spawn);
let result = executor.execute_with_id(&pipeline, inputs, &run_id_for_spawn).await;
println!("[DEBUG pipeline_run] Execution completed for run_id: {}, status: {:?}",
run_id_for_spawn,
result.as_ref().map(|r| r.status.clone()).unwrap_or(RunStatus::Failed));
// Emit completion event
let _ = app.emit("pipeline-complete", &PipelineRunResponse {
run_id: result.as_ref().map(|r| r.id.clone()).unwrap_or_default(),
run_id: run_id_for_spawn.clone(),
pipeline_id: pipeline_id.clone(),
status: match &result {
Ok(r) => r.status.to_string(),
@@ -227,10 +410,10 @@ pub async fn pipeline_run(
});
});
// Return immediately with run ID
// Note: In a real implementation, we'd track the run ID properly
// Return immediately with the known run ID
println!("[DEBUG pipeline_run] Returning run_id: {} to caller", run_id);
Ok(RunPipelineResponse {
run_id: uuid::Uuid::new_v4().to_string(),
run_id,
pipeline_id: request.pipeline_id,
status: "running".to_string(),
})
@@ -390,8 +573,10 @@ fn get_pipelines_directory() -> Result<PathBuf, String> {
fn scan_pipelines_with_paths(
dir: &PathBuf,
category_filter: Option<&str>,
industry_filter: Option<&str>,
pipelines: &mut Vec<(PipelineInfo, PathBuf)>,
) -> Result<(), String> {
println!("[DEBUG scan] Entering directory: {:?}", dir);
let entries = std::fs::read_dir(dir)
.map_err(|e| format!("Failed to read pipelines directory: {}", e))?;
@@ -401,12 +586,22 @@ fn scan_pipelines_with_paths(
if path.is_dir() {
// Recursively scan subdirectory
scan_pipelines_with_paths(&path, category_filter, pipelines)?;
scan_pipelines_with_paths(&path, category_filter, industry_filter, pipelines)?;
} else if path.extension().map(|e| e == "yaml" || e == "yml").unwrap_or(false) {
// Try to parse pipeline file
println!("[DEBUG scan] Found YAML file: {:?}", path);
if let Ok(content) = std::fs::read_to_string(&path) {
println!("[DEBUG scan] File content length: {} bytes", content.len());
match parse_pipeline_yaml(&content) {
Ok(pipeline) => {
// Debug: log parsed pipeline metadata
println!(
"[DEBUG scan] Parsed YAML: {} -> category: {:?}, industry: {:?}",
pipeline.metadata.name,
pipeline.metadata.category,
pipeline.metadata.industry
);
// Apply category filter
if let Some(filter) = category_filter {
if pipeline.metadata.category.as_deref() != Some(filter) {
@@ -414,11 +609,18 @@ fn scan_pipelines_with_paths(
}
}
// Apply industry filter
if let Some(filter) = industry_filter {
if pipeline.metadata.industry.as_deref() != Some(filter) {
continue;
}
}
tracing::debug!("[scan] Found pipeline: {} at {:?}", pipeline.metadata.name, path);
pipelines.push((pipeline_to_info(&pipeline), path));
}
Err(e) => {
tracing::warn!("[scan] Failed to parse pipeline at {:?}: {}", path, e);
eprintln!("[ERROR scan] Failed to parse pipeline at {:?}: {}", path, e);
}
}
}
@@ -454,12 +656,21 @@ fn scan_pipelines_full_sync(
}
fn pipeline_to_info(pipeline: &Pipeline) -> PipelineInfo {
let industry = pipeline.metadata.industry.clone().unwrap_or_default();
println!(
"[DEBUG pipeline_to_info] Pipeline: {}, category: {:?}, industry: {:?}",
pipeline.metadata.name,
pipeline.metadata.category,
pipeline.metadata.industry
);
PipelineInfo {
id: pipeline.metadata.name.clone(),
display_name: pipeline.metadata.display_name.clone()
.unwrap_or_else(|| pipeline.metadata.name.clone()),
description: pipeline.metadata.description.clone().unwrap_or_default(),
category: pipeline.metadata.category.clone().unwrap_or_default(),
industry,
tags: pipeline.metadata.tags.clone(),
icon: pipeline.metadata.icon.clone().unwrap_or_else(|| "📦".to_string()),
version: pipeline.metadata.version.clone(),
@@ -488,6 +699,245 @@ fn pipeline_to_info(pipeline: &Pipeline) -> PipelineInfo {
/// Create pipeline state with default action registry
///
/// Attaches an LLM driver when one can be built from the user's config
/// file; otherwise pipelines that require an LLM action will fail at
/// execution time.
pub fn create_pipeline_state() -> Arc<PipelineState> {
    // Bug fix: the previous version built an `ActionRegistry` here and then
    // immediately shadowed it — a dead allocation that was never used.
    let action_registry = if let Some(driver) = create_llm_driver_from_config() {
        println!("[DEBUG create_pipeline_state] LLM driver configured successfully");
        Arc::new(ActionRegistry::new().with_llm_driver(driver))
    } else {
        println!("[DEBUG create_pipeline_state] No LLM driver configured - pipelines requiring LLM will fail");
        Arc::new(ActionRegistry::new())
    };
    Arc::new(PipelineState::new(action_registry))
}
// === Intent Router Commands ===
/// Route result for frontend
///
/// Internally tagged on the wire as `{"type": "matched" | "ambiguous" |
/// "no_match" | "need_more_info", ...}` (snake_case).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum RouteResultResponse {
    /// A single pipeline matched the user input.
    Matched {
        pipeline_id: String,
        display_name: Option<String>,
        /// Suggested input mode ("form", "conversation", or "auto").
        mode: String,
        /// Parameter values already recovered from the user input.
        params: HashMap<String, Value>,
        /// Match confidence — presumably in [0, 1]; confirm with the parser.
        confidence: f32,
        /// Required parameters the user still has to provide.
        missing_params: Vec<MissingParamInfo>,
    },
    /// Several pipelines matched comparably well; let the user pick.
    Ambiguous {
        candidates: Vec<PipelineCandidateInfo>,
    },
    /// Nothing matched; `suggestions` offers alternatives to show.
    NoMatch {
        suggestions: Vec<PipelineCandidateInfo>,
    },
    /// The router needs a follow-up answer before it can route.
    NeedMoreInfo {
        prompt: String,
        related_pipeline: Option<String>,
    },
}
/// Missing parameter info
///
/// Describes one required pipeline input the user has not supplied yet.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MissingParamInfo {
    /// Parameter name as declared in the pipeline inputs.
    pub name: String,
    /// Human-readable label, if the pipeline provides one.
    pub label: Option<String>,
    /// Type tag string (e.g. "string", "number", "select").
    pub param_type: String,
    /// Whether the parameter must be provided before running.
    pub required: bool,
    /// Declared default value, if any.
    pub default: Option<Value>,
}
/// Pipeline candidate info
///
/// A lightweight pipeline summary used for suggestions and disambiguation
/// in the routing responses.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PipelineCandidateInfo {
    /// Pipeline identifier (the pipeline's metadata name).
    pub id: String,
    /// Display name, if declared.
    pub display_name: Option<String>,
    /// Pipeline description, if declared.
    pub description: Option<String>,
    /// Icon (emoji), if declared.
    pub icon: Option<String>,
    /// Functional category, if declared.
    pub category: Option<String>,
    /// Why this candidate is being shown (user-facing text).
    pub match_reason: Option<String>,
}
/// Route user input to matching pipeline
///
/// Rebuilds a `TriggerParser` from all currently loaded pipelines (tags as
/// keywords, description as trigger description), then attempts a quick
/// match against `user_input`. On a match, returns the pipeline plus the
/// suggested input mode and any still-missing required parameters; on no
/// match, returns up to three registered pipelines as suggestions.
#[tauri::command]
pub async fn route_intent(
    state: State<'_, Arc<PipelineState>>,
    user_input: String,
) -> Result<RouteResultResponse, String> {
    use zclaw_pipeline::{TriggerParser, Trigger, TriggerParam, compile_trigger};
    println!("[DEBUG route_intent] Routing user input: {}", user_input);
    // Build trigger parser from loaded pipelines
    let pipelines = state.pipelines.read().await;
    let mut parser = TriggerParser::new();
    for (id, pipeline) in pipelines.iter() {
        // Extract trigger info from pipeline metadata
        // For now, use tags as keywords and description as trigger description
        let trigger = Trigger {
            keywords: pipeline.metadata.tags.clone(),
            patterns: vec![], // TODO: add pattern support in pipeline definition
            description: pipeline.metadata.description.clone(),
            examples: vec![],
        };
        // Convert pipeline inputs to trigger params
        let param_defs: Vec<TriggerParam> = pipeline.spec.inputs.iter().map(|input| {
            TriggerParam {
                name: input.name.clone(),
                // Map the pipeline's input kind to the router's string tag.
                param_type: match input.input_type {
                    zclaw_pipeline::InputType::String => "string".to_string(),
                    zclaw_pipeline::InputType::Number => "number".to_string(),
                    zclaw_pipeline::InputType::Boolean => "boolean".to_string(),
                    zclaw_pipeline::InputType::Select => "select".to_string(),
                    zclaw_pipeline::InputType::MultiSelect => "multi-select".to_string(),
                    zclaw_pipeline::InputType::File => "file".to_string(),
                    zclaw_pipeline::InputType::Text => "text".to_string(),
                },
                required: input.required,
                label: input.label.clone(),
                default: input.default.clone(),
            }
        }).collect();
        match compile_trigger(
            id.clone(),
            pipeline.metadata.display_name.clone(),
            &trigger,
            param_defs,
        ) {
            Ok(compiled) => parser.register(compiled),
            Err(e) => {
                // A bad trigger disables routing for that pipeline only;
                // the rest of the registry still works.
                eprintln!("[WARN route_intent] Failed to compile trigger for {}: {}", id, e);
            }
        }
    }
    // Quick match
    if let Some(match_result) = parser.quick_match(&user_input) {
        let trigger = parser.get_trigger(&match_result.pipeline_id);
        // Determine input mode: many/complex params -> "form", otherwise
        // "conversation"; "auto" when the trigger can't be looked up.
        // NOTE(review): the two non-"form" branches both yield
        // "conversation" — confirm whether the empty-params case was meant
        // to produce a different mode.
        let mode = if let Some(t) = &trigger {
            let required_count = t.param_defs.iter().filter(|p| p.required).count();
            if required_count > 3 || t.param_defs.len() > 5 {
                "form"
            } else if t.param_defs.is_empty() {
                "conversation"
            } else {
                "conversation"
            }
        } else {
            "auto"
        };
        // Find missing params: required, not recovered from the input, and
        // without a declared default.
        let missing_params: Vec<MissingParamInfo> = trigger
            .map(|t| {
                t.param_defs.iter()
                    .filter(|p| p.required && !match_result.params.contains_key(&p.name) && p.default.is_none())
                    .map(|p| MissingParamInfo {
                        name: p.name.clone(),
                        label: p.label.clone(),
                        param_type: p.param_type.clone(),
                        required: p.required,
                        default: p.default.clone(),
                    })
                    .collect()
            })
            .unwrap_or_default();
        return Ok(RouteResultResponse::Matched {
            pipeline_id: match_result.pipeline_id,
            display_name: trigger.and_then(|t| t.display_name.clone()),
            mode: mode.to_string(),
            params: match_result.params,
            confidence: match_result.confidence,
            missing_params,
        });
    }
    // No match - return suggestions (the first three registered triggers,
    // presented as recommendations).
    let suggestions: Vec<PipelineCandidateInfo> = parser.triggers()
        .iter()
        .take(3)
        .map(|t| PipelineCandidateInfo {
            id: t.pipeline_id.clone(),
            display_name: t.display_name.clone(),
            description: t.description.clone(),
            icon: None,
            category: None,
            match_reason: Some("推荐".to_string()),
        })
        .collect();
    Ok(RouteResultResponse::NoMatch { suggestions })
}
/// Create an LLM driver from configuration file or environment variables
///
/// Reads `{config_dir}/zclaw/config.toml`, expecting an `[llm]` table with
/// `provider` and `api_key` (plus optional `base_url` and `model`). Returns
/// `None` when the file is missing, unparsable, or names an unknown
/// provider.
fn create_llm_driver_from_config() -> Option<Arc<dyn LlmActionDriver>> {
    // Locate the config file under the platform config directory.
    let config_path = dirs::config_dir()?.join("zclaw").join("config.toml");
    if !config_path.exists() {
        println!("[DEBUG create_llm_driver] Config file not found at {:?}", config_path);
        return None;
    }

    // Parse the TOML and pull out the [llm] table.
    let config_content = std::fs::read_to_string(&config_path).ok()?;
    let parsed: toml::Value = toml::from_str(&config_content).ok()?;
    let llm = parsed.get("llm")?;

    let provider = llm.get("provider")?.as_str()?.to_string();
    let api_key = llm.get("api_key")?.as_str()?.to_string();
    let base_url = llm.get("base_url").and_then(toml::Value::as_str).map(str::to_string);
    let model = llm.get("model").and_then(toml::Value::as_str).map(str::to_string);

    println!("[DEBUG create_llm_driver] Found LLM config: provider={}, model={:?}", provider, model);

    // Wrap the key so it never leaks through Debug formatting.
    let secret_key = SecretString::new(api_key);

    // Pick the concrete runtime driver for the configured provider.
    let runtime_driver: Arc<dyn zclaw_runtime::LlmDriver> = match provider.as_str() {
        "anthropic" => Arc::new(zclaw_runtime::AnthropicDriver::new(secret_key)),
        "openai" | "doubao" | "qwen" | "deepseek" | "kimi" => {
            Arc::new(zclaw_runtime::OpenAiDriver::new(secret_key))
        }
        "gemini" => Arc::new(zclaw_runtime::GeminiDriver::new(secret_key)),
        "local" | "ollama" => {
            let url = base_url.unwrap_or_else(|| "http://localhost:11434".to_string());
            Arc::new(zclaw_runtime::LocalDriver::new(&url))
        }
        other => {
            eprintln!("[WARN create_llm_driver] Unknown provider: {}", other);
            return None;
        }
    };

    Some(Arc::new(RuntimeLlmAdapter::new(runtime_driver, model)))
}
/// Analyze presentation data
///
/// Runs the pipeline crate's `PresentationAnalyzer` over `data` and returns
/// the analysis serialized as JSON.
#[tauri::command]
pub async fn analyze_presentation(
    data: Value,
) -> Result<serde_json::Value, String> {
    use zclaw_pipeline::presentation::PresentationAnalyzer;

    let analysis = PresentationAnalyzer::new().analyze(&data);
    // Serialize the analysis for the frontend.
    serde_json::to_value(&analysis).map_err(|e| e.to_string())
}

View File

@@ -1,12 +1,22 @@
//! OpenViking CLI Sidecar Integration
//! OpenViking Memory Storage - Native Rust Implementation
//!
//! Wraps the OpenViking Rust CLI (`ov`) as a Tauri sidecar for local memory operations.
//! This eliminates the need for a Python server dependency.
//! Provides native Rust memory storage using SqliteStorage with TF-IDF semantic search.
//! This is a self-contained implementation that doesn't require external Python or CLI dependencies.
//!
//! Reference: https://github.com/volcengine/OpenViking
//! Features:
//! - SQLite persistence with FTS5 full-text search
//! - TF-IDF semantic scoring
//! - Token budget control
//! - Automatic memory indexing
use serde::{Deserialize, Serialize};
use std::process::Command;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::OnceCell;
use zclaw_growth::{
FindOptions, MemoryEntry, MemoryType, PromptInjector, RetrievalResult, SqliteStorage,
VikingStorage,
};
// === Types ===
@@ -57,302 +67,399 @@ pub struct VikingAddResult {
pub status: String,
}
// === CLI Path Resolution ===
// === Global Storage Instance ===
fn get_viking_cli_path() -> Result<String, String> {
// Try environment variable first
if let Ok(path) = std::env::var("ZCLAW_VIKING_BIN") {
if std::path::Path::new(&path).exists() {
return Ok(path);
}
}
/// Global storage instance
static STORAGE: OnceCell<Arc<SqliteStorage>> = OnceCell::const_new();
// Try bundled sidecar location
let binary_name = if cfg!(target_os = "windows") {
"ov-x86_64-pc-windows-msvc.exe"
} else if cfg!(target_os = "macos") {
if cfg!(target_arch = "aarch64") {
"ov-aarch64-apple-darwin"
} else {
"ov-x86_64-apple-darwin"
}
/// Get the storage directory path
fn get_storage_dir() -> PathBuf {
// Use platform-specific data directory
if let Some(data_dir) = dirs::data_dir() {
data_dir.join("zclaw").join("memories")
} else {
"ov-x86_64-unknown-linux-gnu"
};
// Check common locations
let locations = vec![
format!("./binaries/{}", binary_name),
format!("./resources/viking/{}", binary_name),
format!("./{}", binary_name),
];
for loc in locations {
if std::path::Path::new(&loc).exists() {
return Ok(loc);
}
}
// Fallback to system PATH
Ok("ov".to_string())
}
fn run_viking_cli(args: &[&str]) -> Result<String, String> {
let cli_path = get_viking_cli_path()?;
let output = Command::new(&cli_path)
.args(args)
.output()
.map_err(|e| {
if e.kind() == std::io::ErrorKind::NotFound {
format!(
"OpenViking CLI not found. Please install 'ov' or set ZCLAW_VIKING_BIN. Tried: {}",
cli_path
)
} else {
format!("Failed to run OpenViking CLI: {}", e)
}
})?;
if output.status.success() {
Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
} else {
let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
if !stderr.is_empty() {
Err(stderr)
} else if !stdout.is_empty() {
Err(stdout)
} else {
Err(format!("OpenViking CLI failed with status: {}", output.status))
}
// Fallback to current directory
PathBuf::from("./zclaw_data/memories")
}
}
/// Helper function to run Viking CLI and parse JSON output
/// Reserved for future JSON-based commands
#[allow(dead_code)]
fn run_viking_cli_json<T: for<'de> Deserialize<'de>>(args: &[&str]) -> Result<T, String> {
let output = run_viking_cli(args)?;
/// Initialize the storage (should be called once at startup)
pub async fn init_storage() -> Result<(), String> {
let storage_dir = get_storage_dir();
let db_path = storage_dir.join("memories.db");
// Handle empty output
if output.is_empty() {
return Err("OpenViking CLI returned empty output".to_string());
}
tracing::info!("[VikingCommands] Initializing storage at {:?}", db_path);
// Try to parse as JSON
serde_json::from_str(&output)
.map_err(|e| format!("Failed to parse OpenViking output as JSON: {}\nOutput: {}", e, output))
let storage = SqliteStorage::new(&db_path)
.await
.map_err(|e| format!("Failed to initialize storage: {}", e))?;
let _ = STORAGE.set(Arc::new(storage));
tracing::info!("[VikingCommands] Storage initialized successfully");
Ok(())
}
/// Get the storage instance (public for use by other modules)
///
/// Fails if [`init_storage`] has not completed yet.
pub async fn get_storage() -> Result<Arc<SqliteStorage>, String> {
    match STORAGE.get() {
        Some(storage) => Ok(Arc::clone(storage)),
        None => Err("Storage not initialized. Call init_storage() first.".to_string()),
    }
}
/// Get storage directory for status
///
/// Returns `None` when the path is not valid UTF-8.
fn get_data_dir_string() -> Option<String> {
    let dir = get_storage_dir();
    dir.to_str().map(ToString::to_string)
}
// === Tauri Commands ===
/// Check if OpenViking CLI is available
/// Check if memory storage is available
#[tauri::command]
pub fn viking_status() -> Result<VikingStatus, String> {
let result = run_viking_cli(&["--version"]);
match result {
Ok(version_output) => {
// Parse version from output like "ov 0.1.0"
let version = version_output
.lines()
.next()
.map(|s| s.trim().to_string());
pub async fn viking_status() -> Result<VikingStatus, String> {
match get_storage().await {
Ok(storage) => {
// Try a simple query to verify storage is working
let _ = storage
.find("", FindOptions::default())
.await
.map_err(|e| format!("Storage health check failed: {}", e))?;
Ok(VikingStatus {
available: true,
version,
data_dir: None, // TODO: Get from CLI
version: Some("0.2.0-native".to_string()),
data_dir: get_data_dir_string(),
error: None,
})
}
Err(e) => Ok(VikingStatus {
available: false,
version: None,
data_dir: None,
data_dir: get_data_dir_string(),
error: Some(e),
}),
}
}
/// Add a resource to OpenViking
/// Add a memory entry
#[tauri::command]
pub fn viking_add(uri: String, content: String) -> Result<VikingAddResult, String> {
// Create a temporary file for the content
let temp_dir = std::env::temp_dir();
let timestamp = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_millis())
.unwrap_or(0);
let temp_file = temp_dir.join(format!("viking_add_{}.txt", timestamp));
pub async fn viking_add(uri: String, content: String) -> Result<VikingAddResult, String> {
let storage = get_storage().await?;
std::fs::write(&temp_file, &content)
.map_err(|e| format!("Failed to write temp file: {}", e))?;
// Parse URI to extract agent_id, memory_type, and category
// Expected format: agent://{agent_id}/{type}/{category}
let (agent_id, memory_type, category) = parse_uri(&uri)?;
let temp_path = temp_file.to_string_lossy();
let result = run_viking_cli(&["add", &uri, "--file", &temp_path]);
let entry = MemoryEntry::new(&agent_id, memory_type, &category, content);
// Clean up temp file
let _ = std::fs::remove_file(&temp_file);
storage
.store(&entry)
.await
.map_err(|e| format!("Failed to store memory: {}", e))?;
match result {
Ok(_) => Ok(VikingAddResult {
uri,
status: "added".to_string(),
}),
Err(e) => Err(e),
}
Ok(VikingAddResult {
uri,
status: "added".to_string(),
})
}
/// Add a resource with inline content (for small content)
/// Add a memory with metadata
#[tauri::command]
pub fn viking_add_inline(uri: String, content: String) -> Result<VikingAddResult, String> {
// Use stdin for content
let cli_path = get_viking_cli_path()?;
pub async fn viking_add_with_metadata(
uri: String,
content: String,
keywords: Vec<String>,
importance: Option<u8>,
) -> Result<VikingAddResult, String> {
let storage = get_storage().await?;
let output = Command::new(&cli_path)
.args(["add", &uri])
.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()
.map_err(|e| format!("Failed to spawn OpenViking CLI: {}", e))?;
let (agent_id, memory_type, category) = parse_uri(&uri)?;
// Write content to stdin
if let Some(mut stdin) = output.stdin.as_ref() {
use std::io::Write;
stdin.write_all(content.as_bytes())
.map_err(|e| format!("Failed to write to stdin: {}", e))?;
let mut entry = MemoryEntry::new(&agent_id, memory_type, &category, content);
entry.keywords = keywords;
if let Some(imp) = importance {
entry.importance = imp.min(10).max(1);
}
let result = output.wait_with_output()
.map_err(|e| format!("Failed to read output: {}", e))?;
storage
.store(&entry)
.await
.map_err(|e| format!("Failed to store memory: {}", e))?;
if result.status.success() {
Ok(VikingAddResult {
uri,
status: "added".to_string(),
})
} else {
let stderr = String::from_utf8_lossy(&result.stderr).trim().to_string();
Err(if !stderr.is_empty() { stderr } else { "Failed to add resource".to_string() })
}
Ok(VikingAddResult {
uri,
status: "added".to_string(),
})
}
/// Find resources by semantic search
/// Find memories by semantic search
#[tauri::command]
pub fn viking_find(
pub async fn viking_find(
query: String,
scope: Option<String>,
limit: Option<usize>,
) -> Result<Vec<VikingFindResult>, String> {
let mut args = vec!["find", "--json", &query];
let storage = get_storage().await?;
let scope_arg;
if let Some(ref s) = scope {
scope_arg = format!("--scope={}", s);
args.push(&scope_arg);
}
let options = FindOptions {
scope,
limit,
min_similarity: Some(0.1),
};
let limit_arg;
if let Some(l) = limit {
limit_arg = format!("--limit={}", l);
args.push(&limit_arg);
}
let entries = storage
.find(&query, options)
.await
.map_err(|e| format!("Failed to search memories: {}", e))?;
// CLI returns JSON array directly
let output = run_viking_cli(&args)?;
// Handle empty or null results
if output.is_empty() || output == "null" || output == "[]" {
return Ok(Vec::new());
}
serde_json::from_str(&output)
.map_err(|e| format!("Failed to parse find results: {}\nOutput: {}", e, output))
Ok(entries
.into_iter()
.enumerate()
.map(|(i, entry)| VikingFindResult {
uri: entry.uri,
score: 1.0 - (i as f64 * 0.1), // Simple scoring based on rank
content: entry.content,
level: "L1".to_string(),
overview: None,
})
.collect())
}
/// Grep resources by pattern
/// Grep memories by pattern (uses FTS5)
#[tauri::command]
pub fn viking_grep(
pub async fn viking_grep(
pattern: String,
uri: Option<String>,
case_sensitive: Option<bool>,
_case_sensitive: Option<bool>,
limit: Option<usize>,
) -> Result<Vec<VikingGrepResult>, String> {
let mut args = vec!["grep", "--json", &pattern];
let storage = get_storage().await?;
let uri_arg;
if let Some(ref u) = uri {
uri_arg = format!("--uri={}", u);
args.push(&uri_arg);
}
let scope = uri.as_ref().and_then(|u| {
// Extract agent scope from URI
u.strip_prefix("agent://")
.and_then(|s| s.split('/').next())
.map(|agent| format!("agent://{}", agent))
});
if case_sensitive.unwrap_or(false) {
args.push("--case-sensitive");
}
let options = FindOptions {
scope,
limit,
min_similarity: Some(0.05), // Lower threshold for grep
};
let limit_arg;
if let Some(l) = limit {
limit_arg = format!("--limit={}", l);
args.push(&limit_arg);
}
let entries = storage
.find(&pattern, options)
.await
.map_err(|e| format!("Failed to grep memories: {}", e))?;
let output = run_viking_cli(&args)?;
if output.is_empty() || output == "null" || output == "[]" {
return Ok(Vec::new());
}
serde_json::from_str(&output)
.map_err(|e| format!("Failed to parse grep results: {}\nOutput: {}", e, output))
Ok(entries
.into_iter()
.flat_map(|entry| {
// Find matching lines
entry
.content
.lines()
.enumerate()
.filter(|(_, line)| {
line.to_lowercase()
.contains(&pattern.to_lowercase())
})
.map(|(i, line)| VikingGrepResult {
uri: entry.uri.clone(),
line: (i + 1) as u32,
content: line.to_string(),
match_start: line.find(&pattern).unwrap_or(0) as u32,
match_end: (line.find(&pattern).unwrap_or(0) + pattern.len()) as u32,
})
.collect::<Vec<_>>()
})
.take(limit.unwrap_or(100))
.collect())
}
/// List resources at a path
/// List memories at a path
#[tauri::command]
pub fn viking_ls(path: String) -> Result<Vec<VikingResource>, String> {
let output = run_viking_cli(&["ls", "--json", &path])?;
pub async fn viking_ls(path: String) -> Result<Vec<VikingResource>, String> {
let storage = get_storage().await?;
if output.is_empty() || output == "null" || output == "[]" {
return Ok(Vec::new());
let entries = storage
.find_by_prefix(&path)
.await
.map_err(|e| format!("Failed to list memories: {}", e))?;
Ok(entries
.into_iter()
.map(|entry| VikingResource {
uri: entry.uri.clone(),
name: entry
.uri
.rsplit('/')
.next()
.unwrap_or(&entry.uri)
.to_string(),
resource_type: entry.memory_type.to_string(),
size: Some(entry.content.len() as u64),
modified_at: Some(entry.last_accessed.to_rfc3339()),
})
.collect())
}
/// Read memory content
#[tauri::command]
pub async fn viking_read(uri: String, _level: Option<String>) -> Result<String, String> {
let storage = get_storage().await?;
let entry = storage
.get(&uri)
.await
.map_err(|e| format!("Failed to read memory: {}", e))?;
match entry {
Some(e) => Ok(e.content),
None => Err(format!("Memory not found: {}", uri)),
}
serde_json::from_str(&output)
.map_err(|e| format!("Failed to parse ls results: {}\nOutput: {}", e, output))
}
/// Read resource content
/// Remove a memory
#[tauri::command]
pub fn viking_read(uri: String, level: Option<String>) -> Result<String, String> {
let level_val = level.unwrap_or_else(|| "L1".to_string());
let level_arg = format!("--level={}", level_val);
pub async fn viking_remove(uri: String) -> Result<(), String> {
let storage = get_storage().await?;
run_viking_cli(&["read", &uri, &level_arg])
}
storage
.delete(&uri)
.await
.map_err(|e| format!("Failed to remove memory: {}", e))?;
/// Remove a resource
#[tauri::command]
pub fn viking_remove(uri: String) -> Result<(), String> {
run_viking_cli(&["remove", &uri])?;
Ok(())
}
/// Get resource tree
/// Get memory tree
#[tauri::command]
pub fn viking_tree(path: String, depth: Option<usize>) -> Result<serde_json::Value, String> {
let depth_val = depth.unwrap_or(2);
let depth_arg = format!("--depth={}", depth_val);
pub async fn viking_tree(path: String, _depth: Option<usize>) -> Result<serde_json::Value, String> {
let storage = get_storage().await?;
let output = run_viking_cli(&["tree", "--json", &path, &depth_arg])?;
let entries = storage
.find_by_prefix(&path)
.await
.map_err(|e| format!("Failed to get tree: {}", e))?;
if output.is_empty() || output == "null" {
return Ok(serde_json::json!({}));
// Build a simple tree structure
let mut tree = serde_json::Map::new();
for entry in entries {
let parts: Vec<&str> = entry.uri.split('/').collect();
let mut current = &mut tree;
for part in &parts[..parts.len().saturating_sub(1)] {
if !current.contains_key(*part) {
current.insert(
(*part).to_string(),
serde_json::json!({}),
);
}
current = current
.get_mut(*part)
.and_then(|v| v.as_object_mut())
.unwrap();
}
if let Some(last) = parts.last() {
current.insert(
(*last).to_string(),
serde_json::json!({
"type": entry.memory_type.to_string(),
"importance": entry.importance,
"access_count": entry.access_count,
}),
);
}
}
serde_json::from_str(&output)
.map_err(|e| format!("Failed to parse tree result: {}\nOutput: {}", e, output))
Ok(serde_json::Value::Object(tree))
}
/// Inject memories into prompt (for agent loop integration)
///
/// Retrieves up to 10 memories relevant to `user_input` within the agent's
/// scope, buckets them by type (session memories are skipped), trims the
/// set to a token budget (default 500), and injects the survivors into
/// `base_prompt`.
#[tauri::command]
pub async fn viking_inject_prompt(
    agent_id: String,
    base_prompt: String,
    user_input: String,
    max_tokens: Option<usize>,
) -> Result<String, String> {
    let store = get_storage().await?;

    // Semantic lookup scoped to this agent's memories.
    let search = FindOptions {
        scope: Some(format!("agent://{}", agent_id)),
        limit: Some(10),
        min_similarity: Some(0.3),
    };
    let hits = store
        .find(&user_input, search)
        .await
        .map_err(|e| format!("Failed to retrieve memories: {}", e))?;

    // Bucket the hits by memory type; session memories are not injected.
    let mut retrieved = RetrievalResult::default();
    for hit in hits {
        match hit.memory_type {
            MemoryType::Preference => retrieved.preferences.push(hit),
            MemoryType::Knowledge => retrieved.knowledge.push(hit),
            MemoryType::Experience => retrieved.experience.push(hit),
            MemoryType::Session => {}
        }
    }
    retrieved.total_tokens = retrieved.calculate_tokens();

    // Enforce the token budget by dropping the lowest-priority memories
    // first: experience, then knowledge, then preferences.
    let token_budget = max_tokens.unwrap_or(500);
    while retrieved.total_tokens > token_budget {
        let dropped = retrieved
            .experience
            .pop()
            .or_else(|| retrieved.knowledge.pop())
            .or_else(|| retrieved.preferences.pop());
        if dropped.is_none() {
            break; // nothing left to trim
        }
        retrieved.total_tokens = retrieved.calculate_tokens();
    }

    Ok(PromptInjector::new().inject_with_format(&base_prompt, &retrieved))
}
// === Helper Functions ===
/// Parse a memory URI of the form `agent://{agent_id}/{type}/{category}`
/// into its `(agent_id, memory_type, category)` components.
///
/// The category segment may itself contain `/` (only the first two
/// separators split the URI). Returns an error when the `agent://` prefix
/// is missing, when fewer than three segments are present, or when any
/// segment is empty (e.g. `agent://a/t/`), which previously produced
/// degenerate keys.
fn parse_uri(uri: &str) -> Result<(String, MemoryType, String), String> {
    let without_prefix = uri
        .strip_prefix("agent://")
        .ok_or_else(|| format!("Invalid URI format: {}", uri))?;

    // splitn(3, ..) keeps any further '/' inside the category segment.
    let parts: Vec<&str> = without_prefix.splitn(3, '/').collect();
    if parts.len() < 3 || parts.iter().any(|p| p.is_empty()) {
        return Err(format!("Invalid URI format, expected agent://{{agent_id}}/{{type}}/{{category}}: {}", uri));
    }

    let agent_id = parts[0].to_string();
    let memory_type = MemoryType::parse(parts[1]);
    let category = parts[2].to_string();

    Ok((agent_id, memory_type, category))
}
// === Tests ===
@@ -361,10 +468,19 @@ pub fn viking_tree(path: String, depth: Option<usize>) -> Result<serde_json::Val
mod tests {
use super::*;
#[tokio::test]
async fn test_parse_uri() {
let (agent_id, memory_type, category) =
parse_uri("agent://test-agent/preferences/style").unwrap();
assert_eq!(agent_id, "test-agent");
assert_eq!(memory_type, MemoryType::Preference);
assert_eq!(category, "style");
}
#[test]
fn test_status_unavailable_without_cli() {
// This test will fail if ov is installed, which is fine
let result = viking_status();
assert!(result.is_ok());
fn test_invalid_uri() {
assert!(parse_uri("invalid-uri").is_err());
assert!(parse_uri("agent://only-agent").is_err());
}
}

View File

@@ -1,295 +0,0 @@
//! OpenViking Local Server Management
//!
//! Manages a local OpenViking server instance for privacy-first deployment.
//! All data is stored locally in ~/.openviking/ - nothing is uploaded to remote servers.
//!
//! Architecture:
//! ┌─────────────────────────────────────────────────────────────────┐
//! │ ZCLAW Desktop (Tauri) │
//! │ │
//! │ ┌─────────────────┐ HTTP ┌─────────────────────────┐ │
//! │ │ viking_commands │ ◄────────────►│ openviking-server │ │
//! │ │ (Tauri cmds) │ localhost │ (Python, managed here) │ │
//! │ └─────────────────┘ └───────────┬─────────────┘ │
//! │ │ │
//! │ ┌─────────▼─────────────┐ │
//! │ │ SQLite + Vector Store │ │
//! │ │ ~/.openviking/ │ │
//! │ │ (LOCAL DATA ONLY) │ │
//! │ └───────────────────────┘ │
//! └─────────────────────────────────────────────────────────────────┘
use serde::{Deserialize, Serialize};
use std::process::{Child, Command};
use std::sync::Mutex;
use std::time::Duration;
// === Types ===
/// Runtime status of the locally managed OpenViking server, serialized to
/// the frontend with camelCase keys.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerStatus {
    /// True when an HTTP status probe on `port` succeeded.
    pub running: bool,
    /// TCP port the server is expected to listen on (default 1933).
    pub port: u16,
    /// PID of the child process spawned by this app, if we own one.
    pub pid: Option<u32>,
    /// Local data directory (all data stays on this machine).
    pub data_dir: Option<String>,
    /// Version string fetched from the server, when reachable.
    pub version: Option<String>,
    /// Human-readable diagnostic message, if any.
    pub error: Option<String>,
}
/// Configuration for launching the local OpenViking server, serialized with
/// camelCase keys for the frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerConfig {
    /// TCP port to bind the server to.
    pub port: u16,
    /// Directory holding the server's local workspace data.
    pub data_dir: String,
    /// Optional path to an `ov.conf` config file, exported to the server
    /// via the `OPENVIKING_CONFIG_FILE` environment variable.
    pub config_file: Option<String>,
}

impl Default for ServerConfig {
    /// Defaults to port 1933 with data under `~/.openviking/`; falls back to
    /// the current directory when the home directory cannot be resolved.
    fn default() -> Self {
        let home = dirs::home_dir()
            .map(|p| p.to_string_lossy().to_string())
            .unwrap_or_else(|| ".".to_string());

        Self {
            port: 1933,
            data_dir: format!("{}/.openviking/workspace", home),
            config_file: Some(format!("{}/.openviking/ov.conf", home)),
        }
    }
}
// === Server Process Management ===
static SERVER_PROCESS: Mutex<Option<Child>> = Mutex::new(None);
/// Check if OpenViking server is running
///
/// Probes `GET /api/v1/status` on localhost with a 2-second timeout; any
/// failure along the way (client build, connect, HTTP error status) is
/// reported as "not running".
fn is_server_running(port: u16) -> bool {
    let url = format!("http://127.0.0.1:{}/api/v1/status", port);
    reqwest::blocking::Client::builder()
        .timeout(Duration::from_secs(2))
        .build()
        .ok()
        .and_then(|client| client.get(&url).send().ok())
        .map(|resp| resp.status().is_success())
        .unwrap_or(false)
}
/// Find openviking-server executable
///
/// Resolution order:
/// 1. `ZCLAW_VIKING_SERVER_BIN` environment variable (must exist on disk);
/// 2. well-known command names resolvable via `which`;
/// 3. common install locations under the user's home directory;
/// 4. last resort: assume `openviking-server` is on PATH.
fn find_server_binary() -> Result<String, String> {
    // 1. Explicit override via environment variable.
    if let Ok(path) = std::env::var("ZCLAW_VIKING_SERVER_BIN") {
        if std::path::Path::new(&path).exists() {
            return Ok(path);
        }
    }

    // 2. Probe PATH with `which` for each known command form. For the
    //    "python -m ..." form only the program word is probed.
    for cmd in ["openviking-server", "python -m openviking.server"] {
        let program = cmd.split_whitespace().next().unwrap_or("");
        let on_path = Command::new("which")
            .arg(program)
            .output()
            .map(|o| o.status.success())
            .unwrap_or(false);
        if on_path {
            return Ok(cmd.to_string());
        }
    }

    // 3. Common install locations under the user's home directory.
    let home = dirs::home_dir()
        .map(|p| p.to_string_lossy().to_string())
        .unwrap_or_default();
    for candidate in [
        format!("{}/.openviking/venv/bin/openviking-server", home),
        format!("{}/.local/bin/openviking-server", home),
    ] {
        if std::path::Path::new(&candidate).exists() {
            return Ok(candidate);
        }
    }

    // 4. Fallback: assume it's in PATH via pip install.
    Ok("openviking-server".to_string())
}
// === Tauri Commands ===
/// Get server status
///
/// Probes the default port; when the server answers, also reads our managed
/// child's PID (if any) and fetches the server-reported version string.
#[tauri::command]
pub fn viking_server_status() -> Result<ServerStatus, String> {
    let config = ServerConfig::default();
    let running = is_server_running(config.port);

    // The PID is only meaningful while the server actually responds.
    let pid = match (running, SERVER_PROCESS.lock()) {
        (true, Ok(guard)) => guard.as_ref().map(|child| child.id()),
        _ => None,
    };

    // Best-effort version fetch; any failure simply yields None.
    let version = if running {
        let url = format!("http://127.0.0.1:{}/api/v1/version", config.port);
        reqwest::blocking::Client::builder()
            .timeout(Duration::from_secs(2))
            .build()
            .ok()
            .and_then(|client| client.get(&url).send().ok())
            .and_then(|resp| resp.text().ok())
    } else {
        None
    };

    Ok(ServerStatus {
        running,
        port: config.port,
        pid,
        data_dir: Some(config.data_dir),
        version,
        error: None,
    })
}
/// Start local OpenViking server
///
/// Reports `running: true` with a note when a server already answers on the
/// configured port. Otherwise spawns the server binary, stores the child
/// handle, and polls the status endpoint for up to 15 seconds.
///
/// Fixes vs. the previous version:
/// - `OPENVIKING_CONFIG_FILE` is set on the child's environment instead of
///   mutating this process's global environment with `std::env::set_var`;
/// - on startup timeout the spawned child is killed and reaped instead of
///   being left running with a dangling handle (process leak).
#[tauri::command]
pub fn viking_server_start(config: Option<ServerConfig>) -> Result<ServerStatus, String> {
    let config = config.unwrap_or_default();

    // Check if already running
    if is_server_running(config.port) {
        return Ok(ServerStatus {
            running: true,
            port: config.port,
            pid: None,
            data_dir: Some(config.data_dir),
            version: None,
            error: Some("Server already running".to_string()),
        });
    }

    // Find server binary
    let server_bin = find_server_binary()?;

    // Ensure data directory exists
    std::fs::create_dir_all(&config.data_dir)
        .map_err(|e| format!("Failed to create data directory: {}", e))?;

    // Build the launch command: a "python -m ..." style string is split into
    // program + args; anything else is treated as a direct binary path
    // (kept whole so paths with spaces still work).
    let mut cmd = if server_bin.contains("python") {
        let mut words = server_bin.split_whitespace();
        let program = words.next().unwrap_or(server_bin.as_str()).to_string();
        let mut c = Command::new(program);
        c.args(words);
        c
    } else {
        Command::new(&server_bin)
    };
    cmd.arg("--host")
        .arg("127.0.0.1")
        .arg("--port")
        .arg(config.port.to_string());
    if let Some(ref config_file) = config.config_file {
        // Scope the config path to the child process only.
        cmd.env("OPENVIKING_CONFIG_FILE", config_file);
    }

    // Start server process
    let child = cmd
        .spawn()
        .map_err(|e| format!("Failed to start server: {}", e))?;
    let pid = child.id();

    // Store process handle
    if let Ok(mut guard) = SERVER_PROCESS.lock() {
        *guard = Some(child);
    }

    // Wait for server to be ready: 30 * 500ms = 15s.
    let mut ready = false;
    for _ in 0..30 {
        std::thread::sleep(Duration::from_millis(500));
        if is_server_running(config.port) {
            ready = true;
            break;
        }
    }

    if !ready {
        // Don't leak the child on timeout: kill it and reap its exit status.
        if let Ok(mut guard) = SERVER_PROCESS.lock() {
            if let Some(mut child) = guard.take() {
                let _ = child.kill();
                let _ = child.wait();
            }
        }
        return Err("Server failed to start within 15 seconds".to_string());
    }

    Ok(ServerStatus {
        running: true,
        port: config.port,
        pid: Some(pid),
        data_dir: Some(config.data_dir),
        version: None,
        error: None,
    })
}
/// Stop local OpenViking server
///
/// Takes the managed child handle (if any), kills the process, and reaps
/// its exit status. Previously the killed child was never `wait()`ed,
/// leaving a zombie process behind on Unix.
#[tauri::command]
pub fn viking_server_stop() -> Result<(), String> {
    if let Ok(mut guard) = SERVER_PROCESS.lock() {
        if let Some(mut child) = guard.take() {
            child.kill().map_err(|e| format!("Failed to kill server: {}", e))?;
            // Reap the exit status so the killed child doesn't linger as a zombie.
            let _ = child.wait();
        }
    }
    Ok(())
}
/// Restart local OpenViking server
///
/// Stops the managed server (if any), pauses briefly so the port can be
/// released, then starts a fresh instance with the given configuration.
#[tauri::command]
pub fn viking_server_restart(config: Option<ServerConfig>) -> Result<ServerStatus, String> {
    viking_server_stop()?;
    // Give the OS a moment to release the port before rebinding.
    std::thread::sleep(Duration::from_millis(1000));
    viking_server_start(config)
}
// === Tests ===
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config must target port 1933 with data under ~/.openviking/.
    #[test]
    fn test_server_config_default() {
        let config = ServerConfig::default();
        assert_eq!(config.port, 1933);
        assert!(config.data_dir.contains(".openviking"));
    }

    /// Smoke test: the probe must return without panicking whether or not
    /// anything listens on the port. (The old `assert!(result || !result)`
    /// was a tautology and verified nothing.)
    #[test]
    fn test_is_server_running_not_running() {
        let _ = is_server_running(1933);
    }
}

View File

@@ -43,6 +43,7 @@ const CATEGORY_CONFIG: Record<string, { label: string; className: string }> = {
default: { label: '其他', className: 'bg-gray-100 text-gray-700 dark:bg-gray-800 dark:text-gray-400' },
};
function CategoryBadge({ category }: { category: string }) {
const config = CATEGORY_CONFIG[category] || CATEGORY_CONFIG.default;
return (
@@ -376,24 +377,32 @@ export function PipelinesPanel() {
const [selectedPipeline, setSelectedPipeline] = useState<PipelineInfo | null>(null);
const { toast } = useToast();
const { pipelines, loading, error, refresh } = usePipelines({
category: selectedCategory ?? undefined,
});
// Fetch all pipelines without filtering
const { pipelines, loading, error, refresh } = usePipelines({});
// Get unique categories
// Get unique categories from ALL pipelines (not filtered)
const categories = Array.from(
new Set(pipelines.map((p) => p.category).filter(Boolean))
);
// Filter pipelines by search
const filteredPipelines = searchQuery
? pipelines.filter(
(p) =>
p.displayName.toLowerCase().includes(searchQuery.toLowerCase()) ||
p.description.toLowerCase().includes(searchQuery.toLowerCase()) ||
p.tags.some((t) => t.toLowerCase().includes(searchQuery.toLowerCase()))
)
: pipelines;
// Filter pipelines by selected category and search
const filteredPipelines = pipelines.filter((p) => {
// Category filter
if (selectedCategory && p.category !== selectedCategory) {
return false;
}
// Search filter
if (searchQuery) {
const query = searchQuery.toLowerCase();
return (
p.displayName.toLowerCase().includes(query) ||
p.description.toLowerCase().includes(query) ||
p.tags.some((t) => t.toLowerCase().includes(query))
);
}
return true;
});
const handleRunPipeline = (pipeline: PipelineInfo) => {
setSelectedPipeline(pipeline);
@@ -474,6 +483,7 @@ export function PipelinesPanel() {
))}
</div>
)}
</div>
{/* Content */}

View File

@@ -0,0 +1,400 @@
/**
* IntentInput - 智能输入组件
*
* 提供自然语言触发 Pipeline 的入口:
* - 支持关键词/模式快速匹配
* - 显示匹配建议
* - 参数收集(对话式/表单式)
*/
import { useState, useCallback, useRef, useEffect } from 'react';
import {
Send,
Sparkles,
Loader2,
ChevronRight,
X,
MessageSquare,
FileText,
Zap,
} from 'lucide-react';
import { invoke } from '@tauri-apps/api/core';
// === Types ===
/** Routing result returned by the `route_intent` backend command. */
interface RouteResult {
  /** Outcome of routing the user's free-text input. */
  type: 'matched' | 'ambiguous' | 'no_match' | 'need_more_info';
  /** Matched pipeline id (present when type === 'matched'). */
  pipeline_id?: string;
  /** Human-readable name of the matched pipeline; falls back to its id. */
  display_name?: string;
  /** Parameter-collection mode for the matched pipeline. */
  mode?: 'conversation' | 'form' | 'hybrid' | 'auto';
  /** Parameters already extracted from the user input. */
  params?: Record<string, unknown>;
  /** Match confidence; values >= 0.9 trigger auto-execution. */
  confidence?: number;
  /** Required parameters not yet provided by the user. */
  missing_params?: MissingParam[];
  /** Candidate pipelines when the match is ambiguous. */
  candidates?: PipelineCandidate[];
  /** Suggested pipelines when nothing matched. */
  suggestions?: PipelineCandidate[];
  /** Follow-up prompt text from the router, if any (unused by this component). */
  prompt?: string;
}
/** Descriptor for a pipeline parameter that still needs a value. */
interface MissingParam {
  /** Machine name of the parameter; used as the form field key. */
  name: string;
  /** Display label; falls back to `name` when absent. */
  label?: string;
  /** Input widget selector: 'text' | 'number' | 'boolean'; anything else renders a plain text input. */
  param_type: string;
  /** Whether the parameter must be provided before execution. */
  required: boolean;
  /** Pre-filled default value, if any. */
  default?: unknown;
}
/** A pipeline candidate shown in suggestion / disambiguation lists. */
interface PipelineCandidate {
  /** Pipeline identifier; passed to execution on selection. */
  id: string;
  /** Display name; falls back to `id` when absent. */
  display_name?: string;
  /** Short description shown under the name in suggestion lists. */
  description?: string;
  /** Icon identifier (currently unused by this component). */
  icon?: string;
  /** Category tag (currently unused by this component). */
  category?: string;
  /** Why this candidate was considered a match; shown for ambiguous results. */
  match_reason?: string;
}
/** Props for the IntentInput component. */
export interface IntentInputProps {
  /** Called when a pipeline is matched and should be executed. */
  onMatch?: (pipelineId: string, params: Record<string, unknown>, mode: string) => void;
  /** Called when the user cancels (Escape key). */
  onCancel?: () => void;
  /** Placeholder text for the input area. */
  placeholder?: string;
  /** Whether the input is disabled. */
  disabled?: boolean;
  /** Extra class names appended to the textarea. */
  className?: string;
}
// === IntentInput Component ===
export function IntentInput({
onMatch,
onCancel,
placeholder = '输入你想做的事情,如"帮我做一个Python入门课程"...',
disabled = false,
className = '',
}: IntentInputProps) {
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const [result, setResult] = useState<RouteResult | null>(null);
const [paramValues, setParamValues] = useState<Record<string, unknown>>({});
const inputRef = useRef<HTMLTextAreaElement>(null);
// Focus input on mount
useEffect(() => {
inputRef.current?.focus();
}, []);
// Handle route request
const handleRoute = useCallback(async () => {
if (!input.trim() || loading) return;
setLoading(true);
setResult(null);
try {
const routeResult = await invoke<RouteResult>('route_intent', {
userInput: input.trim(),
});
setResult(routeResult);
// Initialize param values from extracted params
if (routeResult.params) {
setParamValues(routeResult.params);
}
// If high confidence and no missing params, auto-execute
if (
routeResult.type === 'matched' &&
routeResult.confidence &&
routeResult.confidence >= 0.9 &&
(!routeResult.missing_params || routeResult.missing_params.length === 0)
) {
handleExecute(routeResult.pipeline_id!, routeResult.params || {}, routeResult.mode!);
}
} catch (error) {
console.error('Route error:', error);
setResult({
type: 'no_match',
suggestions: [],
});
} finally {
setLoading(false);
}
}, [input, loading]);
// Handle execute
const handleExecute = useCallback(
(pipelineId: string, params: Record<string, unknown>, mode: string) => {
onMatch?.(pipelineId, params, mode);
// Reset state
setInput('');
setResult(null);
setParamValues({});
},
[onMatch]
);
// Handle param change
const handleParamChange = useCallback((name: string, value: unknown) => {
setParamValues((prev) => ({ ...prev, [name]: value }));
}, []);
// Handle key press
const handleKeyPress = useCallback(
(e: React.KeyboardEvent) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
if (result?.type === 'matched') {
handleExecute(result.pipeline_id!, paramValues, result.mode!);
} else {
handleRoute();
}
} else if (e.key === 'Escape') {
onCancel?.();
}
},
[result, paramValues, handleRoute, handleExecute, onCancel]
);
// Render input area
const renderInput = () => (
<div className="relative">
<textarea
ref={inputRef}
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyDown={handleKeyPress}
placeholder={placeholder}
disabled={disabled || loading}
rows={2}
className={`w-full px-4 py-3 pr-12 border border-gray-300 dark:border-gray-600 rounded-xl resize-none focus:ring-2 focus:ring-blue-500 focus:border-transparent dark:bg-gray-800 dark:text-white disabled:opacity-50 ${className}`}
/>
<button
onClick={result?.type === 'matched' ? undefined : handleRoute}
disabled={!input.trim() || disabled || loading}
className="absolute right-3 bottom-3 p-2 rounded-lg bg-blue-600 hover:bg-blue-700 text-white disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
>
{loading ? (
<Loader2 className="w-5 h-5 animate-spin" />
) : (
<Send className="w-5 h-5" />
)}
</button>
</div>
);
// Render matched result
const renderMatched = () => {
if (!result || result.type !== 'matched') return null;
const { pipeline_id, display_name, mode, missing_params, confidence } = result;
return (
<div className="mt-3 p-4 bg-blue-50 dark:bg-blue-900/20 rounded-xl border border-blue-200 dark:border-blue-800">
<div className="flex items-start justify-between mb-3">
<div className="flex items-center gap-2">
<Sparkles className="w-5 h-5 text-blue-600" />
<span className="font-medium text-blue-700 dark:text-blue-300">
{display_name || pipeline_id}
</span>
{confidence && (
<span className="text-xs text-blue-500 dark:text-blue-400">
({Math.round(confidence * 100)}% )
</span>
)}
</div>
<button
onClick={() => setResult(null)}
className="p-1 hover:bg-blue-100 dark:hover:bg-blue-800 rounded"
>
<X className="w-4 h-4 text-blue-500" />
</button>
</div>
{/* Mode indicator */}
<div className="flex items-center gap-2 mb-3 text-sm">
<span className="text-gray-500 dark:text-gray-400">:</span>
<span className="flex items-center gap-1 px-2 py-0.5 bg-blue-100 dark:bg-blue-800 rounded">
{mode === 'conversation' && <MessageSquare className="w-3 h-3" />}
{mode === 'form' && <FileText className="w-3 h-3" />}
{mode === 'hybrid' && <Zap className="w-3 h-3" />}
{mode === 'conversation' && '对话式'}
{mode === 'form' && '表单式'}
{mode === 'hybrid' && '混合式'}
{mode === 'auto' && '自动'}
</span>
</div>
{/* Missing params form */}
{missing_params && missing_params.length > 0 && (
<div className="space-y-3 mb-4">
{missing_params.map((param) => (
<div key={param.name}>
<label className="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-1">
{param.label || param.name}
{param.required && <span className="text-red-500 ml-1">*</span>}
</label>
{renderParamInput(param)}
</div>
))}
</div>
)}
{/* Execute button */}
<button
onClick={() => handleExecute(pipeline_id!, paramValues, mode!)}
className="w-full flex items-center justify-center gap-2 px-4 py-2 bg-blue-600 hover:bg-blue-700 text-white font-medium rounded-lg transition-colors"
>
<Zap className="w-4 h-4" />
<ChevronRight className="w-4 h-4" />
</button>
</div>
);
};
// Render param input
const renderParamInput = (param: MissingParam) => {
const value = paramValues[param.name] ?? param.default ?? '';
switch (param.param_type) {
case 'text':
return (
<textarea
value={(value as string) || ''}
onChange={(e) => handleParamChange(param.name, e.target.value)}
rows={3}
className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg focus:ring-2 focus:ring-blue-500 dark:bg-gray-700 dark:text-white"
/>
);
case 'number':
return (
<input
type="number"
value={(value as number) ?? ''}
onChange={(e) => handleParamChange(param.name, e.target.valueAsNumber)}
className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg focus:ring-2 focus:ring-blue-500 dark:bg-gray-700 dark:text-white"
/>
);
case 'boolean':
return (
<label className="flex items-center gap-2">
<input
type="checkbox"
checked={(value as boolean) || false}
onChange={(e) => handleParamChange(param.name, e.target.checked)}
className="rounded border-gray-300 text-blue-600 focus:ring-blue-500"
/>
<span className="text-sm text-gray-600 dark:text-gray-300"></span>
</label>
);
default:
return (
<input
type="text"
value={(value as string) || ''}
onChange={(e) => handleParamChange(param.name, e.target.value)}
className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg focus:ring-2 focus:ring-blue-500 dark:bg-gray-700 dark:text-white"
/>
);
}
};
// Render suggestions
const renderSuggestions = () => {
if (!result || result.type !== 'no_match') return null;
const { suggestions } = result;
return (
<div className="mt-3 p-4 bg-gray-50 dark:bg-gray-800/50 rounded-xl border border-gray-200 dark:border-gray-700">
<p className="text-sm text-gray-600 dark:text-gray-400 mb-3">
Pipeline:
</p>
{suggestions && suggestions.length > 0 ? (
<div className="space-y-2">
{suggestions.map((candidate) => (
<button
key={candidate.id}
onClick={() => {
setInput('');
handleExecute(candidate.id, {}, 'form');
}}
className="w-full flex items-center justify-between p-3 bg-white dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700 hover:border-blue-300 dark:hover:border-blue-600 transition-colors text-left"
>
<div>
<span className="font-medium text-gray-900 dark:text-white">
{candidate.display_name || candidate.id}
</span>
{candidate.description && (
<p className="text-sm text-gray-500 dark:text-gray-400 mt-0.5">
{candidate.description}
</p>
)}
</div>
<ChevronRight className="w-5 h-5 text-gray-400" />
</button>
))}
</div>
) : (
<p className="text-sm text-gray-500 dark:text-gray-400">
</p>
)}
</div>
);
};
// Render ambiguous results
const renderAmbiguous = () => {
if (!result || result.type !== 'ambiguous') return null;
const { candidates } = result;
return (
<div className="mt-3 p-4 bg-amber-50 dark:bg-amber-900/20 rounded-xl border border-amber-200 dark:border-amber-800">
<p className="text-sm text-amber-700 dark:text-amber-300 mb-3">
Pipeline:
</p>
<div className="space-y-2">
{candidates?.map((candidate) => (
<button
key={candidate.id}
onClick={() => handleExecute(candidate.id, paramValues, 'form')}
className="w-full flex items-center justify-between p-3 bg-white dark:bg-gray-800 rounded-lg border border-amber-200 dark:border-amber-700 hover:border-amber-300 dark:hover:border-amber-600 transition-colors text-left"
>
<div>
<span className="font-medium text-gray-900 dark:text-white">
{candidate.display_name || candidate.id}
</span>
{candidate.match_reason && (
<p className="text-sm text-amber-600 dark:text-amber-400 mt-0.5">
{candidate.match_reason}
</p>
)}
</div>
<ChevronRight className="w-5 h-5 text-amber-500" />
</button>
))}
</div>
</div>
);
};
return (
<div className="intent-input">
{renderInput()}
{renderMatched()}
{renderSuggestions()}
{renderAmbiguous()}
</div>
);
}
export default IntentInput;

View File

@@ -0,0 +1,148 @@
/**
* Presentation Container
*
* Main container for smart presentation rendering.
*
* Features:
* - Auto-detects presentation type from data structure
* - Supports manual type switching
* - Manages presentation state
* - Provides export functionality
*/
import React, { useState, useMemo, useCallback } from 'react';
import { invoke } from '@tauri-apps/api/core';
import type { PresentationType, PresentationAnalysis } from './types';
import { TypeSwitcher } from './TypeSwitcher';
import { QuizRenderer } from './renderers/QuizRenderer';
// Heavy renderers are code-split and loaded on demand via React.lazy;
// the .then() adapter re-exposes each named export as the default export
// that lazy() expects.
const SlideshowRenderer = React.lazy(() => import('./renderers/SlideshowRenderer').then(m => ({ default: m.SlideshowRenderer })));
const DocumentRenderer = React.lazy(() => import('./renderers/DocumentRenderer').then(m => ({ default: m.DocumentRenderer })));
/** Props accepted by the PresentationContainer component. */
interface PresentationContainerProps {
  /** Pipeline output data */
  data: unknown;
  /** Pipeline ID (reserved for future use) */
  pipelineId?: string;
  /** Supported presentation types (from pipeline config); 'auto' entries are filtered out. */
  supportedTypes?: PresentationType[];
  /** Default presentation type; overrides the analyzer's recommendation. */
  defaultType?: PresentationType;
  /** Allow user to switch types */
  allowSwitch?: boolean;
  /** Called when export is triggered (reserved for future use) */
  onExport?: (format: string) => void;
  /** Custom className */
  className?: string;
}
/**
 * Smart presentation container.
 *
 * Asks the backend (`analyze_presentation` Tauri command) for a recommended
 * presentation type for the given data, then renders the matching renderer.
 * When `allowSwitch` is enabled the user can override the detected type via
 * the TypeSwitcher toolbar. Falls back to the 'document' renderer when
 * analysis fails.
 */
export function PresentationContainer({
  data,
  supportedTypes,
  defaultType,
  allowSwitch = true,
  className = '',
}: PresentationContainerProps) {
  const [analysis, setAnalysis] = useState<PresentationAnalysis | null>(null);
  const [currentType, setCurrentType] = useState<PresentationType | null>(null);
  const [isAnalyzing, setIsAnalyzing] = useState(true);
  // Fix: run the async analysis in an effect rather than useMemo. useMemo must
  // be side-effect free (the old code called setState during render) and gives
  // no cleanup hook to drop stale results when `data` changes or the component
  // unmounts.
  React.useEffect(() => {
    let cancelled = false; // guards against setState after unmount / stale data
    const runAnalysis = async () => {
      setIsAnalyzing(true);
      try {
        const result = await invoke<PresentationAnalysis>('analyze_presentation', { data });
        if (cancelled) return;
        setAnalysis(result);
        if (defaultType) {
          setCurrentType(defaultType);
        } else if (result) {
          setCurrentType(result.recommendedType);
        }
      } catch (error) {
        console.error('Failed to analyze presentation:', error);
        // Fall back to the most generic renderer on analysis failure.
        if (!cancelled) setCurrentType('document');
      } finally {
        if (!cancelled) setIsAnalyzing(false);
      }
    };
    runAnalysis();
    return () => {
      cancelled = true;
    };
  }, [data, defaultType]);
  const handleTypeChange = useCallback((type: PresentationType) => {
    setCurrentType(type);
  }, []);
  // Types offered in the switcher: the pipeline-configured list (minus the
  // 'auto' pseudo-type) or a sensible default set.
  const availableTypes = useMemo(() => {
    if (supportedTypes && supportedTypes.length > 0) {
      return supportedTypes.filter((t): t is PresentationType => t !== 'auto');
    }
    return (['quiz', 'slideshow', 'document', 'whiteboard'] as PresentationType[]);
  }, [supportedTypes]);
  // Dispatch to the renderer for the current type; slideshow/document are
  // lazy-loaded, so wrap them in Suspense with a skeleton fallback.
  const renderContent = () => {
    if (isAnalyzing) {
      return (
        <div className="flex items-center justify-center h-64">
          <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-blue-500" />
          <p className="ml-3 text-gray-500">...</p>
        </div>
      );
    }
    switch (currentType) {
      case 'quiz':
        return <QuizRenderer data={data as Parameters<typeof QuizRenderer>[0]['data']} />;
      case 'slideshow':
        return (
          <React.Suspense fallback={<div className="h-64 animate-pulse bg-gray-100" />}>
            <SlideshowRenderer data={data as Parameters<typeof SlideshowRenderer>[0]['data']} />
          </React.Suspense>
        );
      case 'document':
        return (
          <React.Suspense fallback={<div className="h-64 animate-pulse bg-gray-100" />}>
            <DocumentRenderer data={data as Parameters<typeof DocumentRenderer>[0]['data']} />
          </React.Suspense>
        );
      case 'whiteboard':
        // Whiteboard rendering is a placeholder for now.
        return (
          <div className="flex items-center justify-center h-64 bg-gray-50">
            <p className="text-gray-500">...</p>
          </div>
        );
      default:
        return (
          <div className="flex items-center justify-center h-64 bg-gray-50">
            <p className="text-gray-500"></p>
          </div>
        );
    }
  };
  return (
    <div className={`flex flex-col h-full ${className}`}>
      {allowSwitch && (
        <div className="border-b border-gray-200 bg-gray-50 p-3">
          <TypeSwitcher
            availableTypes={availableTypes}
            currentType={currentType || 'document'}
            analysis={analysis || undefined}
            onTypeChange={handleTypeChange}
          />
        </div>
      )}
      <div className="flex-1 overflow-auto">
        {renderContent()}
      </div>
    </div>
  );
}
export default PresentationContainer;

View File

@@ -0,0 +1,113 @@
/**
* Type Switcher Component
*
* Allows users to switch between presentation types.
*/
import {
BarChart3,
FileText,
Presentation,
CheckCircle,
PenTool,
} from 'lucide-react';
import type { PresentationType, PresentationAnalysis } from './types';
/** Props for {@link TypeSwitcher}. */
interface TypeSwitcherProps {
  /** Available types */
  availableTypes: PresentationType[];
  /** Current type */
  currentType: PresentationType;
  /** Analysis result (optional) */
  analysis?: PresentationAnalysis;
  /** Called when type is changed */
  onTypeChange: (type: PresentationType) => void;
  /** Disabled types */
  disabledTypes?: PresentationType[];
  /** Custom className */
  className?: string;
}
// Static display metadata for every presentation type: icon element, user-facing
// label and tooltip description (Chinese UI strings). TypeSwitcher skips any
// type missing from this table, so it must stay in sync with PresentationType.
const typeConfig: Record<PresentationType, { icon: React.ReactNode; label: string; description: string }> = {
  chart: {
    icon: <BarChart3 className="w-4 h-4" />,
    label: '图表',
    description: '数据可视化',
  },
  slideshow: {
    icon: <Presentation className="w-4 h-4" />,
    label: '幻灯片',
    description: '演示文稿风格',
  },
  quiz: {
    icon: <CheckCircle className="w-4 h-4" />,
    label: '测验',
    description: '互动问答',
  },
  document: {
    icon: <FileText className="w-4 h-4" />,
    label: '文档',
    description: 'Markdown 文档',
  },
  whiteboard: {
    icon: <PenTool className="w-4 h-4" />,
    label: '白板',
    description: '交互式画布',
  },
  // 'auto' is a pseudo-type (PresentationContainer filters it out of the
  // switcher), but it gets an entry so the Record type stays exhaustive.
  auto: {
    icon: <CheckCircle className="w-4 h-4" />,
    label: '自动',
    description: '自动检测类型',
  },
};
/**
 * Toolbar of buttons for choosing the active presentation type.
 *
 * Renders one button per entry in `availableTypes` (types without a
 * typeConfig entry are skipped), highlights the current selection, marks the
 * analyzer-recommended type, and shows the analysis confidence when provided.
 */
export function TypeSwitcher({
  availableTypes,
  currentType,
  analysis,
  onTypeChange,
  disabledTypes = [],
  className = '',
}: TypeSwitcherProps) {
  // Build one selector button for a single presentation type.
  const renderTypeButton = (type: PresentationType) => {
    const meta = typeConfig[type];
    if (!meta) return null;
    const active = type === currentType;
    const disabled = disabledTypes.includes(type);
    const isRecommended = analysis?.recommendedType === type;
    return (
      <button
        key={type}
        onClick={() => onTypeChange(type)}
        disabled={disabled}
        className={`
          flex items-center gap-2 px-3 py-2 rounded-lg transition-all
          ${active
            ? 'bg-blue-100 text-blue-700 border-2 border-blue-500'
            : 'bg-white text-gray-600 border border-gray-200 hover:bg-gray-100'
          }
          ${disabled ? 'opacity-50 cursor-not-allowed' : ''}
        `}
        title={meta.description}
      >
        <span className="text-lg">{meta.icon}</span>
        <span className="text-sm font-medium">{meta.label}</span>
        {isRecommended && (
          <span className="text-xs text-blue-500"></span>
        )}
      </button>
    );
  };
  return (
    <div className={`flex items-center gap-2 ${className}`}>
      {availableTypes.map(renderTypeButton)}
      {analysis && (
        <div className="ml-4 text-xs text-gray-500">
          <p>: {(analysis.confidence * 100).toFixed(0)}%</p>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,33 @@
/**
* Presentation Components
*
* Smart presentation layer for Pipeline output rendering.
*
* @example
* ```tsx
* import { PresentationContainer } from '@/components/presentation';
*
* <PresentationContainer
* data={pipelineOutput}
* pipelineId="course-generator"
* supportedTypes={['slideshow', 'quiz', 'document']}
* />
* ```
*/
export { PresentationContainer } from './PresentationContainer';
export { TypeSwitcher } from './TypeSwitcher';
export { QuizRenderer } from './renderers/QuizRenderer';
export { DocumentRenderer } from './renderers/DocumentRenderer';
export { SlideshowRenderer } from './renderers/SlideshowRenderer';
export type {
PresentationType,
PresentationAnalysis,
ChartData,
QuizData,
QuizQuestion,
QuestionType,
SlideshowData,
DocumentData,
WhiteboardData,
} from './types';

View File

@@ -0,0 +1,150 @@
/**
* Document Renderer
*
* Renders content as a scrollable document with Markdown support.
*/
import { useState } from 'react';
import { Download, ExternalLink, Copy } from 'lucide-react';
import type { DocumentData } from '../types';
/** Props for {@link DocumentRenderer}. */
interface DocumentRendererProps {
  /** Document data */
  data: DocumentData;
  /** Enable markdown rendering (default true; false dumps the data as JSON) */
  enableMarkdown?: boolean;
  /** Custom className */
  className?: string;
}
/**
 * Renders content as a scrollable document with lightweight Markdown support
 * (h1-h3 headings, list items, fenced code blocks, paragraphs) plus copy /
 * download / open-in-new-window actions in the header.
 */
export function DocumentRenderer({
  data,
  enableMarkdown = true,
  className = '',
}: DocumentRendererProps) {
  const [copied, setCopied] = useState(false);
  // Copy the raw document text to the clipboard and flash a confirmation.
  const handleCopy = async () => {
    try {
      const textToCopy = typeof data === 'string' ? data : (data.content || JSON.stringify(data, null, 2));
      await navigator.clipboard.writeText(textToCopy);
      setCopied(true);
      setTimeout(() => setCopied(false), 2000);
    } catch (error) {
      console.error('Failed to copy:', error);
    }
  };
  // Trigger a browser download via a transient anchor element.
  const handleDownload = () => {
    if (data.downloadUrl) {
      const link = document.createElement('a');
      link.href = data.downloadUrl;
      link.download = data.downloadFilename || 'document.md';
      document.body.appendChild(link);
      link.click();
      document.body.removeChild(link);
    }
  };
  // Minimal line-oriented Markdown renderer. Fixes over the previous version:
  // - React keys are positional (`md-<n>`) instead of the line text, so
  //   duplicate lines no longer produce duplicate keys;
  // - fenced code blocks (```) are collected across multiple lines instead of
  //   each fence line being sliced individually.
  const renderMarkdown = (content: string): React.ReactNode => {
    const lines = content.split('\n');
    const elements: React.ReactNode[] = [];
    let i = 0;
    while (i < lines.length) {
      const trimmed = lines[i].trim();
      if (!trimmed) {
        i++;
        continue;
      }
      if (trimmed.startsWith('```')) {
        // Gather everything up to (but excluding) the closing fence.
        const codeLines: string[] = [];
        i++;
        while (i < lines.length && !lines[i].trim().startsWith('```')) {
          codeLines.push(lines[i]);
          i++;
        }
        i++; // skip the closing fence (harmless past end of input)
        elements.push(
          <pre key={`md-${elements.length}`} className="bg-gray-900 text-gray-100 p-4 rounded-lg overflow-x-auto text-sm my-2">
            <code>{codeLines.join('\n')}</code>
          </pre>
        );
        continue;
      }
      if (trimmed.startsWith('# ')) {
        elements.push(
          <h1 key={`md-${elements.length}`} className="text-2xl font-bold mb-4">
            {trimmed.substring(2)}
          </h1>
        );
      } else if (trimmed.startsWith('## ')) {
        elements.push(
          <h2 key={`md-${elements.length}`} className="text-xl font-semibold mb-3">
            {trimmed.substring(3)}
          </h2>
        );
      } else if (trimmed.startsWith('### ')) {
        elements.push(
          <h3 key={`md-${elements.length}`} className="text-lg font-medium mb-2">
            {trimmed.substring(4)}
          </h3>
        );
      } else if (trimmed.startsWith('- ')) {
        elements.push(
          <li key={`md-${elements.length}`} className="ml-4 list-disc">
            {trimmed.substring(2)}
          </li>
        );
      } else {
        elements.push(
          <p key={`md-${elements.length}`} className="mb-2">{trimmed}</p>
        );
      }
      i++;
    }
    return <div className={className}>{elements}</div>;
  };
  if (!enableMarkdown) {
    // Raw mode: dump the data as pretty-printed JSON.
    return (
      <div className={`flex flex-col h-full ${className}`}>
        <pre className="whitespace-pre-wrap text-sm">{JSON.stringify(data, null, 2)}</pre>
      </div>
    );
  }
  return (
    <div className={`flex flex-col h-full ${className}`}>
      {data.title && (
        <div className="flex items-center justify-between p-4 border-b border-gray-200">
          <h1 className="text-xl font-semibold text-gray-900">{data.title}</h1>
          <div className="flex items-center gap-2">
            <button
              onClick={handleCopy}
              className="p-1 text-gray-400 hover:text-gray-600 flex items-center gap-1"
              title="复制"
            >
              <Copy className="w-4 h-4" />
              {copied && <span className="text-xs text-green-500"></span>}
            </button>
            {data.downloadUrl && (
              <button
                onClick={handleDownload}
                className="p-1 text-gray-400 hover:text-gray-600"
                title="下载"
              >
                <Download className="w-4 h-4" />
              </button>
            )}
            {data.url && (
              <button
                onClick={() => window.open(data.url, '_blank')}
                className="p-1 text-gray-400 hover:text-gray-600"
                title="在新窗口打开"
              >
                <ExternalLink className="w-4 h-4" />
              </button>
            )}
          </div>
        </div>
      )}
      <div className="flex-1 overflow-auto p-6">
        {typeof data === 'string'
          ? renderMarkdown(data)
          : renderMarkdown(data.content || JSON.stringify(data))}
      </div>
    </div>
  );
}
export default DocumentRenderer;

View File

@@ -0,0 +1,354 @@
/**
* Quiz Renderer
*
* Renders interactive quizzes with support for:
* - Single choice
* - Multiple choice
* - True/False
* - Fill in blank
* - Short answer
*/
import { useState, useMemo } from 'react';
import {
CheckCircle,
XCircle,
Award,
RotateCcw,
} from 'lucide-react';
import type { QuizData, QuizQuestion } from '../types';
/** Props for {@link QuizRenderer}. */
interface QuizRendererProps {
  /** Quiz definition: title, description and question list. */
  data: QuizData;
  /** Called on submit with score %, correct count and total question count. */
  onComplete?: (score: number, correct: number, total: number) => void;
  /** Called whenever the user selects or edits an answer. */
  onAnswer?: (questionId: string, answer: unknown) => void;
  // NOTE(review): this currently seeds the initial "show results" state, so the
  // default (true) starts the quiz with results revealed and inputs disabled —
  // confirm whether it should instead only gate answer reveal after submit.
  showAnswers?: boolean;
  /** Whether the user may restart the quiz after submitting. */
  allowRetry?: boolean;
  /** Extra CSS classes applied to the root element. */
  className?: string;
}
/** One recorded answer, graded at the moment it was selected. */
interface UserAnswer {
  questionId: string;
  answer: unknown;
  /** Result of checkAnswer at recording time. */
  isCorrect: boolean;
}
/**
 * Interactive quiz renderer.
 *
 * Walks through `data.questions` one at a time, records user answers, grades
 * them locally via `checkAnswer`, and shows a score summary after submission.
 * Supports single choice, multiple choice, true/false, fill-in-the-blank and
 * short-answer questions.
 */
export function QuizRenderer({
  data,
  onComplete,
  onAnswer,
  showAnswers = true,
  allowRetry = true,
  className = '',
}: QuizRendererProps) {
  const [currentIndex, setCurrentIndex] = useState(0);
  const [answers, setAnswers] = useState<Record<string, UserAnswer>>({});
  // Fix: results must start hidden. The previous `useState(showAnswers ?? false)`
  // initialization (with showAnswers defaulting to true) revealed answers and
  // disabled every input before the user had answered anything. `showAnswers`
  // now only controls whether correct answers are revealed in the results view.
  const [showResults, setShowResults] = useState(false);
  const [isCompleted, setIsCompleted] = useState(false);
  // Grade a single answer against its question's correctAnswer.
  const checkAnswer = (answer: unknown, question: QuizQuestion): boolean => {
    if (question.questionType === 'singleChoice' || question.questionType === 'trueFalse') {
      return answer === question.correctAnswer;
    }
    if (question.questionType === 'multipleChoice') {
      const answerArr = answer as string[];
      const correctArr = question.correctAnswer as string[];
      if (!Array.isArray(answerArr) || !Array.isArray(correctArr)) return false;
      // Order-insensitive comparison of selected option ids.
      return JSON.stringify([...answerArr].sort()) === JSON.stringify([...correctArr].sort());
    }
    if (question.questionType === 'fillBlank' || question.questionType === 'shortAnswer') {
      return String(answer).toLowerCase().trim() === String(question.correctAnswer).toLowerCase().trim();
    }
    return false;
  };
  // Fix: derive the score from the answers map instead of mirroring it in
  // state — the previous useMemo called setState during render, which is not
  // allowed (useMemo must be side-effect free).
  const totalQuestions = data.questions?.length ?? 0;
  const correctCount = useMemo(() => {
    if (!data.questions || data.questions.length === 0) return 0;
    return data.questions.filter((q: QuizQuestion) => answers[q.id]?.isCorrect ?? false).length;
  }, [answers, data.questions]);
  const score = totalQuestions > 0 ? Math.round((correctCount / totalQuestions) * 100) : 0;
  // Record (and immediately grade) an answer, then notify the caller.
  const handleSelectAnswer = (questionId: string, answer: unknown) => {
    const question = data.questions.find((q: QuizQuestion) => q.id === questionId);
    if (!question) return;
    const isCorrect = checkAnswer(answer, question);
    setAnswers(prev => ({
      ...prev,
      [questionId]: { questionId, answer, isCorrect },
    }));
    if (onAnswer) {
      onAnswer(questionId, answer);
    }
  };
  const handleNext = () => {
    if (currentIndex < data.questions.length - 1) {
      setCurrentIndex(currentIndex + 1);
    }
  };
  const handlePrev = () => {
    if (currentIndex > 0) {
      setCurrentIndex(currentIndex - 1);
    }
  };
  const handleSubmit = () => {
    setShowResults(true);
    setIsCompleted(true);
    if (onComplete) {
      onComplete(score, correctCount, data.questions.length);
    }
  };
  // Reset all answer state so the quiz can be taken again.
  const handleRetry = () => {
    setAnswers({});
    setShowResults(false);
    setIsCompleted(false);
    setCurrentIndex(0);
  };
  const question = data.questions[currentIndex];
  if (!question) return null;
  const progressPercent = ((currentIndex + 1) / data.questions.length) * 100;
  // Render the answer controls appropriate for the current question's type.
  const renderQuestionOptions = () => {
    const qType = question.questionType;
    if (qType === 'singleChoice' || qType === 'trueFalse') {
      return (
        <div className="space-y-3">
          {question.options.map((option) => {
            const isSelected = answers[question.id]?.answer === option.id;
            const showCorrect = showResults && showAnswers && question.correctAnswer === option.id;
            const showIncorrect = showResults && isSelected && !showCorrect;
            return (
              <button
                key={option.id}
                onClick={() => !showResults && handleSelectAnswer(question.id, option.id)}
                disabled={showResults}
                className={`w-full p-4 text-left rounded-lg border-2 transition-all ${
                  isSelected && !showResults ? 'border-blue-500 bg-blue-50' : ''
                } ${showCorrect ? 'border-green-500 bg-green-50' : ''} ${
                  showIncorrect ? 'border-red-500 bg-red-50' : ''
                } ${!isSelected && !showCorrect ? 'border-gray-200' : ''}`}
              >
                <div className="flex items-center justify-between">
                  <span className="flex-1">{option.text}</span>
                  {showCorrect && <CheckCircle className="w-5 h-5 text-green-500" />}
                  {showIncorrect && <XCircle className="w-5 h-5 text-red-500" />}
                </div>
              </button>
            );
          })}
        </div>
      );
    }
    if (qType === 'multipleChoice') {
      return (
        <div className="space-y-3">
          {question.options.map((option) => {
            const selectedAnswers = (answers[question.id]?.answer as string[]) || [];
            const isSelected = selectedAnswers.includes(option.id);
            const showCorrect = showResults && showAnswers && (question.correctAnswer as string[]).includes(option.id);
            return (
              <button
                key={option.id}
                onClick={() => {
                  if (showResults) return;
                  // Toggle this option within the multi-select answer.
                  const newAnswers = isSelected
                    ? selectedAnswers.filter(a => a !== option.id)
                    : [...selectedAnswers, option.id];
                  handleSelectAnswer(question.id, newAnswers);
                }}
                disabled={showResults}
                className={`w-full p-4 text-left rounded-lg border-2 transition-all ${
                  isSelected && !showResults ? 'border-blue-500 bg-blue-50' : ''
                } ${showCorrect ? 'border-green-500 bg-green-50' : ''} ${
                  !isSelected && !showCorrect && showResults ? 'border-gray-200 opacity-50' : ''
                }`}
              >
                <div className="flex items-center gap-3">
                  <input
                    type="checkbox"
                    checked={isSelected}
                    onChange={() => {}}
                    className="w-4 h-4 rounded"
                    disabled={showResults}
                  />
                  <span className="flex-1">{option.text}</span>
                  {showCorrect && <CheckCircle className="w-5 h-5 text-green-500" />}
                </div>
              </button>
            );
          })}
        </div>
      );
    }
    if (qType === 'fillBlank') {
      return (
        <div className="mt-4">
          <input
            type="text"
            placeholder="请输入答案..."
            className="w-full p-3 border rounded-lg focus:ring-2 focus:ring-blue-500"
            onChange={(e) => handleSelectAnswer(question.id, e.target.value)}
            disabled={showResults}
          />
          {showResults && showAnswers && (
            <p className="text-sm text-gray-500 mt-2">
              : {question.correctAnswer}
            </p>
          )}
        </div>
      );
    }
    if (qType === 'shortAnswer') {
      return (
        <div className="mt-4">
          <textarea
            placeholder="请输入你的答案..."
            className="w-full p-3 border rounded-lg focus:ring-2 focus:ring-blue-500 min-h-32"
            onChange={(e) => handleSelectAnswer(question.id, e.target.value)}
            disabled={showResults}
          />
          {showResults && showAnswers && (
            <p className="text-sm text-gray-500 mt-2">
              : {question.correctAnswer}
            </p>
          )}
        </div>
      );
    }
    return null;
  };
  return (
    <div className={`flex flex-col h-full ${className}`}>
      <div className="bg-white border-b border-gray-200 p-4">
        <div className="flex items-center justify-between">
          <div>
            {data.title && (
              <h2 className="text-lg font-semibold text-gray-900">{data.title}</h2>
            )}
            {data.description && (
              <p className="text-sm text-gray-500">{data.description}</p>
            )}
          </div>
          <div className="flex items-center gap-2 mt-4">
            <div className="flex-1 bg-gray-200 rounded-full h-2">
              <div
                className="h-2 bg-blue-500 transition-all"
                style={{ width: `${progressPercent}%` }}
              />
            </div>
            <span className="text-sm text-gray-600">
              {currentIndex + 1} / {data.questions.length}
            </span>
          </div>
        </div>
      </div>
      <div className="bg-white rounded-lg shadow p-6 flex-1">
        <div className="mb-4">
          <p className="text-lg font-medium text-gray-900">{question.text}</p>
          {question.hint && !showResults && (
            <p className="text-sm text-gray-500 mt-2">
              💡 {question.hint}
            </p>
          )}
        </div>
        {renderQuestionOptions()}
        <div className="flex items-center justify-between mt-6">
          <button
            onClick={handlePrev}
            disabled={currentIndex === 0}
            className="p-2 text-gray-600 hover:text-gray-900 disabled:opacity-50"
          >
            <RotateCcw className="w-5 h-5" />
          </button>
          <span className="text-sm text-gray-500">
            {currentIndex + 1} / {data.questions.length}
          </span>
          <button
            onClick={handleNext}
            disabled={currentIndex === data.questions.length - 1 || showResults}
            className="p-2 text-gray-600 hover:text-gray-900 disabled:opacity-50"
          >
          </button>
        </div>
        {!showResults ? (
          <button
            onClick={handleSubmit}
            className="w-full py-3 bg-blue-500 text-white rounded-lg font-medium hover:bg-blue-600 transition-colors mt-4"
          >
          </button>
        ) : (
          <div className="space-y-4 mt-4">
            <div className="flex items-center justify-center gap-4 p-4 bg-gray-50 rounded-lg">
              <div className="text-center">
                <div className="text-3xl font-bold text-gray-900">
                  {score}%
                </div>
                <div className="text-sm text-gray-500">
                  {correctCount} / {data.questions.length}
                </div>
              </div>
            </div>
            {allowRetry && (
              <button
                onClick={handleRetry}
                className="w-full py-2 text-blue-600 hover:bg-blue-50 rounded-lg font-medium transition-colors"
              >
              </button>
            )}
          </div>
        )}
      </div>
      {isCompleted && (
        <div className="bg-green-50 p-4 text-center">
          <Award className="w-8 h-8 text-green-500 mx-auto mb-2" />
          <p className="text-lg font-semibold text-green-700">
            🎉
          </p>
          <p className="text-sm text-green-600">
            : {score}% ({correctCount}/{data.questions.length} )
          </p>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,172 @@
/**
* Slideshow Renderer
*
* Renders presentation as a slideshow with slide navigation.
*/
import { useState, useEffect, useCallback } from 'react';
import {
ChevronLeft,
ChevronRight,
Maximize2,
Minimize2,
Play,
Pause,
} from 'lucide-react';
import type { SlideshowData } from '../types';
/** Props for {@link SlideshowRenderer}. */
interface SlideshowRendererProps {
  data: SlideshowData;
  /** Auto-play interval in seconds (0 = disabled) */
  autoPlayInterval?: number;
  /** Show progress indicator */
  showProgress?: boolean;
  /** Show speaker notes */
  showNotes?: boolean;
  /** Custom className */
  className?: string;
}
/**
 * Slideshow renderer with keyboard navigation (ArrowLeft / ArrowRight /
 * Space / 'f' for fullscreen), optional auto-play, a fullscreen toggle and
 * speaker notes.
 */
export function SlideshowRenderer({
  data,
  autoPlayInterval = 0,
  showProgress = true,
  showNotes = true,
  className = '',
}: SlideshowRendererProps) {
  const [currentIndex, setCurrentIndex] = useState(0);
  const [isPlaying, setIsPlaying] = useState(false);
  const [isFullscreen, setIsFullscreen] = useState(false);
  const slides = data.slides || [];
  const totalSlides = slides.length;
  // Navigation callbacks are declared before the effects that use them so they
  // can appear in the effect dependency arrays below.
  const handleNext = useCallback(() => {
    if (totalSlides === 0) return; // avoid `% 0`, which would yield NaN
    setCurrentIndex((prev) => (prev + 1) % totalSlides);
  }, [totalSlides]);
  const handlePrev = useCallback(() => {
    if (totalSlides === 0) return;
    setCurrentIndex((prev) => (prev - 1 + totalSlides) % totalSlides);
  }, [totalSlides]);
  const toggleFullscreen = useCallback(() => {
    setIsFullscreen((prev) => !prev);
  }, []);
  // Keyboard navigation. Fix: the dependency array was empty before, so the
  // listener captured the first render's callbacks (with a stale totalSlides)
  // for the lifetime of the component.
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.key === 'ArrowRight' || e.key === ' ') {
        e.preventDefault(); // keep Space from scrolling the page
        handleNext();
      } else if (e.key === 'ArrowLeft') {
        handlePrev();
      } else if (e.key === 'f') {
        toggleFullscreen();
      }
    };
    window.addEventListener('keydown', handleKeyDown);
    return () => window.removeEventListener('keydown', handleKeyDown);
  }, [handleNext, handlePrev, toggleFullscreen]);
  // Auto-play. Fix: handleNext added to the deps so the timer always advances
  // with the current slide count.
  useEffect(() => {
    if (isPlaying && autoPlayInterval > 0) {
      const timer = setInterval(handleNext, autoPlayInterval * 1000);
      return () => clearInterval(timer);
    }
  }, [isPlaying, autoPlayInterval, handleNext]);
  const currentSlide = slides[currentIndex];
  if (!currentSlide) {
    return (
      <div className={`flex items-center justify-center h-64 bg-gray-50 ${className}`}>
        <p className="text-gray-500"></p>
      </div>
    );
  }
  return (
    <div className={`flex flex-col h-full ${isFullscreen ? 'fixed inset-0 z-50 bg-white' : ''} ${className}`}>
      {/* Slide Content */}
      <div className="flex-1 flex items-center justify-center p-8">
        <div className="max-w-4xl w-full">
          {/* Title */}
          {currentSlide.title && (
            <h2 className="text-3xl font-bold text-center mb-6">
              {currentSlide.title}
            </h2>
          )}
          {/* Content rendering would go here */}
          <div className="text-gray-700">
            {/* This is simplified - real implementation would render based on content type */}
            {typeof currentSlide.content === 'string' ? (
              <p>{currentSlide.content}</p>
            ) : (
              <div>Complex content rendering</div>
            )}
          </div>
        </div>
      </div>
      {/* Controls */}
      <div className="flex items-center justify-between p-4 bg-gray-50 border-t">
        <div className="flex items-center gap-2">
          <button
            onClick={handlePrev}
            disabled={totalSlides <= 1}
            className="p-2 hover:bg-gray-200 rounded disabled:opacity-50"
          >
            <ChevronLeft className="w-5 h-5" />
          </button>
          <button
            onClick={() => setIsPlaying(!isPlaying)}
            disabled={autoPlayInterval === 0}
            className="p-2 hover:bg-gray-200 rounded disabled:opacity-50"
          >
            {isPlaying ? <Pause className="w-5 h-5" /> : <Play className="w-5 h-5" />}
          </button>
          <button
            onClick={handleNext}
            disabled={totalSlides <= 1}
            className="p-2 hover:bg-gray-200 rounded disabled:opacity-50"
          >
            <ChevronRight className="w-5 h-5" />
          </button>
        </div>
        {/* Progress */}
        {showProgress && (
          <div className="text-sm text-gray-500">
            {currentIndex + 1} / {totalSlides}
          </div>
        )}
        {/* Fullscreen */}
        <button
          onClick={toggleFullscreen}
          className="p-2 hover:bg-gray-200 rounded"
        >
          {isFullscreen ? (
            <Minimize2 className="w-5 h-5" />
          ) : (
            <Maximize2 className="w-5 h-5" />
          )}
        </button>
      </div>
      {/* Speaker Notes */}
      {showNotes && currentSlide.notes && (
        <div className="p-4 bg-yellow-50 border-t text-sm text-gray-600">
          📝 {currentSlide.notes}
        </div>
      )}
    </div>
  );
}
export default SlideshowRenderer;

View File

@@ -0,0 +1,145 @@
/**
* Presentation Types
*
* Type definitions for the presentation layer.
* Used by renderers and container components.
*/
/** All presentation modes the container can render ('auto' = let analysis decide). */
export type PresentationType =
  | 'chart'
  | 'quiz'
  | 'slideshow'
  | 'document'
  | 'whiteboard'
  | 'auto';
/**
 * Result of backend presentation-type analysis (the `analyze_presentation`
 * Tauri command invoked by PresentationContainer).
 */
export interface PresentationAnalysis {
  /** Type the analyzer suggests rendering with. */
  recommendedType: PresentationType;
  /** Confidence in the recommendation; the UI renders it as a percentage (value * 100). */
  confidence: number;
  /** Names of structural features detected in the data. */
  detectedFeatures: string[];
  /** Optional analyzer-specific extras. */
  metadata?: Record<string, unknown>;
}
/**
 * Chart presentation payload. NOTE(review): no ChartRenderer is exported from
 * this module's index, so these shapes appear reserved for future use — the
 * field names mirror Chart.js-style configs, but verify against the intended
 * renderer before relying on exact semantics.
 */
export interface ChartData {
  type: 'line' | 'bar' | 'pie' | 'scatter' | 'area';
  title?: string;
  /** Category labels, presumably one per data point — TODO confirm. */
  labels?: string[];
  datasets: ChartDataset[];
  options?: ChartOptions;
}
/** One series of values within a ChartData payload. */
export interface ChartDataset {
  label: string;
  data: number[];
  backgroundColor?: string | string[];
  borderColor?: string | string[];
  fill?: boolean;
}
/** Display options for a chart. */
export interface ChartOptions {
  responsive?: boolean;
  maintainAspectRatio?: boolean;
  plugins?: {
    legend?: {
      display?: boolean;
      position?: 'top' | 'bottom' | 'left' | 'right';
    };
    title?: {
      display?: boolean;
      text?: string;
    };
  };
  scales?: Record<string, unknown>;
}
/** Quiz payload consumed by QuizRenderer. */
export interface QuizData {
  title?: string;
  description?: string;
  questions: QuizQuestion[];
  // Units not established in this file — TODO confirm (seconds vs minutes).
  timeLimit?: number;
  // Not read by QuizRenderer in this module; presumably a percentage threshold.
  passingScore?: number;
}
/** A single quiz question. */
export interface QuizQuestion {
  id: string;
  /** Question prompt shown to the user. */
  text: string;
  questionType: QuestionType;
  /** Choices; only meaningful for choice-type questions. */
  options: QuizOption[];
  /**
   * Per QuizRenderer's grading: an option id (singleChoice/trueFalse), an
   * array of option ids (multipleChoice), or the expected text
   * (fillBlank/shortAnswer, compared case-insensitively after trimming).
   */
  correctAnswer: string | string[];
  /** Shown before submission when present. */
  hint?: string;
  // Not rendered by QuizRenderer in this module.
  explanation?: string;
  // Not used in scoring by QuizRenderer in this module.
  points?: number;
}
/** Supported question kinds. */
export type QuestionType =
  | 'singleChoice'
  | 'multipleChoice'
  | 'trueFalse'
  | 'fillBlank'
  | 'shortAnswer';
/** One selectable choice for a choice-type question. */
export interface QuizOption {
  id: string;
  text: string;
  // Redundant with QuizQuestion.correctAnswer; QuizRenderer grades via the
  // latter only.
  isCorrect?: boolean;
}
/** Slideshow payload consumed by SlideshowRenderer. */
export interface SlideshowData {
  title?: string;
  slides: Slide[];
  theme?: SlideshowTheme;
  // Note: SlideshowRenderer's auto-play is driven by its autoPlayInterval
  // prop; these fields are not read by it in this module.
  autoPlay?: boolean;
  interval?: number;
}
/**
 * A single slide. SlideshowRenderer currently renders only `title`, string
 * `content` and `notes`; the remaining fields support richer slide types.
 */
export interface Slide {
  id: string;
  type: 'title' | 'content' | 'image' | 'code' | 'twoColumn';
  title?: string;
  content?: string;
  image?: string;
  code?: string;
  language?: string;
  leftContent?: string;
  rightContent?: string;
  /** Speaker notes, shown below the slide when enabled. */
  notes?: string;
}
/** Visual theming knobs for a slideshow. */
export interface SlideshowTheme {
  backgroundColor?: string;
  textColor?: string;
  accentColor?: string;
  fontFamily?: string;
}
/** Document payload consumed by DocumentRenderer. */
export interface DocumentData {
  /** Header title; also enables the copy/download/open toolbar. */
  title?: string;
  /** Body text, rendered as lightweight Markdown. */
  content?: string;
  // Not read by DocumentRenderer in this module (it always markdown-renders).
  format?: 'markdown' | 'html' | 'plain';
  /** When set, enables the download button. */
  downloadUrl?: string;
  /** Filename for downloads (defaults to 'document.md' in the renderer). */
  downloadFilename?: string;
  /** When set, enables the open-in-new-window button. */
  url?: string;
}
/**
 * Whiteboard payload. NOTE(review): whiteboard rendering is currently a
 * placeholder in PresentationContainer, so these shapes are forward-looking.
 */
export interface WhiteboardData {
  title?: string;
  elements: WhiteboardElement[];
  background?: string;
  gridSize?: number;
}
/** One drawable element on the whiteboard canvas. */
export interface WhiteboardElement {
  id: string;
  type: 'rect' | 'circle' | 'line' | 'text' | 'image' | 'path';
  x: number;
  y: number;
  width?: number;
  height?: number;
  fill?: string;
  stroke?: string;
  strokeWidth?: number;
  text?: string;
  fontSize?: number;
  /** Image source — only meaningful for type 'image'. */
  src?: string;
  /** Coordinate list — presumably for 'line'/'path' elements; TODO confirm layout. */
  points?: number[];
}

View File

@@ -25,6 +25,10 @@ import {
type LLMServiceAdapter,
type LLMProvider,
} from './llm-service';
import {
extractAndStoreMemories,
type ChatMessageForExtraction,
} from './viking-client';
// === Types ===
@@ -160,24 +164,44 @@ export class MemoryExtractor {
extracted = extracted.filter(item => item.importance >= this.config.minImportanceThreshold);
console.log(`[MemoryExtractor] After importance filtering (>= ${this.config.minImportanceThreshold}): ${extracted.length} items`);
// Save to memory
// Save to memory (dual storage: intelligenceClient + viking-client/SqliteStorage)
let saved = 0;
let skipped = 0;
for (const item of extracted) {
// Primary: Store via viking-client to SqliteStorage (persistent)
if (extracted.length > 0) {
try {
await intelligenceClient.memory.store({
agent_id: agentId,
memory_type: item.type,
content: item.content,
importance: item.importance,
source: 'auto',
tags: item.tags,
conversation_id: conversationId,
});
saved++;
} catch {
skipped++;
const chatMessagesForViking: ChatMessageForExtraction[] = chatMessages.map(m => ({
role: m.role,
content: m.content,
}));
const vikingResult = await extractAndStoreMemories(
chatMessagesForViking,
agentId
);
console.log(`[MemoryExtractor] Viking storage result: ${vikingResult.summary}`);
saved = vikingResult.memories.length;
} catch (err) {
console.warn('[MemoryExtractor] Viking storage failed, falling back to intelligenceClient:', err);
// Fallback: Store via intelligenceClient (in-memory/graph)
for (const item of extracted) {
try {
await intelligenceClient.memory.store({
agent_id: agentId,
memory_type: item.type,
content: item.content,
importance: item.importance,
source: 'auto',
tags: item.tags,
conversation_id: conversationId,
});
saved++;
} catch {
skipped++;
}
}
}
}

View File

@@ -28,6 +28,7 @@ export interface PipelineInfo {
displayName: string;
description: string;
category: string;
industry: string;
tags: string[];
icon: string;
version: string;
@@ -75,10 +76,12 @@ export class PipelineClient {
*/
static async listPipelines(options?: {
category?: string;
industry?: string;
}): Promise<PipelineInfo[]> {
try {
const pipelines = await invoke<PipelineInfo[]>('pipeline_list', {
category: options?.category || null,
industry: options?.industry || null,
});
return pipelines;
} catch (error) {
@@ -206,20 +209,28 @@ export class PipelineClient {
pollIntervalMs: number = 1000
): Promise<PipelineRunResponse> {
// Start the pipeline
console.log('[DEBUG runAndWait] Starting pipeline:', request.pipelineId);
const { runId } = await this.runPipeline(request);
console.log('[DEBUG runAndWait] Got runId:', runId);
// Poll for progress until completion
let result = await this.getProgress(runId);
console.log('[DEBUG runAndWait] Initial progress:', result.status, result.message);
let pollCount = 0;
while (result.status === 'running' || result.status === 'pending') {
if (onProgress) {
onProgress(result);
}
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
pollCount++;
console.log(`[DEBUG runAndWait] Poll #${pollCount} for runId:`, runId);
result = await this.getProgress(runId);
console.log(`[DEBUG runAndWait] Progress:`, result.status, result.message);
}
console.log('[DEBUG runAndWait] Final result:', result.status, result.error || 'no error');
return result;
}
}
@@ -330,6 +341,7 @@ import { useState, useEffect, useCallback } from 'react';
export interface UsePipelineOptions {
category?: string;
industry?: string;
autoRefresh?: boolean;
refreshInterval?: number;
}
@@ -345,6 +357,7 @@ export function usePipelines(options: UsePipelineOptions = {}) {
try {
const result = await PipelineClient.listPipelines({
category: options.category,
industry: options.industry,
});
setPipelines(result);
} catch (err) {
@@ -352,24 +365,28 @@ export function usePipelines(options: UsePipelineOptions = {}) {
} finally {
setLoading(false);
}
}, [options.category]);
}, [options.category, options.industry]);
const refresh = useCallback(async () => {
setLoading(true);
setError(null);
try {
const result = await PipelineClient.refresh();
// Filter by category if specified
const filtered = options.category
? result.filter((p) => p.category === options.category)
: result;
// Filter by category and industry if specified
let filtered = result;
if (options.category) {
filtered = filtered.filter((p) => p.category === options.category);
}
if (options.industry) {
filtered = filtered.filter((p) => p.industry === options.industry);
}
setPipelines(filtered);
} catch (err) {
setError(err instanceof Error ? err.message : String(err));
} finally {
setLoading(false);
}
}, [options.category]);
}, [options.category, options.industry]);
useEffect(() => {
loadPipelines();

View File

@@ -172,3 +172,71 @@ export async function stopVikingServer(): Promise<void> {
export async function restartVikingServer(): Promise<void> {
return invoke<void>('viking_server_restart');
}
// === Memory Extraction Functions ===
/** Minimal chat message shape sent to the memory-extraction Tauri commands. */
export interface ChatMessageForExtraction {
  role: string;
  content: string;
  /** Optional timestamp; format not established here — confirm with the backend. */
  timestamp?: string;
}
/** A single memory produced by backend extraction. */
export interface ExtractedMemory {
  category: 'user_preference' | 'user_fact' | 'agent_lesson' | 'agent_pattern' | 'task';
  content: string;
  tags: string[];
  /** Importance score — scale defined by the backend extractor. */
  importance: number;
  /** URI the extractor suggests storing this memory under. */
  suggestedUri: string;
  /** Optional explanation of why this memory was extracted. */
  reasoning?: string;
}
/** Aggregate result returned by the extraction commands. */
export interface ExtractionResult {
  memories: ExtractedMemory[];
  /** Human-readable summary of the extraction run. */
  summary: string;
  tokensSaved?: number;
  extractionTimeMs: number;
}
/**
 * Extract memories from a conversation session via the
 * `extract_session_memories` Tauri command.
 *
 * @param messages - Conversation transcript to mine for memories.
 * @param agentId  - Agent the extracted memories belong to.
 */
export async function extractSessionMemories(
  messages: ChatMessageForExtraction[],
  agentId: string
): Promise<ExtractionResult> {
  const args = { messages, agentId };
  return await invoke<ExtractionResult>('extract_session_memories', args);
}
/**
 * Extract memories and persist them to SqliteStorage in a single backend
 * call (`extract_and_store_memories` Tauri command).
 *
 * @param messages    - Conversation transcript to mine for memories.
 * @param agentId     - Agent the memories belong to.
 * @param llmEndpoint - Optional LLM endpoint override for extraction.
 * @param llmApiKey   - Optional API key paired with `llmEndpoint`.
 */
export async function extractAndStoreMemories(
  messages: ChatMessageForExtraction[],
  agentId: string,
  llmEndpoint?: string,
  llmApiKey?: string
): Promise<ExtractionResult> {
  const args = { messages, agentId, llmEndpoint, llmApiKey };
  return await invoke<ExtractionResult>('extract_and_store_memories', args);
}
/**
 * Ask the backend to weave relevant stored memories into a base prompt for
 * richer context (`viking_inject_prompt` Tauri command).
 *
 * @param agentId    - Agent whose memories should be considered.
 * @param basePrompt - Prompt to enrich.
 * @param userInput  - Current user input used to pick relevant memories.
 * @param maxTokens  - Optional budget for injected memory text.
 * @returns The enriched prompt string.
 */
export async function injectVikingPrompt(
  agentId: string,
  basePrompt: string,
  userInput: string,
  maxTokens?: number
): Promise<string> {
  const args = { agentId, basePrompt, userInput, maxTokens };
  return await invoke<string>('viking_inject_prompt', args);
}