Files
zclaw_openfang/desktop/src-tauri/src/kernel_commands/lifecycle.rs
iven 0a04b260a4
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
refactor(desktop): ChatStore structured split + IDB persistence + stream cancel
Split monolithic chatStore.ts (908 lines) into 4 focused stores:
- chatStore.ts: facade layer, owns messages[], backward-compatible selectors
- conversationStore.ts: conversation CRUD, agent switching, IndexedDB persistence
- streamStore.ts: streaming orchestration, chat mode, suggestions
- messageStore.ts: token tracking

Key fixes from 3-round deep audit:
- C1: Fix Rust serde camelCase vs TS snake_case mismatch (toolStart/toolEnd/iterationStart)
- C2: Fix IDB async rehydration race with persist.hasHydrated() subscribe
- C3: Add sessionKey to partialize to survive page refresh
- H3: Fix IDB migration retry on failure (don't set migrated=true in catch)
- M3: Fix ToolCallStep deduplication (toolStart creates, toolEnd updates)
- M-NEW-2: Clear sessionKey on cancelStream

Also adds:
- Rust backend stream cancellation via AtomicBool + cancel_stream command
- IndexedDB storage adapter with one-time localStorage migration
- HMR cleanup for cross-store subscriptions
2026-04-03 00:24:16 +08:00

383 lines
13 KiB
Rust

//! Kernel lifecycle commands: init, status, shutdown
use serde::{Deserialize, Serialize};
use tauri::State;
use super::{KernelState, SchedulerState};
// ---------------------------------------------------------------------------
// Request / Response types
// ---------------------------------------------------------------------------
/// Serde default for `KernelConfigRequest::api_protocol`.
fn default_api_protocol() -> String {
    String::from("openai")
}
/// Serde default for `KernelConfigRequest::provider`.
fn default_kernel_provider() -> String {
    String::from("openai")
}
/// Serde default for `KernelConfigRequest::model`.
fn default_kernel_model() -> String {
    String::from("gpt-4o-mini")
}
/// Kernel configuration request
///
/// Simple configuration: base_url + api_key + model
/// Model ID is passed directly to the API without any transformation
///
/// Deserialized from camelCase JSON sent by the frontend
/// (e.g. `apiKey`, `baseUrl`, `apiProtocol`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct KernelConfigRequest {
    /// LLM provider (for preset URLs): anthropic, openai, zhipu, kimi, qwen, deepseek, local, custom
    #[serde(default = "default_kernel_provider")]
    pub provider: String,
    /// Model identifier - passed directly to the API
    #[serde(default = "default_kernel_model")]
    pub model: String,
    /// API key; `None` means "no key supplied" (an empty key is used at boot)
    pub api_key: Option<String>,
    /// Base URL (optional, uses provider default if not specified)
    pub base_url: Option<String>,
    /// API protocol: openai or anthropic
    #[serde(default = "default_api_protocol")]
    pub api_protocol: String,
}
/// Kernel status response
///
/// Serialized to camelCase JSON for the frontend (e.g. `agentCount`,
/// `databaseUrl`). All `Option` fields are `None` when the kernel is
/// not initialized.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct KernelStatusResponse {
    /// Whether a kernel instance currently exists in state.
    pub initialized: bool,
    /// Number of agents registered with the kernel (0 when uninitialized).
    pub agent_count: usize,
    /// Database URL from the active config, when available.
    pub database_url: Option<String>,
    /// Effective LLM base URL of the running kernel.
    pub base_url: Option<String>,
    /// Effective LLM model of the running kernel.
    pub model: Option<String>,
}
// ---------------------------------------------------------------------------
// Commands
// ---------------------------------------------------------------------------
/// Initialize the internal ZCLAW Kernel
///
/// If kernel already exists with the same config, returns existing status.
/// If config changed, reboots kernel with new config.
///
/// # Arguments
/// * `state` - shared kernel slot; the async lock is held for the whole call
///   so concurrent init requests serialize.
/// * `scheduler_state` - shared slot for the background SchedulerService,
///   which is (re)started at the end of a successful init.
/// * `config_request` - optional provider/model/key config; when absent the
///   kernel boots with `KernelConfig::default()`.
///
/// # Errors
/// Returns an error string only when `Kernel::boot` fails; all other
/// failures (old-kernel shutdown, storage bridging) are logged and tolerated.
// @connected
#[tauri::command]
pub async fn kernel_init(
    state: State<'_, KernelState>,
    scheduler_state: State<'_, SchedulerState>,
    config_request: Option<KernelConfigRequest>,
) -> Result<KernelStatusResponse, String> {
    let mut kernel_lock = state.lock().await;
    // Check if we need to reboot kernel with new config
    if let Some(kernel) = kernel_lock.as_ref() {
        // Get current config from kernel
        let current_config = kernel.config();
        // Check if config changed
        let config_changed = if let Some(ref req) = config_request {
            // Resolve the provider's default base URL so a request without an
            // explicit base_url compares against the same effective value the
            // kernel would boot with.
            let default_base_url = zclaw_kernel::config::KernelConfig::from_provider(
                &req.provider, "", &req.model, None, &req.api_protocol
            ).llm.base_url;
            let request_base_url = req.base_url.clone().unwrap_or(default_base_url.clone());
            // NOTE(review): only model and base_url are compared — a changed
            // api_key or api_protocol alone does NOT trigger a reboot. Confirm
            // this is intentional.
            current_config.llm.model != req.model ||
            current_config.llm.base_url != request_base_url
        } else {
            false
        };
        if !config_changed {
            // Same config, return existing status
            return Ok(KernelStatusResponse {
                initialized: true,
                agent_count: kernel.list_agents().len(),
                database_url: None,
                base_url: Some(current_config.llm.base_url.clone()),
                model: Some(current_config.llm.model.clone()),
            });
        }
        // Config changed, need to reboot kernel
        // Shutdown old kernel (best-effort: a failed shutdown is logged, not fatal)
        if let Err(e) = kernel.shutdown().await {
            tracing::warn!("[kernel_init] Failed to shutdown old kernel: {}", e);
        }
        *kernel_lock = None;
    }
    // Build configuration from request
    let config = if let Some(req) = &config_request {
        let api_key = req.api_key.as_deref().unwrap_or("");
        let base_url = req.base_url.as_deref();
        zclaw_kernel::config::KernelConfig::from_provider(
            &req.provider,
            api_key,
            &req.model,
            base_url,
            &req.api_protocol,
        )
    } else {
        zclaw_kernel::config::KernelConfig::default()
    };
    // Debug: print skills directory
    if let Some(ref skills_dir) = config.skills_dir {
        tracing::debug!("[kernel_init] Skills directory: {} (exists: {})", skills_dir.display(), skills_dir.exists());
    } else {
        tracing::debug!("[kernel_init] No skills directory configured");
    }
    // Snapshot these before `config` is partially moved below; reused for the
    // summary driver and the response.
    let base_url = config.llm.base_url.clone();
    let model = config.llm.model.clone();
    // Boot kernel
    let mut kernel = zclaw_kernel::Kernel::boot(config.clone())
        .await
        .map_err(|e| format!("Failed to initialize kernel: {}", e))?;
    let agent_count = kernel.list_agents().len();
    // Configure extraction driver so the Growth system can call LLM for memory extraction
    let driver = kernel.driver();
    crate::intelligence::extraction_adapter::configure_extraction_driver(
        driver.clone(),
        model.clone(),
    );
    // Bridge SqliteStorage to Kernel's GrowthIntegration
    {
        match crate::viking_commands::get_storage().await {
            Ok(sqlite_storage) => {
                let viking = std::sync::Arc::new(zclaw_runtime::VikingAdapter::new(sqlite_storage));
                kernel.set_viking(viking);
                tracing::info!("[kernel_init] Bridged persistent SqliteStorage to Kernel GrowthIntegration");
            }
            Err(e) => {
                // Non-fatal: growth data simply won't persist across restarts.
                tracing::warn!(
                    "[kernel_init] Failed to get SqliteStorage, GrowthIntegration will use in-memory storage: {}",
                    e
                );
            }
        }
        // Set the LLM extraction driver on the kernel for memory extraction via middleware
        let extraction_driver = crate::intelligence::extraction_adapter::TauriExtractionDriver::new(
            driver.clone(),
            model.clone(),
        );
        kernel.set_extraction_driver(std::sync::Arc::new(extraction_driver));
    }
    // Configure summary driver so the Growth system can generate L0/L1 summaries.
    // Only possible when the request carried an API key — the summary driver
    // calls the chat/completions endpoint directly with it.
    if let Some(api_key) = config_request.as_ref().and_then(|r| r.api_key.clone()) {
        crate::summarizer_adapter::configure_summary_driver(
            crate::summarizer_adapter::TauriSummaryDriver::new(
                format!("{}/chat/completions", base_url),
                api_key,
                Some(model.clone()),
            ),
        );
    }
    *kernel_lock = Some(kernel);
    // Start SchedulerService — periodically checks and fires scheduled triggers
    {
        let mut sched_lock = scheduler_state.lock().await;
        // Stop old scheduler if any
        if let Some(ref old) = *sched_lock {
            old.stop();
        }
        let scheduler = zclaw_kernel::scheduler::SchedulerService::new(
            state.inner().clone(),
            60, // check every 60 seconds
        );
        scheduler.start();
        tracing::info!("[kernel_init] SchedulerService started (60s interval)");
        *sched_lock = Some(scheduler);
    }
    Ok(KernelStatusResponse {
        initialized: true,
        agent_count,
        database_url: Some(config.database_url),
        base_url: Some(base_url),
        model: Some(model),
    })
}
/// Get kernel status
///
/// Reports whether a kernel is currently booted and, if so, its agent
/// count and the effective database/base-URL/model configuration. Never
/// fails: an uninitialized kernel yields a zeroed response.
// @connected
#[tauri::command]
pub async fn kernel_status(
    state: State<'_, KernelState>,
) -> Result<KernelStatusResponse, String> {
    let guard = state.lock().await;
    let response = match guard.as_ref() {
        None => KernelStatusResponse {
            initialized: false,
            agent_count: 0,
            database_url: None,
            base_url: None,
            model: None,
        },
        Some(kernel) => {
            let cfg = kernel.config();
            KernelStatusResponse {
                initialized: true,
                agent_count: kernel.list_agents().len(),
                database_url: Some(cfg.database_url.clone()),
                base_url: Some(cfg.llm.base_url.clone()),
                model: Some(cfg.llm.model.clone()),
            }
        }
    };
    Ok(response)
}
/// Shutdown the kernel
///
/// Stops the background SchedulerService first (so no trigger fires during
/// teardown), then shuts the kernel down and clears both state slots.
/// A no-op when nothing is running.
// @reserved: no frontend integration yet
#[tauri::command]
pub async fn kernel_shutdown(
    state: State<'_, KernelState>,
    scheduler_state: State<'_, SchedulerState>,
) -> Result<(), String> {
    // Scheduler goes down before the kernel.
    {
        let mut sched_guard = scheduler_state.lock().await;
        if let Some(scheduler) = sched_guard.take() {
            scheduler.stop();
            tracing::info!("[kernel_shutdown] SchedulerService stopped");
        }
    }
    // Take the kernel out of its slot (lock held across shutdown so a
    // concurrent init cannot race the teardown).
    let mut kernel_guard = state.lock().await;
    if let Some(kernel) = kernel_guard.take() {
        kernel.shutdown().await.map_err(|e| e.to_string())?;
    }
    Ok(())
}
/// Apply SaaS-synced configuration to the Kernel config file.
///
/// Writes relevant config values (agent, llm categories) to the TOML config file.
/// The changes take effect on the next Kernel restart.
///
/// Returns the number of config items actually written.
///
/// # Errors
/// Returns an error string when no config path can be resolved, or when the
/// config directory/file cannot be created/written.
// @connected
#[tauri::command]
pub async fn kernel_apply_saas_config(
    configs: Vec<SaasConfigItem>,
) -> Result<u32, String> {
    let config_path = zclaw_kernel::config::KernelConfig::find_config_path()
        .ok_or_else(|| "No config file path found".to_string())?;
    // Start from the existing file contents (empty when it doesn't exist yet).
    let mut updated = if config_path.exists() {
        std::fs::read_to_string(&config_path).unwrap_or_default()
    } else {
        String::new()
    };
    let mut applied: u32 = 0;
    for config in &configs {
        // Only process kernel-relevant categories
        if !matches!(config.category.as_str(), "agent" | "llm") {
            continue;
        }
        // TOML keys use underscores; SaaS keys may arrive in dotted notation.
        let key = config.key.replace('.', "_");
        let section_header = format!("[{}]", config.category);
        let line_to_set = format!("{} = {}", key, toml_quote_value(&config.value));
        upsert_toml_line(&mut updated, &section_header, &key, &line_to_set);
        applied += 1;
    }
    if applied > 0 {
        // Ensure parent directory exists
        if let Some(parent) = config_path.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| format!("Failed to create config dir: {}", e))?;
        }
        std::fs::write(&config_path, updated.as_bytes())
            .map_err(|e| format!("Failed to write config: {}", e))?;
        tracing::info!(
            "[kernel_apply_saas_config] Applied {} config items to {:?} (restart required)",
            applied,
            config_path
        );
    }
    Ok(applied)
}
/// Insert or replace one `key = value` line inside `section_header` of a
/// TOML document.
///
/// Naive line-based patching (no TOML parser): the section is located by its
/// literal header, the key by a `\nkey =` / `\nkey=` prefix within that
/// section, and exactly that line is rewritten in place via `replace_range`
/// (the original code inserted a stray newline and then "repaired" it with a
/// global string replace, which could corrupt unrelated occurrences of the
/// same line). A missing key is appended to the section; a missing section
/// is appended to the document.
fn upsert_toml_line(doc: &mut String, section_header: &str, key: &str, line_to_set: &str) {
    if let Some(section_start) = doc.find(section_header) {
        let after_header = section_start + section_header.len();
        // The section runs until the next `[header]` or end of file.
        let next_section = doc[after_header..]
            .find("\n[")
            .map(|i| after_header + i)
            .unwrap_or(doc.len());
        let section_content = &doc[after_header..next_section];
        let key_prefix = format!("\n{} =", key);
        let key_prefix_alt = format!("\n{}=", key);
        if let Some(key_pos) = section_content
            .find(&key_prefix)
            .or_else(|| section_content.find(&key_prefix_alt))
        {
            // Key exists: replace just that line, preserving its trailing '\n'.
            let line_start = after_header + key_pos + 1; // skip the leading '\n'
            let line_end = doc[line_start..]
                .find('\n')
                .map(|i| line_start + i)
                .unwrap_or(doc.len());
            doc.replace_range(line_start..line_end, line_to_set);
        } else {
            // Key doesn't exist, append to section
            doc.insert_str(next_section, &format!("\n{}", line_to_set));
        }
    } else {
        // Section doesn't exist, append it
        *doc = format!("{}\n{}\n{}\n", doc.trim_end(), section_header, line_to_set);
    }
}
/// Single config item from SaaS sync
///
/// Deserialized from camelCase JSON. `category` selects the TOML section,
/// `key` is flattened (dots become underscores) before being written, and
/// `value` is a raw string that gets TOML-quoted on write.
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SaasConfigItem {
    /// Config section name; only "agent" and "llm" are applied by the kernel.
    pub category: String,
    /// Config key, possibly in dotted notation.
    pub key: String,
    /// Raw string value; numeric/boolean-looking strings are written unquoted.
    pub value: String,
}
/// Quote a value for TOML format.
///
/// Booleans and integers pass through unquoted. Values that parse as floats
/// keep their decimal point (`"1.0"` is written as `1.0`, not `1`, so the
/// TOML type stays float). Multi-line values become TOML multi-line basic
/// strings; everything else becomes an escaped single-line basic string.
fn toml_quote_value(value: &str) -> String {
    // Booleans pass through unquoted.
    if value == "true" || value == "false" {
        return value.to_string();
    }
    // Integers pass through unquoted (parsing normalizes e.g. "+5" -> "5").
    if let Ok(n) = value.parse::<i64>() {
        return n.to_string();
    }
    if let Ok(n) = value.parse::<f64>() {
        // Debug formatting of NaN is "NaN"; TOML requires lowercase "nan".
        if n.is_nan() {
            return "nan".to_string();
        }
        // `{:?}` preserves the decimal point ("1.0" stays "1.0"); `Display`
        // would collapse it to "1" and silently change the TOML type to
        // integer. Infinity formats as "inf", which is valid TOML.
        return format!("{:?}", n);
    }
    // Multi-line values: TOML multi-line basic string. The newline right
    // after the opening `"""` is trimmed by TOML parsers. Backslashes and
    // EVERY double quote are escaped, so an embedded `"""` can never
    // terminate the literal early (the old code substituted shell-style
    // quoting garbage here, corrupting the value).
    if value.contains('\n') {
        return format!(
            "\"\"\"\n{}\"\"\"",
            value.replace('\\', "\\\\").replace('"', "\\\"")
        );
    }
    // Default: quote as a single-line basic string.
    format!("\"{}\"", value.replace('\\', "\\\\").replace('"', "\\\""))
}