refactor(desktop): split kernel_commands/pipeline_commands into modules, add SaaS client libs and gateway modules

Split monolithic kernel_commands.rs (2185 lines) and pipeline_commands.rs (1391 lines)
into focused sub-modules under kernel_commands/ and pipeline_commands/ directories.
Add gateway module (commands, config, io, runtime), health_check, and 15 new
TypeScript client libraries for SaaS relay, auth, admin, telemetry, and kernel
sub-systems (a2a, agent, chat, hands, skills, triggers).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-03-31 11:12:47 +08:00
parent d0ae7d2770
commit f79560a911
71 changed files with 8521 additions and 5997 deletions

View File

@@ -0,0 +1,251 @@
//! Kernel lifecycle commands: init, status, shutdown
use serde::{Deserialize, Serialize};
use tauri::State;
use super::{KernelState, SchedulerState};
// ---------------------------------------------------------------------------
// Request / Response types
// ---------------------------------------------------------------------------
/// Serde default for `KernelConfigRequest::api_protocol`: the OpenAI wire protocol.
fn default_api_protocol() -> String {
    String::from("openai")
}
/// Serde default for `KernelConfigRequest::provider`: OpenAI.
fn default_kernel_provider() -> String {
    String::from("openai")
}
/// Serde default for `KernelConfigRequest::model`: a small, inexpensive model.
fn default_kernel_model() -> String {
    String::from("gpt-4o-mini")
}
/// Kernel configuration request
///
/// Simple configuration: base_url + api_key + model.
/// Model ID is passed directly to the API without any transformation.
///
/// Serialized with camelCase field names (e.g. `apiKey`, `baseUrl`,
/// `apiProtocol`) for the TypeScript frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct KernelConfigRequest {
    /// LLM provider (for preset URLs): anthropic, openai, zhipu, kimi, qwen, deepseek, local, custom
    /// Defaults to "openai" when omitted.
    #[serde(default = "default_kernel_provider")]
    pub provider: String,
    /// Model identifier - passed directly to the API.
    /// Defaults to "gpt-4o-mini" when omitted.
    #[serde(default = "default_kernel_model")]
    pub model: String,
    /// API key; when absent an empty key is used at boot time.
    pub api_key: Option<String>,
    /// Base URL (optional, uses provider default if not specified)
    pub base_url: Option<String>,
    /// API protocol: openai or anthropic. Defaults to "openai" when omitted.
    #[serde(default = "default_api_protocol")]
    pub api_protocol: String,
}
/// Kernel status response
///
/// Returned by `kernel_init`, `kernel_status`. Serialized with camelCase
/// field names for the TypeScript frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct KernelStatusResponse {
    /// Whether a kernel instance is currently booted.
    pub initialized: bool,
    /// Number of agents registered with the kernel (0 when uninitialized).
    pub agent_count: usize,
    /// Database URL of the booted kernel. Note: `kernel_init` only populates
    /// this on a fresh boot, not when returning an existing kernel's status.
    pub database_url: Option<String>,
    /// Effective LLM base URL, when a kernel is running.
    pub base_url: Option<String>,
    /// Effective LLM model identifier, when a kernel is running.
    pub model: Option<String>,
}
// ---------------------------------------------------------------------------
// Commands
// ---------------------------------------------------------------------------
/// Initialize the internal ZCLAW Kernel
///
/// If a kernel already exists with an equivalent config (same model and
/// effective base URL), returns the existing status without rebooting.
/// If the config changed, the old kernel is shut down and a new one is
/// booted with the new config. Also (re)starts the `SchedulerService`
/// that periodically checks and fires scheduled triggers.
///
/// # Errors
/// Returns a `String` error when booting the new kernel fails; a failed
/// shutdown of the old kernel is logged and tolerated.
#[tauri::command]
pub async fn kernel_init(
    state: State<'_, KernelState>,
    scheduler_state: State<'_, SchedulerState>,
    config_request: Option<KernelConfigRequest>,
) -> Result<KernelStatusResponse, String> {
    let mut kernel_lock = state.lock().await;
    // If a kernel is already running, decide whether the requested config
    // actually differs before paying the cost of a reboot.
    if let Some(kernel) = kernel_lock.as_ref() {
        let current_config = kernel.config();
        // A config counts as changed when the model or the effective base URL
        // (explicit, or the provider's default when unspecified) differs.
        let config_changed = if let Some(ref req) = config_request {
            let default_base_url = zclaw_kernel::config::KernelConfig::from_provider(
                &req.provider, "", &req.model, None, &req.api_protocol
            ).llm.base_url;
            // Borrow rather than clone: the URL is only needed for comparison.
            let request_base_url = req.base_url.as_deref().unwrap_or(&default_base_url);
            current_config.llm.model != req.model ||
                current_config.llm.base_url != request_base_url
        } else {
            false
        };
        if !config_changed {
            // Same config: report the existing kernel instead of rebooting.
            // database_url is intentionally left None here (only populated on
            // a fresh boot below).
            return Ok(KernelStatusResponse {
                initialized: true,
                agent_count: kernel.list_agents().len(),
                database_url: None,
                base_url: Some(current_config.llm.base_url.clone()),
                model: Some(current_config.llm.model.clone()),
            });
        }
        // Config changed: best-effort shutdown of the old kernel, then drop it.
        if let Err(e) = kernel.shutdown().await {
            tracing::warn!("[kernel_init] Failed to shutdown old kernel: {}", e);
        }
        *kernel_lock = None;
    }
    // Build configuration from the request, falling back to library defaults.
    let config = if let Some(req) = &config_request {
        let api_key = req.api_key.as_deref().unwrap_or("");
        let base_url = req.base_url.as_deref();
        zclaw_kernel::config::KernelConfig::from_provider(
            &req.provider,
            api_key,
            &req.model,
            base_url,
            &req.api_protocol,
        )
    } else {
        zclaw_kernel::config::KernelConfig::default()
    };
    // Log the skills directory so misconfigured paths are easy to spot.
    if let Some(ref skills_dir) = config.skills_dir {
        tracing::info!(
            "[kernel_init] Skills directory: {} (exists: {})",
            skills_dir.display(),
            skills_dir.exists()
        );
    } else {
        tracing::info!("[kernel_init] No skills directory configured");
    }
    let base_url = config.llm.base_url.clone();
    let model = config.llm.model.clone();
    // Boot kernel
    let mut kernel = zclaw_kernel::Kernel::boot(config.clone())
        .await
        .map_err(|e| format!("Failed to initialize kernel: {}", e))?;
    let agent_count = kernel.list_agents().len();
    // Configure extraction driver so the Growth system can call the LLM
    // for memory extraction.
    let driver = kernel.driver();
    crate::intelligence::extraction_adapter::configure_extraction_driver(
        driver.clone(),
        model.clone(),
    );
    // Bridge SqliteStorage to the kernel's GrowthIntegration; on failure the
    // kernel falls back to in-memory storage (logged, non-fatal).
    {
        match crate::viking_commands::get_storage().await {
            Ok(sqlite_storage) => {
                let viking = std::sync::Arc::new(zclaw_runtime::VikingAdapter::new(sqlite_storage));
                kernel.set_viking(viking);
                tracing::info!("[kernel_init] Bridged persistent SqliteStorage to Kernel GrowthIntegration");
            }
            Err(e) => {
                tracing::warn!(
                    "[kernel_init] Failed to get SqliteStorage, GrowthIntegration will use in-memory storage: {}",
                    e
                );
            }
        }
        // Set the LLM extraction driver on the kernel for memory extraction via middleware
        let extraction_driver = crate::intelligence::extraction_adapter::TauriExtractionDriver::new(
            driver.clone(),
            model.clone(),
        );
        kernel.set_extraction_driver(std::sync::Arc::new(extraction_driver));
    }
    // Configure summary driver so the Growth system can generate L0/L1
    // summaries; only done when the request supplied an API key.
    if let Some(api_key) = config_request.as_ref().and_then(|r| r.api_key.clone()) {
        crate::summarizer_adapter::configure_summary_driver(
            crate::summarizer_adapter::TauriSummaryDriver::new(
                format!("{}/chat/completions", base_url),
                api_key,
                Some(model.clone()),
            ),
        );
    }
    *kernel_lock = Some(kernel);
    // Start SchedulerService — periodically checks and fires scheduled triggers
    {
        let mut sched_lock = scheduler_state.lock().await;
        // Stop any previous scheduler so two never run concurrently.
        if let Some(ref old) = *sched_lock {
            old.stop();
        }
        let scheduler = zclaw_kernel::scheduler::SchedulerService::new(
            state.inner().clone(),
            60, // check every 60 seconds
        );
        scheduler.start();
        tracing::info!("[kernel_init] SchedulerService started (60s interval)");
        *sched_lock = Some(scheduler);
    }
    Ok(KernelStatusResponse {
        initialized: true,
        agent_count,
        database_url: Some(config.database_url),
        base_url: Some(base_url),
        model: Some(model),
    })
}
/// Get kernel status
///
/// Read-only snapshot of the kernel: reports whether a kernel is booted,
/// its agent count, and its effective database URL / base URL / model.
/// Never fails; an uninitialized kernel yields a default "empty" response.
#[tauri::command]
pub async fn kernel_status(
    state: State<'_, KernelState>,
) -> Result<KernelStatusResponse, String> {
    let guard = state.lock().await;
    let response = match guard.as_ref() {
        Some(kernel) => {
            // Hoist the config borrow so each field is read from one place.
            let cfg = kernel.config();
            KernelStatusResponse {
                initialized: true,
                agent_count: kernel.list_agents().len(),
                database_url: Some(cfg.database_url.clone()),
                base_url: Some(cfg.llm.base_url.clone()),
                model: Some(cfg.llm.model.clone()),
            }
        }
        None => KernelStatusResponse {
            initialized: false,
            agent_count: 0,
            database_url: None,
            base_url: None,
            model: None,
        },
    };
    Ok(response)
}
/// Shutdown the kernel
///
/// Stops the `SchedulerService` first (so no trigger fires mid-teardown),
/// then takes the kernel out of shared state and shuts it down.
///
/// # Errors
/// Returns the kernel's shutdown error as a `String`; succeeds trivially
/// when no kernel is running.
#[tauri::command]
pub async fn kernel_shutdown(
    state: State<'_, KernelState>,
    scheduler_state: State<'_, SchedulerState>,
) -> Result<(), String> {
    // Stop scheduler first; the inner scope releases its lock promptly.
    {
        let mut scheduler_slot = scheduler_state.lock().await;
        if let Some(active) = scheduler_slot.take() {
            active.stop();
            tracing::info!("[kernel_shutdown] SchedulerService stopped");
        }
    }
    // Take ownership of the kernel (leaving None behind) and shut it down.
    let mut kernel_slot = state.lock().await;
    match kernel_slot.take() {
        Some(kernel) => kernel.shutdown().await.map_err(|e| e.to_string()),
        None => Ok(()),
    }
}