diff --git a/crates/zclaw-growth/src/storage/sqlite.rs b/crates/zclaw-growth/src/storage/sqlite.rs index 8b4d10d..d2c188e 100644 --- a/crates/zclaw-growth/src/storage/sqlite.rs +++ b/crates/zclaw-growth/src/storage/sqlite.rs @@ -162,22 +162,44 @@ impl SqliteStorage { .map_err(|e| ZclawError::StorageError(format!("Failed to create importance index: {}", e)))?; // Migration: add overview column (L1 summary) - let _ = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT") + // SQLite ALTER TABLE ADD COLUMN fails with "duplicate column name" if already applied + if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT") .execute(&self.pool) - .await; + .await + { + let msg = e.to_string(); + if !msg.contains("duplicate column name") { + tracing::warn!("[Growth] Migration overview failed: {}", msg); + } + } // Migration: add abstract_summary column (L0 keywords) - let _ = sqlx::query("ALTER TABLE memories ADD COLUMN abstract_summary TEXT") + if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN abstract_summary TEXT") .execute(&self.pool) - .await; + .await + { + let msg = e.to_string(); + if !msg.contains("duplicate column name") { + tracing::warn!("[Growth] Migration abstract_summary failed: {}", msg); + } + } // P2-24: Migration — content fingerprint for deduplication - let _ = sqlx::query("ALTER TABLE memories ADD COLUMN content_hash TEXT") + if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN content_hash TEXT") .execute(&self.pool) - .await; - let _ = sqlx::query("CREATE INDEX IF NOT EXISTS idx_content_hash ON memories(content_hash)") + .await + { + let msg = e.to_string(); + if !msg.contains("duplicate column name") { + tracing::warn!("[Growth] Migration content_hash failed: {}", msg); + } + } + if let Err(e) = sqlx::query("CREATE INDEX IF NOT EXISTS idx_content_hash ON memories(content_hash)") .execute(&self.pool) - .await; + .await + { + tracing::warn!("[Growth] Migration idx_content_hash failed: {}", e); + } // Backfill 
content_hash for existing entries that have NULL content_hash { diff --git a/crates/zclaw-kernel/src/config.rs b/crates/zclaw-kernel/src/config.rs index cad17f6..76ba76c 100644 --- a/crates/zclaw-kernel/src/config.rs +++ b/crates/zclaw-kernel/src/config.rs @@ -30,7 +30,7 @@ impl Default for ApiProtocol { /// /// This is the single source of truth for LLM configuration. /// Model ID is passed directly to the API without any transformation. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize)] pub struct LlmConfig { /// API base URL (e.g., "https://api.openai.com/v1") pub base_url: String, @@ -61,6 +61,20 @@ pub struct LlmConfig { pub context_window: u32, } +impl std::fmt::Debug for LlmConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LlmConfig") + .field("base_url", &self.base_url) + .field("api_key", &"***REDACTED***") + .field("model", &self.model) + .field("api_protocol", &self.api_protocol) + .field("max_tokens", &self.max_tokens) + .field("temperature", &self.temperature) + .field("context_window", &self.context_window) + .finish() + } +} + impl LlmConfig { /// Create a new LLM config pub fn new(base_url: impl Into<String>, api_key: impl Into<String>, model: impl Into<String>) -> Self { diff --git a/crates/zclaw-kernel/src/events.rs b/crates/zclaw-kernel/src/events.rs index 3a97bf0..ba0293c 100644 --- a/crates/zclaw-kernel/src/events.rs +++ b/crates/zclaw-kernel/src/events.rs @@ -17,8 +17,9 @@ impl EventBus { /// Publish an event pub fn publish(&self, event: Event) { - // Ignore send errors (no subscribers) - let _ = self.sender.send(event); + if let Err(e) = self.sender.send(event) { + tracing::debug!("Event dropped (no subscribers or channel full): {:?}", e); + } } /// Subscribe to events diff --git a/crates/zclaw-kernel/src/kernel/approvals.rs b/crates/zclaw-kernel/src/kernel/approvals.rs index acf538e..a74fa62 100644 --- a/crates/zclaw-kernel/src/kernel/approvals.rs +++ 
b/crates/zclaw-kernel/src/kernel/approvals.rs @@ -86,12 +86,12 @@ impl Kernel { completed_at: None, }; let _ = memory.save_hand_run(&run).await.map_err(|e| { - tracing::warn!("[Approval] Failed to save hand run: {}", e); + tracing::error!("[Approval] Failed to save hand run: {}", e); }); run.status = HandRunStatus::Running; run.started_at = Some(chrono::Utc::now().to_rfc3339()); let _ = memory.update_hand_run(&run).await.map_err(|e| { - tracing::warn!("[Approval] Failed to update hand run (running): {}", e); + tracing::error!("[Approval] Failed to update hand run (running): {}", e); }); // Register cancellation flag @@ -122,7 +122,7 @@ impl Kernel { run.duration_ms = Some(duration.as_millis() as u64); run.completed_at = Some(completed_at); let _ = memory.update_hand_run(&run).await.map_err(|e| { - tracing::warn!("[Approval] Failed to update hand run (completed): {}", e); + tracing::error!("[Approval] Failed to update hand run (completed): {}", e); }); // Update approval status based on execution result diff --git a/crates/zclaw-protocols/src/mcp_transport.rs b/crates/zclaw-protocols/src/mcp_transport.rs index 0dd0132..0b3a61e 100644 --- a/crates/zclaw-protocols/src/mcp_transport.rs +++ b/crates/zclaw-protocols/src/mcp_transport.rs @@ -84,12 +84,20 @@ impl McpServerConfig { } } +/// Combined transport handles (stdin + stdout) behind a single Mutex. +/// This ensures write-then-read is atomic, preventing concurrent requests +/// from receiving each other's responses. 
+struct TransportHandles { + stdin: BufWriter<ChildStdin>, + stdout: BufReader<ChildStdout>, +} + /// MCP Transport using stdio pub struct McpTransport { config: McpServerConfig, child: Arc<Mutex<Option<Child>>>, - stdin: Arc<Mutex<Option<BufWriter<ChildStdin>>>>, - stdout: Arc<Mutex<Option<BufReader<ChildStdout>>>>, + /// Single Mutex protecting both stdin and stdout for atomic write-then-read + handles: Arc<Mutex<Option<TransportHandles>>>, capabilities: Arc>>, } @@ -99,8 +107,7 @@ impl McpTransport { Self { config, child: Arc::new(Mutex::new(None)), - stdin: Arc::new(Mutex::new(None)), - stdout: Arc::new(Mutex::new(None)), + handles: Arc::new(Mutex::new(None)), capabilities: Arc::new(Mutex::new(None)), } } } @@ -162,9 +169,11 @@ impl McpTransport { }); } - // Store handles in separate mutexes - *self.stdin.lock().await = Some(BufWriter::new(stdin)); - *self.stdout.lock().await = Some(BufReader::new(stdout)); + // Store handles in single mutex for atomic write-then-read + *self.handles.lock().await = Some(TransportHandles { + stdin: BufWriter::new(stdin), + stdout: BufReader::new(stdout), + }); *child_guard = Some(child); Ok(()) } @@ -201,21 +210,21 @@ impl McpTransport { let line = serde_json::to_string(notification) .map_err(|e| ZclawError::McpError(format!("Failed to serialize notification: {}", e)))?; - let mut stdin_guard = self.stdin.lock().await; - let stdin = stdin_guard.as_mut() + let mut handles_guard = self.handles.lock().await; + let handles = handles_guard.as_mut() .ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?; - stdin.write_all(line.as_bytes()) + handles.stdin.write_all(line.as_bytes()) .map_err(|e| ZclawError::McpError(format!("Failed to write notification: {}", e)))?; - stdin.write_all(b"\n") + handles.stdin.write_all(b"\n") .map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?; - stdin.flush() + handles.stdin.flush() .map_err(|e| ZclawError::McpError(format!("Failed to flush notification: {}", e)))?; Ok(()) } - /// Send JSON-RPC request + /// Send JSON-RPC request (atomic write-then-read under single lock) async fn send_request( &self, 
method: &str, @@ -234,28 +243,23 @@ impl McpTransport { let line = serde_json::to_string(&request) .map_err(|e| ZclawError::McpError(format!("Failed to serialize request: {}", e)))?; - // Write to stdin - { - let mut stdin_guard = self.stdin.lock().await; - let stdin = stdin_guard.as_mut() - .ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?; - - stdin.write_all(line.as_bytes()) - .map_err(|e| ZclawError::McpError(format!("Failed to write request: {}", e)))?; - stdin.write_all(b"\n") - .map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?; - stdin.flush() - .map_err(|e| ZclawError::McpError(format!("Failed to flush request: {}", e)))?; - } - - // Read from stdout + // Atomic write-then-read under single lock let response_line = { - let mut stdout_guard = self.stdout.lock().await; - let stdout = stdout_guard.as_mut() + let mut handles_guard = self.handles.lock().await; + let handles = handles_guard.as_mut() .ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?; + // Write to stdin + handles.stdin.write_all(line.as_bytes()) + .map_err(|e| ZclawError::McpError(format!("Failed to write request: {}", e)))?; + handles.stdin.write_all(b"\n") + .map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?; + handles.stdin.flush() + .map_err(|e| ZclawError::McpError(format!("Failed to flush request: {}", e)))?; + + // Read from stdout (still holding the lock — no interleaving possible) let mut response_line = String::new(); - stdout.read_line(&mut response_line) + handles.stdout.read_line(&mut response_line) .map_err(|e| ZclawError::McpError(format!("Failed to read response: {}", e)))?; response_line }; @@ -429,7 +433,7 @@ impl Drop for McpTransport { let _ = child.wait(); } Err(e) => { - eprintln!("[McpTransport] Failed to kill child process: {}", e); + tracing::warn!("[McpTransport] Failed to kill child process (potential zombie): {}", e); } } } diff --git 
a/crates/zclaw-saas/migrations/20260418000001_add_missing_indexes.sql b/crates/zclaw-saas/migrations/20260418000001_add_missing_indexes.sql new file mode 100644 index 0000000..b989425 --- /dev/null +++ b/crates/zclaw-saas/migrations/20260418000001_add_missing_indexes.sql @@ -0,0 +1,11 @@ +-- Add missing indexes for performance-critical queries +-- 2026-04-18 Release readiness audit + +-- Rate limit events cleanup (DELETE WHERE created_at < ...) +CREATE INDEX IF NOT EXISTS idx_rle_created_at ON rate_limit_events(created_at); + +-- Billing subscriptions plan lookup +CREATE INDEX IF NOT EXISTS idx_billing_sub_plan ON billing_subscriptions(plan_id); + +-- Knowledge items created_by lookup +CREATE INDEX IF NOT EXISTS idx_ki_created_by ON knowledge_items(created_by); diff --git a/crates/zclaw-saas/src/billing/handlers.rs b/crates/zclaw-saas/src/billing/handlers.rs index 749412d..dad289c 100644 --- a/crates/zclaw-saas/src/billing/handlers.rs +++ b/crates/zclaw-saas/src/billing/handlers.rs @@ -587,7 +587,7 @@ pub async fn get_invoice_pdf( })?; // 返回 PDF 响应 - Ok(axum::response::Response::builder() + axum::response::Response::builder() .status(200) .header("Content-Type", "application/pdf") .header( @@ -595,5 +595,8 @@ pub async fn get_invoice_pdf( format!("attachment; filename=\"invoice-{}.pdf\"", invoice.id), ) .body(axum::body::Body::from(bytes)) - .unwrap()) + .map_err(|e| { + tracing::error!("Failed to build PDF response: {}", e); + SaasError::Internal("PDF 响应构建失败".into()) + }) } diff --git a/crates/zclaw-saas/src/config.rs b/crates/zclaw-saas/src/config.rs index 0ea1f0c..6d6baeb 100644 --- a/crates/zclaw-saas/src/config.rs +++ b/crates/zclaw-saas/src/config.rs @@ -396,6 +396,23 @@ impl SaaSConfig { config.database.url = db_url; } + // Config validation + if config.auth.jwt_expiration_hours < 1 { + anyhow::bail!( + "auth.jwt_expiration_hours must be >= 1, got {}", + config.auth.jwt_expiration_hours + ); + } + if config.database.max_connections == 0 { + 
anyhow::bail!("database.max_connections must be > 0"); + } + if config.database.min_connections > config.database.max_connections { + anyhow::bail!( + "database.min_connections ({}) must be <= max_connections ({})", + config.database.min_connections, config.database.max_connections + ); + } + Ok(config) } diff --git a/desktop/src-tauri/src/classroom_commands/mod.rs b/desktop/src-tauri/src/classroom_commands/mod.rs index 32b5e01..639044e 100644 --- a/desktop/src-tauri/src/classroom_commands/mod.rs +++ b/desktop/src-tauri/src/classroom_commands/mod.rs @@ -55,7 +55,9 @@ pub async fn init_persistence( .map_err(|e| format!("Failed to get app data dir: {}", e))?; let db_path = app_dir.join("classroom").join("classrooms.db"); - std::fs::create_dir_all(db_path.parent().unwrap()) + let db_dir = db_path.parent() + .ok_or_else(|| "Invalid classroom database path: no parent directory".to_string())?; + std::fs::create_dir_all(db_dir) .map_err(|e| format!("Failed to create classroom dir: {}", e))?; let persistence: ClassroomPersistence = ClassroomPersistence::open(db_path).await?; diff --git a/desktop/tsconfig.json b/desktop/tsconfig.json index 0bf4c35..b9cc783 100644 --- a/desktop/tsconfig.json +++ b/desktop/tsconfig.json @@ -22,9 +22,5 @@ "useUnknownInCatchVariables": true }, "include": ["src"], - "exclude": [ - "src/components/ui/ErrorAlert.tsx", - "src/components/ui/ErrorBoundary.tsx" - ], "references": [{ "path": "./tsconfig.node.json" }] }