fix: 发布前审计 Batch 2 — Debug遮蔽 + unwrap + 静默吞错 + MCP锁 + 索引 + Config验证

安全:
- LlmConfig 自定义 Debug impl,api_key 显示为 "***REDACTED***"
- tsconfig.json 移除 ErrorBoundary.tsx 排除项(安全关键组件)
- billing/handlers.rs Response builder unwrap → map_err 错误传播
- classroom_commands/mod.rs db_path.parent().unwrap() → ok_or_else

静默吞错:
- approvals.rs 3处 warn→error(审批状态丢失是严重事件)
- events.rs publish() 添加 Event dropped debug 日志
- mcp_transport.rs eprintln→tracing::warn (僵尸进程风险)
- zclaw-growth sqlite.rs 4处迁移:区分 duplicate column name 与真实错误

MCP Transport:
- 合并 stdin+stdout 为单一 Mutex<TransportHandles>
- send_request write-then-read 原子化,防止并发响应错配

数据库:
- 新迁移 20260418000001: idx_rle_created_at + idx_billing_sub_plan + idx_ki_created_by

配置验证:
- SaaSConfig::load() 添加 jwt_expiration_hours>=1, max_connections>0, min<=max
This commit is contained in:
iven
2026-04-18 14:09:36 +08:00
parent f3fb5340b5
commit e10549a1b9
10 changed files with 123 additions and 53 deletions

View File

@@ -162,22 +162,44 @@ impl SqliteStorage {
.map_err(|e| ZclawError::StorageError(format!("Failed to create importance index: {}", e)))?;
// Migration: add overview column (L1 summary)
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT")
// SQLite ALTER TABLE ADD COLUMN fails with "duplicate column name" if already applied
if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT")
.execute(&self.pool)
.await;
.await
{
let msg = e.to_string();
if !msg.contains("duplicate column name") {
tracing::warn!("[Growth] Migration overview failed: {}", msg);
}
}
// Migration: add abstract_summary column (L0 keywords)
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN abstract_summary TEXT")
if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN abstract_summary TEXT")
.execute(&self.pool)
.await;
.await
{
let msg = e.to_string();
if !msg.contains("duplicate column name") {
tracing::warn!("[Growth] Migration abstract_summary failed: {}", msg);
}
}
// P2-24: Migration — content fingerprint for deduplication
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN content_hash TEXT")
if let Err(e) = sqlx::query("ALTER TABLE memories ADD COLUMN content_hash TEXT")
.execute(&self.pool)
.await;
let _ = sqlx::query("CREATE INDEX IF NOT EXISTS idx_content_hash ON memories(content_hash)")
.await
{
let msg = e.to_string();
if !msg.contains("duplicate column name") {
tracing::warn!("[Growth] Migration content_hash failed: {}", msg);
}
}
if let Err(e) = sqlx::query("CREATE INDEX IF NOT EXISTS idx_content_hash ON memories(content_hash)")
.execute(&self.pool)
.await;
.await
{
tracing::warn!("[Growth] Migration idx_content_hash failed: {}", e);
}
// Backfill content_hash for existing entries that have NULL content_hash
{

View File

@@ -30,7 +30,7 @@ impl Default for ApiProtocol {
///
/// This is the single source of truth for LLM configuration.
/// Model ID is passed directly to the API without any transformation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Clone, Serialize, Deserialize)]
pub struct LlmConfig {
/// API base URL (e.g., "https://api.openai.com/v1")
pub base_url: String,
@@ -61,6 +61,20 @@ pub struct LlmConfig {
pub context_window: u32,
}
impl std::fmt::Debug for LlmConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LlmConfig")
.field("base_url", &self.base_url)
.field("api_key", &"***REDACTED***")
.field("model", &self.model)
.field("api_protocol", &self.api_protocol)
.field("max_tokens", &self.max_tokens)
.field("temperature", &self.temperature)
.field("context_window", &self.context_window)
.finish()
}
}
impl LlmConfig {
/// Create a new LLM config
pub fn new(base_url: impl Into<String>, api_key: impl Into<String>, model: impl Into<String>) -> Self {

View File

@@ -17,8 +17,9 @@ impl EventBus {
/// Publish an event
pub fn publish(&self, event: Event) {
// A send error here means the event had no subscribers; it is logged (debug) rather than silently ignored
let _ = self.sender.send(event);
if let Err(e) = self.sender.send(event) {
tracing::debug!("Event dropped (no subscribers or channel full): {:?}", e);
}
}
/// Subscribe to events

View File

@@ -86,12 +86,12 @@ impl Kernel {
completed_at: None,
};
let _ = memory.save_hand_run(&run).await.map_err(|e| {
tracing::warn!("[Approval] Failed to save hand run: {}", e);
tracing::error!("[Approval] Failed to save hand run: {}", e);
});
run.status = HandRunStatus::Running;
run.started_at = Some(chrono::Utc::now().to_rfc3339());
let _ = memory.update_hand_run(&run).await.map_err(|e| {
tracing::warn!("[Approval] Failed to update hand run (running): {}", e);
tracing::error!("[Approval] Failed to update hand run (running): {}", e);
});
// Register cancellation flag
@@ -122,7 +122,7 @@ impl Kernel {
run.duration_ms = Some(duration.as_millis() as u64);
run.completed_at = Some(completed_at);
let _ = memory.update_hand_run(&run).await.map_err(|e| {
tracing::warn!("[Approval] Failed to update hand run (completed): {}", e);
tracing::error!("[Approval] Failed to update hand run (completed): {}", e);
});
// Update approval status based on execution result

View File

@@ -84,12 +84,20 @@ impl McpServerConfig {
}
}
/// Combined transport handles (stdin + stdout) behind a single Mutex.
/// This ensures write-then-read is atomic, preventing concurrent requests
/// from receiving each other's responses.
struct TransportHandles {
stdin: BufWriter<ChildStdin>,
stdout: BufReader<ChildStdout>,
}
/// MCP Transport using stdio
pub struct McpTransport {
config: McpServerConfig,
child: Arc<Mutex<Option<Child>>>,
stdin: Arc<Mutex<Option<BufWriter<ChildStdin>>>>,
stdout: Arc<Mutex<Option<BufReader<ChildStdout>>>>,
/// Single Mutex protecting both stdin and stdout for atomic write-then-read
handles: Arc<Mutex<Option<TransportHandles>>>,
capabilities: Arc<Mutex<Option<ServerCapabilities>>>,
}
@@ -99,8 +107,7 @@ impl McpTransport {
Self {
config,
child: Arc::new(Mutex::new(None)),
stdin: Arc::new(Mutex::new(None)),
stdout: Arc::new(Mutex::new(None)),
handles: Arc::new(Mutex::new(None)),
capabilities: Arc::new(Mutex::new(None)),
}
}
@@ -162,9 +169,11 @@ impl McpTransport {
});
}
// Store handles in separate mutexes
*self.stdin.lock().await = Some(BufWriter::new(stdin));
*self.stdout.lock().await = Some(BufReader::new(stdout));
// Store handles in single mutex for atomic write-then-read
*self.handles.lock().await = Some(TransportHandles {
stdin: BufWriter::new(stdin),
stdout: BufReader::new(stdout),
});
*child_guard = Some(child);
Ok(())
@@ -201,21 +210,21 @@ impl McpTransport {
let line = serde_json::to_string(notification)
.map_err(|e| ZclawError::McpError(format!("Failed to serialize notification: {}", e)))?;
let mut stdin_guard = self.stdin.lock().await;
let stdin = stdin_guard.as_mut()
let mut handles_guard = self.handles.lock().await;
let handles = handles_guard.as_mut()
.ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?;
stdin.write_all(line.as_bytes())
handles.stdin.write_all(line.as_bytes())
.map_err(|e| ZclawError::McpError(format!("Failed to write notification: {}", e)))?;
stdin.write_all(b"\n")
handles.stdin.write_all(b"\n")
.map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?;
stdin.flush()
handles.stdin.flush()
.map_err(|e| ZclawError::McpError(format!("Failed to flush notification: {}", e)))?;
Ok(())
}
/// Send JSON-RPC request
/// Send JSON-RPC request (atomic write-then-read under single lock)
async fn send_request<T: DeserializeOwned>(
&self,
method: &str,
@@ -234,28 +243,23 @@ impl McpTransport {
let line = serde_json::to_string(&request)
.map_err(|e| ZclawError::McpError(format!("Failed to serialize request: {}", e)))?;
// Write to stdin
{
let mut stdin_guard = self.stdin.lock().await;
let stdin = stdin_guard.as_mut()
.ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?;
stdin.write_all(line.as_bytes())
.map_err(|e| ZclawError::McpError(format!("Failed to write request: {}", e)))?;
stdin.write_all(b"\n")
.map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?;
stdin.flush()
.map_err(|e| ZclawError::McpError(format!("Failed to flush request: {}", e)))?;
}
// Read from stdout
// Atomic write-then-read under single lock
let response_line = {
let mut stdout_guard = self.stdout.lock().await;
let stdout = stdout_guard.as_mut()
let mut handles_guard = self.handles.lock().await;
let handles = handles_guard.as_mut()
.ok_or_else(|| ZclawError::McpError("Transport not started".to_string()))?;
// Write to stdin
handles.stdin.write_all(line.as_bytes())
.map_err(|e| ZclawError::McpError(format!("Failed to write request: {}", e)))?;
handles.stdin.write_all(b"\n")
.map_err(|e| ZclawError::McpError(format!("Failed to write newline: {}", e)))?;
handles.stdin.flush()
.map_err(|e| ZclawError::McpError(format!("Failed to flush request: {}", e)))?;
// Read from stdout (still holding the lock — no interleaving possible)
let mut response_line = String::new();
stdout.read_line(&mut response_line)
handles.stdout.read_line(&mut response_line)
.map_err(|e| ZclawError::McpError(format!("Failed to read response: {}", e)))?;
response_line
};
@@ -429,7 +433,7 @@ impl Drop for McpTransport {
let _ = child.wait();
}
Err(e) => {
eprintln!("[McpTransport] Failed to kill child process: {}", e);
tracing::warn!("[McpTransport] Failed to kill child process (potential zombie): {}", e);
}
}
}

View File

@@ -0,0 +1,11 @@
-- Add missing indexes for performance-critical queries
-- 2026-04-18 Release readiness audit
-- Rate limit events cleanup (DELETE WHERE created_at < ...)
CREATE INDEX IF NOT EXISTS idx_rle_created_at ON rate_limit_events(created_at);
-- Billing subscriptions plan lookup
CREATE INDEX IF NOT EXISTS idx_billing_sub_plan ON billing_subscriptions(plan_id);
-- Knowledge items created_by lookup
CREATE INDEX IF NOT EXISTS idx_ki_created_by ON knowledge_items(created_by);

View File

@@ -587,7 +587,7 @@ pub async fn get_invoice_pdf(
})?;
// 返回 PDF 响应
Ok(axum::response::Response::builder()
axum::response::Response::builder()
.status(200)
.header("Content-Type", "application/pdf")
.header(
@@ -595,5 +595,8 @@ pub async fn get_invoice_pdf(
format!("attachment; filename=\"invoice-{}.pdf\"", invoice.id),
)
.body(axum::body::Body::from(bytes))
.unwrap())
.map_err(|e| {
tracing::error!("Failed to build PDF response: {}", e);
SaasError::Internal("PDF 响应构建失败".into())
})
}

View File

@@ -396,6 +396,23 @@ impl SaaSConfig {
config.database.url = db_url;
}
// Config validation
if config.auth.jwt_expiration_hours < 1 {
anyhow::bail!(
"auth.jwt_expiration_hours must be >= 1, got {}",
config.auth.jwt_expiration_hours
);
}
if config.database.max_connections == 0 {
anyhow::bail!("database.max_connections must be > 0");
}
if config.database.min_connections > config.database.max_connections {
anyhow::bail!(
"database.min_connections ({}) must be <= max_connections ({})",
config.database.min_connections, config.database.max_connections
);
}
Ok(config)
}