release(v0.2.0): streaming, MCP protocol, Browser Hand, security enhancements

## Major Features

### Streaming Response System
- Implement LlmDriver trait with `stream()` method returning async Stream
- Add SSE parsing for Anthropic and OpenAI API streaming
- Integrate Tauri event system for frontend streaming (`stream:chunk` events)
- Add StreamChunk types: Delta, ToolStart, ToolEnd, Complete, Error

### MCP Protocol Implementation
- Add MCP JSON-RPC 2.0 types (mcp_types.rs)
- Implement stdio-based MCP transport (mcp_transport.rs)
- Support tool discovery, execution, and resource operations

### Browser Hand Implementation
- Complete browser automation with Playwright-style actions
- Support Navigate, Click, Type, Scrape, Screenshot, Wait actions
- Add educational Hands: Whiteboard, Slideshow, Speech, Quiz

### Security Enhancements
- Implement command whitelist/blacklist for shell_exec tool
- Add SSRF protection with private IP blocking
- Create security.toml configuration file

## Test Improvements
- Fix test import paths (security-utils, setup)
- Fix vi.mock hoisting issues with vi.hoisted()
- Update test expectations for validateUrl and sanitizeFilename
- Add getUnsupportedLocalGatewayStatus mock

## Documentation Updates
- Update architecture documentation
- Improve configuration reference
- Add quick-start guide updates

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-03-24 03:24:24 +08:00
parent e49ba4460b
commit 3ff08faa56
78 changed files with 29575 additions and 1682 deletions

View File

@@ -330,16 +330,160 @@ fn filter_by_proactivity(alerts: &[HeartbeatAlert], level: &ProactivityLevel) ->
// === Built-in Checks ===
/// Check for pending task memories (placeholder - would connect to memory store)
fn check_pending_tasks(_agent_id: &str) -> Option<HeartbeatAlert> {
// In full implementation, this would query the memory store
// For now, return None (no tasks)
/// Pattern detection counters (shared state for personality detection)
use std::collections::HashMap as StdHashMap;
use std::sync::RwLock;
use std::sync::OnceLock;
/// Global correction counters, keyed by `"agent_id:correction_type"`.
/// Lazily initialized on first access; guarded by an RwLock so Tauri
/// commands on different threads can record corrections concurrently.
static CORRECTION_COUNTERS: OnceLock<RwLock<StdHashMap<String, usize>>> = OnceLock::new();
/// Global memory stats cache (updated by frontend via Tauri command)
/// Key: agent_id, Value: (task_count, total_memories, storage_bytes)
static MEMORY_STATS_CACHE: OnceLock<RwLock<StdHashMap<String, MemoryStatsCache>>> = OnceLock::new();
/// Cached memory stats for an agent
#[derive(Clone, Debug, Default)]
pub struct MemoryStatsCache {
    /// Number of pending task-type memories (drives the task-backlog alert).
    pub task_count: usize,
    /// Total number of memory entries stored for the agent.
    pub total_entries: usize,
    /// On-disk size of the agent's memory store, in bytes.
    pub storage_size_bytes: usize,
    /// RFC 3339 timestamp of the last cache refresh; `None` until first update.
    pub last_updated: Option<String>,
}
/// Accessor for the process-wide correction-counter map, creating it on first use.
fn get_correction_counters() -> &'static RwLock<StdHashMap<String, usize>> {
    CORRECTION_COUNTERS.get_or_init(Default::default)
}
/// Accessor for the process-wide memory-stats cache, creating it on first use.
fn get_memory_stats_cache() -> &'static RwLock<StdHashMap<String, MemoryStatsCache>> {
    MEMORY_STATS_CACHE.get_or_init(Default::default)
}
/// Update memory stats cache for an agent
/// Call this from frontend via Tauri command after fetching memory stats
pub fn update_memory_stats_cache(agent_id: &str, task_count: usize, total_entries: usize, storage_size_bytes: usize) {
    // A poisoned lock is silently skipped: stale stats are tolerable here.
    if let Ok(mut map) = get_memory_stats_cache().write() {
        let entry = MemoryStatsCache {
            task_count,
            total_entries,
            storage_size_bytes,
            last_updated: Some(chrono::Utc::now().to_rfc3339()),
        };
        map.insert(agent_id.to_string(), entry);
    }
}
/// Get the cached memory stats for `agent_id`, if any.
/// Returns `None` when no entry exists or the lock is poisoned.
fn get_cached_memory_stats(agent_id: &str) -> Option<MemoryStatsCache> {
    get_memory_stats_cache()
        .read()
        .ok()
        .and_then(|map| map.get(agent_id).cloned())
}
/// Record a user correction for pattern detection
/// Call this when user corrects agent behavior
pub fn record_user_correction(agent_id: &str, correction_type: &str) {
    // Poisoned lock → drop the event; correction counts are best-effort.
    if let Ok(mut counters) = get_correction_counters().write() {
        *counters
            .entry(format!("{}:{}", agent_id, correction_type))
            .or_default() += 1;
    }
}
/// Get and reset correction count
/// Removing the key means each count is consumed exactly once.
fn get_correction_count(agent_id: &str, correction_type: &str) -> usize {
    let key = format!("{}:{}", agent_id, correction_type);
    get_correction_counters()
        .write()
        .map(|mut counters| counters.remove(&key).unwrap_or(0))
        .unwrap_or(0)
}
/// Check all correction patterns for an agent
///
/// Emits one alert per pattern type that has accumulated 3 or more
/// corrections. Note that reading a count also resets it (see
/// `get_correction_count`), so each burst of corrections triggers at most once.
fn check_correction_patterns(agent_id: &str) -> Vec<HeartbeatAlert> {
    const PATTERNS: [(&str, &str, &str); 5] = [
        ("communication_style", "简洁", "用户偏好简洁回复,建议减少冗长解释"),
        ("tone", "轻松", "用户偏好轻松语气,建议减少正式用语"),
        ("detail_level", "概要", "用户偏好概要性回答,建议先给结论再展开"),
        ("language", "中文", "用户语言偏好,建议优先使用中文"),
        ("code_first", "代码优先", "用户偏好代码优先,建议先展示代码再解释"),
    ];
    PATTERNS
        .iter()
        .filter_map(|&(pattern_type, _keyword, suggestion)| {
            let count = get_correction_count(agent_id, pattern_type);
            (count >= 3).then(|| HeartbeatAlert {
                title: "人格改进建议".to_string(),
                content: format!("{} (检测到 {} 次相关纠正)", suggestion, count),
                urgency: Urgency::Medium,
                source: "personality-improvement".to_string(),
                timestamp: chrono::Utc::now().to_rfc3339(),
            })
        })
        .collect()
}
/// Check for pending task memories
/// Uses cached memory stats to detect task backlog
fn check_pending_tasks(agent_id: &str) -> Option<HeartbeatAlert> {
    let stats = get_cached_memory_stats(agent_id)?;
    // Below 5 pending tasks there is nothing to report.
    if stats.task_count < 5 {
        return None;
    }
    // 10+ pending tasks escalates the urgency.
    let urgency = if stats.task_count >= 10 {
        Urgency::High
    } else {
        Urgency::Medium
    };
    Some(HeartbeatAlert {
        title: "待办任务积压".to_string(),
        content: format!("当前有 {} 个待办任务未完成,建议处理或重新评估优先级", stats.task_count),
        urgency,
        source: "pending-tasks".to_string(),
        timestamp: chrono::Utc::now().to_rfc3339(),
    })
}
/// Check memory storage health (placeholder)
fn check_memory_health(_agent_id: &str) -> Option<HeartbeatAlert> {
// In full implementation, this would check memory stats
/// Check memory storage health
/// Uses cached memory stats to detect storage issues
fn check_memory_health(agent_id: &str) -> Option<HeartbeatAlert> {
if let Some(stats) = get_cached_memory_stats(agent_id) {
// Alert if storage is very large (> 50MB)
if stats.storage_size_bytes > 50 * 1024 * 1024 {
return Some(HeartbeatAlert {
title: "记忆存储过大".to_string(),
content: format!(
"记忆存储已达 {:.1}MB建议清理低重要性记忆或归档旧记忆",
stats.storage_size_bytes as f64 / (1024.0 * 1024.0)
),
urgency: Urgency::Medium,
source: "memory-health".to_string(),
timestamp: chrono::Utc::now().to_rfc3339(),
});
}
// Alert if too many memories (> 1000)
if stats.total_entries > 1000 {
return Some(HeartbeatAlert {
title: "记忆条目过多".to_string(),
content: format!(
"当前有 {} 条记忆,可能影响检索效率,建议清理或归档",
stats.total_entries
),
urgency: Urgency::Low,
source: "memory-health".to_string(),
timestamp: chrono::Utc::now().to_rfc3339(),
});
}
}
None
}
@@ -358,38 +502,43 @@ fn check_idle_greeting(_agent_id: &str) -> Option<HeartbeatAlert> {
/// Check whether repeated user corrections warrant a personality improvement.
///
/// When threshold is reached, proposes a personality change via the identity system.
///
/// Delegates to `check_correction_patterns` (which consumes/resets the
/// counters it reads) and surfaces only the first triggered alert per tick.
fn check_personality_improvement(agent_id: &str) -> Option<HeartbeatAlert> {
    // The old placeholder body (inline pattern table + `let _ = ...; None`)
    // was dead pre-refactor residue and has been removed.
    check_correction_patterns(agent_id).into_iter().next()
}
/// Check for learning opportunities from recent conversations
///
/// Identifies opportunities to capture user preferences or behavioral patterns
/// that could enhance agent effectiveness.
fn check_learning_opportunities(_agent_id: &str) -> Option<HeartbeatAlert> {
// In full implementation, this would:
// 1. Analyze recent conversations for explicit preferences
// 2. Detect implicit preferences from user reactions
// 3. Suggest memory entries or identity changes
fn check_learning_opportunities(agent_id: &str) -> Option<HeartbeatAlert> {
// Check if any correction patterns are approaching threshold
let counters = get_correction_counters();
let mut approaching_threshold: Vec<String> = Vec::new();
None
if let Ok(counters) = counters.read() {
for (key, count) in counters.iter() {
if key.starts_with(&format!("{}:", agent_id)) && *count >= 2 && *count < 3 {
let pattern_type = key.split(':').nth(1).unwrap_or("unknown").to_string();
approaching_threshold.push(pattern_type);
}
}
}
if !approaching_threshold.is_empty() {
Some(HeartbeatAlert {
title: "学习机会".to_string(),
content: format!(
"检测到用户可能有偏好调整倾向 ({}),继续观察将触发人格改进建议",
approaching_threshold.join(", ")
),
urgency: Urgency::Low,
source: "learning-opportunities".to_string(),
timestamp: chrono::Utc::now().to_rfc3339(),
})
} else {
None
}
}
// === Tauri Commands ===
@@ -493,6 +642,29 @@ pub async fn heartbeat_get_history(
Ok(engine.get_history(limit.unwrap_or(20)).await)
}
/// Update memory stats cache for heartbeat checks
/// This should be called by the frontend after fetching memory stats
///
/// Thin wrapper over `update_memory_stats_cache`; always returns `Ok(())`
/// (the `Result` is only for the Tauri command contract).
#[tauri::command]
pub async fn heartbeat_update_memory_stats(
    agent_id: String,
    task_count: usize,
    total_entries: usize,
    storage_size_bytes: usize,
) -> Result<(), String> {
    update_memory_stats_cache(&agent_id, task_count, total_entries, storage_size_bytes);
    Ok(())
}
/// Record a user correction for personality improvement detection
///
/// Thin wrapper over `record_user_correction`; always returns `Ok(())`
/// (the `Result` is only for the Tauri command contract).
#[tauri::command]
pub async fn heartbeat_record_correction(
    agent_id: String,
    correction_type: String,
) -> Result<(), String> {
    record_user_correction(&agent_id, &correction_type);
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -5,6 +5,7 @@
//! - USER.md auto-update by agent (stores learned preferences)
//! - SOUL.md/AGENTS.md change proposals (require user approval)
//! - Snapshot history for rollback
//! - File system persistence (survives app restart)
//!
//! Phase 3 of Intelligence Layer Migration.
//! Reference: ZCLAW_AGENT_INTELLIGENCE_EVOLUTION.md §6.2.3
@@ -12,6 +13,9 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use tracing::{error, info, warn};
// === Types ===
@@ -107,20 +111,107 @@ _尚未收集到用户偏好信息。随着交互积累此文件将自动更
// === Agent Identity Manager ===
pub struct AgentIdentityManager {
/// Data structure for disk persistence
///
/// Serialized as pretty JSON to `<data_dir>/store.json` and mirrors the
/// in-memory fields of `AgentIdentityManager`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct IdentityStore {
    /// Per-agent identity files, keyed by agent id.
    identities: HashMap<String, IdentityFiles>,
    /// Identity change proposals (pending, approved, or rejected).
    proposals: Vec<IdentityChangeProposal>,
    /// Snapshot history for rollback.
    snapshots: Vec<IdentitySnapshot>,
    /// Running counter persisted alongside snapshots.
    snapshot_counter: usize,
}
/// In-memory identity state with file-system persistence.
///
/// State is loaded from disk on construction and written back after each
/// mutating operation via `save_to_disk`.
pub struct AgentIdentityManager {
    identities: HashMap<String, IdentityFiles>,
    proposals: Vec<IdentityChangeProposal>,
    snapshots: Vec<IdentitySnapshot>,
    snapshot_counter: usize,
    /// Directory holding `store.json` (defaults to `~/.zclaw/identity`).
    data_dir: PathBuf,
}
impl AgentIdentityManager {
/// Create a new identity manager with persistence
pub fn new() -> Self {
Self {
let data_dir = Self::get_data_dir();
let mut manager = Self {
identities: HashMap::new(),
proposals: Vec::new(),
snapshots: Vec::new(),
snapshot_counter: 0,
data_dir,
};
manager.load_from_disk();
manager
}
/// Get the data directory for identity storage
///
/// Resolves to `~/.zclaw/identity`; when the home directory cannot be
/// determined, falls back to the relative path `.zclaw/identity`.
fn get_data_dir() -> PathBuf {
    match dirs::home_dir() {
        Some(home) => home.join(".zclaw").join("identity"),
        None => PathBuf::from(".zclaw").join("identity"),
    }
}
/// Load all data from disk
fn load_from_disk(&mut self) {
let store_path = self.data_dir.join("store.json");
if !store_path.exists() {
return; // No saved data, use defaults
}
match fs::read_to_string(&store_path) {
Ok(content) => {
match serde_json::from_str::<IdentityStore>(&content) {
Ok(store) => {
self.identities = store.identities;
self.proposals = store.proposals;
self.snapshots = store.snapshots;
self.snapshot_counter = store.snapshot_counter;
eprintln!(
"[IdentityManager] Loaded {} identities, {} proposals, {} snapshots",
self.identities.len(),
self.proposals.len(),
self.snapshots.len()
);
}
Err(e) => {
warn!("[IdentityManager] Failed to parse store.json: {}", e);
}
}
}
Err(e) => {
warn!("[IdentityManager] Failed to read store.json: {}", e);
}
}
}
/// Save all data to disk
///
/// Serializes the whole state to pretty JSON at `<data_dir>/store.json`.
/// Failures are logged via `error!` and otherwise ignored (best-effort).
fn save_to_disk(&self) {
    // Ensure directory exists
    if let Err(e) = fs::create_dir_all(&self.data_dir) {
        error!("[IdentityManager] Failed to create data directory: {}", e);
        return;
    }
    // NOTE(review): clones the full state on every save — fine for small
    // stores, worth revisiting if identities/snapshots grow large.
    let store = IdentityStore {
        identities: self.identities.clone(),
        proposals: self.proposals.clone(),
        snapshots: self.snapshots.clone(),
        snapshot_counter: self.snapshot_counter,
    };
    let content = match serde_json::to_string_pretty(&store) {
        Ok(content) => content,
        Err(e) => {
            error!("[IdentityManager] Failed to serialize data: {}", e);
            return;
        }
    };
    let store_path = self.data_dir.join("store.json");
    if let Err(e) = fs::write(&store_path, content) {
        error!("[IdentityManager] Failed to write store.json: {}", e);
    }
}
@@ -184,6 +275,7 @@ impl AgentIdentityManager {
let mut updated = identity.clone();
updated.user_profile = new_content.to_string();
self.identities.insert(agent_id.to_string(), updated);
self.save_to_disk();
}
/// Append to user profile
@@ -219,6 +311,7 @@ impl AgentIdentityManager {
};
self.proposals.push(proposal.clone());
self.save_to_disk();
proposal
}
@@ -256,6 +349,7 @@ impl AgentIdentityManager {
// Update proposal status
self.proposals[proposal_idx].status = ProposalStatus::Approved;
self.save_to_disk();
Ok(updated)
}
@@ -268,6 +362,7 @@ impl AgentIdentityManager {
.ok_or_else(|| "Proposal not found or not pending".to_string())?;
proposal.status = ProposalStatus::Rejected;
self.save_to_disk();
Ok(())
}
@@ -301,6 +396,7 @@ impl AgentIdentityManager {
}
self.identities.insert(agent_id.to_string(), updated);
self.save_to_disk();
Ok(())
}
@@ -375,6 +471,7 @@ impl AgentIdentityManager {
self.identities
.insert(agent_id.to_string(), files);
self.save_to_disk();
Ok(())
}
@@ -388,6 +485,7 @@ impl AgentIdentityManager {
self.identities.remove(agent_id);
self.proposals.retain(|p| p.agent_id != agent_id);
self.snapshots.retain(|s| s.agent_id != agent_id);
self.save_to_disk();
}
/// Export all identities for backup
@@ -400,6 +498,7 @@ impl AgentIdentityManager {
for (agent_id, files) in identities {
self.identities.insert(agent_id, files);
}
self.save_to_disk();
}
/// Get all proposals (for debugging)

View File

@@ -43,7 +43,7 @@ impl Default for ReflectionConfig {
Self {
trigger_after_conversations: 5,
trigger_after_hours: 24,
allow_soul_modification: false,
allow_soul_modification: true, // Allow soul modification by default for self-evolution
require_approval: true,
use_llm: true,
llm_fallback_to_rules: true,
@@ -468,14 +468,17 @@ use tokio::sync::Mutex;
pub type ReflectionEngineState = Arc<Mutex<ReflectionEngine>>;
/// Initialize reflection engine with config
/// Updates the shared state with new configuration
///
/// When `config` is `None`, the engine's existing configuration is kept.
/// Always returns `Ok(true)`; the `Result` is for the Tauri command contract.
#[tauri::command]
pub async fn reflection_init(
    config: Option<ReflectionConfig>,
    state: tauri::State<'_, ReflectionEngineState>,
) -> Result<bool, String> {
    // Removed the leftover `let _engine = Arc::new(Mutex::new(...))` from the
    // pre-refactor version: it built and discarded a throwaway engine and
    // consumed `config` before the shared state could use it.
    let mut engine = state.lock().await;
    if let Some(cfg) = config {
        engine.update_config(cfg);
    }
    Ok(true)
}

View File

@@ -1427,6 +1427,8 @@ pub fn run() {
intelligence::heartbeat::heartbeat_get_config,
intelligence::heartbeat::heartbeat_update_config,
intelligence::heartbeat::heartbeat_get_history,
intelligence::heartbeat::heartbeat_update_memory_stats,
intelligence::heartbeat::heartbeat_record_correction,
// Context Compactor
intelligence::compactor::compactor_estimate_tokens,
intelligence::compactor::compactor_estimate_messages_tokens,