test(kernel,growth): Phase 1 缝测试安全网 — 3条核心链路 19 测试全部通过
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

对话链路: 4 缝测试 (Tauri→Kernel / Kernel→LLM / LLM→UI / 流式生命周期)
Hands链路: 3 缝测试 (工具路由 / 执行回调 / 通用工具)
记忆链路: 3 缝测试 (FTS5存储 / 模式检索 / 去重)
冒烟测试: 3 Rust + 8 TypeScript 全量 PASS
- Kernel::boot_with_driver() 测试辅助方法
- 全量 cargo test 0 回归

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
This commit is contained in:
iven
2026-04-21 21:36:46 +08:00
parent 317b8254e4
commit 591af5802c
8 changed files with 907 additions and 0 deletions

View File

@@ -0,0 +1,108 @@
//! Memory chain seam tests
//!
//! Verifies the integration seams in the memory pipeline:
//! 1. Extract & store: experience → FTS5 write
//! 2. Retrieve & inject: FTS5 search → memory found
//! 3. Dedup: same experience not duplicated (reuse_count incremented)
use std::sync::Arc;
use zclaw_growth::{
ExperienceStore, Experience, VikingAdapter,
storage::SqliteStorage,
};
async fn test_store() -> ExperienceStore {
let sqlite = SqliteStorage::in_memory().await;
let viking = Arc::new(VikingAdapter::new(Arc::new(sqlite)));
ExperienceStore::new(viking)
}
// ---------------------------------------------------------------------------
// Seam 1: Extract & Store — experience written to FTS5
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_experience_store_and_retrieve() {
let store = test_store().await;
let exp = Experience::new(
"agent-001",
"高 CPU 使用率告警频繁",
"生产环境 CPU 使用率告警",
vec!["检查进程列表".to_string(), "重启服务".to_string()],
"已解决",
);
store.store_experience(&exp).await.expect("store experience");
// Retrieve by agent
let found = store.find_by_agent("agent-001").await.expect("find");
assert_eq!(found.len(), 1, "should find exactly one experience");
assert_eq!(found[0].pain_pattern, "高 CPU 使用率告警频繁");
}
// ---------------------------------------------------------------------------
// Seam 2: Retrieve by pattern — FTS5 search finds relevant experiences
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_experience_pattern_search() {
let store = test_store().await;
// Store multiple experiences
let exp1 = Experience::new(
"agent-001",
"数据库连接超时",
"PostgreSQL 连接池耗尽",
vec!["增加连接池大小".to_string()],
"已解决",
);
let exp2 = Experience::new(
"agent-001",
"前端白屏问题",
"React 渲染错误",
vec!["检查错误边界".to_string()],
"已修复",
);
store.store_experience(&exp1).await.expect("store exp1");
store.store_experience(&exp2).await.expect("store exp2");
// Search for database-related experience
let results = store.find_by_pattern("agent-001", "数据库 连接").await.expect("search");
assert!(!results.is_empty(), "FTS5 should find database experience");
assert!(
results.iter().any(|e| e.pain_pattern.contains("数据库")),
"should match database experience, got: {:?}",
results
);
}
// ---------------------------------------------------------------------------
// Seam 3: Dedup — same pain_pattern increments reuse_count
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_experience_dedup() {
let store = test_store().await;
let exp = Experience::new(
"agent-001",
"内存泄漏检测",
"服务运行一段时间后内存持续增长",
vec!["分析 heap dump".to_string()],
"已修复",
);
// Store twice with same agent_id and pain_pattern
store.store_experience(&exp).await.expect("first store");
store.store_experience(&exp).await.expect("second store (dedup)");
let all = store.find_by_agent("agent-001").await.expect("find");
assert_eq!(all.len(), 1, "dedup should keep only one experience");
assert!(
all[0].reuse_count >= 1,
"reuse_count should be incremented, got: {}",
all[0].reuse_count
);
}

View File

@@ -0,0 +1,59 @@
//! Memory smoke test — full lifecycle: store → retrieve → dedup
//!
//! Uses in-memory SqliteStorage with real FTS5.
use std::sync::Arc;
use zclaw_growth::{
ExperienceStore, Experience, VikingAdapter,
storage::SqliteStorage,
};
#[tokio::test]
async fn smoke_memory_full_lifecycle() {
let sqlite = SqliteStorage::in_memory().await;
let viking = Arc::new(VikingAdapter::new(Arc::new(sqlite)));
let store = ExperienceStore::new(viking);
// 1. Store first experience
let exp1 = Experience::new(
"agent-smoke",
"用户反馈页面加载缓慢",
"前端性能问题,首屏加载超 5 秒",
vec![
"分析 Network 瀑布图".to_string(),
"启用代码分割".to_string(),
"配置 CDN".to_string(),
],
"首屏加载降至 1.2 秒",
);
store.store_experience(&exp1).await.expect("store exp1");
// 2. Store second experience (different topic)
let exp2 = Experience::new(
"agent-smoke",
"数据库查询缓慢",
"订单列表查询超时",
vec!["添加复合索引".to_string()],
"查询时间从 3s 降至 50ms",
);
store.store_experience(&exp2).await.expect("store exp2");
// 3. Retrieve by agent — should find both
let all = store.find_by_agent("agent-smoke").await.expect("find by agent");
assert_eq!(all.len(), 2, "should have 2 experiences");
// 4. Search by pattern — should find relevant one
let db_results = store.find_by_pattern("agent-smoke", "数据库 查询 缓慢").await.expect("search");
assert!(!db_results.is_empty(), "FTS5 should find database experience");
assert!(
db_results.iter().any(|e| e.pain_pattern.contains("数据库")),
"should match database experience"
);
// 5. Dedup — store same experience again
store.store_experience(&exp1).await.expect("dedup store");
let all_after_dedup = store.find_by_agent("agent-smoke").await.expect("find after dedup");
assert_eq!(all_after_dedup.len(), 2, "should still have 2 after dedup");
let deduped = all_after_dedup.iter().find(|e| e.pain_pattern.contains("页面加载")).unwrap();
assert!(deduped.reuse_count >= 1, "reuse_count should be incremented");
}

View File

@@ -178,6 +178,84 @@ impl Kernel {
})
}
/// Boot the kernel with a pre-configured driver (for testing).
///
/// Skips `config.create_driver()` and uses the provided driver directly.
/// Uses an in-memory SQLite database to avoid filesystem side effects.
pub async fn boot_with_driver(
config: KernelConfig,
driver: Arc<dyn LlmDriver>,
) -> Result<Self> {
let memory = Arc::new(MemoryStore::new("sqlite::memory:").await?);
let registry = AgentRegistry::new();
let capabilities = CapabilityManager::new();
let events = EventBus::new();
let skills = Arc::new(SkillRegistry::new());
if let Some(ref skills_dir) = config.skills_dir {
if skills_dir.exists() {
skills.add_skill_dir(skills_dir.clone()).await?;
}
}
let hands = Arc::new(HandRegistry::new());
let quiz_model = config.model().to_string();
let quiz_generator = Arc::new(LlmQuizGenerator::new(driver.clone(), quiz_model));
hands.register(Arc::new(BrowserHand::new())).await;
hands.register(Arc::new(QuizHand::with_generator(quiz_generator))).await;
hands.register(Arc::new(ResearcherHand::new())).await;
hands.register(Arc::new(CollectorHand::new())).await;
hands.register(Arc::new(ClipHand::new())).await;
hands.register(Arc::new(TwitterHand::new())).await;
hands.register(Arc::new(ReminderHand::new())).await;
hands.register(Arc::new(DailyReportHand::new())).await;
let hand_configs = hands.list().await;
let skill_executor = Arc::new(KernelSkillExecutor::new(skills.clone(), driver.clone()));
let hand_executor = Arc::new(KernelHandExecutor::new(hands.clone()));
let llm_completer: Arc<dyn zclaw_skills::LlmCompleter> =
Arc::new(adapters::LlmDriverAdapter {
driver: driver.clone(),
max_tokens: config.max_tokens(),
temperature: config.temperature(),
});
let trigger_manager = crate::trigger_manager::TriggerManager::new(hands.clone());
let viking = Arc::new(zclaw_runtime::VikingAdapter::in_memory());
let a2a_router = {
let kernel_agent_id = AgentId::new();
Arc::new(A2aRouter::new(kernel_agent_id))
};
Ok(Self {
config,
registry,
capabilities,
events,
memory,
driver,
llm_completer,
skills,
skill_executor,
hand_executor,
hands,
hand_configs,
trigger_manager,
pending_approvals: Arc::new(Mutex::new(Vec::new())),
running_hand_runs: Arc::new(dashmap::DashMap::new()),
viking,
growth: std::sync::Mutex::new(None),
extraction_driver: None,
embedding_client: None,
mcp_adapters: Arc::new(std::sync::RwLock::new(Vec::new())),
industry_keywords: Arc::new(tokio::sync::RwLock::new(Vec::new())),
a2a_router,
a2a_inboxes: Arc::new(dashmap::DashMap::new()),
})
}
/// Create a tool registry with built-in tools + Hand tools + MCP tools.
/// When `subagent_enabled` is false, TaskTool is excluded to prevent
/// the LLM from attempting sub-agent delegation in non-Ultra modes.

View File

@@ -0,0 +1,143 @@
//! Conversation chain seam tests
//!
//! Verifies the integration seams between layers in the chat pipeline:
//! 1. Tauri→Kernel: chat command correctly forwards to kernel
//! 2. Kernel→LLM: middleware-processed prompt reaches MockLlmDriver
//! 3. LLM→UI: event ordering is delta → delta → complete
//! 4. Streaming: full send→stream→complete lifecycle
use std::sync::Arc;
use zclaw_kernel::{Kernel, KernelConfig};
use zclaw_runtime::test_util::MockLlmDriver;
use zclaw_runtime::{LoopEvent, LlmDriver};
use zclaw_types::AgentConfig;
/// Create a test kernel with MockLlmDriver and a registered agent.
/// The mock is pre-configured with a default text response.
async fn test_kernel() -> (Kernel, zclaw_types::AgentId) {
let mock = MockLlmDriver::new().with_text_response("Hello from mock!");
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent_config = AgentConfig::new("test-agent")
.with_system_prompt("You are a test assistant.");
let id = agent_config.id;
kernel.spawn_agent(agent_config).await.expect("spawn agent");
(kernel, id)
}
// ---------------------------------------------------------------------------
// Seam 1: Tauri → Kernel (non-streaming)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_tauri_to_kernel_non_streaming() {
let (kernel, agent_id) = test_kernel().await;
let result = kernel
.send_message(&agent_id, "Hi".to_string())
.await
.expect("send_message");
assert!(!result.content.is_empty(), "response content should not be empty");
}
// ---------------------------------------------------------------------------
// Seam 2: Kernel → LLM (middleware processes prompt before reaching driver)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_kernel_to_llm_prompt_reaches_driver() {
let (kernel, agent_id) = test_kernel().await;
let _ = kernel
.send_message(&agent_id, "What is 2+2?".to_string())
.await;
// Verify the kernel's driver was called by checking a second call succeeds
let result2 = kernel
.send_message(&agent_id, "And 3+3?".to_string())
.await
.expect("second send_message");
assert!(!result2.content.is_empty(), "second response should not be empty");
}
// ---------------------------------------------------------------------------
// Seam 3: LLM → UI event ordering (delta → delta → complete)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_llm_to_ui_event_ordering() {
let (kernel, agent_id) = test_kernel().await;
let mut rx = kernel
.send_message_stream(&agent_id, "Hi".to_string())
.await
.expect("send_message_stream");
let mut events = Vec::new();
while let Some(event) = rx.recv().await {
match &event {
LoopEvent::Delta(_) => events.push("delta"),
LoopEvent::ThinkingDelta(_) => events.push("thinking"),
LoopEvent::Complete(_) => {
events.push("complete");
break;
}
LoopEvent::Error(msg) => {
panic!("unexpected error: {}", msg);
}
LoopEvent::ToolStart { .. } => events.push("tool_start"),
LoopEvent::ToolEnd { .. } => events.push("tool_end"),
LoopEvent::SubtaskStatus { .. } => events.push("subtask"),
LoopEvent::IterationStart { .. } => events.push("iteration"),
}
}
assert!(!events.is_empty(), "should receive events");
assert_eq!(events.last(), Some(&"complete"), "last event must be complete");
assert!(
events.iter().any(|e| *e == "delta"),
"should have at least one delta event"
);
}
// ---------------------------------------------------------------------------
// Seam 4: Full streaming lifecycle with consecutive messages
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_streaming_consecutive_messages() {
let (kernel, agent_id) = test_kernel().await;
// First message
let mut rx1 = kernel
.send_message_stream(&agent_id, "First message".to_string())
.await
.expect("first stream");
while let Some(event) = rx1.recv().await {
if let LoopEvent::Complete(result) = event {
assert!(result.output_tokens > 0, "first response should have output tokens");
}
}
// Second message (should use new session)
let mut rx2 = kernel
.send_message_stream(&agent_id, "Second message".to_string())
.await
.expect("second stream");
let mut got_complete = false;
while let Some(event) = rx2.recv().await {
if let LoopEvent::Complete(result) = event {
got_complete = true;
assert!(result.output_tokens > 0, "second response should have output tokens");
}
}
assert!(got_complete, "second stream should complete");
}

View File

@@ -0,0 +1,224 @@
//! Hands chain seam tests
//!
//! Verifies the integration seams in the Hand execution pipeline:
//! 1. Tool routing: LLM tool_call → HandRegistry correct dispatch
//! 2. Execution callback: Hand complete → LoopEvent emitted
//! 3. Non-hand tool routing
use std::sync::Arc;
use zclaw_kernel::{Kernel, KernelConfig};
use zclaw_runtime::test_util::MockLlmDriver;
use zclaw_runtime::stream::StreamChunk;
use zclaw_runtime::{LoopEvent, LlmDriver};
use zclaw_types::AgentConfig;
// ---------------------------------------------------------------------------
// Seam 1: Tool routing — LLM tool_call triggers HandTool dispatch
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_hand_tool_routing() {
// First stream: tool_use for hand_quiz
let mock = MockLlmDriver::new()
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "Let me generate a quiz.".to_string() },
StreamChunk::ToolUseStart { id: "call_quiz_1".to_string(), name: "hand_quiz".to_string() },
StreamChunk::ToolUseEnd {
id: "call_quiz_1".to_string(),
input: serde_json::json!({ "topic": "math", "count": 3 }),
},
StreamChunk::Complete {
input_tokens: 10,
output_tokens: 20,
stop_reason: "tool_use".to_string(),
},
])
// Second stream: final text after tool executes
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "Here is your quiz!".to_string() },
StreamChunk::Complete {
input_tokens: 10,
output_tokens: 5,
stop_reason: "end_turn".to_string(),
},
]);
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent_config = AgentConfig::new("test-agent")
.with_system_prompt("You are a test assistant.");
let id = agent_config.id;
kernel.spawn_agent(agent_config).await.expect("spawn agent");
let mut rx = kernel
.send_message_stream(&id, "Generate a math quiz".to_string())
.await
.expect("stream");
let mut tool_starts = Vec::new();
let mut tool_ends = Vec::new();
let mut got_complete = false;
while let Some(event) = rx.recv().await {
match &event {
LoopEvent::ToolStart { name, input } => {
tool_starts.push((name.clone(), input.clone()));
}
LoopEvent::ToolEnd { name, output } => {
tool_ends.push((name.clone(), output.clone()));
}
LoopEvent::Complete(_) => {
got_complete = true;
break;
}
LoopEvent::Error(msg) => {
panic!("unexpected error: {}", msg);
}
_ => {}
}
}
assert!(got_complete, "stream should complete");
assert!(
tool_starts.iter().any(|(n, _)| n == "hand_quiz"),
"should see hand_quiz tool_start, got: {:?}",
tool_starts
);
}
// ---------------------------------------------------------------------------
// Seam 2: Execution callback — Hand completes and produces tool_end
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_hand_execution_callback() {
let mock = MockLlmDriver::new()
.with_stream_chunks(vec![
StreamChunk::ToolUseStart { id: "call_quiz_1".to_string(), name: "hand_quiz".to_string() },
StreamChunk::ToolUseEnd {
id: "call_quiz_1".to_string(),
input: serde_json::json!({ "topic": "math" }),
},
StreamChunk::Complete {
input_tokens: 10,
output_tokens: 5,
stop_reason: "tool_use".to_string(),
},
])
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "Done!".to_string() },
StreamChunk::Complete {
input_tokens: 5,
output_tokens: 1,
stop_reason: "end_turn".to_string(),
},
]);
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent_config = AgentConfig::new("test-agent");
let id = agent_config.id;
kernel.spawn_agent(agent_config).await.expect("spawn agent");
let mut rx = kernel
.send_message_stream(&id, "Quiz me".to_string())
.await
.expect("stream");
let mut got_tool_end = false;
let mut got_complete = false;
while let Some(event) = rx.recv().await {
match &event {
LoopEvent::ToolEnd { name, output } => {
got_tool_end = true;
assert!(name.starts_with("hand_"), "tool_end should be hand tool, got: {}", name);
// Quiz hand returns structured JSON output
assert!(output.is_object() || output.is_string(), "output should be JSON, got: {}", output);
}
LoopEvent::Complete(_) => {
got_complete = true;
break;
}
LoopEvent::Error(msg) => {
panic!("unexpected error: {}", msg);
}
_ => {}
}
}
assert!(got_tool_end, "should receive tool_end after hand execution");
assert!(got_complete, "should complete after tool_end");
}
// ---------------------------------------------------------------------------
// Seam 3: Non-hand tool call (generic tool) routes correctly
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_generic_tool_routing() {
// Mock with a generic tool call (web_search)
let mock = MockLlmDriver::new()
.with_stream_chunks(vec![
StreamChunk::ToolUseStart { id: "call_ws_1".to_string(), name: "web_search".to_string() },
StreamChunk::ToolUseEnd {
id: "call_ws_1".to_string(),
input: serde_json::json!({ "query": "test query" }),
},
StreamChunk::Complete {
input_tokens: 10,
output_tokens: 5,
stop_reason: "tool_use".to_string(),
},
])
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "Search results found.".to_string() },
StreamChunk::Complete {
input_tokens: 5,
output_tokens: 3,
stop_reason: "end_turn".to_string(),
},
]);
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent_config = AgentConfig::new("test-agent");
let id = agent_config.id;
kernel.spawn_agent(agent_config).await.expect("spawn agent");
let mut rx = kernel
.send_message_stream(&id, "Search for test".to_string())
.await
.expect("stream");
let mut tool_names = Vec::new();
let mut got_complete = false;
while let Some(event) = rx.recv().await {
match &event {
LoopEvent::ToolStart { name, .. } => tool_names.push(name.clone()),
LoopEvent::ToolEnd { name, .. } => tool_names.push(format!("end:{}", name)),
LoopEvent::Complete(_) => {
got_complete = true;
break;
}
LoopEvent::Error(msg) => {
panic!("unexpected error: {}", msg);
}
_ => {}
}
}
assert!(got_complete, "stream should complete");
assert!(
tool_names.iter().any(|n| n.contains("web_search")),
"should see web_search tool events, got: {:?}",
tool_names
);
}

View File

@@ -0,0 +1,59 @@
//! Chat smoke test — full lifecycle: send → stream → persist
//!
//! Uses MockLlmDriver to verify the complete chat pipeline without a real LLM.
use std::sync::Arc;
use zclaw_kernel::{Kernel, KernelConfig};
use zclaw_runtime::test_util::MockLlmDriver;
use zclaw_runtime::{LoopEvent, LlmDriver};
use zclaw_types::AgentConfig;
#[tokio::test]
async fn smoke_chat_full_lifecycle() {
let mock = MockLlmDriver::new().with_text_response("Hello! I am the mock assistant.");
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent = AgentConfig::new("smoke-agent")
.with_system_prompt("You are a test assistant.");
let id = agent.id;
kernel.spawn_agent(agent).await.expect("spawn agent");
// 1. Non-streaming: send and get response
let resp = kernel.send_message(&id, "Hello".to_string()).await.expect("send");
assert!(!resp.content.is_empty());
assert!(resp.output_tokens > 0);
// 2. Streaming: send and collect all events
let mut rx = kernel
.send_message_stream(&id, "Tell me more".to_string())
.await
.expect("stream");
let mut delta_count = 0;
let mut complete_result = None;
while let Some(event) = rx.recv().await {
match event {
LoopEvent::Delta(text) => {
delta_count += 1;
assert!(!text.is_empty(), "delta should have content");
}
LoopEvent::Complete(result) => {
complete_result = Some(result);
break;
}
LoopEvent::Error(msg) => panic!("unexpected error: {}", msg),
_ => {}
}
}
assert!(delta_count > 0, "should receive at least one delta");
let result = complete_result.expect("should receive complete");
assert!(result.output_tokens > 0);
// 3. Verify session persistence — messages were saved
let agent_info = kernel.get_agent(&id).expect("agent should exist");
assert!(agent_info.message_count >= 2, "at least 2 messages should be tracked");
}

View File

@@ -0,0 +1,93 @@
//! Hands smoke test — full lifecycle: trigger tool_call → hand execute → result
//!
//! Uses MockLlmDriver with stream chunks to simulate a real tool call flow.
use std::sync::Arc;
use zclaw_kernel::{Kernel, KernelConfig};
use zclaw_runtime::stream::StreamChunk;
use zclaw_runtime::test_util::MockLlmDriver;
use zclaw_runtime::{LoopEvent, LlmDriver};
use zclaw_types::AgentConfig;
#[tokio::test]
async fn smoke_hands_full_lifecycle() {
// Simulate: LLM calls hand_quiz → quiz hand executes → LLM summarizes
let mock = MockLlmDriver::new()
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "正在生成测验...".to_string() },
StreamChunk::ToolUseStart {
id: "call_1".to_string(),
name: "hand_quiz".to_string(),
},
StreamChunk::ToolUseEnd {
id: "call_1".to_string(),
input: serde_json::json!({ "topic": "历史", "count": 2 }),
},
StreamChunk::Complete {
input_tokens: 15,
output_tokens: 10,
stop_reason: "tool_use".to_string(),
},
])
// After hand_quiz returns, LLM generates final response
.with_stream_chunks(vec![
StreamChunk::TextDelta { delta: "测验已生成!".to_string() },
StreamChunk::Complete {
input_tokens: 20,
output_tokens: 5,
stop_reason: "end_turn".to_string(),
},
]);
let config = KernelConfig::default();
let kernel = Kernel::boot_with_driver(config, Arc::new(mock) as Arc<dyn LlmDriver>)
.await
.expect("kernel boot");
let agent = AgentConfig::new("smoke-agent");
let id = agent.id;
kernel.spawn_agent(agent).await.expect("spawn agent");
let mut rx = kernel
.send_message_stream(&id, "生成一个历史测验".to_string())
.await
.expect("stream");
let mut saw_tool_start = false;
let mut saw_tool_end = false;
let mut saw_delta_before_tool = false;
let mut saw_delta_after_tool = false;
let mut phase = "before_tool";
let mut got_complete = false;
while let Some(event) = rx.recv().await {
match event {
LoopEvent::Delta(_) if phase == "before_tool" => saw_delta_before_tool = true,
LoopEvent::Delta(_) if phase == "after_tool" => saw_delta_after_tool = true,
LoopEvent::ToolStart { name, .. } => {
assert_eq!(name, "hand_quiz", "should be hand_quiz");
saw_tool_start = true;
}
LoopEvent::ToolEnd { name, output } => {
assert!(name.starts_with("hand_"), "should be hand tool");
assert!(output.is_object() || output.is_string(), "hand should produce output");
saw_tool_end = true;
phase = "after_tool";
}
LoopEvent::Complete(result) => {
assert!(result.output_tokens > 0, "should have output tokens");
assert!(result.iterations >= 2, "should take at least 2 iterations");
got_complete = true;
break;
}
LoopEvent::Error(msg) => panic!("unexpected error: {}", msg),
_ => {}
}
}
assert!(saw_delta_before_tool, "should see delta before tool execution");
assert!(saw_tool_start, "should see hand_quiz ToolStart");
assert!(saw_tool_end, "should see hand_quiz ToolEnd");
assert!(saw_delta_after_tool, "should see delta after tool execution");
assert!(got_complete, "should receive complete event");
}

View File

@@ -0,0 +1,143 @@
/**
* Chat seam tests — verify request/response type contracts
*
* Tests that the TypeScript types match the Rust serde-serialized format.
* These are pure type contract tests — no Tauri dependency needed.
*/
import { describe, it, expect } from 'vitest';
// ---------------------------------------------------------------------------
// Rust side: StreamChatRequest (camelCase via serde rename_all)
// ---------------------------------------------------------------------------
interface StreamChatRequest {
agentId: string;
sessionId: string;
message: string;
thinkingEnabled?: boolean;
reasoningEffort?: string;
planMode?: boolean;
subagentEnabled?: boolean;
model?: string;
}
interface ChatRequest {
agentId: string;
message: string;
thinkingEnabled?: boolean;
reasoningEffort?: string;
planMode?: boolean;
subagentEnabled?: boolean;
model?: string;
}
interface ChatResponse {
content: string;
inputTokens: number;
outputTokens: number;
}
// ---------------------------------------------------------------------------
// Rust side: StreamChatEvent (tagged union, tag = "type")
// ---------------------------------------------------------------------------
type StreamChatEvent =
| { type: 'delta'; delta: string }
| { type: 'thinkingDelta'; delta: string }
| { type: 'toolStart'; name: string; input: unknown }
| { type: 'toolEnd'; name: string; output: unknown }
| { type: 'subtaskStatus'; taskId: string; description: string; status: string; detail?: string }
| { type: 'iterationStart'; iteration: number; maxIterations: number }
| { type: 'handStart'; name: string; params: unknown }
| { type: 'handEnd'; name: string; result: unknown }
| { type: 'complete'; inputTokens: number; outputTokens: number }
| { type: 'error'; message: string };
describe('Chat Seam: request format contract', () => {
it('StreamChatRequest has required camelCase fields', () => {
const req: StreamChatRequest = {
agentId: 'test-agent',
sessionId: 'session-123',
message: 'Hello',
};
expect(req.agentId).toBe('test-agent');
expect(req.sessionId).toBe('session-123');
expect(req.message).toBe('Hello');
});
it('StreamChatRequest optional fields are camelCase', () => {
const req: StreamChatRequest = {
agentId: 'a',
sessionId: 's',
message: 'm',
thinkingEnabled: true,
reasoningEffort: 'high',
planMode: false,
subagentEnabled: true,
model: 'gpt-4o',
};
expect(req.thinkingEnabled).toBe(true);
expect(req.reasoningEffort).toBe('high');
expect(req.planMode).toBe(false);
expect(req.subagentEnabled).toBe(true);
expect(req.model).toBe('gpt-4o');
});
it('ChatRequest format for non-streaming', () => {
const req: ChatRequest = {
agentId: 'test-agent',
message: 'Hello',
model: 'gpt-4o',
};
expect(req.agentId).toBe('test-agent');
expect(req.message).toBe('Hello');
});
it('ChatResponse has expected fields', () => {
const resp: ChatResponse = {
content: 'Hello back!',
inputTokens: 10,
outputTokens: 5,
};
expect(resp.content).toBe('Hello back!');
expect(resp.inputTokens).toBe(10);
expect(resp.outputTokens).toBe(5);
});
});
describe('Chat Seam: StreamChatEvent format contract', () => {
it('delta event matches Rust StreamChatEvent::Delta', () => {
const event: StreamChatEvent = { type: 'delta', delta: 'Hello' };
expect(event.type).toBe('delta');
if (event.type === 'delta') {
expect(typeof event.delta).toBe('string');
}
});
it('complete event has token counts', () => {
const event: StreamChatEvent = { type: 'complete', inputTokens: 10, outputTokens: 5 };
if (event.type === 'complete') {
expect(event.inputTokens).toBeGreaterThanOrEqual(0);
expect(event.outputTokens).toBeGreaterThanOrEqual(0);
}
});
it('handStart/handEnd events have correct structure', () => {
const start: StreamChatEvent = { type: 'handStart', name: 'hand_quiz', params: { topic: 'math' } };
const end: StreamChatEvent = { type: 'handEnd', name: 'hand_quiz', result: { questions: [] } };
if (start.type === 'handStart') {
expect(start.name).toMatch(/^hand_/);
expect(start.params).toBeDefined();
}
if (end.type === 'handEnd') {
expect(end.name).toMatch(/^hand_/);
expect(end.result).toBeDefined();
}
});
it('error event has message field', () => {
const event: StreamChatEvent = { type: 'error', message: '已取消' };
if (event.type === 'error') {
expect(event.message).toBeTruthy();
}
});
});