Files
zclaw_openfang/crates/zclaw-kernel/tests/chat_chain.rs
iven 591af5802c
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
test(kernel,growth): Phase 1 缝测试安全网 — 3条核心链路 19 测试全部通过
对话链路: 4 缝测试 (Tauri→Kernel / Kernel→LLM / LLM→UI / 流式生命周期)
Hands链路: 3 缝测试 (工具路由 / 执行回调 / 通用工具)
记忆链路: 3 缝测试 (FTS5存储 / 模式检索 / 去重)
冒烟测试: 3 Rust + 8 TypeScript 全量 PASS
- Kernel::boot_with_driver() 测试辅助方法
- 全量 cargo test 0 回归

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
2026-04-21 21:36:46 +08:00

144 lines
5.1 KiB
Rust

//! Conversation chain seam tests
//!
//! Verifies the integration seams between layers in the chat pipeline:
//! 1. Tauri→Kernel: chat command correctly forwards to kernel
//! 2. Kernel→LLM: middleware-processed prompt reaches MockLlmDriver
//! 3. LLM→UI: event ordering is delta → delta → complete
//! 4. Streaming: full send→stream→complete lifecycle
use std::sync::Arc;
use zclaw_kernel::{Kernel, KernelConfig};
use zclaw_runtime::test_util::MockLlmDriver;
use zclaw_runtime::{LoopEvent, LlmDriver};
use zclaw_types::AgentConfig;
/// Boot a kernel backed by a `MockLlmDriver` and spawn a single test agent.
///
/// The mock driver is pre-configured with a fixed text reply, so seam tests
/// can assert on pipeline structure without a real LLM backend.
/// Returns the booted kernel together with the spawned agent's id.
async fn test_kernel() -> (Kernel, zclaw_types::AgentId) {
let driver: Arc<dyn LlmDriver> =
    Arc::new(MockLlmDriver::new().with_text_response("Hello from mock!"));
let kernel = Kernel::boot_with_driver(KernelConfig::default(), driver)
    .await
    .expect("kernel boot");
let agent = AgentConfig::new("test-agent").with_system_prompt("You are a test assistant.");
let agent_id = agent.id;
kernel.spawn_agent(agent).await.expect("spawn agent");
(kernel, agent_id)
}
// ---------------------------------------------------------------------------
// Seam 1: Tauri → Kernel (non-streaming)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_tauri_to_kernel_non_streaming() {
let (kernel, agent_id) = test_kernel().await;
// Seam 1: a non-streaming chat command must round-trip through the kernel
// and come back with a non-empty response body.
let reply = kernel
    .send_message(&agent_id, String::from("Hi"))
    .await
    .expect("send_message");
assert!(!reply.content.is_empty(), "response content should not be empty");
}
// ---------------------------------------------------------------------------
// Seam 2: Kernel → LLM (middleware processes prompt before reaching driver)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_kernel_to_llm_prompt_reaches_driver() {
let (kernel, agent_id) = test_kernel().await;
// Seam 2: the middleware-processed prompt must actually reach the driver.
// Assert on the FIRST call directly — previously its result was discarded
// with `let _ =`, so an error or empty response on the call this test is
// named after went completely unnoticed.
let result1 = kernel
    .send_message(&agent_id, "What is 2+2?".to_string())
    .await
    .expect("first send_message");
assert!(!result1.content.is_empty(), "first response should not be empty");
// A second call verifies the driver stays usable across consecutive sends.
let result2 = kernel
    .send_message(&agent_id, "And 3+3?".to_string())
    .await
    .expect("second send_message");
assert!(!result2.content.is_empty(), "second response should not be empty");
}
// ---------------------------------------------------------------------------
// Seam 3: LLM → UI event ordering (delta → delta → complete)
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_llm_to_ui_event_ordering() {
let (kernel, agent_id) = test_kernel().await;
let mut rx = kernel
.send_message_stream(&agent_id, "Hi".to_string())
.await
.expect("send_message_stream");
let mut events = Vec::new();
while let Some(event) = rx.recv().await {
match &event {
LoopEvent::Delta(_) => events.push("delta"),
LoopEvent::ThinkingDelta(_) => events.push("thinking"),
LoopEvent::Complete(_) => {
events.push("complete");
break;
}
LoopEvent::Error(msg) => {
panic!("unexpected error: {}", msg);
}
LoopEvent::ToolStart { .. } => events.push("tool_start"),
LoopEvent::ToolEnd { .. } => events.push("tool_end"),
LoopEvent::SubtaskStatus { .. } => events.push("subtask"),
LoopEvent::IterationStart { .. } => events.push("iteration"),
}
}
assert!(!events.is_empty(), "should receive events");
assert_eq!(events.last(), Some(&"complete"), "last event must be complete");
assert!(
events.iter().any(|e| *e == "delta"),
"should have at least one delta event"
);
}
// ---------------------------------------------------------------------------
// Seam 4: Full streaming lifecycle with consecutive messages
// ---------------------------------------------------------------------------
#[tokio::test]
async fn seam_streaming_consecutive_messages() {
let (kernel, agent_id) = test_kernel().await;
// Seam 4: two consecutive streams over the same agent must BOTH complete.
// First message — drain the stream and require a Complete event. Previously
// the token assertion only ran *inside* the loop, so a first stream that
// closed without ever emitting Complete passed silently; now we track
// completion symmetrically with the second stream.
let mut rx1 = kernel
    .send_message_stream(&agent_id, "First message".to_string())
    .await
    .expect("first stream");
let mut first_complete = false;
while let Some(event) = rx1.recv().await {
    if let LoopEvent::Complete(result) = event {
        first_complete = true;
        assert!(result.output_tokens > 0, "first response should have output tokens");
    }
}
assert!(first_complete, "first stream should complete");
// Second message (should use new session)
let mut rx2 = kernel
    .send_message_stream(&agent_id, "Second message".to_string())
    .await
    .expect("second stream");
let mut got_complete = false;
while let Some(event) = rx2.recv().await {
    if let LoopEvent::Complete(result) = event {
        got_complete = true;
        assert!(result.output_tokens > 0, "second response should have output tokens");
    }
}
assert!(got_complete, "second stream should complete");
}