test(growth,runtime,skills): 深度验证测试 Phase 1-2 — 20 个新测试
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

- MockLlmDriver 基础设施 (zclaw-runtime/src/test_util.rs)
- 经验闭环 E-01~06: 累积/溢出/反序列化/跨行业/并发/阈值
- Embedding 管道 EM-01~08: 路由/降级/维度不匹配/空查询/CJK/LLM Fallback/热更新
- Skill 执行 SK-01~03: 工具传递/纯 Prompt/锁竞争
This commit is contained in:
iven
2026-04-21 19:00:29 +08:00
parent b726d0cd5e
commit 79e7cd3446
6 changed files with 1092 additions and 0 deletions

View File

@@ -19,6 +19,8 @@ pub mod middleware;
pub mod prompt;
pub mod nl_schedule;
pub mod test_util;
// Re-export main types
pub use driver::{
LlmDriver, CompletionRequest, CompletionResponse, ContentBlock, StopReason,

View File

@@ -0,0 +1,206 @@
//! Shared test utilities for zclaw-runtime and dependent crates.
//!
//! Provides `MockLlmDriver` — a controllable LLM driver for offline testing.
use crate::driver::{
CompletionRequest, CompletionResponse, ContentBlock, LlmDriver, StopReason,
};
use crate::stream::StreamChunk;
use async_trait::async_trait;
use futures::{Stream, StreamExt};
use serde_json::Value;
use std::collections::VecDeque;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use zclaw_types::Result;
use zclaw_types::ZclawError;
/// Thread-safe mock LLM driver for testing.
///
/// Responses are queued FIFO: each `complete()` call pops the next queued
/// response, falling back to a canned default when the queue is empty.
/// All mutable state lives behind `Arc<Mutex<…>>` or atomics, so one mock
/// instance can be shared across tasks/threads.
///
/// # Usage
/// ```ignore
/// let mock = MockLlmDriver::new()
///     .with_text_response("Hello!")
///     .with_text_response("How can I help?");
///
/// let resp = mock.complete(request).await?;
/// assert_eq!(resp.content_text(), "Hello!");
/// ```
pub struct MockLlmDriver {
    /// FIFO queue of responses handed out by `complete()`.
    responses: Arc<Mutex<VecDeque<CompletionResponse>>>,
    /// FIFO queue of chunk sequences handed out by `stream()`.
    stream_chunks: Arc<Mutex<VecDeque<Vec<StreamChunk>>>>,
    /// Incremented on every `complete()` *and* `stream()` call.
    call_count: AtomicUsize,
    /// Snapshot of the most recent request, for test inspection.
    last_request: Arc<Mutex<Option<CompletionRequest>>>,
    /// If true, `complete()` returns an error instead of a response.
    fail_mode: Arc<Mutex<bool>>,
}
impl MockLlmDriver {
    /// Create an empty mock: no queued responses, zero calls, fail mode off.
    pub fn new() -> Self {
        Self {
            responses: Arc::new(Mutex::new(VecDeque::new())),
            stream_chunks: Arc::new(Mutex::new(VecDeque::new())),
            call_count: AtomicUsize::new(0),
            last_request: Arc::new(Mutex::new(None)),
            fail_mode: Arc::new(Mutex::new(false)),
        }
    }

    /// Queue a plain-text response that ends with `StopReason::EndTurn`.
    pub fn with_text_response(self, text: &str) -> Self {
        self.push_response(CompletionResponse {
            content: vec![ContentBlock::Text { text: text.to_string() }],
            model: "mock-model".to_string(),
            input_tokens: 10,
            // Rough token estimate: ~4 characters per token.
            output_tokens: text.len() as u32 / 4,
            stop_reason: StopReason::EndTurn,
        });
        self
    }

    /// Queue a response containing a tool call (`StopReason::ToolUse`).
    ///
    /// NOTE: the generated `call_N` id uses the *current* call count, which
    /// is usually 0 at build time; ids are only distinct if responses are
    /// queued between `complete()` calls.
    pub fn with_tool_call(self, tool_name: &str, args: Value) -> Self {
        self.push_response(CompletionResponse {
            content: vec![
                ContentBlock::Text { text: format!("Calling {}", tool_name) },
                ContentBlock::ToolUse {
                    id: format!("call_{}", self.call_count()),
                    name: tool_name.to_string(),
                    input: args,
                },
            ],
            model: "mock-model".to_string(),
            input_tokens: 10,
            output_tokens: 20,
            stop_reason: StopReason::ToolUse,
        });
        self
    }

    /// Queue an "error" response: empty content with `StopReason::Error`.
    ///
    /// The `_error` message itself is currently discarded — only the stop
    /// reason is observable to the caller.
    pub fn with_error(self, _error: &str) -> Self {
        // Bug fix: the original took `self` (immutable binding) while
        // `push_response` required `&mut self`, which failed to compile.
        // `push_response` now takes `&self` (interior mutability), so all
        // builders can take plain `self`.
        self.push_response(CompletionResponse {
            content: vec![],
            model: "mock-model".to_string(),
            input_tokens: 0,
            output_tokens: 0,
            stop_reason: StopReason::Error,
        });
        self
    }

    /// Queue a raw, caller-constructed response.
    pub fn with_response(self, response: CompletionResponse) -> Self {
        self.push_response(response);
        self
    }

    /// Queue the chunk sequence returned by one `stream()` call.
    pub fn with_stream_chunks(self, chunks: Vec<StreamChunk>) -> Self {
        self.stream_chunks
            .lock()
            .expect("stream_chunks lock")
            .push_back(chunks);
        self
    }

    /// Enable fail mode — all `complete()` calls return an error.
    pub fn set_fail_mode(&self, fail: bool) {
        *self.fail_mode.lock().expect("fail_mode lock") = fail;
    }

    /// Number of times `complete()` or `stream()` was called.
    pub fn call_count(&self) -> usize {
        self.call_count.load(Ordering::SeqCst)
    }

    /// Inspect the last request sent to the driver (via either
    /// `complete()` or `stream()`).
    pub fn last_request(&self) -> Option<CompletionRequest> {
        self.last_request
            .lock()
            .expect("last_request lock")
            .clone()
    }

    /// Append a response to the FIFO queue. Takes `&self`: all state sits
    /// behind a mutex, so no exclusive borrow is needed.
    fn push_response(&self, resp: CompletionResponse) {
        self.responses
            .lock()
            .expect("responses lock")
            .push_back(resp);
    }

    /// Pop the next queued response, or a canned default when the queue
    /// is empty (so tests never hang on an unexpected extra call).
    fn next_response(&self) -> CompletionResponse {
        let mut queue = self.responses.lock().expect("responses lock");
        queue
            .pop_front()
            .unwrap_or_else(|| CompletionResponse {
                content: vec![ContentBlock::Text {
                    text: "mock default response".to_string(),
                }],
                model: "mock-model".to_string(),
                input_tokens: 0,
                output_tokens: 0,
                stop_reason: StopReason::EndTurn,
            })
    }
}
impl Default for MockLlmDriver {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl LlmDriver for MockLlmDriver {
    /// Provider identifier for this mock driver.
    fn provider(&self) -> &str {
        "mock"
    }

    /// Record the call, then either fail (fail mode) or pop the next
    /// queued response.
    async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse> {
        self.call_count.fetch_add(1, Ordering::SeqCst);
        self.last_request
            .lock()
            .expect("last_request lock")
            .replace(request);
        let failing = *self.fail_mode.lock().expect("fail_mode lock");
        if failing {
            Err(ZclawError::LlmError("mock driver fail mode".to_string()))
        } else {
            Ok(self.next_response())
        }
    }

    /// Record the call, then emit the next queued chunk sequence (or a
    /// canned two-chunk default) as a ready stream. Note: fail mode is
    /// not consulted here — only `complete()` honors it.
    fn stream(
        &self,
        request: CompletionRequest,
    ) -> Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send + '_>> {
        self.call_count.fetch_add(1, Ordering::SeqCst);
        self.last_request
            .lock()
            .expect("last_request lock")
            .replace(request);
        let queued = self
            .stream_chunks
            .lock()
            .expect("stream_chunks lock")
            .pop_front();
        let chunks = match queued {
            Some(c) => c,
            None => vec![
                StreamChunk::TextDelta {
                    delta: "mock stream".to_string(),
                },
                StreamChunk::Complete {
                    input_tokens: 10,
                    output_tokens: 2,
                    stop_reason: "end_turn".to_string(),
                },
            ],
        };
        let items: Vec<Result<StreamChunk>> = chunks.into_iter().map(Ok).collect();
        futures::stream::iter(items).boxed()
    }

    /// The mock never needs credentials/config.
    fn is_configured(&self) -> bool {
        true
    }
}