chore: commit all work in progress (SaaS backend enhancements, Admin UI, desktop integration)

Includes extensive SaaS platform improvements, Admin console updates, desktop integration polish, documentation sync, and test-file refactoring. Prepares a clean working tree for QA testing.
iven
2026-03-29 10:46:26 +08:00
parent 9a5fad2b59
commit 5fdf96c3f5
268 changed files with 22011 additions and 3886 deletions


@@ -130,7 +130,8 @@ impl LlmDriver for OpenAiDriver {
let api_key = self.api_key.expose_secret().to_string();
Box::pin(stream! {
tracing::debug!("[OpenAiDriver:stream] Starting HTTP request...");
println!("[OpenAI:stream] POST to {}/chat/completions", base_url);
println!("[OpenAI:stream] Request model={}, stream={}", stream_request.model, stream_request.stream);
let response = match self.client
.post(format!("{}/chat/completions", base_url))
.header("Authorization", format!("Bearer {}", api_key))
@@ -141,11 +142,11 @@ impl LlmDriver for OpenAiDriver {
.await
{
Ok(r) => {
tracing::debug!("[OpenAiDriver:stream] Got response, status: {}", r.status());
println!("[OpenAI:stream] Response status: {}, content-type: {:?}", r.status(), r.headers().get("content-type"));
r
},
Err(e) => {
tracing::error!("[OpenAiDriver:stream] HTTP request failed: {:?}", e);
println!("[OpenAI:stream] HTTP request FAILED: {:?}", e);
yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
return;
}
@@ -154,6 +155,7 @@ impl LlmDriver for OpenAiDriver {
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
println!("[OpenAI:stream] API error {}: {}", status, &body[..body.len().min(500)]);
yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
return;
}
@@ -161,21 +163,45 @@ impl LlmDriver for OpenAiDriver {
let mut byte_stream = response.bytes_stream();
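// Accumulated tool call fragments: tool_call id -> (function name, concatenated JSON args)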
let mut accumulated_tool_calls: std::collections::HashMap<String, (String, String)> = std::collections::HashMap::new();
let mut current_tool_id: Option<String> = None;
let mut sse_event_count: usize = 0;
let mut raw_bytes_total: usize = 0;
while let Some(chunk_result) = byte_stream.next().await {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
println!("[OpenAI:stream] Byte stream error: {:?}", e);
yield Err(ZclawError::LlmError(format!("Stream error: {}", e)));
continue;
}
};
raw_bytes_total += chunk.len();
let text = String::from_utf8_lossy(&chunk);
// Log first 500 bytes of raw data for debugging SSE format
if raw_bytes_total <= 600 {
println!("[OpenAI:stream] RAW chunk ({} bytes): {:?}", text.len(), &text[..text.len().min(500)]);
}
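// NOTE: this parses SSE line-by-line per chunk; it assumes no "data:" line is
// split across two network chunks (an unbuffered partial line would fail JSON parsing)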
for line in text.lines() {
let trimmed = line.trim();
if trimmed.is_empty() || trimmed.starts_with(':') {
continue; // Skip empty lines and SSE comments
}
// Handle both "data: " (standard) and "data:" (no space)
let data = if let Some(d) = trimmed.strip_prefix("data: ") {
Some(d)
} else if let Some(d) = trimmed.strip_prefix("data:") {
Some(d.trim_start())
} else {
None
};
if let Some(data) = data {
sse_event_count += 1;
if sse_event_count <= 3 || data == "[DONE]" {
println!("[OpenAI:stream] SSE #{}: {}", sse_event_count, &data[..data.len().min(300)]);
}
if data == "[DONE]" {
tracing::debug!("[OpenAI] Stream done, accumulated_tool_calls: {:?}", accumulated_tool_calls.len());
println!("[OpenAI:stream] Received [DONE], total SSE events: {}, raw bytes: {}", sse_event_count, raw_bytes_total);
// Emit ToolUseEnd for all accumulated tool calls (skip invalid ones with empty name)
for (id, (name, args)) in &accumulated_tool_calls {
@@ -216,10 +242,19 @@ impl LlmDriver for OpenAiDriver {
// Handle text content
if let Some(content) = &delta.content {
if !content.is_empty() {
tracing::debug!("[OpenAI:stream] TextDelta: {} chars", content.len());
yield Ok(StreamChunk::TextDelta { delta: content.clone() });
}
}
// Handle reasoning_content (Kimi, Qwen, DeepSeek, GLM thinking)
if let Some(reasoning) = &delta.reasoning_content {
if !reasoning.is_empty() {
tracing::debug!("[OpenAI:stream] ThinkingDelta (reasoning_content): {} chars", reasoning.len());
yield Ok(StreamChunk::ThinkingDelta { delta: reasoning.clone() });
}
}
// Handle tool calls
if let Some(tool_calls) = &delta.tool_calls {
tracing::trace!("[OpenAI] Received tool_calls delta: {:?}", tool_calls);
@@ -284,6 +319,7 @@ impl LlmDriver for OpenAiDriver {
}
}
}
println!("[OpenAI:stream] Byte stream ended. Total: {} SSE events, {} raw bytes", sse_event_count, raw_bytes_total);
})
}
}
@@ -304,55 +340,122 @@ impl OpenAiDriver {
request.system.clone()
};
// Build messages with tool result truncation to prevent payload overflow.
// Most LLM APIs have a 2-4MB HTTP payload limit.
const MAX_TOOL_RESULT_BYTES: usize = 32_768; // 32KB per tool result
const MAX_PAYLOAD_BYTES: usize = 1_800_000; // 1.8MB (under 2MB API limit)
let mut messages: Vec<OpenAiMessage> = Vec::new();
let mut pending_tool_calls: Option<Vec<OpenAiToolCall>> = None;
let mut pending_content: Option<String> = None;
let mut pending_reasoning: Option<String> = None;
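// flush_pending merges any buffered assistant text/reasoning with buffered tool
// calls into a single assistant message, since OpenAI-compatible APIs expect
// tool_calls to ride on the assistant turn that precedes the "tool" results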
let flush_pending = |tc: &mut Option<Vec<OpenAiToolCall>>,
c: &mut Option<String>,
r: &mut Option<String>,
out: &mut Vec<OpenAiMessage>| {
let calls = tc.take();
let content = c.take();
let reasoning = r.take();
if let Some(calls) = calls {
if !calls.is_empty() {
// Merge assistant content + reasoning into the tool call message
out.push(OpenAiMessage {
role: "assistant".to_string(),
content: content.filter(|s| !s.is_empty()),
reasoning_content: reasoning.filter(|s| !s.is_empty()),
tool_calls: Some(calls),
tool_call_id: None,
});
return;
}
}
// No tool calls — emit a plain assistant message
if content.is_some() || reasoning.is_some() {
out.push(OpenAiMessage {
role: "assistant".to_string(),
content: content.filter(|s| !s.is_empty()),
reasoning_content: reasoning.filter(|s| !s.is_empty()),
tool_calls: None,
tool_call_id: None,
});
}
};
for msg in &request.messages {
match msg {
zclaw_types::Message::User { content } => {
flush_pending(&mut pending_tool_calls, &mut pending_content, &mut pending_reasoning, &mut messages);
messages.push(OpenAiMessage {
role: "user".to_string(),
content: Some(content.clone()),
tool_calls: None,
tool_call_id: None,
reasoning_content: None,
});
}
zclaw_types::Message::Assistant { content, thinking } => {
flush_pending(&mut pending_tool_calls, &mut pending_content, &mut pending_reasoning, &mut messages);
// Don't push immediately — wait to see if next messages are ToolUse
pending_content = Some(content.clone());
pending_reasoning = thinking.clone();
}
zclaw_types::Message::System { content } => {
flush_pending(&mut pending_tool_calls, &mut pending_content, &mut pending_reasoning, &mut messages);
messages.push(OpenAiMessage {
role: "system".to_string(),
content: Some(content.clone()),
tool_calls: None,
tool_call_id: None,
reasoning_content: None,
});
}
zclaw_types::Message::ToolUse { id, tool, input } => {
// Ensure arguments is always a valid JSON object, never null or invalid
// Accumulate tool calls — they'll be merged with the pending assistant message
let args = if input.is_null() {
"{}".to_string()
} else {
serde_json::to_string(input).unwrap_or_else(|_| "{}".to_string())
};
pending_tool_calls
.get_or_insert_with(Vec::new)
.push(OpenAiToolCall {
id: id.clone(),
r#type: "function".to_string(),
function: FunctionCall {
name: tool.to_string(),
arguments: args,
},
});
}
zclaw_types::Message::ToolResult { tool_call_id, output, is_error, .. } => {
flush_pending(&mut pending_tool_calls, &mut pending_content, &mut pending_reasoning, &mut messages);
let content_str = if *is_error {
format!("Error: {}", output)
} else {
output.to_string()
};
// Truncate oversized tool results to prevent payload overflow,
// cutting at a char boundary so multi-byte UTF-8 content cannot panic the slice
let truncated = if content_str.len() > MAX_TOOL_RESULT_BYTES {
let mut cut = MAX_TOOL_RESULT_BYTES;
while !content_str.is_char_boundary(cut) {
cut -= 1;
}
let mut s = String::from(&content_str[..cut]);
s.push_str("\n\n... [content truncated: original too large]");
s
} else {
content_str
};
messages.push(OpenAiMessage {
role: "tool".to_string(),
content: Some(truncated),
tool_calls: None,
tool_call_id: Some(tool_call_id.clone()),
reasoning_content: None,
});
}
}
}
// Flush any remaining accumulated assistant content and/or tool calls
flush_pending(&mut pending_tool_calls, &mut pending_content, &mut pending_reasoning, &mut messages);
// Add system prompt if provided
let mut messages = messages;
@@ -361,6 +464,8 @@ impl OpenAiDriver {
role: "system".to_string(),
content: Some(system.clone()),
tool_calls: None,
tool_call_id: None,
reasoning_content: None,
});
}
@@ -376,7 +481,7 @@ impl OpenAiDriver {
})
.collect();
let api_request = OpenAiRequest {
model: request.model.clone(), // Use model ID directly without any transformation
messages,
max_tokens: request.max_tokens,
@@ -384,7 +489,75 @@ impl OpenAiDriver {
stop: if request.stop.is_empty() { None } else { Some(request.stop.clone()) },
stream: request.stream,
tools: if tools.is_empty() { None } else { Some(tools) },
};
// Pre-send payload size validation
if let Ok(serialized) = serde_json::to_string(&api_request) {
if serialized.len() > MAX_PAYLOAD_BYTES {
tracing::warn!(
target: "openai_driver",
"Request payload too large: {} bytes (limit: {}), truncating messages",
serialized.len(),
MAX_PAYLOAD_BYTES
);
return Self::truncate_messages_to_fit(api_request, MAX_PAYLOAD_BYTES);
}
tracing::debug!(
target: "openai_driver",
"Request payload size: {} bytes (limit: {})",
serialized.len(),
MAX_PAYLOAD_BYTES
);
}
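// If serialization fails here the size check is skipped; reqwest surfaces the error on send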
api_request
}
/// Emergency truncation: drop oldest non-system messages until payload fits
fn truncate_messages_to_fit(mut request: OpenAiRequest, _max_bytes: usize) -> OpenAiRequest {
// Keep system message (if any) and last 4 non-system messages
let has_system = request.messages.first()
.map(|m| m.role == "system")
.unwrap_or(false);
let non_system: Vec<OpenAiMessage> = request.messages.into_iter()
.filter(|m| m.role != "system")
.collect();
// Keep last N messages and truncate any remaining large tool results
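// NOTE: keeping a raw tail can orphan a "tool" message from the assistant turn
// that issued its tool_calls; this is an emergency recovery path, not a general compactor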
let keep_count = 4.min(non_system.len());
let start = non_system.len() - keep_count;
let kept: Vec<OpenAiMessage> = non_system.into_iter()
.skip(start)
.map(|mut msg| {
// Additional per-message truncation for tool results
if msg.role == "tool" {
if let Some(ref content) = msg.content {
if content.len() > 16_384 {
// Cut at a char boundary to avoid panicking on multi-byte UTF-8
let mut cut = 16_384;
while !content.is_char_boundary(cut) {
cut -= 1;
}
let mut s = String::from(&content[..cut]);
s.push_str("\n\n... [truncated by context compression]");
msg.content = Some(s);
}
}
}
msg
})
.collect();
let mut messages = Vec::new();
if has_system {
messages.push(OpenAiMessage {
role: "system".to_string(),
content: Some("You are a helpful AI assistant. (注意:对话历史已被压缩以适应上下文大小限制)".to_string()),
tool_calls: None,
tool_call_id: None,
reasoning_content: None,
});
}
messages.extend(kept);
request.messages = messages;
request
}
fn convert_response(&self, api_response: OpenAiResponse, model: String) -> CompletionResponse {
@@ -398,6 +571,7 @@ impl OpenAiDriver {
// This is important because some providers return empty content with tool_calls
let has_tool_calls = c.message.tool_calls.as_ref().map(|tc| !tc.is_empty()).unwrap_or(false);
let has_content = c.message.content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
let has_reasoning = c.message.reasoning_content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
let blocks = if has_tool_calls {
// Tool calls take priority
@@ -413,6 +587,11 @@ impl OpenAiDriver {
let text = c.message.content.as_ref().unwrap();
tracing::debug!("[OpenAiDriver:convert_response] Using text content: {} chars", text.len());
vec![ContentBlock::Text { text: text.clone() }]
} else if has_reasoning {
// Content empty but reasoning_content present (Kimi, Qwen, DeepSeek)
let reasoning = c.message.reasoning_content.as_ref().unwrap();
tracing::debug!("[OpenAiDriver:convert_response] Using reasoning_content: {} chars", reasoning.len());
vec![ContentBlock::Text { text: reasoning.clone() }]
} else {
// No content or tool_calls
tracing::debug!("[OpenAiDriver:convert_response] No content or tool_calls, using empty text");
@@ -594,6 +773,10 @@ struct OpenAiMessage {
content: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
tool_calls: Option<Vec<OpenAiToolCall>>,
#[serde(skip_serializing_if = "Option::is_none")]
tool_call_id: Option<String>,
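// Thinking-token extension used by Kimi, Qwen, DeepSeek, and GLM; omitted when absent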
#[serde(skip_serializing_if = "Option::is_none")]
reasoning_content: Option<String>,
}
#[derive(Serialize)]
@@ -656,6 +839,8 @@ struct OpenAiResponseMessage {
#[serde(default)]
content: Option<String>,
#[serde(default)]
reasoning_content: Option<String>,
#[serde(default)]
tool_calls: Option<Vec<OpenAiToolCallResponse>>,
}
@@ -705,6 +890,8 @@ struct OpenAiDelta {
#[serde(default)]
content: Option<String>,
#[serde(default)]
reasoning_content: Option<String>,
#[serde(default)]
tool_calls: Option<Vec<OpenAiToolCallDelta>>,
}