Files
zclaw_openfang/crates/zclaw-kernel/src/generation/chat.rs
iven 4329bae1ea
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
fix(audit): Batch 2 生产代码 unwrap 替换 (20 处)
P0 修复:
- viking_commands.rs: URI 路径构建 unwrap → ok_or_else 错误传播
- clip.rs: 临时文件路径 unwrap → ok_or_else (防 Windows 中文路径 panic)

P1 修复:
- personality_detector.rs: Mutex lock unwrap → unwrap_or_else 防中毒传播
- pptx.rs: HashMap.get unwrap → expect (来自 keys() 迭代)

P2 修复:
- 4 处 SystemTime.unwrap → expect("system clock is valid")
- 4 处 dev_server URL.parse.unwrap → expect("hardcoded URL is valid")
- 9 处 nl_schedule Regex.unwrap → expect("static regex is valid")
- 5 处 data_masking Regex.unwrap → expect("static regex is valid")
- 2 处 pipeline/state Regex.unwrap → expect("static regex is valid")

全量测试通过: 719 passed, 0 failed
2026-04-19 08:38:09 +08:00

338 lines
10 KiB
Rust
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

//! Classroom Multi-Agent Chat
//!
//! Handles multi-agent conversation within the classroom context.
//! A single LLM call generates responses from multiple agent perspectives.
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use super::agents::AgentProfile;
/// A single chat message in the classroom
///
/// Serialized with camelCase keys for the frontend
/// (e.g. `agent_id` -> `agentId`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatMessage {
    /// Unique message ID (constructors produce `msg_<uuid>`)
    pub id: String,
    /// Agent profile ID of the sender (`"user"` for the human user)
    pub agent_id: String,
    /// Display name of the sender
    pub agent_name: String,
    /// Avatar (emoji) of the sender
    pub agent_avatar: String,
    /// Message content
    pub content: String,
    /// Unix timestamp (milliseconds)
    pub timestamp: i64,
    /// Role of the sender (e.g. `"user"`, or the agent's role rendered via `Display`)
    pub role: String,
    /// Theme color of the sender (hex string, e.g. `"#6B7280"`)
    pub color: String,
}
/// Chat state for a classroom session
///
/// Serialized with camelCase keys; `Default` yields an empty, inactive chat.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatState {
    /// All chat messages
    pub messages: Vec<ClassroomChatMessage>,
    /// Whether chat is active
    pub active: bool,
}
/// Request for generating a chat response
///
/// NOTE(review): unlike the other types in this module, this struct has no
/// `#[serde(rename_all = "camelCase")]`, so it (de)serializes snake_case keys
/// (`classroom_id`, `user_message`, ...) — confirm this matches the caller's
/// payload before changing it, since adding the attribute alters the wire format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassroomChatRequest {
    /// Classroom ID
    pub classroom_id: String,
    /// User's message
    pub user_message: String,
    /// Available agents
    pub agents: Vec<AgentProfile>,
    /// Current scene context (optional, for contextual responses)
    pub scene_context: Option<String>,
    /// Chat history for context
    pub history: Vec<ClassroomChatMessage>,
}
/// Response from multi-agent chat generation
///
/// Serialized with camelCase keys.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatResponse {
    /// Agent responses (may be 1-3 agents responding)
    pub responses: Vec<ClassroomChatMessage>,
}
impl ClassroomChatMessage {
    /// Build the message that represents the human user's input.
    ///
    /// Sender identity is the fixed pseudo-agent `"user"` with a neutral
    /// gray theme color; the timestamp is taken at construction time.
    pub fn user_message(content: &str) -> Self {
        let id = format!("msg_{}", Uuid::new_v4());
        Self {
            id,
            agent_id: String::from("user"),
            agent_name: String::from("You"),
            agent_avatar: String::from("👤"),
            role: String::from("user"),
            color: String::from("#6B7280"),
            content: content.to_owned(),
            timestamp: current_timestamp_millis(),
        }
    }

    /// Build a message attributed to the given agent profile.
    ///
    /// Identity fields (id, name, avatar, color) are copied from the profile;
    /// the role is rendered through its `Display` impl.
    pub fn agent_message(agent: &AgentProfile, content: &str) -> Self {
        Self {
            id: format!("msg_{}", Uuid::new_v4()),
            agent_id: agent.id.clone(),
            agent_name: agent.name.clone(),
            agent_avatar: agent.avatar.clone(),
            role: agent.role.to_string(),
            color: agent.color.clone(),
            content: content.to_owned(),
            timestamp: current_timestamp_millis(),
        }
    }
}
/// Build the LLM prompt for multi-agent chat response generation.
///
/// Produces a single prompt that lists the available agents, the prior
/// transcript, optional scene context and the latest student question, and
/// instructs the model to reply as 1-3 agents in a strict JSON array of
/// `{agentName, content}` objects.
pub fn build_chat_prompt(request: &ClassroomChatRequest) -> String {
    // One "- **name** (role): persona" bullet per available agent.
    let roster: Vec<String> = request
        .agents
        .iter()
        .map(|agent| format!("- **{}** ({}): {}", agent.name, agent.role, agent.persona))
        .collect();

    // Transcript of prior turns, or a placeholder when there are none.
    let transcript = if request.history.is_empty() {
        String::from("No previous messages.")
    } else {
        let lines: Vec<String> = request
            .history
            .iter()
            .map(|msg| format!("**{}**: {}", msg.agent_name, msg.content))
            .collect();
        lines.join("\n")
    };

    // Optional scene-context line; empty string when absent.
    let scene_line = match request.scene_context.as_deref() {
        Some(ctx) => format!("\n当前场景上下文:{}", ctx),
        None => String::new(),
    };

    format!(
        r#"你是一个课堂多智能体讨论的协调器。根据学生的问题选择1-3个合适的角色来回复。
## 可用角色
{agents}
## 对话历史
{history}
{scene_hint}
## 学生最新问题
{question}
## 回复规则
1. 选择最合适的1-3个角色来回复
2. 老师角色应该给出权威、清晰的解释
3. 助教角色可以补充代码示例或图表说明
4. 学生角色可以表达理解、提出追问或分享自己的理解
5. 每个角色的回复应该符合其个性设定
6. 回复应该自然、有教育意义
## 输出格式
你必须返回合法的JSON数组每个元素包含
```json
[
{{
"agentName": "角色名",
"content": "回复内容"
}}
]
```
只返回JSON数组不要包含其他文字。"#,
        agents = roster.join("\n"),
        history = transcript,
        scene_hint = scene_line,
        question = request.user_message,
    )
}
/// Parse multi-agent responses from LLM output.
///
/// Extracts agent messages from the LLM's JSON response (a JSON array of
/// `{agentName, content}` objects, possibly wrapped in a markdown fence).
/// Entries whose `agentName` matches no known agent are skipped.
///
/// Falls back to a single message holding the cleaned raw output when
/// parsing yields nothing: attributed to the teacher agent when one exists,
/// otherwise to the first agent. Returns an empty vec when `agents` is empty.
pub fn parse_chat_responses(
    llm_output: &str,
    agents: &[AgentProfile],
) -> Vec<ClassroomChatMessage> {
    // Strip markdown fences / surrounding prose before parsing.
    let json_text = extract_json_array(llm_output);
    // Happy path: a JSON array of {agentName, content} objects.
    if let Ok(parsed) = serde_json::from_str::<Vec<serde_json::Value>>(&json_text) {
        let mut messages = Vec::new();
        for item in &parsed {
            if let (Some(name), Some(content)) = (
                item.get("agentName").and_then(|v| v.as_str()),
                item.get("content").and_then(|v| v.as_str()),
            ) {
                // Silently skip names that don't match a known agent.
                if let Some(agent) = agents.iter().find(|a| a.name == name) {
                    messages.push(ClassroomChatMessage::agent_message(agent, content));
                }
            }
        }
        if !messages.is_empty() {
            return messages;
        }
    }
    // Fallback: attribute the cleaned raw output to a single agent, preferring
    // the teacher. Fix: previously the first-agent path passed the UNCLEANED
    // output while the teacher path cleaned it — both now clean consistently.
    let fallback = agents
        .iter()
        .find(|a| a.role == super::agents::AgentRole::Teacher)
        .or_else(|| agents.first());
    match fallback {
        Some(agent) => vec![ClassroomChatMessage::agent_message(
            agent,
            &clean_fallback_response(llm_output),
        )],
        None => vec![],
    }
}
/// Extract JSON array from text (handles markdown code blocks)
///
/// Tries, in order: the body of a ```json fenced block, then the widest
/// `[` .. `]` span in the text. Returns the input unchanged when neither
/// form is found.
fn extract_json_array(text: &str) -> String {
    // Prefer the contents of a ```json fenced block when one is present.
    if let Some(fence) = text.find("```json") {
        let body = &text[fence + 7..];
        if let Some(close) = body.find("```") {
            return body[..close].trim().to_owned();
        }
    }
    // Otherwise take the widest bracketed span, if any.
    match (text.find('['), text.rfind(']')) {
        (Some(open), Some(close)) if close > open => text[open..=close].to_owned(),
        _ => text.to_owned(),
    }
}
/// Clean up fallback response (remove JSON artifacts if present)
///
/// If the LLM replied with JSON despite upstream parsing having failed
/// (e.g. unknown agent names), salvage the human-readable text: the
/// `content` field of the first object in an array, or of a bare object.
/// Otherwise return the trimmed input unchanged.
fn clean_fallback_response(text: &str) -> String {
    let trimmed = text.trim();
    if trimmed.starts_with('[') || trimmed.starts_with('{') {
        if let Ok(value) = serde_json::from_str::<serde_json::Value>(trimmed) {
            // Fix: the `{` prefix was checked before, but a bare object can
            // never parse as Vec<Value>, so that branch was dead. Treat a
            // bare object like a one-element array.
            let first = if value.is_array() {
                value.get(0).cloned()
            } else {
                Some(value)
            };
            if let Some(content) = first
                .as_ref()
                .and_then(|v| v.get("content"))
                .and_then(|v| v.as_str())
            {
                return content.to_string();
            }
        }
    }
    trimmed.to_string()
}
/// Current wall-clock time as Unix milliseconds.
///
/// Panics only if the system clock reports a time before the Unix epoch,
/// which indicates a misconfigured host.
fn current_timestamp_millis() -> i64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is valid");
    since_epoch.as_millis() as i64
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::generation::agents::{AgentProfile, AgentRole};

    /// Fixture: a teacher ("陈老师") and a student ("李思").
    fn test_agents() -> Vec<AgentProfile> {
        let teacher = AgentProfile {
            id: "t1".to_string(),
            name: "陈老师".to_string(),
            role: AgentRole::Teacher,
            persona: "Test teacher".to_string(),
            avatar: "👩‍🏫".to_string(),
            color: "#4F46E5".to_string(),
            allowed_actions: Vec::new(),
            priority: 10,
        };
        let student = AgentProfile {
            id: "s1".to_string(),
            name: "李思".to_string(),
            role: AgentRole::Student,
            persona: "Curious student".to_string(),
            avatar: "🤔".to_string(),
            color: "#EF4444".to_string(),
            allowed_actions: Vec::new(),
            priority: 5,
        };
        vec![teacher, student]
    }

    #[test]
    fn test_parse_chat_responses_valid_json() {
        let roster = test_agents();
        let llm_output = r#"```json
[
{"agentName": "陈老师", "content": "好问题!让我来解释一下..."},
{"agentName": "李思", "content": "原来如此,那如果..."}
]
```"#;
        let parsed = parse_chat_responses(llm_output, &roster);
        assert_eq!(parsed.len(), 2);
        assert_eq!(parsed[0].agent_name, "陈老师");
        assert_eq!(parsed[1].agent_name, "李思");
    }

    #[test]
    fn test_parse_chat_responses_fallback() {
        let roster = test_agents();
        let plain = "这是一个关于Rust的好问题。所有权意味着每个值只有一个主人。";
        let parsed = parse_chat_responses(plain, &roster);
        assert_eq!(parsed.len(), 1);
        // Non-JSON output is attributed to the teacher agent.
        assert_eq!(parsed[0].agent_name, "陈老师");
    }

    #[test]
    fn test_build_chat_prompt() {
        let roster = test_agents();
        let request = ClassroomChatRequest {
            classroom_id: String::from("test"),
            user_message: String::from("什么是所有权?"),
            agents: roster,
            scene_context: Some(String::from("Rust 所有权核心规则")),
            history: Vec::new(),
        };
        let prompt = build_chat_prompt(&request);
        for expected in ["陈老师", "什么是所有权?", "Rust 所有权核心规则"] {
            assert!(prompt.contains(expected));
        }
    }

    #[test]
    fn test_user_message() {
        let msg = ClassroomChatMessage::user_message("Hello");
        assert_eq!(msg.agent_name, "You");
        assert_eq!(msg.role, "user");
    }

    #[test]
    fn test_agent_message() {
        let roster = test_agents();
        let msg = ClassroomChatMessage::agent_message(&roster[0], "Test");
        assert_eq!(msg.agent_name, "陈老师");
        assert_eq!(msg.role, "teacher");
    }
}