fix(presentation): 修复 presentation 模块类型错误和语法问题
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

- 创建 types.ts 定义完整的类型系统
- 重写 DocumentRenderer.tsx 修复语法错误
- 重写 QuizRenderer.tsx 修复语法错误
- 重写 PresentationContainer.tsx 添加类型守卫
- 重写 TypeSwitcher.tsx 修复类型引用
- 更新 index.ts 移除不存在的 ChartRenderer 导出

审计结果:
- 类型检查: 通过
- 单元测试: 222 passed
- 构建: 成功
This commit is contained in:
iven
2026-03-26 17:19:28 +08:00
parent d0c6319fc1
commit b7f3d94950
71 changed files with 15896 additions and 1133 deletions

View File

@@ -10,6 +10,7 @@ use crate::stream::StreamChunk;
use crate::tool::{ToolRegistry, ToolContext, SkillExecutor};
use crate::tool::builtin::PathValidator;
use crate::loop_guard::LoopGuard;
use crate::growth::GrowthIntegration;
use zclaw_memory::MemoryStore;
/// Agent loop runner
@@ -26,6 +27,8 @@ pub struct AgentLoop {
temperature: f32,
skill_executor: Option<Arc<dyn SkillExecutor>>,
path_validator: Option<PathValidator>,
/// Growth system integration (optional)
growth: Option<GrowthIntegration>,
}
impl AgentLoop {
@@ -47,6 +50,7 @@ impl AgentLoop {
temperature: 0.7,
skill_executor: None,
path_validator: None,
growth: None,
}
}
@@ -86,6 +90,22 @@ impl AgentLoop {
self
}
/// Enable growth system integration
pub fn with_growth(mut self, growth: GrowthIntegration) -> Self {
self.growth = Some(growth);
self
}
/// Install the growth system on an already-constructed loop (in place).
///
/// Any previously configured integration is dropped and replaced.
pub fn set_growth(&mut self, growth: GrowthIntegration) {
    // `replace` returns the old value; we intentionally discard it.
    let _ = self.growth.replace(growth);
}
/// Borrow the configured growth integration, if one has been set.
///
/// Returns `None` when neither [`with_growth`] nor [`set_growth`]
/// has been called on this loop.
pub fn growth(&self) -> Option<&GrowthIntegration> {
    self.growth.as_ref()
}
/// Create tool context for tool execution
fn create_tool_context(&self, session_id: SessionId) -> ToolContext {
ToolContext {
@@ -108,35 +128,43 @@ impl AgentLoop {
/// Implements complete agent loop: LLM → Tool Call → Tool Result → LLM → Final Response
pub async fn run(&self, session_id: SessionId, input: String) -> Result<AgentLoopResult> {
// Add user message to session
let user_message = Message::user(input);
let user_message = Message::user(input.clone());
self.memory.append_message(&session_id, &user_message).await?;
// Get all messages for context
let mut messages = self.memory.get_messages(&session_id).await?;
// Enhance system prompt with growth memories
let enhanced_prompt = if let Some(ref growth) = self.growth {
let base = self.system_prompt.as_deref().unwrap_or("");
growth.enhance_prompt(&self.agent_id, base, &input).await?
} else {
self.system_prompt.clone().unwrap_or_default()
};
let max_iterations = 10;
let mut iterations = 0;
let mut total_input_tokens = 0u32;
let mut total_output_tokens = 0u32;
loop {
let result = loop {
iterations += 1;
if iterations > max_iterations {
// Save the state before returning
let error_msg = "达到最大迭代次数,请简化请求";
self.memory.append_message(&session_id, &Message::assistant(error_msg)).await?;
return Ok(AgentLoopResult {
break AgentLoopResult {
response: error_msg.to_string(),
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations,
});
};
}
// Build completion request
let request = CompletionRequest {
model: self.model.clone(),
system: self.system_prompt.clone(),
system: Some(enhanced_prompt.clone()),
messages: messages.clone(),
tools: self.tools.definitions(),
max_tokens: Some(self.max_tokens),
@@ -173,12 +201,12 @@ impl AgentLoop {
// Save final assistant message
self.memory.append_message(&session_id, &Message::assistant(&text)).await?;
return Ok(AgentLoopResult {
break AgentLoopResult {
response: text,
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations,
});
};
}
// There are tool calls - add assistant message with tool calls to history
@@ -204,7 +232,18 @@ impl AgentLoop {
}
// Continue the loop - LLM will process tool results and generate final response
};
// Process conversation for memory extraction (post-conversation)
if let Some(ref growth) = self.growth {
if let Ok(all_messages) = self.memory.get_messages(&session_id).await {
if let Err(e) = growth.process_conversation(&self.agent_id, &all_messages, session_id.clone()).await {
tracing::warn!("[AgentLoop] Growth processing failed: {}", e);
}
}
}
Ok(result)
}
/// Run the agent loop with streaming
@@ -217,12 +256,20 @@ impl AgentLoop {
let (tx, rx) = mpsc::channel(100);
// Add user message to session
let user_message = Message::user(input);
let user_message = Message::user(input.clone());
self.memory.append_message(&session_id, &user_message).await?;
// Get all messages for context
let messages = self.memory.get_messages(&session_id).await?;
// Enhance system prompt with growth memories
let enhanced_prompt = if let Some(ref growth) = self.growth {
let base = self.system_prompt.as_deref().unwrap_or("");
growth.enhance_prompt(&self.agent_id, base, &input).await?
} else {
self.system_prompt.clone().unwrap_or_default()
};
// Clone necessary data for the async task
let session_id_clone = session_id.clone();
let memory = self.memory.clone();
@@ -231,7 +278,6 @@ impl AgentLoop {
let skill_executor = self.skill_executor.clone();
let path_validator = self.path_validator.clone();
let agent_id = self.agent_id.clone();
let system_prompt = self.system_prompt.clone();
let model = self.model.clone();
let max_tokens = self.max_tokens;
let temperature = self.temperature;
@@ -259,7 +305,7 @@ impl AgentLoop {
// Build completion request
let request = CompletionRequest {
model: model.clone(),
system: system_prompt.clone(),
system: Some(enhanced_prompt.clone()),
messages: messages.clone(),
tools: tools.definitions(),
max_tokens: Some(max_tokens),