fix(kernel): 使用 Kernel 配置的 model 而非 Agent 持久化的旧值
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

问题:在"模型与 API"页面切换模型后,对话仍使用旧模型
根因:Agent 配置从数据库恢复,其 model 字段优先于 Kernel 配置

修复:
- kernel.rs: send_message/send_message_stream 始终使用 Kernel 的当前 model
- openai.rs: 添加 User-Agent header 解决 Coding Plan API 405 错误
- kernel_commands.rs: 添加详细调试日志便于追踪配置传递
- troubleshooting.md: 记录此问题的排查过程和解决方案

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-03-23 22:56:06 +08:00
parent 86e79b4ad1
commit ae4bf815e3
5 changed files with 415 additions and 40 deletions

View File

@@ -109,20 +109,36 @@ impl Kernel {
/// Send a message to an agent
pub async fn send_message(&self, agent_id: &AgentId, message: String) -> Result<MessageResponse> {
let _agent = self.registry.get(agent_id)
let agent_config = self.registry.get(agent_id)
.ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
// Create or get session
let session_id = self.memory.create_session(agent_id).await?;
// Create agent loop
// Always use Kernel's current model configuration
// This ensures user's "模型与 API" settings are respected
let model = self.config.model().to_string();
eprintln!("[Kernel] send_message: using model={} from kernel config", model);
// Create agent loop with model configuration
let tools = self.create_tool_registry();
let loop_runner = AgentLoop::new(
*agent_id,
self.driver.clone(),
tools,
self.memory.clone(),
);
)
.with_model(&model)
.with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()));
// Add system prompt if configured
let loop_runner = if let Some(ref prompt) = agent_config.system_prompt {
loop_runner.with_system_prompt(prompt)
} else {
loop_runner
};
// Run the loop
let result = loop_runner.run(session_id, message).await?;
@@ -140,20 +156,36 @@ impl Kernel {
agent_id: &AgentId,
message: String,
) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
let _agent = self.registry.get(agent_id)
let agent_config = self.registry.get(agent_id)
.ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
// Create session
let session_id = self.memory.create_session(agent_id).await?;
// Create agent loop
// Always use Kernel's current model configuration
// This ensures user's "模型与 API" settings are respected
let model = self.config.model().to_string();
eprintln!("[Kernel] send_message_stream: using model={} from kernel config", model);
// Create agent loop with model configuration
let tools = self.create_tool_registry();
let loop_runner = AgentLoop::new(
*agent_id,
self.driver.clone(),
tools,
self.memory.clone(),
);
)
.with_model(&model)
.with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()));
// Add system prompt if configured
let loop_runner = if let Some(ref prompt) = agent_config.system_prompt {
loop_runner.with_system_prompt(prompt)
} else {
loop_runner
};
// Run with streaming
loop_runner.run_streaming(session_id, message).await
@@ -169,6 +201,11 @@ impl Kernel {
self.events.publish(Event::KernelShutdown);
Ok(())
}
/// Get the kernel configuration.
///
/// Returns a shared borrow of the `KernelConfig` held by this `Kernel`,
/// allowing callers (e.g. command handlers) to read the current model /
/// max_tokens / temperature settings without cloning.
pub fn config(&self) -> &KernelConfig {
&self.config
}
}
/// Response from sending a message