fix(intelligence): 精确化 dead_code 标注并实现 LLM 上下文压缩
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

- 将 intelligence/llm/memory/browser 模块的 dead_code 注释从模糊的
  "reserved for future" 改为明确说明 Tauri invoke_handler 运行时注册机制
- 为 identity.rs 中 3 个真正未使用的方法添加 #[allow(dead_code)]
- 实现 compactor use_llm: true 功能:新增 compact_with_llm 方法和
  compactor_compact_llm Tauri 命令,支持 LLM 驱动的对话摘要生成
- 将 pipeline_commands.rs 中 40+ 处 println!/eprintln! 调试输出替换为
  tracing::debug!/warn!/error! 结构化日志
- 移除 intelligence/mod.rs 中不必要的 #[allow(unused_imports)]
This commit is contained in:
iven
2026-03-27 00:43:14 +08:00
parent c3996573aa
commit 9a77fd4645
14 changed files with 433 additions and 265 deletions

View File

@@ -11,7 +11,10 @@
//!
//! NOTE: Some configuration methods are reserved for future dynamic adjustment.
#![allow(dead_code)] // Configuration methods reserved for future dynamic compaction tuning
// NOTE: #[tauri::command] functions are registered via invoke_handler! at runtime,
// which the Rust compiler does not track as "use". Module-level allow required
// for Tauri-commanded functions. Genuinely unused methods annotated individually.
#![allow(dead_code)]
use serde::{Deserialize, Serialize};
use regex::Regex;
@@ -95,6 +98,15 @@ pub struct CompactionCheck {
pub urgency: CompactionUrgency,
}
/// Configuration for LLM-based summary generation
///
/// Supplied per-call from the frontend; its fields are copied 1:1 into a
/// `crate::llm::LlmConfig` before the summary request is made (see
/// `generate_llm_summary`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmSummaryConfig {
/// Provider identifier, interpreted by `crate::llm::LlmClient`.
pub provider: String,
/// Provider API key. NOTE(review): this struct derives `Serialize`, so the
/// key can round-trip through IPC/serialized state — confirm that is intended.
pub api_key: String,
/// Optional custom endpoint URL; the provider default is used when `None`.
pub endpoint: Option<String>,
/// Optional model name; the provider default is used when `None`.
pub model: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CompactionUrgency {
@@ -232,7 +244,139 @@ impl ContextCompactor {
}
}
/// Phase 2: Rule-based summary generation
/// Generate summary using LLM when configured
///
/// Splits `messages` into an "old" prefix (summarized) and the most recent
/// `keep_recent_messages` suffix (retained verbatim), then prepends a single
/// system message carrying the summary.
///
/// Falls back to rule-based summary if:
/// - `use_llm` is false
/// - LLM config is not provided
/// - LLM call fails and `llm_fallback_to_rules` is true
///
/// If the LLM call fails and `llm_fallback_to_rules` is false, the failure is
/// surfaced in-band as the summary text so the caller can see it.
pub async fn compact_with_llm(
    &self,
    messages: &[CompactableMessage],
    _agent_id: &str,
    _conversation_id: Option<&str>,
    llm_config: Option<&LlmSummaryConfig>,
) -> CompactionResult {
    let tokens_before_compaction = estimate_messages_tokens(messages);

    // Keep the most recent messages verbatim; everything older gets summarized.
    let keep_count = self.config.keep_recent_messages.min(messages.len());
    let split_index = messages.len().saturating_sub(keep_count);
    let (old_messages, recent_messages) = messages.split_at(split_index);

    // One decision point instead of nested if/match: LLM path only when both
    // the flag is set and a config was actually provided.
    let summary = match (self.config.use_llm, llm_config) {
        (true, Some(config)) => match self.generate_llm_summary(old_messages, config).await {
            Ok(s) => s,
            Err(e) => {
                tracing::warn!(
                    "[Compactor] LLM summary failed, falling back to rules: {}",
                    e
                );
                if self.config.llm_fallback_to_rules {
                    self.generate_summary(old_messages)
                } else {
                    format!("[摘要生成失败: {}]", e)
                }
            }
        },
        (true, None) => {
            tracing::debug!("[Compactor] use_llm=true but no LLM config provided, using rules");
            self.generate_summary(old_messages)
        }
        (false, _) => self.generate_summary(old_messages),
    };

    let summary_message = CompactableMessage {
        role: "system".to_string(),
        content: summary.clone(),
        id: Some(format!("compaction_{}", chrono::Utc::now().timestamp())),
        timestamp: Some(chrono::Utc::now().to_rfc3339()),
    };

    let mut compacted_messages = vec![summary_message];
    compacted_messages.extend_from_slice(recent_messages);
    let tokens_after_compaction = estimate_messages_tokens(&compacted_messages);

    CompactionResult {
        summary,
        original_count: messages.len(),
        // FIX: was `split_index + 1`, which counts the *summarized-away*
        // messages rather than the retained ones. The output always holds the
        // summary plus `keep_count` recent messages.
        retained_count: compacted_messages.len(),
        flushed_memories: 0,
        tokens_before_compaction,
        tokens_after_compaction,
        compacted_messages,
    }
}
/// Generate summary using LLM API
///
/// Flattens `messages` into a labeled transcript, truncates it to fit the
/// LLM context window, and asks the model for a ≤200-character bullet summary.
///
/// # Errors
/// Propagates the error string from `LlmClient::complete`.
async fn generate_llm_summary(
    &self,
    messages: &[CompactableMessage],
    config: &LlmSummaryConfig,
) -> Result<String, String> {
    if messages.is_empty() {
        return Ok("[对话开始]".to_string());
    }

    // Build conversation text for LLM
    let mut conversation_text = String::new();
    for msg in messages {
        let role_label = match msg.role.as_str() {
            "user" => "用户",
            "assistant" => "助手",
            "system" => "系统",
            _ => &msg.role,
        };
        conversation_text.push_str(&format!("{}: {}\n", role_label, msg.content));
    }

    // Truncate if too long for LLM context. `max_chars` is a byte budget;
    // FIX: the naive `[len - max_chars..]` slice panics if that byte index
    // falls inside a multibyte character (the transcript contains Chinese
    // role labels), so advance to the next char boundary first.
    let max_chars = 12000;
    if conversation_text.len() > max_chars {
        let mut start = conversation_text.len() - max_chars;
        while !conversation_text.is_char_boundary(start) {
            start += 1;
        }
        conversation_text = format!("...(截断)...\n{}", &conversation_text[start..]);
    }

    let prompt = format!(
        "请简洁地总结以下对话的关键内容,包括:\n\
        1. 讨论的主要话题\n\
        2. 达成的关键结论\n\
        3. 重要的技术细节或决策\n\n\
        对话内容:\n{}\n\n\
        请用简洁的中文要点格式输出控制在200字以内。",
        conversation_text
    );

    let llm_messages = vec![
        crate::llm::LlmMessage {
            role: "system".to_string(),
            content: "你是一个对话摘要助手。请简洁地总结对话的关键信息。".to_string(),
        },
        crate::llm::LlmMessage {
            role: "user".to_string(),
            content: prompt,
        },
    ];

    let llm_config = crate::llm::LlmConfig {
        provider: config.provider.clone(),
        api_key: config.api_key.clone(),
        endpoint: config.endpoint.clone(),
        model: config.model.clone(),
    };

    let client = crate::llm::LlmClient::new(llm_config);
    let response = client.complete(llm_messages).await?;
    Ok(response.content)
}
/// Phase 2: Rule-based summary generation (fallback)
fn generate_summary(&self, messages: &[CompactableMessage]) -> String {
if messages.is_empty() {
return "[对话开始]".to_string();
@@ -357,11 +501,13 @@ impl ContextCompactor {
}
/// Get current configuration
///
/// Borrows the compactor's active `CompactionConfig`; callers clone if they
/// need an owned copy.
#[allow(dead_code)] // Reserved: no Tauri command yet
pub fn get_config(&self) -> &CompactionConfig {
&self.config
}
/// Update configuration
///
/// Wholesale replacement: `updates` overwrites the entire config, it is not
/// merged field-by-field.
#[allow(dead_code)] // Reserved: no Tauri command yet
pub fn update_config(&mut self, updates: CompactionConfig) {
self.config = updates;
}
@@ -403,6 +549,21 @@ pub fn compactor_compact(
compactor.compact(&messages, &agent_id, conversation_id.as_deref())
}
/// Execute compaction with optional LLM-based summary
///
/// Tauri command entry point: builds a `ContextCompactor` from the optional
/// `compaction_config` (defaults apply when `None`) and delegates to
/// `ContextCompactor::compact_with_llm`.
#[tauri::command]
pub async fn compactor_compact_llm(
    messages: Vec<CompactableMessage>,
    agent_id: String,
    conversation_id: Option<String>,
    compaction_config: Option<CompactionConfig>,
    llm_config: Option<LlmSummaryConfig>,
) -> CompactionResult {
    ContextCompactor::new(compaction_config)
        .compact_with_llm(
            &messages,
            &agent_id,
            conversation_id.as_deref(),
            llm_config.as_ref(),
        )
        .await
}
#[cfg(test)]
mod tests {
use super::*;