/*
 * NOTE(review): the following appears to be pasted web-page/commit residue,
 * not source code — preserved here as a comment so the file still compiles.
 *
 * DeerFlow frontend visual overhaul: card-style input box, dropdown mode
 * selector, colored quick-action chips, minimal top bar, warm gray color
 * system, DeerFlow-style sidebar, reasoning block / tool call chain / task
 * progress visualization, streaming text, model selector, suggestion chips,
 * resizable artifact panel, virtualized message list.
 * Bug fixes: stream hang (GatewayClient onclose code 1000 now calls
 * onComplete), WebView2 textarea border CSS override, gateway stream event
 * handling (response/phase/tool_call types).
 * Intelligence client: unified client with fallback drivers
 * (compactor/heartbeat/identity/memory/reflection), gateway API types.
 */
/**
 * Intelligence Layer - LocalStorage Compactor Fallback
 *
 * Provides rule-based compaction for browser/dev environment.
 */
import type { CompactableMessage, CompactionResult, CompactionCheck, CompactionConfig } from '../intelligence-backend';
export const fallbackCompactor = {
|
|
async estimateTokens(text: string): Promise<number> {
|
|
// Simple heuristic: ~4 chars per token for English, ~1.5 for CJK
|
|
const cjkChars = (text.match(/[\u4e00-\u9fff\u3040-\u30ff]/g) ?? []).length;
|
|
const otherChars = text.length - cjkChars;
|
|
return Math.ceil(cjkChars * 1.5 + otherChars / 4);
|
|
},
|
|
|
|
async estimateMessagesTokens(messages: CompactableMessage[]): Promise<number> {
|
|
let total = 0;
|
|
for (const m of messages) {
|
|
total += await fallbackCompactor.estimateTokens(m.content);
|
|
}
|
|
return total;
|
|
},
|
|
|
|
async checkThreshold(
|
|
messages: CompactableMessage[],
|
|
config?: CompactionConfig
|
|
): Promise<CompactionCheck> {
|
|
const threshold = config?.soft_threshold_tokens ?? 15000;
|
|
const currentTokens = await fallbackCompactor.estimateMessagesTokens(messages);
|
|
|
|
return {
|
|
should_compact: currentTokens >= threshold,
|
|
current_tokens: currentTokens,
|
|
threshold,
|
|
urgency: currentTokens >= (config?.hard_threshold_tokens ?? 20000) ? 'hard' :
|
|
currentTokens >= threshold ? 'soft' : 'none',
|
|
};
|
|
},
|
|
|
|
async compact(
|
|
messages: CompactableMessage[],
|
|
_agentId: string,
|
|
_conversationId?: string,
|
|
config?: CompactionConfig
|
|
): Promise<CompactionResult> {
|
|
// Simple rule-based compaction: keep last N messages
|
|
const keepRecent = config?.keep_recent_messages ?? 10;
|
|
const retained = messages.slice(-keepRecent);
|
|
|
|
return {
|
|
compacted_messages: retained,
|
|
summary: `[Compacted ${messages.length - retained.length} earlier messages]`,
|
|
original_count: messages.length,
|
|
retained_count: retained.length,
|
|
flushed_memories: 0,
|
|
tokens_before_compaction: await fallbackCompactor.estimateMessagesTokens(messages),
|
|
tokens_after_compaction: await fallbackCompactor.estimateMessagesTokens(retained),
|
|
};
|
|
},
|
|
};
|