feat: 实现循环防护和安全验证功能
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

refactor(loop_guard): 为LoopGuard添加Clone派生
feat(capabilities): 实现CapabilityManager.validate()安全验证
fix(agentStore): 添加token用量追踪
chore: 删除未实现的Predictor/Lead HAND.toml文件
style(Credits): 移除假数据并标注开发中状态
refactor(Skills): 动态加载技能卡片
perf(configStore): 为定时任务添加localStorage降级
docs: 更新功能文档和版本变更记录
This commit is contained in:
iven
2026-03-27 07:56:53 +08:00
parent 0d4fa96b82
commit eed347e1a6
14 changed files with 724 additions and 476 deletions

View File

@@ -85,6 +85,9 @@ interface ChatState {
isLoading: boolean;
currentModel: string;
sessionKey: string | null;
// Token usage tracking
totalInputTokens: number;
totalOutputTokens: number;
addMessage: (message: Message) => void;
updateMessage: (id: string, updates: Partial<Message>) => void;
@@ -97,6 +100,8 @@ interface ChatState {
newConversation: () => void;
switchConversation: (id: string) => void;
deleteConversation: (id: string) => void;
addTokenUsage: (inputTokens: number, outputTokens: number) => void;
getTotalTokens: () => { input: number; output: number; total: number };
searchSkills: (query: string) => { results: Array<{ id: string; name: string; description: string }>; totalAvailable: number };
}
@@ -194,8 +199,10 @@ export const useChatStore = create<ChatState>()(
isLoading: false,
currentModel: 'glm-4-flash',
sessionKey: null,
totalInputTokens: 0,
totalOutputTokens: 0,
addMessage: (message) =>
addMessage: (message: Message) =>
set((state) => ({ messages: [...state.messages, message] })),
updateMessage: (id, updates) =>
@@ -432,7 +439,7 @@ export const useChatStore = create<ChatState>()(
};
set((state) => ({ messages: [...state.messages, handMsg] }));
},
onComplete: () => {
onComplete: (inputTokens?: number, outputTokens?: number) => {
const state = get();
// Save conversation to persist across refresh
@@ -448,6 +455,11 @@ export const useChatStore = create<ChatState>()(
),
});
// Track token usage if provided (KernelClient provides these)
if (inputTokens !== undefined && outputTokens !== undefined) {
get().addTokenUsage(inputTokens, outputTokens);
}
// Async memory extraction after stream completes
const msgs = get().messages
.filter(m => m.role === 'user' || m.role === 'assistant')
@@ -518,6 +530,17 @@ export const useChatStore = create<ChatState>()(
}
},
// Fold a completed request's token counts into the session-wide running totals.
addTokenUsage: (inputTokens: number, outputTokens: number) =>
  set(({ totalInputTokens, totalOutputTokens }) => ({
    totalInputTokens: totalInputTokens + inputTokens,
    totalOutputTokens: totalOutputTokens + outputTokens,
  })),
// Report the cumulative input/output token usage plus their combined total.
getTotalTokens: () => {
  const snapshot = get();
  const input = snapshot.totalInputTokens;
  const output = snapshot.totalOutputTokens;
  return { input, output, total: input + output };
},
searchSkills: (query: string) => {
const discovery = getSkillDiscovery();
const result = discovery.searchSkills(query);