fix(identity): 接通身份信号提取与持久化 — 对话中起名跨会话记忆
Some checks failed
CI / Rust Check (push) Has been cancelled
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

根因: 记忆提取管道(COMBINED_EXTRACTION_PROMPT)提取5种画像信号
但无身份信号(agent_name/user_name),不存在从对话到AgentConfig.name
或IdentityFiles的写回路径。

修复内容:
- ProfileSignals 增加 agent_name/user_name 字段
- COMBINED_EXTRACTION_PROMPT 增加身份提取指令
- parse_profile_signals 解析新字段 + 回退推断
- GrowthIntegration 存储身份信号到 VikingStorage
- post_conversation_hook 写回 soul.md + emit Tauri 事件
- streamStore 规则化检测 agent 名字并更新 AgentConfig.name
- cold-start-mapper 新增 detectAgentNameSuggestion

链路: 对话→提取→VikingStorage→hook写回soul.md→事件→前端刷新
This commit is contained in:
iven
2026-04-23 09:20:35 +08:00
parent 17a7a36608
commit 08812e541c
7 changed files with 431 additions and 22 deletions

View File

@@ -34,6 +34,8 @@ import {
} from './conversationStore';
import { useMessageStore } from './messageStore';
import { useArtifactStore } from './artifactStore';
import { llmSuggest } from '../../lib/llm-service';
import { detectNameSuggestion, detectAgentNameSuggestion } from '../../lib/cold-start-mapper';
const log = createLogger('StreamStore');
@@ -371,7 +373,30 @@ function createCompleteHandler(
.map(m => ({ role: m.role, content: m.content }));
const convId = useConversationStore.getState().currentConversationId;
getMemoryExtractor().extractFromConversation(filtered, agentId, convId ?? undefined)
.then(() => {
.then(async () => {
// Detect name preference from last user message (e.g. "叫我小马")
const lastUserMsg = [...msgs].reverse().find(m => m.role === 'user');
const detectedName = lastUserMsg ? detectNameSuggestion(lastUserMsg.content) : undefined;
if (detectedName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { userName: detectedName });
log.info(`Updated userName to "${detectedName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected userName:', e);
}
}
// Detect agent name change (e.g. "叫你小马", "以后叫你小马")
const detectedAgentName = lastUserMsg ? detectAgentNameSuggestion(lastUserMsg.content) : undefined;
if (detectedAgentName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { name: detectedAgentName });
log.info(`Updated agent name to "${detectedAgentName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected agent name:', e);
}
}
if (typeof window !== 'undefined') {
window.dispatchEvent(new CustomEvent('zclaw:agent-profile-updated', {
detail: { agentId }
@@ -391,15 +416,17 @@ function createCompleteHandler(
}
});
// Follow-up suggestions
// Follow-up suggestions (async LLM call with keyword fallback)
const latestMsgs = chat.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === assistantId);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
set({ suggestions });
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
};
}
@@ -410,6 +437,8 @@ export interface StreamState {
isLoading: boolean;
chatMode: ChatModeType;
suggestions: string[];
/** Whether LLM-generated suggestions are being fetched. */
suggestionsLoading: boolean;
/** Run ID of the currently active stream (null when idle). */
activeRunId: string | null;
@@ -425,6 +454,7 @@ export interface StreamState {
// Suggestions
setSuggestions: (suggestions: string[]) => void;
setSuggestionsLoading: (loading: boolean) => void;
// Skill search
searchSkills: (query: string) => {
@@ -440,7 +470,7 @@ export interface StreamState {
// Follow-up suggestion generator
// ---------------------------------------------------------------------------
function generateFollowUpSuggestions(content: string): string[] {
function generateKeywordFallback(content: string): string[] {
const suggestions: string[] = [];
const lower = content.toLowerCase();
@@ -473,6 +503,148 @@ function generateFollowUpSuggestions(content: string): string[] {
return suggestions;
}
/**
 * Parse an LLM response into an array of suggestion strings (max 3).
 *
 * Accepts, in order of preference:
 *  1. a raw JSON array of strings,
 *  2. a markdown-fenced JSON array (```json ... ```),
 *  3. a JSON array embedded in surrounding prose,
 *  4. a plain newline list with `-`/`*`/numbered markers (last resort).
 *
 * Returns an empty array when nothing usable can be extracted.
 */
function parseSuggestionResponse(raw: string): string[] {
  // Shared coercion: keep non-empty strings, trimmed, capped at 3.
  // (Previously duplicated verbatim in both JSON branches.)
  const coerce = (parsed: unknown[]): string[] =>
    parsed
      .filter((item): item is string => typeof item === 'string' && item.trim().length > 0)
      .map(item => item.trim())
      .slice(0, 3);

  let cleaned = raw.trim();

  // Strip markdown code fences.
  cleaned = cleaned.replace(/^```(?:json)?\s*\n?/i, '');
  cleaned = cleaned.replace(/\n?```\s*$/i, '');
  cleaned = cleaned.trim();

  // Direct JSON parse.
  try {
    const parsed: unknown = JSON.parse(cleaned);
    if (Array.isArray(parsed)) {
      return coerce(parsed);
    }
  } catch { /* fall through */ }

  // Extract a JSON array from surrounding text.
  // Non-greedy: a suggestion containing ']' truncates the match and the
  // parse fails, which safely degrades to the line-split fallback below.
  const arrayMatch = cleaned.match(/\[[\s\S]*?\]/);
  if (arrayMatch) {
    try {
      const parsed: unknown = JSON.parse(arrayMatch[0]);
      if (Array.isArray(parsed)) {
        return coerce(parsed);
      }
    } catch { /* fall through */ }
  }

  // Last resort: split by newlines, strip list markers, drop long lines.
  const lines = cleaned
    .split(/\n/)
    .map(l => l.replace(/^[-*\d.)\]]+\s*/, '').trim())
    .filter(l => l.length > 0 && l.length < 60);
  if (lines.length > 0) {
    return lines.slice(0, 3);
  }

  return [];
}
/**
 * Generate contextual follow-up suggestions via LLM.
 *
 * Routes through the SaaS relay when the persisted connection mode is
 * 'saas', otherwise through the local kernel adapter
 * (GatewayLLMAdapter → agent_chat). On any failure — or when the LLM
 * returns nothing parseable — falls back to keyword-based suggestions
 * derived from the last assistant message.
 *
 * Always clears `suggestionsLoading` before resolving.
 */
async function generateLLMSuggestions(
  messages: Array<{ role: string; content: string }>,
  set: (partial: Partial<StreamState>) => void,
): Promise<void> {
  // Shared fallback (previously duplicated in the empty-result branch
  // and the catch branch): keyword suggestions from the last assistant
  // message, with the loading flag cleared.
  const applyKeywordFallback = (): void => {
    const lastAssistant = messages.filter(m => m.role === 'assistant').pop()?.content || '';
    set({ suggestions: generateKeywordFallback(lastAssistant), suggestionsLoading: false });
  };

  set({ suggestionsLoading: true });
  try {
    // Only the most recent exchange is relevant for follow-ups.
    const recentMessages = messages.slice(-6);
    const context = recentMessages
      .map(m => `${m.role === 'user' ? '用户' : '助手'}: ${m.content}`)
      .join('\n\n');

    // Connection mode persisted by the app shell; null → local kernel.
    const connectionMode = typeof localStorage !== 'undefined'
      ? localStorage.getItem('zclaw-connection-mode')
      : null;

    let raw: string;
    if (connectionMode === 'saas') {
      // SaaS relay: use saasClient directly for reliable auth
      raw = await llmSuggestViaSaaS(context);
    } else {
      // Local kernel: use llm-service adapter (GatewayLLMAdapter → agent_chat)
      raw = await llmSuggest(context);
    }

    const suggestions = parseSuggestionResponse(raw);
    if (suggestions.length > 0) {
      set({ suggestions, suggestionsLoading: false });
    } else {
      applyKeywordFallback();
    }
  } catch (err) {
    log.warn('LLM suggestion generation failed, using keyword fallback:', err);
    applyKeywordFallback();
  }
}
/**
 * Request follow-up suggestions through the SaaS relay.
 *
 * Reads the relay URL and auth token from saasStore and throws when
 * either is missing; otherwise issues a non-streaming chat completion
 * (15-second timeout) and returns the raw assistant message content
 * ('' when the response carries none).
 */
async function llmSuggestViaSaaS(context: string): Promise<string> {
  const saasStoreModule = await import('../saasStore');
  const { saasUrl, authToken } = saasStoreModule.useSaaSStore.getState();
  if (!saasUrl || !authToken) {
    throw new Error('SaaS not authenticated');
  }

  const clientModule = await import('../../lib/saas-client');
  const client = clientModule.saasClient;
  client.setBaseUrl(saasUrl);
  client.setToken(authToken);

  const response = await client.chatCompletion(
    {
      model: 'default',
      messages: [
        { role: 'system', content: LLM_PROMPTS_SYSTEM },
        { role: 'user', content: `以下是对话中最近的消息:\n\n${context}\n\n请生成 3 个后续问题。` },
      ],
      max_tokens: 500,
      temperature: 0.7,
      stream: false,
    },
    AbortSignal.timeout(15000),
  );

  if (!response.ok) {
    const detail = await response.text().catch(() => 'unknown error');
    throw new Error(`SaaS relay error ${response.status}: ${detail.substring(0, 100)}`);
  }

  const payload = await response.json();
  return payload?.choices?.[0]?.message?.content || '';
}
/**
 * System prompt for follow-up suggestion generation (Chinese).
 *
 * Instructs the model to output ONLY a JSON array of exactly 3 short
 * follow-up questions (each ≤ 30 Chinese characters, in the user's
 * language). The strict JSON-only output contract is what
 * parseSuggestionResponse's direct-parse path relies on; its fenced /
 * embedded / line-split fallbacks cover models that ignore it.
 */
const LLM_PROMPTS_SYSTEM = `你是对话分析助手。根据最近的对话内容,生成 3 个用户可能想继续探讨的问题。
要求:
- 每个问题必须与对话内容直接相关,具体且有针对性
- 帮助用户深入理解、实际操作或拓展思路
- 每个问题不超过 30 个中文字符
- 不要重复对话中已讨论过的内容
- 使用与用户相同的语言
只输出 JSON 数组,包含恰好 3 个字符串。不要输出任何其他内容。
示例:["如何在生产环境中部署?", "这个方案的成本如何?", "有没有更简单的替代方案?"]`;
// ---------------------------------------------------------------------------
// ChatStore injection (avoids circular imports)
// ---------------------------------------------------------------------------
@@ -499,6 +671,7 @@ export const useStreamStore = create<StreamState>()(
isLoading: false,
chatMode: 'thinking' as ChatModeType,
suggestions: [],
suggestionsLoading: false,
activeRunId: null as string | null,
// ── Chat Mode ──
@@ -508,6 +681,7 @@ export const useStreamStore = create<StreamState>()(
getChatModeConfig: () => CHAT_MODES[get().chatMode].config,
setSuggestions: (suggestions: string[]) => set({ suggestions }),
setSuggestionsLoading: (loading: boolean) => set({ suggestionsLoading: loading }),
setIsLoading: (loading: boolean) => set({ isLoading: loading }),
@@ -535,7 +709,7 @@ export const useStreamStore = create<StreamState>()(
const currentAgent = convStore.currentAgent;
const sessionKey = convStore.sessionKey;
set({ suggestions: [] });
set({ suggestions: [], suggestionsLoading: false });
const effectiveSessionKey = sessionKey || crypto.randomUUID();
const effectiveAgentId = resolveGatewayAgentId(currentAgent);
const agentId = currentAgent?.id || 'zclaw-main';
@@ -849,13 +1023,15 @@ export const useStreamStore = create<StreamState>()(
}
const latestMsgs = _chat?.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === streamingMsg.id);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
get().setSuggestions(suggestions);
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
}
}
} else if (delta.stream === 'hand') {