fix(identity): 接通身份信号提取与持久化 — 对话中起名跨会话记忆
Some checks failed
CI / Rust Check (push) Has been cancelled
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

根因: 记忆提取管道(COMBINED_EXTRACTION_PROMPT)提取5种画像信号
但无身份信号(agent_name/user_name),不存在从对话到AgentConfig.name
或IdentityFiles的写回路径。

修复内容:
- ProfileSignals 增加 agent_name/user_name 字段
- COMBINED_EXTRACTION_PROMPT 增加身份提取指令
- parse_profile_signals 解析新字段 + 回退推断
- GrowthIntegration 存储身份信号到 VikingStorage
- post_conversation_hook 写回 soul.md + emit Tauri 事件
- streamStore 规则化检测 agent 名字并更新 AgentConfig.name
- cold-start-mapper 新增 detectAgentNameSuggestion

链路: 对话→提取→VikingStorage→hook写回soul.md→事件→前端刷新
This commit is contained in:
iven
2026-04-23 09:20:35 +08:00
parent 17a7a36608
commit 08812e541c
7 changed files with 431 additions and 22 deletions

View File

@@ -8,6 +8,8 @@
use tracing::{debug, warn};
use std::sync::Arc;
use tauri::Emitter;
use zclaw_growth::VikingStorage;
use crate::intelligence::identity::IdentityManagerState;
use crate::intelligence::heartbeat::HeartbeatEngineState;
@@ -56,12 +58,15 @@ pub async fn pre_conversation_hook(
///
/// 1. Record interaction for heartbeat engine
/// 2. Record conversation for reflection engine, trigger reflection if needed
/// 3. Detect identity signals and write back to identity files
pub async fn post_conversation_hook(
agent_id: &str,
_user_message: &str,
_heartbeat_state: &HeartbeatEngineState,
reflection_state: &ReflectionEngineState,
llm_driver: Option<Arc<dyn LlmDriver>>,
identity_state: &IdentityManagerState,
app: &tauri::AppHandle,
) {
// Step 1: Record interaction for heartbeat
crate::intelligence::heartbeat::record_interaction(agent_id);
@@ -200,6 +205,71 @@ pub async fn post_conversation_hook(
reflection_result.improvements.len()
);
}
// Step 3: Detect identity signals from recent memory extraction and write back
if let Ok(storage) = crate::viking_commands::get_storage().await {
let identity_prefix = format!("agent://{}/identity/", agent_id);
// Check for agent_name identity signal
let agent_name_uri = format!("{}agent-name", identity_prefix);
if let Ok(Some(entry)) = VikingStorage::get(storage.as_ref(), &agent_name_uri).await {
// Extract name from content like "助手的名字是小马"
let name = entry.content.strip_prefix("助手的名字是")
.map(|n| n.trim().to_string())
.unwrap_or_else(|| entry.content.clone());
if !name.is_empty() {
// Update IdentityFiles.soul to include the agent name
let mut manager = identity_state.lock().await;
let current_soul = manager.get_file(agent_id, crate::intelligence::identity::IdentityFile::Soul);
// Only update if the name isn't already in the soul
if !current_soul.contains(&name) {
let updated_soul = if current_soul.is_empty() {
format!("# ZCLAW 人格\n\n你的名字是{}\n\n你是一个成长性的中文 AI 助手。", name)
} else if current_soul.contains("你的名字是") || current_soul.contains("你的名字:") {
// Replace existing name line
let re = regex::Regex::new(r"你的名字是[^\n]+").unwrap();
re.replace(&current_soul, format!("你的名字是{}", name)).to_string()
} else {
// Prepend name to existing soul
format!("你的名字是{}\n\n{}", name, current_soul)
};
if let Err(e) = manager.update_file(agent_id, "soul", &updated_soul) {
warn!("[intelligence_hooks] Failed to update soul with agent name: {}", e);
} else {
debug!("[intelligence_hooks] Updated agent name to '{}' in soul", name);
}
}
drop(manager);
// Emit event for frontend to update AgentConfig.name
let _ = app.emit("zclaw:agent-identity-updated", serde_json::json!({
"agentId": agent_id,
"agentName": name,
}));
}
}
// Check for user_name identity signal
let user_name_uri = format!("{}user-name", identity_prefix);
if let Ok(Some(entry)) = VikingStorage::get(storage.as_ref(), &user_name_uri).await {
let name = entry.content.strip_prefix("用户的名字是")
.map(|n| n.trim().to_string())
.unwrap_or_else(|| entry.content.clone());
if !name.is_empty() {
let mut manager = identity_state.lock().await;
let profile = manager.get_file(agent_id, crate::intelligence::identity::IdentityFile::UserProfile);
if !profile.contains(&name) {
manager.append_to_user_profile(agent_id, &format!("- 用户名字: {}", name));
debug!("[intelligence_hooks] Appended user name '{}' to profile", name);
}
}
}
}
}
/// Build memory context by searching VikingStorage for relevant memories

View File

@@ -324,6 +324,7 @@ pub async fn agent_chat_stream(
let hb_state = heartbeat_state.inner().clone();
let rf_state = reflection_state.inner().clone();
let id_state_hook = identity_state.inner().clone();
// Clone the guard map for cleanup in the spawned task
let guard_map: SessionStreamGuard = stream_guard.inner().clone();
@@ -380,12 +381,14 @@ pub async fn agent_chat_stream(
let hb = hb_state.clone();
let rf = rf_state.clone();
let driver = llm_driver.clone();
let id_state = id_state_hook.clone();
let app_hook = app.clone();
if driver.is_none() {
tracing::debug!("[agent_chat_stream] Post-hook firing without LLM driver (schedule intercept path)");
}
tokio::spawn(async move {
crate::intelligence_hooks::post_conversation_hook(
&agent_id_hook, &message_hook, &hb, &rf, driver,
&agent_id_hook, &message_hook, &hb, &rf, driver, &id_state, &app_hook,
).await;
});
}

View File

@@ -146,6 +146,32 @@ export function detectNameSuggestion(message: string): string | undefined {
return undefined;
}
/**
 * Detect if user gives the agent a name (e.g., "叫你小马", "以后叫你小马", "你的名字是小马").
 * Returns the detected agent name or undefined.
 *
 * Fixes over the naive version:
 * - The capture group excludes quote marks, so a greedy match cannot swallow
 *   the closing quote into the name (叫你"小马" → 小马, not 小马").
 * - Each pattern carries its own max length: Chinese names up to 8 chars,
 *   English names up to 15 chars (previously the 8-char check rejected valid
 *   English captures like "Alexander").
 */
export function detectAgentNameSuggestion(message: string): string | undefined {
  if (!message) return undefined;
  // [pattern, max accepted name length]
  const patterns: Array<[RegExp, number]> = [
    [/叫你[""''「」]?([^\s""''「」]{1,8})[""''「」]?[吧。!]?/, 8],
    [/你的名字[是为][""''「」]?([^\s""''「」]{1,8})[""''「」]?[。!]?/, 8],
    [/以后叫你[""''「」]?([^\s""''「」]{1,8})[""''「」]?[吧。!]?/, 8],
    [/给你起[个]?名[字]?(?:叫)?[""''「」]?([^\s""''「」]{1,8})[""''「」]?/, 8],
    [/name you (\S{1,15})/i, 15],
    [/call you (\S{1,15})/i, 15],
  ];
  for (const [pattern, maxLen] of patterns) {
    const match = message.match(pattern);
    if (match && match[1]) {
      // Strip trailing particles, punctuation, and any stray quote marks.
      const name = match[1].replace(/[吧。!,、""''「」]/g, '').trim();
      if (name.length >= 1 && name.length <= maxLen) {
        return name;
      }
    }
  }
  return undefined;
}
/**
* Determine the next cold start phase based on current phase and user message.
*/

View File

@@ -34,6 +34,8 @@ import {
} from './conversationStore';
import { useMessageStore } from './messageStore';
import { useArtifactStore } from './artifactStore';
import { llmSuggest } from '../../lib/llm-service';
import { detectNameSuggestion, detectAgentNameSuggestion } from '../../lib/cold-start-mapper';
const log = createLogger('StreamStore');
@@ -371,7 +373,30 @@ function createCompleteHandler(
.map(m => ({ role: m.role, content: m.content }));
const convId = useConversationStore.getState().currentConversationId;
getMemoryExtractor().extractFromConversation(filtered, agentId, convId ?? undefined)
.then(() => {
.then(async () => {
// Detect name preference from last user message (e.g. "叫我小马")
const lastUserMsg = [...msgs].reverse().find(m => m.role === 'user');
const detectedName = lastUserMsg ? detectNameSuggestion(lastUserMsg.content) : undefined;
if (detectedName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { userName: detectedName });
log.info(`Updated userName to "${detectedName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected userName:', e);
}
}
// Detect agent name change (e.g. "叫你小马", "以后叫你小马")
const detectedAgentName = lastUserMsg ? detectAgentNameSuggestion(lastUserMsg.content) : undefined;
if (detectedAgentName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { name: detectedAgentName });
log.info(`Updated agent name to "${detectedAgentName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected agent name:', e);
}
}
if (typeof window !== 'undefined') {
window.dispatchEvent(new CustomEvent('zclaw:agent-profile-updated', {
detail: { agentId }
@@ -391,15 +416,17 @@ function createCompleteHandler(
}
});
// Follow-up suggestions
// Follow-up suggestions (async LLM call with keyword fallback)
const latestMsgs = chat.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === assistantId);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
set({ suggestions });
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
};
}
@@ -410,6 +437,8 @@ export interface StreamState {
isLoading: boolean;
chatMode: ChatModeType;
suggestions: string[];
/** Whether LLM-generated suggestions are being fetched. */
suggestionsLoading: boolean;
/** Run ID of the currently active stream (null when idle). */
activeRunId: string | null;
@@ -425,6 +454,7 @@ export interface StreamState {
// Suggestions
setSuggestions: (suggestions: string[]) => void;
setSuggestionsLoading: (loading: boolean) => void;
// Skill search
searchSkills: (query: string) => {
@@ -440,7 +470,7 @@ export interface StreamState {
// Follow-up suggestion generator
// ---------------------------------------------------------------------------
function generateFollowUpSuggestions(content: string): string[] {
function generateKeywordFallback(content: string): string[] {
const suggestions: string[] = [];
const lower = content.toLowerCase();
@@ -473,6 +503,148 @@ function generateFollowUpSuggestions(content: string): string[] {
return suggestions;
}
/**
 * Parse LLM response into an array of suggestion strings.
 * Handles: raw JSON array, markdown-fenced JSON, trailing/leading text.
 *
 * @param raw Raw completion text from the model.
 * @returns At most 3 non-empty suggestion strings; `[]` when nothing usable
 *          can be extracted.
 */
function parseSuggestionResponse(raw: string): string[] {
  // Shared normalization: keep only non-empty strings, cap at 3.
  const toSuggestions = (value: unknown[]): string[] =>
    value
      .filter((item): item is string => typeof item === 'string' && item.trim().length > 0)
      .slice(0, 3);

  let cleaned = raw.trim();

  // Strip markdown code fences (```json ... ```).
  cleaned = cleaned.replace(/^```(?:json)?\s*\n?/i, '');
  cleaned = cleaned.replace(/\n?```\s*$/i, '');
  cleaned = cleaned.trim();

  // 1) Direct JSON parse.
  try {
    const parsed = JSON.parse(cleaned);
    if (Array.isArray(parsed)) {
      return toSuggestions(parsed);
    }
  } catch { /* fall through */ }

  // 2) Extract the first JSON array embedded in surrounding text.
  const arrayMatch = cleaned.match(/\[[\s\S]*?\]/);
  if (arrayMatch) {
    try {
      const parsed = JSON.parse(arrayMatch[0]);
      if (Array.isArray(parsed)) {
        return toSuggestions(parsed);
      }
    } catch { /* fall through */ }
  }

  // 3) Last resort: split by newlines, strip list markers, drop overlong lines.
  const lines = cleaned
    .split(/\n/)
    .map(l => l.replace(/^[-*\d.)\]]+\s*/, '').trim())
    .filter(l => l.length > 0 && l.length < 60);
  if (lines.length > 0) {
    return lines.slice(0, 3);
  }
  return [];
}
/**
 * Generate contextual follow-up suggestions via LLM.
 * Routes through SaaS relay or local kernel based on connection mode.
 * Falls back to keyword-based approach on any failure.
 *
 * @param messages Conversation history (role/content pairs); only the last 6
 *                 messages are included in the prompt context.
 * @param set Zustand setter used to publish `suggestions` / `suggestionsLoading`.
 */
async function generateLLMSuggestions(
  messages: Array<{ role: string; content: string }>,
  set: (partial: Partial<StreamState>) => void,
): Promise<void> {
  // Single fallback path (was duplicated in the else- and catch-branches):
  // keyword heuristics over the last assistant reply.
  const fallbackToKeywords = () => {
    const lastAssistant = messages.filter(m => m.role === 'assistant').pop()?.content || '';
    set({ suggestions: generateKeywordFallback(lastAssistant), suggestionsLoading: false });
  };

  set({ suggestionsLoading: true });
  try {
    const recentMessages = messages.slice(-6);
    const context = recentMessages
      .map(m => `${m.role === 'user' ? '用户' : '助手'}: ${m.content}`)
      .join('\n\n');

    // Connection mode is persisted by the settings UI; guard for non-browser envs.
    const connectionMode = typeof localStorage !== 'undefined'
      ? localStorage.getItem('zclaw-connection-mode')
      : null;

    let raw: string;
    if (connectionMode === 'saas') {
      // SaaS relay: use saasClient directly for reliable auth
      raw = await llmSuggestViaSaaS(context);
    } else {
      // Local kernel: use llm-service adapter (GatewayLLMAdapter → agent_chat)
      raw = await llmSuggest(context);
    }

    const suggestions = parseSuggestionResponse(raw);
    if (suggestions.length > 0) {
      set({ suggestions, suggestionsLoading: false });
    } else {
      fallbackToKeywords();
    }
  } catch (err) {
    log.warn('LLM suggestion generation failed, using keyword fallback:', err);
    fallbackToKeywords();
  }
}
/**
 * Generate suggestions via SaaS relay, using saasStore auth directly.
 *
 * Reads the relay URL and auth token from the SaaS store, configures the
 * shared saasClient, and issues a non-streaming chat completion with a 15s
 * timeout. Throws when unauthenticated or on a non-OK relay response.
 */
async function llmSuggestViaSaaS(context: string): Promise<string> {
  const saasStoreModule = await import('../saasStore');
  const state = saasStoreModule.useSaaSStore.getState();
  if (!state.saasUrl || !state.authToken) {
    throw new Error('SaaS not authenticated');
  }

  const clientModule = await import('../../lib/saas-client');
  const client = clientModule.saasClient;
  client.setBaseUrl(state.saasUrl);
  client.setToken(state.authToken);

  const requestBody = {
    model: 'default',
    messages: [
      { role: 'system', content: LLM_PROMPTS_SYSTEM },
      { role: 'user', content: `以下是对话中最近的消息:\n\n${context}\n\n请生成 3 个后续问题。` },
    ],
    max_tokens: 500,
    temperature: 0.7,
    stream: false,
  };
  // Abort the request if the relay takes longer than 15 seconds.
  const response = await client.chatCompletion(requestBody, AbortSignal.timeout(15000));

  if (!response.ok) {
    const errText = await response.text().catch(() => 'unknown error');
    throw new Error(`SaaS relay error ${response.status}: ${errText.substring(0, 100)}`);
  }

  const payload = await response.json();
  return payload?.choices?.[0]?.message?.content || '';
}
// System prompt for follow-up suggestion generation. Instructs the model to
// reply with a bare JSON array of exactly 3 short questions; the reply is then
// parsed leniently by parseSuggestionResponse. This is a runtime string sent
// to the LLM — do not reformat or translate its content.
const LLM_PROMPTS_SYSTEM = `你是对话分析助手。根据最近的对话内容,生成 3 个用户可能想继续探讨的问题。
要求:
- 每个问题必须与对话内容直接相关,具体且有针对性
- 帮助用户深入理解、实际操作或拓展思路
- 每个问题不超过 30 个中文字符
- 不要重复对话中已讨论过的内容
- 使用与用户相同的语言
只输出 JSON 数组,包含恰好 3 个字符串。不要输出任何其他内容。
示例:["如何在生产环境中部署?", "这个方案的成本如何?", "有没有更简单的替代方案?"]`;
// ---------------------------------------------------------------------------
// ChatStore injection (avoids circular imports)
// ---------------------------------------------------------------------------
@@ -499,6 +671,7 @@ export const useStreamStore = create<StreamState>()(
isLoading: false,
chatMode: 'thinking' as ChatModeType,
suggestions: [],
suggestionsLoading: false,
activeRunId: null as string | null,
// ── Chat Mode ──
@@ -508,6 +681,7 @@ export const useStreamStore = create<StreamState>()(
getChatModeConfig: () => CHAT_MODES[get().chatMode].config,
setSuggestions: (suggestions: string[]) => set({ suggestions }),
setSuggestionsLoading: (loading: boolean) => set({ suggestionsLoading: loading }),
setIsLoading: (loading: boolean) => set({ isLoading: loading }),
@@ -535,7 +709,7 @@ export const useStreamStore = create<StreamState>()(
const currentAgent = convStore.currentAgent;
const sessionKey = convStore.sessionKey;
set({ suggestions: [] });
set({ suggestions: [], suggestionsLoading: false });
const effectiveSessionKey = sessionKey || crypto.randomUUID();
const effectiveAgentId = resolveGatewayAgentId(currentAgent);
const agentId = currentAgent?.id || 'zclaw-main';
@@ -849,13 +1023,15 @@ export const useStreamStore = create<StreamState>()(
}
const latestMsgs = _chat?.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === streamingMsg.id);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
get().setSuggestions(suggestions);
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
}
}
} else if (delta.stream === 'hand') {