fix(identity): 接通身份信号提取与持久化 — 对话中起名跨会话记忆
Some checks failed
CI / Rust Check (push) Has been cancelled
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

根因: 记忆提取管道(COMBINED_EXTRACTION_PROMPT)提取5种画像信号
但无身份信号(agent_name/user_name),不存在从对话到AgentConfig.name
或IdentityFiles的写回路径。

修复内容:
- ProfileSignals 增加 agent_name/user_name 字段
- COMBINED_EXTRACTION_PROMPT 增加身份提取指令
- parse_profile_signals 解析新字段 + 回退推断
- GrowthIntegration 存储身份信号到 VikingStorage
- post_conversation_hook 写回 soul.md + emit Tauri 事件
- streamStore 规则化检测 agent 名字并更新 AgentConfig.name
- cold-start-mapper 新增 detectAgentNameSuggestion

链路: 对话→提取→VikingStorage→hook写回soul.md→事件→前端刷新
This commit is contained in:
iven
2026-04-23 09:20:35 +08:00
parent 17a7a36608
commit 08812e541c
7 changed files with 431 additions and 22 deletions

View File

@@ -253,6 +253,18 @@ impl MemoryExtractor {
Ok(stored)
}
/// Store a single pre-built MemoryEntry to VikingStorage.
///
/// Returns `ZclawError::Internal` (after logging a warning) when no
/// VikingAdapter has been configured on this extractor.
pub async fn store_memory_entry(&self, entry: &crate::types::MemoryEntry) -> Result<()> {
    let viking = self.viking.as_ref().ok_or_else(|| {
        tracing::warn!("[MemoryExtractor] No VikingAdapter configured");
        zclaw_types::ZclawError::Internal("No VikingAdapter".to_string())
    })?;
    viking.store(entry).await
}
/// 统一提取:单次 LLM 调用同时产出 memories + experiences + profile_signals
///
/// 优先使用 `extract_with_prompt()` 进行单次调用;若 driver 不支持则
@@ -481,6 +493,16 @@ fn parse_profile_signals(obj: &serde_json::Value) -> crate::types::ProfileSignal
.and_then(|s| s.get("communication_style"))
.and_then(|v| v.as_str())
.map(String::from),
agent_name: signals
.and_then(|s| s.get("agent_name"))
.and_then(|v| v.as_str())
.filter(|s| !s.is_empty())
.map(String::from),
user_name: signals
.and_then(|s| s.get("user_name"))
.and_then(|v| v.as_str())
.filter(|s| !s.is_empty())
.map(String::from),
}
}
@@ -525,6 +547,22 @@ fn infer_profile_signals_from_memories(
signals.communication_style = Some(m.content.clone());
}
}
// 身份信号回退: 从 preference 记忆中检测命名/称呼关键词
let lower = m.content.to_lowercase();
if lower.contains("叫你") || lower.contains("助手名字") || lower.contains("称呼") {
if signals.agent_name.is_none() {
// 尝试提取引号内的名字
signals.agent_name = extract_quoted_name(&m.content)
.or_else(|| extract_name_after_pattern(&lower, &m.content, "叫你"));
}
}
if lower.contains("我叫") || lower.contains("我的名字") || lower.contains("用户名") {
if signals.user_name.is_none() {
signals.user_name = extract_name_after_pattern(&lower, &m.content, "我叫")
.or_else(|| extract_name_after_pattern(&lower, &m.content, "我的名字是"))
.or_else(|| extract_name_after_pattern(&lower, &m.content, "我叫"));
}
}
}
crate::types::MemoryType::Knowledge => {
if signals.recent_topic.is_none() && !m.keywords.is_empty() {
@@ -547,6 +585,38 @@ fn infer_profile_signals_from_memories(
signals
}
/// Extract a name enclosed in quotes/brackets (e.g. "以后叫你「小马」" -> "小马").
///
/// Fix over the previous version: delimiters are treated as open/close PAIRS
/// instead of being split on individually. With a single-delimiter split,
/// asymmetric CJK brackets leaked the closing bracket into the result
/// ("「小马」" -> "小马」"), and an unmatched opening quote returned
/// everything to end-of-string. Now a name is only extracted when both the
/// opening and closing delimiter are present.
fn extract_quoted_name(text: &str) -> Option<String> {
    // Checked in order; first pair that yields a plausible name wins.
    const PAIRS: [(char, char); 5] = [
        ('"', '"'),
        ('\'', '\''),
        ('「', '」'),
        ('『', '』'),
        ('“', '”'),
    ];
    for (open, close) in PAIRS {
        if let Some(start) = text.find(open) {
            let after = &text[start + open.len_utf8()..];
            if let Some(end) = after.find(close) {
                let candidate = after[..end].trim();
                // Reject empty quotes and implausibly long "names".
                if !candidate.is_empty() && candidate.chars().count() <= 20 {
                    return Some(candidate.to_string());
                }
            }
        }
    }
    None
}
/// Extract a name that directly follows `pattern` (e.g. "叫你小马" -> "小马").
///
/// `lower` must be the lowercased form of `original`: the match position is
/// located in `lower`, but the name is sliced out of `original` so original
/// casing is preserved. The name ends at the first whitespace or sentence
/// punctuation (CJK or ASCII) and is capped at 10 characters.
fn extract_name_after_pattern(lower: &str, original: &str, pattern: &str) -> Option<String> {
    let pos = lower.find(pattern)?;
    // `to_lowercase()` can change byte lengths for some scripts, so the byte
    // offset found in `lower` may not be a char boundary in `original`.
    // `str::get` returns None instead of panicking in that case.
    let after = original.get(pos + pattern.len()..)?;
    let name: String = after
        .chars()
        // Stop at whitespace or CJK/ASCII sentence punctuation.
        .take_while(|c| {
            !c.is_whitespace() && !matches!(c, '，' | '。' | '！' | '？' | ',' | '.' | '!' | '?')
        })
        .take(10)
        .collect();
    if name.is_empty() {
        None
    } else {
        Some(name)
    }
}
/// Default extraction prompts for LLM
pub mod prompts {
use crate::types::MemoryType;
@@ -594,7 +664,9 @@ pub mod prompts {
"recent_topic": "最近讨论的主要话题(可选)",
"pain_point": "用户当前痛点(可选)",
"preferred_tool": "用户偏好的工具/技能(可选)",
"communication_style": "沟通风格: concise|detailed|formal|casual(可选)"
"communication_style": "沟通风格: concise|detailed|formal|casual(可选)",
"agent_name": "用户给助手起的名称(可选,仅在用户明确命名时填写,如'以后叫你小马')",
"user_name": "用户提到的自己的名字(可选,仅在用户明确自我介绍时填写,如'我叫张三')"
}
}
```
@@ -604,8 +676,9 @@ pub mod prompts {
1. **memories**: 提取用户偏好(沟通风格/格式/语言)、知识(事实/领域知识/经验教训)、使用经验(技能/工具使用模式和结果)
2. **experiences**: 仅提取明确的"问题→解决"模式要求有清晰的痛点和步骤confidence >= 0.6
3. **profile_signals**: 从对话中推断用户画像信息,只在有明确信号时填写,留空则不填
4. 每个字段都要有实际内容,不确定的宁可省略
5. 只返回 JSON不要附加其他文本
4. **identity**: 检测用户是否给助手命名(如"你叫X"/"以后叫你X"/"你的名字是X")或自我介绍(如"我叫X"/"我的名字是X"),填入 agent_name 或 user_name 字段
5. 每个字段都要有实际内容,不确定的宁可省略
6. 只返回 JSON不要附加其他文本
对话内容:
"#;

View File

@@ -432,6 +432,10 @@ pub struct ProfileSignals {
pub pain_point: Option<String>,
pub preferred_tool: Option<String>,
pub communication_style: Option<String>,
/// 用户给助手起的名称(如"以后叫你小马"
pub agent_name: Option<String>,
/// 用户提到的自己的名字(如"我叫张三"
pub user_name: Option<String>,
}
impl ProfileSignals {
@@ -442,6 +446,8 @@ impl ProfileSignals {
|| self.pain_point.is_some()
|| self.preferred_tool.is_some()
|| self.communication_style.is_some()
|| self.agent_name.is_some()
|| self.user_name.is_some()
}
/// 有效信号数量
@@ -452,8 +458,15 @@ impl ProfileSignals {
if self.pain_point.is_some() { count += 1; }
if self.preferred_tool.is_some() { count += 1; }
if self.communication_style.is_some() { count += 1; }
if self.agent_name.is_some() { count += 1; }
if self.user_name.is_some() { count += 1; }
count
}
/// Whether this signal set carries an identity signal (`agent_name` or `user_name`).
///
/// Used by the growth pipeline to decide whether identity memories should be
/// persisted for cross-session recall.
pub fn has_identity_signal(&self) -> bool {
    self.agent_name.is_some() || self.user_name.is_some()
}
}
/// 进化事件
@@ -674,8 +687,23 @@ mod tests {
pain_point: None,
preferred_tool: Some("researcher".to_string()),
communication_style: Some("concise".to_string()),
agent_name: None,
user_name: None,
};
assert_eq!(signals.industry.as_deref(), Some("healthcare"));
assert!(signals.pain_point.is_none());
assert!(!signals.has_identity_signal());
}
#[test]
fn test_profile_signals_identity() {
    // Build a ProfileSignals with only the two identity fields set; all
    // other profile fields default to None via struct-update syntax.
    let signals = ProfileSignals {
        agent_name: Some("小马".to_string()),
        user_name: Some("张三".to_string()),
        ..Default::default()
    };
    // Identity presence and signal count must reflect exactly the two set fields.
    assert!(signals.has_identity_signal());
    assert_eq!(signals.signal_count(), 2);
    assert_eq!(signals.agent_name.as_deref(), Some("小马"));
}
}

View File

@@ -440,6 +440,39 @@ impl GrowthIntegration {
}
}
// Store identity signals as special memories for cross-session persistence
if combined.profile_signals.has_identity_signal() {
let agent_id_str = agent_id.to_string();
if let Some(ref agent_name) = combined.profile_signals.agent_name {
let entry = zclaw_growth::types::MemoryEntry::new(
&agent_id_str,
zclaw_growth::types::MemoryType::Preference,
"identity",
format!("助手的名字是{}", agent_name),
).with_importance(8)
.with_keywords(vec!["名字".to_string(), "称呼".to_string(), "identity".to_string(), agent_name.clone()]);
if let Err(e) = self.extractor.store_memory_entry(&entry).await {
tracing::warn!("[GrowthIntegration] Failed to store agent_name signal: {}", e);
} else {
tracing::info!("[GrowthIntegration] Stored agent_name '{}' for {}", agent_name, agent_id_str);
}
}
if let Some(ref user_name) = combined.profile_signals.user_name {
let entry = zclaw_growth::types::MemoryEntry::new(
&agent_id_str,
zclaw_growth::types::MemoryType::Preference,
"identity",
format!("用户的名字是{}", user_name),
).with_importance(8)
.with_keywords(vec!["名字".to_string(), "用户名".to_string(), "identity".to_string(), user_name.clone()]);
if let Err(e) = self.extractor.store_memory_entry(&entry).await {
tracing::warn!("[GrowthIntegration] Failed to store user_name signal: {}", e);
} else {
tracing::info!("[GrowthIntegration] Stored user_name '{}' for {}", user_name, agent_id_str);
}
}
}
// Convert extracted memories to structured facts
let facts: Vec<Fact> = combined
.memories

View File

@@ -8,6 +8,8 @@
use tracing::{debug, warn};
use std::sync::Arc;
use tauri::Emitter;
use zclaw_growth::VikingStorage;
use crate::intelligence::identity::IdentityManagerState;
use crate::intelligence::heartbeat::HeartbeatEngineState;
@@ -56,12 +58,15 @@ pub async fn pre_conversation_hook(
///
/// 1. Record interaction for heartbeat engine
/// 2. Record conversation for reflection engine, trigger reflection if needed
/// 3. Detect identity signals and write back to identity files
pub async fn post_conversation_hook(
agent_id: &str,
_user_message: &str,
_heartbeat_state: &HeartbeatEngineState,
reflection_state: &ReflectionEngineState,
llm_driver: Option<Arc<dyn LlmDriver>>,
identity_state: &IdentityManagerState,
app: &tauri::AppHandle,
) {
// Step 1: Record interaction for heartbeat
crate::intelligence::heartbeat::record_interaction(agent_id);
@@ -200,6 +205,71 @@ pub async fn post_conversation_hook(
reflection_result.improvements.len()
);
}
// Step 3: Detect identity signals from recent memory extraction and write back
if let Ok(storage) = crate::viking_commands::get_storage().await {
let identity_prefix = format!("agent://{}/identity/", agent_id);
// Check for agent_name identity signal
let agent_name_uri = format!("{}agent-name", identity_prefix);
if let Ok(Some(entry)) = VikingStorage::get(storage.as_ref(), &agent_name_uri).await {
// Extract name from content like "助手的名字是小马"
let name = entry.content.strip_prefix("助手的名字是")
.map(|n| n.trim().to_string())
.unwrap_or_else(|| entry.content.clone());
if !name.is_empty() {
// Update IdentityFiles.soul to include the agent name
let mut manager = identity_state.lock().await;
let current_soul = manager.get_file(agent_id, crate::intelligence::identity::IdentityFile::Soul);
// Only update if the name isn't already in the soul
if !current_soul.contains(&name) {
let updated_soul = if current_soul.is_empty() {
format!("# ZCLAW 人格\n\n你的名字是{}\n\n你是一个成长性的中文 AI 助手。", name)
} else if current_soul.contains("你的名字是") || current_soul.contains("你的名字:") {
// Replace existing name line
let re = regex::Regex::new(r"你的名字是[^\n]+").unwrap();
re.replace(&current_soul, format!("你的名字是{}", name)).to_string()
} else {
// Prepend name to existing soul
format!("你的名字是{}\n\n{}", name, current_soul)
};
if let Err(e) = manager.update_file(agent_id, "soul", &updated_soul) {
warn!("[intelligence_hooks] Failed to update soul with agent name: {}", e);
} else {
debug!("[intelligence_hooks] Updated agent name to '{}' in soul", name);
}
}
drop(manager);
// Emit event for frontend to update AgentConfig.name
let _ = app.emit("zclaw:agent-identity-updated", serde_json::json!({
"agentId": agent_id,
"agentName": name,
}));
}
}
// Check for user_name identity signal
let user_name_uri = format!("{}user-name", identity_prefix);
if let Ok(Some(entry)) = VikingStorage::get(storage.as_ref(), &user_name_uri).await {
let name = entry.content.strip_prefix("用户的名字是")
.map(|n| n.trim().to_string())
.unwrap_or_else(|| entry.content.clone());
if !name.is_empty() {
let mut manager = identity_state.lock().await;
let profile = manager.get_file(agent_id, crate::intelligence::identity::IdentityFile::UserProfile);
if !profile.contains(&name) {
manager.append_to_user_profile(agent_id, &format!("- 用户名字: {}", name));
debug!("[intelligence_hooks] Appended user name '{}' to profile", name);
}
}
}
}
}
/// Build memory context by searching VikingStorage for relevant memories

View File

@@ -324,6 +324,7 @@ pub async fn agent_chat_stream(
let hb_state = heartbeat_state.inner().clone();
let rf_state = reflection_state.inner().clone();
let id_state_hook = identity_state.inner().clone();
// Clone the guard map for cleanup in the spawned task
let guard_map: SessionStreamGuard = stream_guard.inner().clone();
@@ -380,12 +381,14 @@ pub async fn agent_chat_stream(
let hb = hb_state.clone();
let rf = rf_state.clone();
let driver = llm_driver.clone();
let id_state = id_state_hook.clone();
let app_hook = app.clone();
if driver.is_none() {
tracing::debug!("[agent_chat_stream] Post-hook firing without LLM driver (schedule intercept path)");
}
tokio::spawn(async move {
crate::intelligence_hooks::post_conversation_hook(
&agent_id_hook, &message_hook, &hb, &rf, driver,
&agent_id_hook, &message_hook, &hb, &rf, driver, &id_state, &app_hook,
).await;
});
}

View File

@@ -146,6 +146,32 @@ export function detectNameSuggestion(message: string): string | undefined {
return undefined;
}
/**
 * Detect whether the user assigns the agent a name in `message`
 * (e.g. "叫你小马", "以后叫你小马", "你的名字是小马", "call you Max").
 * Returns the detected agent name, or undefined when no pattern matches.
 *
 * NOTE(review): the bracket classes below mix straight and CJK quote
 * characters — confirm the intended curly-quote variants survived source
 * encoding, and that the trailing-particle classes (吧/。/！) are complete.
 */
export function detectAgentNameSuggestion(message: string): string | undefined {
  if (!message) return undefined;
  // Ordered naming patterns; the first match wins. Chinese forms capture a
  // 1-8 char token (optionally quote-wrapped); English forms allow up to 15.
  const patterns = [
    /叫你[""''「」]?(\S{1,8})[""''「」]?[吧。!]?/,
    /你的名字[是为][""''「」]?(\S{1,8})[""''「」]?[。!]?/,
    /以后叫你[""''「」]?(\S{1,8})[""''「」]?[吧。!]?/,
    /给你起[个]?名[字]?(?:叫)?[""''「」]?(\S{1,8})[""''「」]?/,
    /name you (\S{1,15})/i,
    /call you (\S{1,15})/i,
  ];
  for (const pattern of patterns) {
    const match = message.match(pattern);
    if (match && match[1]) {
      // Strip trailing particles/punctuation the capture may have swallowed.
      const name = match[1].replace(/[吧。!,、]/g, '').trim();
      if (name.length >= 1 && name.length <= 8) {
        return name;
      }
    }
  }
  return undefined;
}
/**
* Determine the next cold start phase based on current phase and user message.
*/

View File

@@ -34,6 +34,8 @@ import {
} from './conversationStore';
import { useMessageStore } from './messageStore';
import { useArtifactStore } from './artifactStore';
import { llmSuggest } from '../../lib/llm-service';
import { detectNameSuggestion, detectAgentNameSuggestion } from '../../lib/cold-start-mapper';
const log = createLogger('StreamStore');
@@ -371,7 +373,30 @@ function createCompleteHandler(
.map(m => ({ role: m.role, content: m.content }));
const convId = useConversationStore.getState().currentConversationId;
getMemoryExtractor().extractFromConversation(filtered, agentId, convId ?? undefined)
.then(() => {
.then(async () => {
// Detect name preference from last user message (e.g. "叫我小马")
const lastUserMsg = [...msgs].reverse().find(m => m.role === 'user');
const detectedName = lastUserMsg ? detectNameSuggestion(lastUserMsg.content) : undefined;
if (detectedName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { userName: detectedName });
log.info(`Updated userName to "${detectedName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected userName:', e);
}
}
// Detect agent name change (e.g. "叫你小马", "以后叫你小马")
const detectedAgentName = lastUserMsg ? detectAgentNameSuggestion(lastUserMsg.content) : undefined;
if (detectedAgentName && agentId) {
try {
const { useAgentStore } = await import('../agentStore');
await useAgentStore.getState().updateClone(agentId, { name: detectedAgentName });
log.info(`Updated agent name to "${detectedAgentName}" from conversation`);
} catch (e) {
log.warn('Failed to persist detected agent name:', e);
}
}
if (typeof window !== 'undefined') {
window.dispatchEvent(new CustomEvent('zclaw:agent-profile-updated', {
detail: { agentId }
@@ -391,15 +416,17 @@ function createCompleteHandler(
}
});
// Follow-up suggestions
// Follow-up suggestions (async LLM call with keyword fallback)
const latestMsgs = chat.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === assistantId);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
set({ suggestions });
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
};
}
@@ -410,6 +437,8 @@ export interface StreamState {
isLoading: boolean;
chatMode: ChatModeType;
suggestions: string[];
/** Whether LLM-generated suggestions are being fetched. */
suggestionsLoading: boolean;
/** Run ID of the currently active stream (null when idle). */
activeRunId: string | null;
@@ -425,6 +454,7 @@ export interface StreamState {
// Suggestions
setSuggestions: (suggestions: string[]) => void;
setSuggestionsLoading: (loading: boolean) => void;
// Skill search
searchSkills: (query: string) => {
@@ -440,7 +470,7 @@ export interface StreamState {
// Follow-up suggestion generator
// ---------------------------------------------------------------------------
function generateFollowUpSuggestions(content: string): string[] {
function generateKeywordFallback(content: string): string[] {
const suggestions: string[] = [];
const lower = content.toLowerCase();
@@ -473,6 +503,148 @@ function generateFollowUpSuggestions(content: string): string[] {
return suggestions;
}
/**
 * Parse an LLM response into up to three suggestion strings.
 *
 * Accepts, in order of preference: a bare JSON array, a markdown-fenced JSON
 * array, a JSON array embedded in surrounding text, and finally a plain
 * newline-separated list (list markers stripped).
 */
function parseSuggestionResponse(raw: string): string[] {
  // Keep only non-empty string elements, capped at three suggestions.
  const takeStrings = (value: unknown): string[] | null => {
    if (!Array.isArray(value)) return null;
    return value
      .filter((item): item is string => typeof item === 'string' && item.trim().length > 0)
      .slice(0, 3);
  };

  // Remove markdown code fences, if present.
  const body = raw
    .trim()
    .replace(/^```(?:json)?\s*\n?/i, '')
    .replace(/\n?```\s*$/i, '')
    .trim();

  // Attempt 1: the whole payload is JSON.
  try {
    const fromWhole = takeStrings(JSON.parse(body));
    if (fromWhole) return fromWhole;
  } catch { /* not pure JSON */ }

  // Attempt 2: a JSON array embedded in surrounding text.
  const embedded = body.match(/\[[\s\S]*?\]/);
  if (embedded) {
    try {
      const fromEmbedded = takeStrings(JSON.parse(embedded[0]));
      if (fromEmbedded) return fromEmbedded;
    } catch { /* malformed array */ }
  }

  // Attempt 3: treat each short non-empty line as a suggestion.
  const lines = body
    .split(/\n/)
    .map(l => l.replace(/^[-*\d.)\]]+\s*/, '').trim())
    .filter(l => l.length > 0 && l.length < 60);
  return lines.slice(0, 3);
}
/**
 * Generate contextual follow-up suggestions via LLM.
 *
 * Routes through the SaaS relay or the local kernel depending on the stored
 * connection mode, then parses the response into suggestion strings. On any
 * failure — or an empty parse — falls back to keyword-based suggestions
 * derived from the last assistant message. Always clears `suggestionsLoading`.
 */
async function generateLLMSuggestions(
  messages: Array<{ role: string; content: string }>,
  set: (partial: Partial<StreamState>) => void,
): Promise<void> {
  set({ suggestionsLoading: true });
  // Keyword fallback input: content of the most recent assistant message.
  const lastAssistantContent = () =>
    messages.filter(m => m.role === 'assistant').pop()?.content || '';
  try {
    // Use only the most recent turns to keep the prompt small.
    const context = messages
      .slice(-6)
      .map(m => `${m.role === 'user' ? '用户' : '助手'}: ${m.content}`)
      .join('\n\n');
    const mode = typeof localStorage !== 'undefined'
      ? localStorage.getItem('zclaw-connection-mode')
      : null;
    // SaaS relay uses saasClient directly for reliable auth; otherwise the
    // llm-service adapter routes through the local kernel (agent_chat).
    const raw = mode === 'saas'
      ? await llmSuggestViaSaaS(context)
      : await llmSuggest(context);
    const suggestions = parseSuggestionResponse(raw);
    if (suggestions.length > 0) {
      set({ suggestions, suggestionsLoading: false });
    } else {
      set({ suggestions: generateKeywordFallback(lastAssistantContent()), suggestionsLoading: false });
    }
  } catch (err) {
    log.warn('LLM suggestion generation failed, using keyword fallback:', err);
    set({ suggestions: generateKeywordFallback(lastAssistantContent()), suggestionsLoading: false });
  }
}
/**
 * Generate suggestions via the SaaS relay, authenticating with the base URL
 * and token held in saasStore.
 *
 * Dynamic imports avoid a circular dependency between this store module and
 * the SaaS store/client modules.
 *
 * @throws when SaaS auth is missing, when the relay responds non-OK, or when
 *         the 15s abort timeout elapses — callers catch and fall back to
 *         keyword-based suggestions.
 */
async function llmSuggestViaSaaS(context: string): Promise<string> {
  const { useSaaSStore } = await import('../saasStore');
  const { saasUrl, authToken } = useSaaSStore.getState();
  if (!saasUrl || !authToken) {
    throw new Error('SaaS not authenticated');
  }
  const { saasClient } = await import('../../lib/saas-client');
  saasClient.setBaseUrl(saasUrl);
  saasClient.setToken(authToken);
  // Non-streaming completion; model selection is delegated to the relay.
  const response = await saasClient.chatCompletion(
    {
      model: 'default',
      messages: [
        { role: 'system', content: LLM_PROMPTS_SYSTEM },
        { role: 'user', content: `以下是对话中最近的消息:\n\n${context}\n\n请生成 3 个后续问题。` },
      ],
      max_tokens: 500,
      temperature: 0.7,
      stream: false,
    },
    AbortSignal.timeout(15000),
  );
  if (!response.ok) {
    // Truncate the relay error body so the thrown message stays readable.
    const errText = await response.text().catch(() => 'unknown error');
    throw new Error(`SaaS relay error ${response.status}: ${errText.substring(0, 100)}`);
  }
  const data = await response.json();
  // Defensive chaining: the relay may return an empty choices array.
  return data?.choices?.[0]?.message?.content || '';
}
// System prompt for follow-up suggestion generation. Instructs the model to
// return a bare JSON array of exactly three short questions in the user's
// language; parseSuggestionResponse() tolerates fenced or noisy output anyway.
const LLM_PROMPTS_SYSTEM = `你是对话分析助手。根据最近的对话内容,生成 3 个用户可能想继续探讨的问题。
要求:
- 每个问题必须与对话内容直接相关,具体且有针对性
- 帮助用户深入理解、实际操作或拓展思路
- 每个问题不超过 30 个中文字符
- 不要重复对话中已讨论过的内容
- 使用与用户相同的语言
只输出 JSON 数组,包含恰好 3 个字符串。不要输出任何其他内容。
示例:["如何在生产环境中部署?", "这个方案的成本如何?", "有没有更简单的替代方案?"]`;
// ---------------------------------------------------------------------------
// ChatStore injection (avoids circular imports)
// ---------------------------------------------------------------------------
@@ -499,6 +671,7 @@ export const useStreamStore = create<StreamState>()(
isLoading: false,
chatMode: 'thinking' as ChatModeType,
suggestions: [],
suggestionsLoading: false,
activeRunId: null as string | null,
// ── Chat Mode ──
@@ -508,6 +681,7 @@ export const useStreamStore = create<StreamState>()(
getChatModeConfig: () => CHAT_MODES[get().chatMode].config,
setSuggestions: (suggestions: string[]) => set({ suggestions }),
setSuggestionsLoading: (loading: boolean) => set({ suggestionsLoading: loading }),
setIsLoading: (loading: boolean) => set({ isLoading: loading }),
@@ -535,7 +709,7 @@ export const useStreamStore = create<StreamState>()(
const currentAgent = convStore.currentAgent;
const sessionKey = convStore.sessionKey;
set({ suggestions: [] });
set({ suggestions: [], suggestionsLoading: false });
const effectiveSessionKey = sessionKey || crypto.randomUUID();
const effectiveAgentId = resolveGatewayAgentId(currentAgent);
const agentId = currentAgent?.id || 'zclaw-main';
@@ -849,13 +1023,15 @@ export const useStreamStore = create<StreamState>()(
}
const latestMsgs = _chat?.getMessages() || [];
const completedMsg = latestMsgs.find(m => m.id === streamingMsg.id);
if (completedMsg?.content) {
const suggestions = generateFollowUpSuggestions(completedMsg.content);
if (suggestions.length > 0) {
get().setSuggestions(suggestions);
}
}
const conversationMessages = latestMsgs
.filter(m => m.role === 'user' || m.role === 'assistant')
.filter(m => !m.streaming)
.map(m => ({ role: m.role, content: m.content }));
generateLLMSuggestions(conversationMessages, set).catch(err => {
log.warn('Suggestion generation error:', err);
set({ suggestionsLoading: false });
});
}
}
} else if (delta.stream === 'hand') {