From 0b0ab00b9ca645ac4a03228c3ba386bbdbc245a7 Mon Sep 17 00:00:00 2001 From: iven Date: Wed, 8 Apr 2026 14:47:43 +0800 Subject: [PATCH] fix(chat): prevent React Maximum update depth exceeded during streaming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Root cause: Each LLM delta (text/thinking) triggered a synchronous setState via updateMessages → chatStore.setState. With Kimi thinking model emitting many deltas per frame, this caused a React render storm that hit the maximum update depth limit. Fix (two-layer approach): 1. streamStore: Buffer text/thinking deltas locally and flush to store via setTimeout(0), batching multiple deltas per frame 2. chatStore: Microtask batching in injectChatStore.updateMessages to coalesce rapid successive updates Verified: 2-round conversation (4 messages) with Kimi thinking model completes without crash. Previously crashed 100% on 2nd message. --- desktop/src/store/chat/streamStore.ts | 68 +++++++++++++++++++++------ desktop/src/store/chatStore.ts | 48 ++++++++++++++++--- 2 files changed, 96 insertions(+), 20 deletions(-) diff --git a/desktop/src/store/chat/streamStore.ts b/desktop/src/store/chat/streamStore.ts index e7d9454..6d570af 100644 --- a/desktop/src/store/chat/streamStore.ts +++ b/desktop/src/store/chat/streamStore.ts @@ -247,6 +247,41 @@ export const useStreamStore = create()( }); set({ isStreaming: true, activeRunId: null }); + // ── Delta buffering ── + // Accumulate text/thinking deltas in local buffers and flush to store + // at ~60fps intervals. This prevents React "Maximum update depth exceeded" + // when the LLM emits many small deltas per frame (e.g. Kimi thinking). 
+ let textBuffer = ''; + let thinkBuffer = ''; + let flushTimer: ReturnType<typeof setTimeout> | null = null; + + const flushBuffers = () => { + flushTimer = null; + const text = textBuffer; + const think = thinkBuffer; + textBuffer = ''; + thinkBuffer = ''; + + if (text || think) { + _chat?.updateMessages(msgs => + msgs.map(m => { + if (m.id !== assistantId) return m; + return { + ...m, + ...(text ? { content: m.content + text } : {}), + ...(think ? { thinkingContent: (m.thinkingContent || '') + think } : {}), + }; + }) + ); + } + }; + + const scheduleFlush = () => { + if (flushTimer === null) { + flushTimer = setTimeout(flushBuffers, 0); + } + }; + try { const client = getClient(); const connectionState = useConnectionStore.getState().connectionState; @@ -265,22 +300,12 @@ export const useStreamStore = create()( content, { onDelta: (delta: string) => { - _chat?.updateMessages(msgs => - msgs.map(m => - m.id === assistantId - ? { ...m, content: m.content + delta } - : m - ) - ); + textBuffer += delta; + scheduleFlush(); }, onThinkingDelta: (delta: string) => { - _chat?.updateMessages(msgs => - msgs.map(m => - m.id === assistantId - ? { ...m, thinkingContent: (m.thinkingContent || '') + delta } - : m - ) - ); + thinkBuffer += delta; + scheduleFlush(); }, onTool: (tool: string, input: string, output: string) => { if (output) { @@ -412,6 +437,13 @@ export const useStreamStore = create()( ); }, onComplete: (inputTokens?: number, outputTokens?: number) => { + // Flush any remaining buffered deltas before finalizing + if (flushTimer !== null) { + clearTimeout(flushTimer); + flushTimer = null; + } + flushBuffers(); + const currentMsgs = _chat?.getMessages(); if (currentMsgs) { @@ -507,6 +539,14 @@ export const useStreamStore = create()( ) ); } catch (err: unknown) { + // Flush remaining buffers on error + if (flushTimer !== null) { + clearTimeout(flushTimer); + flushTimer = null; + } + textBuffer = ''; + thinkBuffer = ''; + const errorMessage = err instanceof Error ? 
err.message : '无法连接 Gateway'; _chat?.updateMessages(msgs => msgs.map(m => diff --git a/desktop/src/store/chatStore.ts b/desktop/src/store/chatStore.ts index 965279b..8a7c10c 100644 --- a/desktop/src/store/chatStore.ts +++ b/desktop/src/store/chatStore.ts @@ -306,15 +306,51 @@ setMessageStoreChatStore({ }); // 2. Inject chatStore into streamStore for message mutations -injectChatStore({ - addMessage: (msg) => useChatStore.getState().addMessage(msg as Message), - updateMessages: (updater) => { +// Uses microtask batching to prevent React "Maximum update depth exceeded" when +// the LLM emits many deltas per frame (e.g. Kimi thinking model). +// Non-delta updates (onComplete etc.) flush immediately via _flushPending(). +// eslint-disable-next-line @typescript-eslint/no-explicit-any +let _pendingUpdater: ((msgs: any[]) => any[]) | null = null; +let _microtaskScheduled = false; + +function _flushPending() { + _microtaskScheduled = false; + const batchedUpdater = _pendingUpdater; + _pendingUpdater = null; + if (batchedUpdater) { const msgs = useChatStore.getState().messages as unknown[]; - const updated = updater(msgs as Parameters<typeof updater>[0]); + const updated = batchedUpdater(msgs); useChatStore.setState({ messages: updated as Message[] }); + } +} + +injectChatStore({ + addMessage: (msg) => { + // addMessage must be synchronous for immediate visibility + _flushPending(); // flush any pending batched updates first + useChatStore.getState().addMessage(msg as Message); + }, + updateMessages: (updater) => { + // Accumulate updaters — only the latest state matters + const prevUpdater = _pendingUpdater; + _pendingUpdater = prevUpdater + ? 
(msgs: unknown[]) => updater(prevUpdater(msgs) as Parameters<typeof updater>[0]) : updater; + + if (!_microtaskScheduled) { + _microtaskScheduled = true; + void Promise.resolve().then(_flushPending); + } + }, + getMessages: () => { + // Flush pending updates before reading to ensure consistency + _flushPending(); + return useChatStore.getState().messages; + }, + setChatStoreState: (partial) => { + _flushPending(); + useChatStore.setState(partial as Partial); }, - getMessages: () => useChatStore.getState().messages, - setChatStoreState: (partial) => useChatStore.setState(partial as Partial), }); // 3. Sync streamStore state to chatStore mirrors