fix(chat): prevent React Maximum update depth exceeded during streaming
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
Root cause: Each LLM delta (text/thinking) triggered a synchronous setState via updateMessages → chatStore.setState. With Kimi thinking model emitting many deltas per frame, this caused a React render storm that hit the maximum update depth limit. Fix (two-layer approach): 1. streamStore: Buffer text/thinking deltas locally and flush to store via setTimeout(0), batching multiple deltas per frame 2. chatStore: Microtask batching in injectChatStore.updateMessages to coalesce rapid successive updates Verified: 2-round conversation (4 messages) with Kimi thinking model completes without crash. Previously crashed 100% on 2nd message.
This commit is contained in:
@@ -306,15 +306,51 @@ setMessageStoreChatStore({
|
||||
});
|
||||
|
||||
// 2. Inject chatStore into streamStore for message mutations
|
||||
injectChatStore({
|
||||
addMessage: (msg) => useChatStore.getState().addMessage(msg as Message),
|
||||
updateMessages: (updater) => {
|
||||
// Uses microtask batching to prevent React "Maximum update depth exceeded" when
|
||||
// the LLM emits many deltas per frame (e.g. Kimi thinking model).
|
||||
// Non-delta updates (onComplete etc.) flush immediately via _flushPending().
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let _pendingUpdater: ((msgs: any[]) => any[]) | null = null;
|
||||
let _microtaskScheduled = false;
|
||||
|
||||
function _flushPending() {
|
||||
_microtaskScheduled = false;
|
||||
const batchedUpdater = _pendingUpdater;
|
||||
_pendingUpdater = null;
|
||||
if (batchedUpdater) {
|
||||
const msgs = useChatStore.getState().messages as unknown[];
|
||||
const updated = updater(msgs as Parameters<typeof updater>[0]);
|
||||
const updated = batchedUpdater(msgs);
|
||||
useChatStore.setState({ messages: updated as Message[] });
|
||||
}
|
||||
}
|
||||
|
||||
injectChatStore({
|
||||
addMessage: (msg) => {
|
||||
// addMessage must be synchronous for immediate visibility
|
||||
_flushPending(); // flush any pending batched updates first
|
||||
useChatStore.getState().addMessage(msg as Message);
|
||||
},
|
||||
updateMessages: (updater) => {
|
||||
// Accumulate updaters — only the latest state matters
|
||||
const prevUpdater = _pendingUpdater;
|
||||
_pendingUpdater = prevUpdater
|
||||
? (msgs: unknown[]) => updater(prevUpdater(msgs) as Parameters<typeof updater>[0])
|
||||
: updater;
|
||||
|
||||
if (!_microtaskScheduled) {
|
||||
_microtaskScheduled = true;
|
||||
void Promise.resolve().then(_flushPending);
|
||||
}
|
||||
},
|
||||
getMessages: () => {
|
||||
// Flush pending updates before reading to ensure consistency
|
||||
_flushPending();
|
||||
return useChatStore.getState().messages;
|
||||
},
|
||||
setChatStoreState: (partial) => {
|
||||
_flushPending();
|
||||
useChatStore.setState(partial as Partial<ChatState>);
|
||||
},
|
||||
getMessages: () => useChatStore.getState().messages,
|
||||
setChatStoreState: (partial) => useChatStore.setState(partial as Partial<ChatState>),
|
||||
});
|
||||
|
||||
// 3. Sync streamStore state to chatStore mirrors
|
||||
|
||||
Reference in New Issue
Block a user