Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
Phase 1.0 — Butler Mode UI: - Hide "自动化" and "技能市场" entries from sidebar navigation - Remove AutomationPanel and SkillMarket view rendering from App.tsx - Simplify MainViewType to only 'chat' - Main interface is now: chat + conversation list + detail panel only Phase 1.1 — Mode Differentiation: - Add subagent_enabled field to ChatModeConfig (Rust), StreamChatRequest (Tauri), gateway-client, kernel-client, saas-relay-client, and streamStore - TaskTool is now only registered when subagent_enabled=true (Ultra mode) - System prompt includes sub-agent delegation instructions only in Ultra mode - Frontend transmits subagent_enabled from ChatMode config through the full stack This connects the 4-tier mode selector (Flash/Thinking/Pro/Ultra) to actual backend behavioral differences — Ultra mode now truly enables sub-agent delegation.
242 lines
6.9 KiB
TypeScript
242 lines
6.9 KiB
TypeScript
/**
 * kernel-chat.ts - Chat & streaming methods for KernelClient
 *
 * Installed onto KernelClient.prototype via installChatMethods().
 */
import { invoke } from '@tauri-apps/api/core';
|
|
import { listen, type UnlistenFn } from '@tauri-apps/api/event';
|
|
import { createLogger } from './logger';
|
|
import type { KernelClient } from './kernel-client';
|
|
import type { ChatResponse, StreamCallbacks, StreamChunkPayload } from './kernel-types';
|
|
|
|
// Module-scoped logger shared by every method this file installs;
// tag matches the class they are attached to for consistent log output.
const log = createLogger('KernelClient');
export function installChatMethods(ClientClass: { prototype: KernelClient }): void {
|
|
const proto = ClientClass.prototype as unknown as Record<string, unknown>;
|
|
|
|
/**
|
|
* Send a message and get a response
|
|
*/
|
|
proto.chat = async function (
|
|
this: KernelClient,
|
|
message: string,
|
|
opts?: {
|
|
sessionKey?: string;
|
|
agentId?: string;
|
|
}
|
|
): Promise<{ runId: string; sessionId?: string; response?: string }> {
|
|
const agentId = opts?.agentId || this.getDefaultAgentId();
|
|
|
|
if (!agentId) {
|
|
throw new Error('No agent available');
|
|
}
|
|
|
|
const response = await invoke<ChatResponse>('agent_chat', {
|
|
request: {
|
|
agentId,
|
|
message,
|
|
},
|
|
});
|
|
|
|
return {
|
|
runId: `run_${Date.now()}`,
|
|
sessionId: opts?.sessionKey,
|
|
response: response.content,
|
|
};
|
|
};
|
|
|
|
/**
|
|
* Send a message with streaming response via Tauri events
|
|
*/
|
|
proto.chatStream = async function (
|
|
this: KernelClient,
|
|
message: string,
|
|
callbacks: StreamCallbacks,
|
|
opts?: {
|
|
sessionKey?: string;
|
|
agentId?: string;
|
|
thinking_enabled?: boolean;
|
|
reasoning_effort?: string;
|
|
plan_mode?: boolean;
|
|
subagent_enabled?: boolean;
|
|
}
|
|
): Promise<{ runId: string }> {
|
|
const runId = crypto.randomUUID();
|
|
const sessionId = opts?.sessionKey || runId;
|
|
const agentId = opts?.agentId || this.getDefaultAgentId();
|
|
|
|
if (!agentId) {
|
|
callbacks.onError('No agent available');
|
|
return { runId };
|
|
}
|
|
|
|
let unlisten: UnlistenFn | null = null;
|
|
let completed = false;
|
|
// Stream timeout — prevent hanging forever if backend never sends complete/error
|
|
const STREAM_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes
|
|
const timeoutId = setTimeout(() => {
|
|
if (!completed) {
|
|
completed = true;
|
|
log.warn('Stream timeout — no complete/error event received');
|
|
callbacks.onError('响应超时,请重试');
|
|
if (unlisten) {
|
|
unlisten();
|
|
unlisten = null;
|
|
}
|
|
}
|
|
}, STREAM_TIMEOUT_MS);
|
|
|
|
try {
|
|
// Set up event listener for stream chunks
|
|
unlisten = await listen<StreamChunkPayload>('stream:chunk', (event) => {
|
|
const payload = event.payload;
|
|
|
|
// Only process events for this session
|
|
if (payload.sessionId !== sessionId) {
|
|
return;
|
|
}
|
|
|
|
const streamEvent = payload.event;
|
|
|
|
switch (streamEvent.type) {
|
|
case 'delta':
|
|
callbacks.onDelta(streamEvent.delta);
|
|
break;
|
|
|
|
case 'thinkingDelta':
|
|
if (callbacks.onThinkingDelta) {
|
|
callbacks.onThinkingDelta(streamEvent.delta);
|
|
}
|
|
break;
|
|
|
|
case 'toolStart':
|
|
log.debug('Tool started:', streamEvent.name, streamEvent.input);
|
|
if (callbacks.onTool) {
|
|
callbacks.onTool(
|
|
streamEvent.name,
|
|
JSON.stringify(streamEvent.input),
|
|
''
|
|
);
|
|
}
|
|
break;
|
|
|
|
case 'toolEnd':
|
|
log.debug('Tool ended:', streamEvent.name, streamEvent.output);
|
|
if (callbacks.onTool) {
|
|
callbacks.onTool(
|
|
streamEvent.name,
|
|
'',
|
|
JSON.stringify(streamEvent.output)
|
|
);
|
|
}
|
|
break;
|
|
|
|
case 'handStart':
|
|
log.debug('Hand started:', streamEvent.name, streamEvent.params);
|
|
if (callbacks.onHand) {
|
|
callbacks.onHand(streamEvent.name, 'running', undefined);
|
|
}
|
|
break;
|
|
|
|
case 'handEnd':
|
|
log.debug('Hand ended:', streamEvent.name, streamEvent.result);
|
|
if (callbacks.onHand) {
|
|
callbacks.onHand(streamEvent.name, 'completed', streamEvent.result);
|
|
}
|
|
break;
|
|
|
|
case 'iterationStart':
|
|
log.debug('Iteration started:', streamEvent.iteration, '/', streamEvent.maxIterations);
|
|
// Don't need to notify user about iterations
|
|
break;
|
|
|
|
case 'complete':
|
|
log.debug('Stream complete:', streamEvent.inputTokens, streamEvent.outputTokens);
|
|
completed = true;
|
|
clearTimeout(timeoutId);
|
|
callbacks.onComplete(streamEvent.inputTokens, streamEvent.outputTokens);
|
|
// Clean up listener
|
|
if (unlisten) {
|
|
unlisten();
|
|
unlisten = null;
|
|
}
|
|
break;
|
|
|
|
case 'error':
|
|
log.error('Stream error:', streamEvent.message);
|
|
completed = true;
|
|
clearTimeout(timeoutId);
|
|
callbacks.onError(streamEvent.message);
|
|
// Clean up listener
|
|
if (unlisten) {
|
|
unlisten();
|
|
unlisten = null;
|
|
}
|
|
break;
|
|
}
|
|
});
|
|
|
|
// Invoke the streaming command
|
|
await invoke('agent_chat_stream', {
|
|
request: {
|
|
agentId,
|
|
sessionId,
|
|
message,
|
|
thinkingEnabled: opts?.thinking_enabled,
|
|
reasoningEffort: opts?.reasoning_effort,
|
|
planMode: opts?.plan_mode,
|
|
subagentEnabled: opts?.subagent_enabled,
|
|
},
|
|
});
|
|
} catch (err: unknown) {
|
|
const errorMessage = err instanceof Error ? err.message : String(err);
|
|
callbacks.onError(errorMessage);
|
|
|
|
// Clean up listener on error
|
|
if (unlisten) {
|
|
unlisten();
|
|
}
|
|
}
|
|
|
|
return { runId };
|
|
};
|
|
|
|
/**
|
|
* Cancel an active stream by session ID.
|
|
* Invokes the Rust `cancel_stream` command which sets the AtomicBool flag
|
|
* checked by the spawned streaming task each iteration.
|
|
*/
|
|
proto.cancelStream = async function (this: KernelClient, sessionId: string): Promise<void> {
|
|
try {
|
|
await invoke('cancel_stream', { sessionId });
|
|
log.debug('Cancel stream requested for session:', sessionId);
|
|
} catch (err) {
|
|
log.warn('Failed to cancel stream:', err);
|
|
}
|
|
};
|
|
|
|
// ─── Default Agent ───
|
|
|
|
/**
|
|
* Fetch default agent ID (returns current default)
|
|
*/
|
|
proto.fetchDefaultAgentId = async function (this: KernelClient): Promise<string | null> {
|
|
return this.getDefaultAgentId();
|
|
};
|
|
|
|
/**
|
|
* Set default agent ID
|
|
*/
|
|
proto.setDefaultAgentId = function (this: KernelClient, agentId: string): void {
|
|
this.defaultAgentId = agentId;
|
|
};
|
|
|
|
/**
|
|
* Get default agent ID
|
|
*/
|
|
proto.getDefaultAgentId = function (this: KernelClient): string {
|
|
return this.defaultAgentId || '';
|
|
};
|
|
}
|