chore: commit all work in progress (SaaS backend enhancements, Admin UI, desktop integration)

Covers extensive SaaS platform improvements, Admin console updates, desktop
integration refinements, documentation sync, and test file refactoring.
Prepares a clean working tree for QA testing.
iven
2026-03-29 10:46:26 +08:00
parent 9a5fad2b59
commit 5fdf96c3f5
268 changed files with 22011 additions and 3886 deletions


@@ -342,13 +342,9 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
latencyMs,
};
} catch (err) {
console.warn('[LLMService] Kernel chat failed, falling back to mock:', err);
// Return empty response instead of throwing
return {
content: '',
tokensUsed: { input: 0, output: 0 },
latencyMs: Date.now() - startTime,
};
console.error('[LLMService] Kernel chat failed:', err);
const message = err instanceof Error ? err.message : String(err);
throw new Error(`[Gateway] Kernel chat failed: ${message}`);
}
}
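
With this change the Gateway adapter no longer degrades to an empty response on kernel failure; the error is rethrown with context, so callers decide how to degrade. A minimal caller-side sketch (illustrative only; reflectSafely is not part of this commit):

async function reflectSafely(context: string): Promise<string | null> {
  try {
    return await llmReflect(context); // exported further down in this file
  } catch (err) {
    console.warn('[Reflection] skipped, kernel chat unavailable:', err);
    return null; // degrade gracefully at the call site instead of inside the adapter
  }
}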
@@ -470,7 +466,7 @@ class SaasLLMAdapter implements LLMServiceAdapter {
const data = await response.json();
const latencyMs = Date.now() - startTime;
return {
const result = {
content: data.choices?.[0]?.message?.content || '',
tokensUsed: {
input: data.usage?.prompt_tokens || 0,
@@ -479,6 +475,19 @@ class SaasLLMAdapter implements LLMServiceAdapter {
model: data.model,
latencyMs,
};
// Record telemetry for SaaS relay usage
try {
const { recordLLMUsage } = await import('./telemetry-collector');
recordLLMUsage(
result.model || 'saas-relay',
result.tokensUsed.input,
result.tokensUsed.output,
{ latencyMs, success: true, connectionMode: 'saas' },
);
} catch { /* non-blocking */ }
return result;
}
isAvailable(): boolean {
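
The telemetry hook is imported lazily and wrapped in try/catch so a missing collector never blocks the request path. The call sites imply roughly this shape for recordLLMUsage in ./telemetry-collector (an assumption inferred from the arguments shown here, not the module's actual code):

export function recordLLMUsage(
  model: string,
  inputTokens: number,
  outputTokens: number,
  meta: {
    latencyMs?: number;
    success?: boolean;
    errorType?: string;
    connectionMode?: string;
  },
): void {
  // Assumed behaviour: aggregate counters locally and flush to SaaS on a timer.
}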
@@ -559,12 +568,17 @@ export function saveConfig(config: LLMConfig): void {
delete safeConfig.apiKey;
localStorage.setItem(LLM_CONFIG_KEY, JSON.stringify(safeConfig));
// Mark config as dirty for SaaS push sync
localStorage.setItem('zclaw-config-dirty.llm.default', '1');
resetLLMAdapter();
}
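
saveConfig only flags the change here; actually pushing it to SaaS is left to a separate sync pass not shown in this diff. A sketch of how such a pass might consume the flag (hypothetical helper; the push call is supplied by the caller):

const LLM_CONFIG_DIRTY_KEY = 'zclaw-config-dirty.llm.default';

async function pushLLMConfigIfDirty(push: (cfg: unknown) => Promise<void>): Promise<void> {
  if (localStorage.getItem(LLM_CONFIG_DIRTY_KEY) !== '1') return;
  const raw = localStorage.getItem(LLM_CONFIG_KEY);
  if (!raw) return;
  await push(JSON.parse(raw));                   // caller supplies the actual SaaS call
  localStorage.removeItem(LLM_CONFIG_DIRTY_KEY); // clear only after a successful push
}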
// === Prompt Templates ===
export const LLM_PROMPTS = {
// Hard-coded defaults: the last-resort fallback when SaaS is unavailable and there is no local cache
const HARDCODED_PROMPTS: Record<string, { system: string; user: (arg: string) => string }> = {
reflection: {
system: `你是一个 AI Agent 的自我反思引擎。分析最近的对话历史,识别行为模式,并生成改进建议。
@@ -587,11 +601,7 @@ export const LLM_PROMPTS = {
],
"identityProposals": []
}`,
user: (context: string) => `分析以下对话历史,进行自我反思:
${context}
请识别行为模式(积极和消极),并提供具体的改进建议。`,
user: (context: string) => `分析以下对话历史,进行自我反思:\n\n${context}\n\n请识别行为模式(积极和消极),并提供具体的改进建议。`,
},
compaction: {
@@ -603,9 +613,7 @@ ${context}
3. 保留未完成的任务
4. 保持时间顺序
5. 摘要应能在后续对话中替代原始内容`,
user: (messages: string) => `请将以下对话压缩为简洁摘要,保留关键信息:
${messages}`,
user: (messages: string) => `请将以下对话压缩为简洁摘要,保留关键信息:\n\n${messages}`,
},
extraction: {
@@ -626,14 +634,200 @@ ${messages}`,
"tags": ["标签1", "标签2"]
}
]`,
user: (conversation: string) => `从以下对话中提取值得长期记住的信息:
${conversation}
如果没有值得记忆的内容,返回空数组 []。`,
user: (conversation: string) => `从以下对话中提取值得长期记住的信息:\n\n${conversation}\n\n如果没有值得记忆的内容,返回空数组 []。`,
},
};
// === Prompt Cache (SaaS OTA) ===
const PROMPT_CACHE_KEY = 'zclaw-prompt-cache';
interface CachedPrompt {
name: string;
version: number;
source: string;
system: string;
userTemplate: string | null;
syncedAt: string;
}
/** Load the locally cached prompt map */
function loadPromptCache(): Record<string, CachedPrompt> {
if (typeof window === 'undefined') return {};
try {
const raw = localStorage.getItem(PROMPT_CACHE_KEY);
return raw ? JSON.parse(raw) : {};
} catch {
return {};
}
}
/** Persist the prompt cache to localStorage */
function savePromptCache(cache: Record<string, CachedPrompt>): void {
if (typeof window === 'undefined') return;
localStorage.setItem(PROMPT_CACHE_KEY, JSON.stringify(cache));
}
/**
* Get the system prompt for the named template.
* Priority: local cache → hard-coded default.
*/
export function getSystemPrompt(name: string): string {
const cache = loadPromptCache();
if (cache[name]?.system) {
return cache[name].system;
}
return HARDCODED_PROMPTS[name]?.system ?? '';
}
/**
* Get the user prompt template for the named template.
* Priority: local cache → hard-coded default.
*/
export function getUserPromptTemplate(name: string): string | ((arg: string) => string) | null {
const cache = loadPromptCache();
if (cache[name]) {
const tmpl = cache[name].userTemplate;
if (tmpl) return tmpl;
}
return HARDCODED_PROMPTS[name]?.user ?? null;
}
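// Because a cached template is a plain string while the hard-coded fallback is a
// function, every call site further down guards with typeof. A small helper could
// centralise that branch (sketch only; renderUserPrompt is not part of this commit,
// and it mirrors the call sites in using a cached string template verbatim):
function renderUserPrompt(name: string, input: string): string {
  const tmpl = getUserPromptTemplate(name);
  if (typeof tmpl === 'function') return tmpl(input);
  return tmpl ?? input; // cached string templates are used as-is, as in the helpers below
}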
/** Get the current version of a prompt (from the local cache) */
export function getPromptVersion(name: string): number {
const cache = loadPromptCache();
return cache[name]?.version ?? 0;
}
/** Get all locally cached prompt versions (used for OTA checks) */
export function getAllPromptVersions(): Record<string, number> {
const cache = loadPromptCache();
const versions: Record<string, number> = {};
for (const [name, entry] of Object.entries(cache)) {
versions[name] = entry.version;
}
return versions;
}
/**
* Apply SaaS OTA updates to the local cache.
* @param updates List of updates pulled from SaaS
*/
export function applyPromptUpdates(updates: Array<{
name: string;
version: number;
system_prompt: string;
user_prompt_template: string | null;
source: string;
changelog?: string | null;
}>): number {
const cache = loadPromptCache();
let applied = 0;
for (const update of updates) {
cache[update.name] = {
name: update.name,
version: update.version,
source: update.source,
system: update.system_prompt,
userTemplate: update.user_prompt_template,
syncedAt: new Date().toISOString(),
};
applied++;
}
if (applied > 0) {
savePromptCache(cache);
}
return applied;
}
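// For reference, applying a single OTA entry looks like this (values are
// hypothetical; the field names follow the parameter type above):
//
//   const applied = applyPromptUpdates([
//     {
//       name: 'reflection',
//       version: 3,
//       system_prompt: '<updated system prompt text>',
//       user_prompt_template: null, // null keeps the hard-coded user template as fallback
//       source: 'saas',
//       changelog: 'tightened JSON output format',
//     },
//   ]);
//   // applied === 1; the entry is persisted under zclaw-prompt-cache with syncedAt set to now.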
/**
* Check SaaS for prompt updates asynchronously in the background.
* Called once at startup and then every 30 minutes.
*/
let promptSyncTimer: ReturnType<typeof setInterval> | null = null;
export function startPromptOTASync(deviceId: string): void {
if (promptSyncTimer) return; // already started
if (typeof window === 'undefined') return;
const doSync = async () => {
try {
const { saasClient } = await import('./saas-client');
const { useSaaSStore } = await import('../store/saasStore');
const { saasUrl, authToken } = useSaaSStore.getState();
if (!saasUrl || !authToken) return;
saasClient.setBaseUrl(saasUrl);
saasClient.setToken(authToken);
const versions = getAllPromptVersions();
const result = await saasClient.checkPromptUpdates(deviceId, versions);
if (result.updates.length > 0) {
const applied = applyPromptUpdates(result.updates);
if (applied > 0) {
console.log(`[Prompt OTA] 已更新 ${applied} 个提示词模板`);
}
}
} catch (err) {
// Fail silently; do not disrupt normal use
console.debug('[Prompt OTA] 检查更新失败:', err);
}
};
// Run once immediately
doSync();
// Then check every 30 minutes
promptSyncTimer = setInterval(doSync, 30 * 60 * 1000);
}
export function stopPromptOTASync(): void {
if (promptSyncTimer) {
clearInterval(promptSyncTimer);
promptSyncTimer = null;
}
}
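// Typical wiring is a single call during app bootstrap plus a matching stop on
// teardown (sketch only; getDeviceId() is a placeholder for however the app
// derives a stable device identifier):
//
//   startPromptOTASync(getDeviceId());
//
//   window.addEventListener('beforeunload', () => {
//     stopPromptOTASync();
//   });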
// Keep the backward-compatible LLM_PROMPTS export (reads go through the prompt cache)
export const LLM_PROMPTS = {
get reflection() { return { system: getSystemPrompt('reflection'), user: getUserPromptTemplate('reflection')! }; },
get compaction() { return { system: getSystemPrompt('compaction'), user: getUserPromptTemplate('compaction')! }; },
get extraction() { return { system: getSystemPrompt('extraction'), user: getUserPromptTemplate('extraction')! }; },
};
// === Telemetry Integration ===
/**
* Record a single LLM call result with the telemetry collector.
* Should be called after every adapter's complete() returns.
*/
function trackLLMCall(
adapter: LLMServiceAdapter,
response: LLMResponse,
error?: unknown,
): void {
try {
const { recordLLMUsage } = require('./telemetry-collector');
recordLLMUsage(
response.model || adapter.getProvider(),
response.tokensUsed?.input ?? 0,
response.tokensUsed?.output ?? 0,
{
latencyMs: response.latencyMs,
success: !error,
errorType: error instanceof Error ? error.message.slice(0, 80) : undefined,
connectionMode: adapter.getProvider() === 'saas' ? 'saas' : 'tauri',
},
);
} catch {
// telemetry-collector may not be available (e.g., SSR)
}
}
// === Helper Functions ===
export async function llmReflect(context: string, adapter?: LLMServiceAdapter): Promise<string> {
@@ -641,9 +835,10 @@ export async function llmReflect(context: string, adapter?: LLMServiceAdapter):
const response = await llm.complete([
{ role: 'system', content: LLM_PROMPTS.reflection.system },
{ role: 'user', content: LLM_PROMPTS.reflection.user(context) },
{ role: 'user', content: typeof LLM_PROMPTS.reflection.user === 'function' ? LLM_PROMPTS.reflection.user(context) : LLM_PROMPTS.reflection.user },
]);
trackLLMCall(llm, response);
return response.content;
}
@@ -652,9 +847,10 @@ export async function llmCompact(messages: string, adapter?: LLMServiceAdapter):
const response = await llm.complete([
{ role: 'system', content: LLM_PROMPTS.compaction.system },
{ role: 'user', content: LLM_PROMPTS.compaction.user(messages) },
{ role: 'user', content: typeof LLM_PROMPTS.compaction.user === 'function' ? LLM_PROMPTS.compaction.user(messages) : LLM_PROMPTS.compaction.user },
]);
trackLLMCall(llm, response);
return response.content;
}
@@ -666,8 +862,9 @@ export async function llmExtract(
const response = await llm.complete([
{ role: 'system', content: LLM_PROMPTS.extraction.system },
{ role: 'user', content: LLM_PROMPTS.extraction.user(conversation) },
{ role: 'user', content: typeof LLM_PROMPTS.extraction.user === 'function' ? LLM_PROMPTS.extraction.user(conversation) : LLM_PROMPTS.extraction.user },
]);
trackLLMCall(llm, response);
return response.content;
}
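
A usage sketch for the extraction helper, assuming the model honours the JSON-array format requested by the extraction prompt (extractMemories and its result type are illustrative, derived from the content/tags fields shown in that prompt; callers also need to handle errors now that adapter failures are rethrown):

interface ExtractedMemory {
  content: string;
  tags: string[];
}

async function extractMemories(conversation: string): Promise<ExtractedMemory[]> {
  try {
    const raw = await llmExtract(conversation);
    const parsed = JSON.parse(raw);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return []; // non-JSON output or adapter failure: treat as nothing worth remembering
  }
}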