feat(saas): Phase 3 桌面端 SaaS 集成 — 客户端、Store、UI、LLM 适配器

- saas-client.ts: SaaS HTTP 客户端 (登录/注册/Token/模型列表/Chat Relay/配置同步)
- saasStore.ts: Zustand 状态管理 (登录态、连接模式、可用模型、localStorage 持久化)
- connectionStore.ts: 集成 SaaS 模式分支 (connect() 优先检查 SaaS 连接模式)
- llm-service.ts: SaasLLMAdapter 实现 (通过 SaaS Relay 代理 LLM 调用)
- SaaSLogin.tsx: 登录/注册表单 (服务器地址、用户名、密码、邮箱)
- SaaSStatus.tsx: 连接状态展示 (账号信息、健康检查、可用模型列表)
- SaaSSettings.tsx: SaaS 设置页面入口 (登录态切换、功能列表)
- SettingsLayout.tsx: 添加 SaaS 平台菜单项
- store/index.ts: 导出 useSaaSStore
This commit is contained in:
iven
2026-03-27 14:21:23 +08:00
parent a66b675675
commit 15450ca895
9 changed files with 1407 additions and 1 deletions

View File

@@ -18,7 +18,7 @@ import { DEFAULT_MODEL_ID, DEFAULT_OPENAI_BASE_URL } from '../constants/models';
// === Types ===
export type LLMProvider = 'openai' | 'volcengine' | 'gateway' | 'mock';
export type LLMProvider = 'openai' | 'volcengine' | 'gateway' | 'saas' | 'mock';
export interface LLMConfig {
provider: LLMProvider;
@@ -77,6 +77,12 @@ const DEFAULT_CONFIGS: Record<LLMProvider, LLMConfig> = {
temperature: 0.7,
timeout: 60000,
},
saas: {
provider: 'saas',
maxTokens: 4096,
temperature: 0.7,
timeout: 300000, // 5 min for streaming
},
mock: {
provider: 'mock',
maxTokens: 100,
@@ -412,6 +418,85 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
}
}
// === SaaS Relay Adapter (via SaaS backend) ===
class SaasLLMAdapter implements LLMServiceAdapter {
  /** Effective configuration: 'saas' defaults overlaid with caller-supplied values. */
  private readonly cfg: LLMConfig;

  constructor(config: LLMConfig) {
    this.cfg = { ...DEFAULT_CONFIGS.saas, ...config };
  }

  /**
   * Run a (non-streaming) chat completion through the SaaS relay.
   *
   * Reads the SaaS server URL and auth token from the SaaS store, configures
   * the shared client, posts an OpenAI-style request body, and maps the
   * OpenAI-shaped response into an LLMResponse.
   *
   * @param messages - conversation messages forwarded verbatim to the relay
   * @param options  - per-call overrides merged over the adapter's config
   * @returns content, token usage, reported model, and wall-clock latency
   * @throws Error when no SaaS login is present or the relay responds non-2xx
   */
  async complete(messages: LLMMessage[], options?: Partial<LLMConfig>): Promise<LLMResponse> {
    const merged = { ...this.cfg, ...options };
    const t0 = Date.now();

    // Dynamic import to avoid a circular dependency with the store module.
    const { useSaaSStore } = await import('../store/saasStore');
    const { saasUrl, authToken } = useSaaSStore.getState();
    if (!saasUrl || !authToken) {
      throw new Error('[SaaS] 未登录 SaaS 平台,请先在设置中登录');
    }

    // Also dynamically imported: the SaaSClient singleton.
    const { saasClient } = await import('./saas-client');
    saasClient.setBaseUrl(saasUrl);
    saasClient.setToken(authToken);

    // OpenAI-compatible request payload; streaming is disabled here.
    const payload = {
      model: merged.model || 'default',
      messages,
      max_tokens: merged.maxTokens || 4096,
      temperature: merged.temperature ?? 0.7,
      stream: false,
    };

    const res = await saasClient.chatCompletion(
      payload,
      AbortSignal.timeout(merged.timeout || 300000),
    );

    if (!res.ok) {
      // Body may not be JSON on failure; fall back to a synthetic error object.
      const err = await res.json().catch(() => ({
        error: 'unknown',
        message: `SaaS relay 请求失败 (${res.status})`,
      }));
      throw new Error(
        `[SaaS] ${err.message || err.error || `请求失败: ${res.status}`}`,
      );
    }

    const data = await res.json();
    return {
      content: data.choices?.[0]?.message?.content || '',
      tokensUsed: {
        input: data.usage?.prompt_tokens || 0,
        output: data.usage?.completion_tokens || 0,
      },
      model: data.model,
      latencyMs: Date.now() - t0,
    };
  }

  isAvailable(): boolean {
    // Synchronous availability probe: read the persisted token straight from
    // localStorage, since consulting the store would require an async import.
    // NOTE(review): assumes the store persists its token under this exact key
    // — confirm against saasStore's persistence config.
    try {
      return !!localStorage.getItem('zclaw-saas-token');
    } catch {
      return false;
    }
  }

  getProvider(): LLMProvider {
    return 'saas';
  }
}
// === Factory ===
// Module-level adapter cache (null until first created).
// NOTE(review): invalidation policy is not visible in this chunk — presumably
// createLLMAdapter reuses or replaces it; confirm against the full function.
let cachedAdapter: LLMServiceAdapter | null = null;
@@ -427,6 +512,8 @@ export function createLLMAdapter(config?: Partial<LLMConfig>): LLMServiceAdapter
return new VolcengineLLMAdapter(finalConfig);
case 'gateway':
return new GatewayLLMAdapter(finalConfig);
case 'saas':
return new SaasLLMAdapter(finalConfig);
case 'mock':
default:
return new MockLLMAdapter(finalConfig);