# NOTE: Zhipu AI deprecated glm-4-flash, causing 404 errors on all chat
# requests. All references were updated accordingly:
#   - config:   glm-4-flash -> glm-4-flash-250414; glm-z1-flash added
#   - frontend: defaultModel, conversationStore, ChatArea fallback, ModelsAPI
# ZCLAW Chinese LLM Providers Configuration
# Chinese LLM provider configuration for ZCLAW, in TOML format.
#
# Usage:
# 1. Copy this file into the ~/.zclaw/config.d/ directory
# 2. Or append its contents to ~/.zclaw/config.toml
# 3. Set the environment variables: ZHIPU_API_KEY, QWEN_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY

# ============================================================
# Zhipu AI (GLM)
# https://open.bigmodel.cn/
# ============================================================

[[llm.providers]]
name = "zhipu"
display_name = "智谱 AI (Zhipu GLM)"
# "${ZHIPU_API_KEY}" is a literal string to TOML (no interpolation);
# the application substitutes the environment variable at load time.
api_key = "${ZHIPU_API_KEY}"
base_url = "https://open.bigmodel.cn/api/paas/v4"

[[llm.providers.models]]
id = "glm-4-plus"
alias = "GLM-4-Plus"
context_window = 128000
max_output_tokens = 4096
supports_streaming = true

# glm-4-flash was deprecated upstream (404 on chat requests);
# the dated snapshot id must be used instead.
[[llm.providers.models]]
id = "glm-4-flash-250414"
alias = "GLM-4-Flash (免费)"
context_window = 128000
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "glm-z1-flash"
alias = "GLM-Z1-Flash (免费推理)"
context_window = 128000
max_output_tokens = 16384
supports_streaming = true

[[llm.providers.models]]
id = "glm-4v-plus"
alias = "GLM-4V-Plus (视觉)"
context_window = 128000
max_output_tokens = 4096
supports_vision = true
supports_streaming = true

[[llm.providers.models]]
id = "glm-z1-airx"
alias = "GLM-Z1-AirX (推理)"
context_window = 128000
max_output_tokens = 16384
supports_streaming = true
# ============================================================
# Tongyi Qianwen (Qwen / Alibaba Cloud)
# https://dashscope.aliyun.com/
# ============================================================

[[llm.providers]]
name = "qwen"
display_name = "通义千问 (Qwen)"
# "${QWEN_API_KEY}" is substituted by the application at load time.
api_key = "${QWEN_API_KEY}"
base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"

[[llm.providers.models]]
id = "qwen-max"
alias = "Qwen-Max"
context_window = 32768
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "qwen-plus"
alias = "Qwen-Plus"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "qwen-turbo"
alias = "Qwen-Turbo"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "qwen-vl-max"
alias = "Qwen-VL-Max (视觉)"
context_window = 32768
max_output_tokens = 8192
supports_vision = true
supports_streaming = true

[[llm.providers.models]]
id = "qwen-long"
alias = "Qwen-Long (长上下文)"
context_window = 1000000
max_output_tokens = 10000
supports_streaming = true
# ============================================================
# Kimi / Moonshot AI
# https://moonshot.cn/
# ============================================================

[[llm.providers]]
name = "kimi"
display_name = "Kimi (Moonshot)"
# "${KIMI_API_KEY}" is substituted by the application at load time.
api_key = "${KIMI_API_KEY}"
base_url = "https://api.moonshot.cn/v1"

[[llm.providers.models]]
id = "moonshot-v1-8k"
alias = "Kimi (8K)"
context_window = 8192
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "moonshot-v1-32k"
alias = "Kimi (32K)"
context_window = 32768
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "moonshot-v1-128k"
alias = "Kimi (128K)"
context_window = 131072
max_output_tokens = 4096
supports_streaming = true
# ============================================================
# MiniMax
# https://www.minimaxi.com/
# ============================================================

[[llm.providers]]
name = "minimax"
display_name = "MiniMax"
# "${MINIMAX_API_KEY}" is substituted by the application at load time.
api_key = "${MINIMAX_API_KEY}"
base_url = "https://api.minimax.chat/v1"

[[llm.providers.models]]
id = "abab6.5s-chat"
alias = "MiniMax-6.5s"
context_window = 245000
max_output_tokens = 16384
supports_streaming = true

[[llm.providers.models]]
id = "abab6.5g-chat"
alias = "MiniMax-6.5g"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "abab5.5-chat"
alias = "MiniMax-5.5"
context_window = 16384
max_output_tokens = 4096
supports_streaming = true
# ============================================================
# DeepSeek
# https://www.deepseek.com/
# ============================================================

[[llm.providers]]
name = "deepseek"
display_name = "DeepSeek"
# "${DEEPSEEK_API_KEY}" is substituted by the application at load time.
api_key = "${DEEPSEEK_API_KEY}"
base_url = "https://api.deepseek.com/v1"

[[llm.providers.models]]
id = "deepseek-chat"
alias = "DeepSeek Chat"
context_window = 64000
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "deepseek-reasoner"
alias = "DeepSeek Reasoner (R1)"
context_window = 64000
max_output_tokens = 8192
supports_streaming = true
# ============================================================
# Baidu ERNIE (Wenxin Yiyan)
# https://cloud.baidu.com/
# ============================================================

[[llm.providers]]
name = "baidu"
display_name = "百度文心 (ERNIE)"
# "${BAIDU_API_KEY}" is substituted by the application at load time.
api_key = "${BAIDU_API_KEY}"
base_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat"

[[llm.providers.models]]
id = "ernie-4.0-8k"
alias = "ERNIE-4.0 (8K)"
context_window = 8192
max_output_tokens = 2048
supports_streaming = true

[[llm.providers.models]]
id = "ernie-3.5-8k"
alias = "ERNIE-3.5 (8K)"
context_window = 8192
max_output_tokens = 2048
supports_streaming = true
# ============================================================
# iFlytek Spark
# https://xinghuo.xfyun.cn/
# ============================================================

[[llm.providers]]
name = "spark"
display_name = "讯飞星火 (Spark)"
# "${SPARK_API_KEY}" is substituted by the application at load time.
api_key = "${SPARK_API_KEY}"
base_url = "https://spark-api-open.xf-yun.com/v1"

[[llm.providers.models]]
id = "generalv3.5"
alias = "星火 3.5"
context_window = 8192
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "generalv4.0"
alias = "星火 4.0"
context_window = 8192
max_output_tokens = 4096
supports_streaming = true