# zclaw_openfang/config/chinese-providers.toml
# (commit 0d4fa96b82 — "refactor: 统一项目名称从OpenFang到ZCLAW")
# ZCLAW Chinese LLM Providers Configuration
# ZCLAW TOML 格式的中文模型提供商配置
#
# 使用方法:
# 1. 复制此文件到 ~/.zclaw/config.d/ 目录
# 2. 或者将内容追加到 ~/.zclaw/config.toml
# 3. 设置环境变量: ZHIPU_API_KEY, QWEN_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY,
#    DEEPSEEK_API_KEY, BAIDU_API_KEY, SPARK_API_KEY
# Zhipu AI (GLM) — https://open.bigmodel.cn/
[[llm.providers]]
name = "zhipu"
display_name = "智谱 AI (Zhipu GLM)"
# "${...}" placeholders are expanded by ZCLAW at load time;
# TOML itself has no variable interpolation.
api_key = "${ZHIPU_API_KEY}"
base_url = "https://open.bigmodel.cn/api/paas/v4"

[[llm.providers.models]]
id = "glm-4-plus"
alias = "GLM-4-Plus"
context_window = 128000  # tokens
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "glm-4-flash"
alias = "GLM-4-Flash"
context_window = 128000
max_output_tokens = 4096
supports_streaming = true

# Multimodal (image input) variant.
[[llm.providers.models]]
id = "glm-4v-plus"
alias = "GLM-4V-Plus (视觉)"
context_window = 128000
max_output_tokens = 4096
supports_vision = true
supports_streaming = true

# Reasoning-oriented variant; larger output budget.
[[llm.providers.models]]
id = "glm-z1-airx"
alias = "GLM-Z1-AirX (推理)"
context_window = 128000
max_output_tokens = 16384
supports_streaming = true
# Tongyi Qianwen (Qwen, Alibaba Cloud) — https://dashscope.aliyun.com/
[[llm.providers]]
name = "qwen"
display_name = "通义千问 (Qwen)"
api_key = "${QWEN_API_KEY}"
# OpenAI-compatible mode of the DashScope API.
base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"

[[llm.providers.models]]
id = "qwen-max"
alias = "Qwen-Max"
context_window = 32768  # tokens
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "qwen-plus"
alias = "Qwen-Plus"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "qwen-turbo"
alias = "Qwen-Turbo"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

# Multimodal (image input) variant.
[[llm.providers.models]]
id = "qwen-vl-max"
alias = "Qwen-VL-Max (视觉)"
context_window = 32768
max_output_tokens = 8192
supports_vision = true
supports_streaming = true

# Long-context variant (1M-token window).
[[llm.providers.models]]
id = "qwen-long"
alias = "Qwen-Long (长上下文)"
context_window = 1000000
max_output_tokens = 10000
supports_streaming = true
# Kimi / Moonshot AI — https://moonshot.cn/
[[llm.providers]]
name = "kimi"
display_name = "Kimi (Moonshot)"
api_key = "${KIMI_API_KEY}"
base_url = "https://api.moonshot.cn/v1"

[[llm.providers.models]]
id = "moonshot-v1-8k"
alias = "Kimi (8K)"
context_window = 8192  # tokens
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "moonshot-v1-32k"
alias = "Kimi (32K)"
context_window = 32768
max_output_tokens = 4096
supports_streaming = true

[[llm.providers.models]]
id = "moonshot-v1-128k"
alias = "Kimi (128K)"
context_window = 131072
max_output_tokens = 4096
supports_streaming = true
# MiniMax — https://www.minimaxi.com/
[[llm.providers]]
name = "minimax"
display_name = "MiniMax"
api_key = "${MINIMAX_API_KEY}"
base_url = "https://api.minimax.chat/v1"

[[llm.providers.models]]
id = "abab6.5s-chat"
alias = "MiniMax-6.5s"
context_window = 245000  # tokens
max_output_tokens = 16384
supports_streaming = true

[[llm.providers.models]]
id = "abab6.5g-chat"
alias = "MiniMax-6.5g"
context_window = 128000
max_output_tokens = 8192
supports_streaming = true

[[llm.providers.models]]
id = "abab5.5-chat"
alias = "MiniMax-5.5"
context_window = 16384
max_output_tokens = 4096
supports_streaming = true
# DeepSeek — https://www.deepseek.com/
[[llm.providers]]
name = "deepseek"
display_name = "DeepSeek"
api_key = "${DEEPSEEK_API_KEY}"
base_url = "https://api.deepseek.com/v1"

[[llm.providers.models]]
id = "deepseek-chat"
alias = "DeepSeek Chat"
context_window = 64000  # tokens
max_output_tokens = 4096
supports_streaming = true

# Reasoning model (R1); larger output budget.
[[llm.providers.models]]
id = "deepseek-reasoner"
alias = "DeepSeek Reasoner (R1)"
context_window = 64000
max_output_tokens = 8192
supports_streaming = true
# Baidu ERNIE — https://cloud.baidu.com/
[[llm.providers]]
name = "baidu"
display_name = "百度文心 (ERNIE)"
api_key = "${BAIDU_API_KEY}"
# NOTE(review): this is the legacy ERNIE Workshop RPC endpoint, which
# historically authenticates via an OAuth access_token query parameter
# rather than a bearer API key like the other providers here — verify
# that ZCLAW's generic api_key handling actually works against it.
base_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat"

[[llm.providers.models]]
id = "ernie-4.0-8k"
alias = "ERNIE-4.0 (8K)"
context_window = 8192  # tokens
max_output_tokens = 2048
supports_streaming = true

[[llm.providers.models]]
id = "ernie-3.5-8k"
alias = "ERNIE-3.5 (8K)"
context_window = 8192
max_output_tokens = 2048
supports_streaming = true
# iFlytek Spark — https://xinghuo.xfyun.cn/
[[llm.providers]]
name = "spark"
display_name = "讯飞星火 (Spark)"
api_key = "${SPARK_API_KEY}"
base_url = "https://spark-api-open.xf-yun.com/v1"

[[llm.providers.models]]
id = "generalv3.5"
alias = "星火 3.5"
context_window = 8192  # tokens
max_output_tokens = 4096
supports_streaming = true

# NOTE(review): confirm "generalv4.0" is accepted by the HTTP endpoint —
# iFlytek's docs list "4.0Ultra" as the model id for Spark 4.0 Ultra.
[[llm.providers.models]]
id = "generalv4.0"
alias = "星火 4.0"
context_window = 8192
max_output_tokens = 4096
supports_streaming = true