fix: migrate glm-4-flash to glm-4-flash-250414 (model deprecated by Zhipu)
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
Zhipu AI has deprecated glm-4-flash, causing 404 errors on all chat requests. Updated all references: - config: glm-4-flash → glm-4-flash-250414, added glm-z1-flash - frontend: defaultModel, conversationStore, ChatArea fallback, ModelsAPI
This commit is contained in:
@@ -25,12 +25,19 @@ max_output_tokens = 4096
 supports_streaming = true
 
 [[llm.providers.models]]
-id = "glm-4-flash"
-alias = "GLM-4-Flash"
+id = "glm-4-flash-250414"
+alias = "GLM-4-Flash (免费)"
 context_window = 128000
 max_output_tokens = 4096
 supports_streaming = true
+
+[[llm.providers.models]]
+id = "glm-z1-flash"
+alias = "GLM-Z1-Flash (免费推理)"
+context_window = 128000
+max_output_tokens = 16384
+supports_streaming = true
 
 [[llm.providers.models]]
 id = "glm-4v-plus"
 alias = "GLM-4V-Plus (视觉)"
@@ -129,7 +129,7 @@ retry_delay = "1s"
 
 [llm.aliases]
 # 智谱 GLM 模型 (使用正确的 API 模型 ID)
-"glm-4-flash" = "zhipu/glm-4-flash"
+"glm-4-flash" = "zhipu/glm-4-flash-250414"
 "glm-4-plus" = "zhipu/glm-4-plus"
 "glm-4.5" = "zhipu/glm-4.5"
 # 其他模型
Reference in New Issue
Block a user