Files
zclaw_openfang/hands/quiz.HAND.toml
commit 3ff08faa56 — release(v0.2.0): streaming, MCP protocol, Browser Hand, security enhancements
## Major Features

### Streaming Response System
- Implement LlmDriver trait with `stream()` method returning async Stream
- Add SSE parsing for Anthropic and OpenAI API streaming
- Integrate Tauri event system for frontend streaming (`stream:chunk` events)
- Add StreamChunk types: Delta, ToolStart, ToolEnd, Complete, Error

### MCP Protocol Implementation
- Add MCP JSON-RPC 2.0 types (mcp_types.rs)
- Implement stdio-based MCP transport (mcp_transport.rs)
- Support tool discovery, execution, and resource operations

### Browser Hand Implementation
- Complete browser automation with Playwright-style actions
- Support Navigate, Click, Type, Scrape, Screenshot, Wait actions
- Add educational Hands: Whiteboard, Slideshow, Speech, Quiz

### Security Enhancements
- Implement command whitelist/blacklist for shell_exec tool
- Add SSRF protection with private IP blocking
- Create security.toml configuration file

## Test Improvements
- Fix test import paths (security-utils, setup)
- Fix vi.mock hoisting issues with vi.hoisted()
- Update test expectations for validateUrl and sanitizeFilename
- Add getUnsupportedLocalGatewayStatus mock

## Documentation Updates
- Update architecture documentation
- Improve configuration reference
- Add quick-start guide updates

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-24 03:24:24 +08:00

122 lines
2.6 KiB
TOML

# Quiz Hand - quiz generation and assessment capability pack.
#
# ZCLAW Hand configuration: provides quiz question generation,
# answer grading, and feedback capabilities.
[hand]
name = "quiz"
version = "1.0.0"
description = "测验能力包 - 生成测验题目、评估答案、提供反馈"
author = "ZCLAW Team"
# Hand category; groups this Hand with the other educational Hands.
type = "education"
# This Hand may run without prior operator approval.
requires_approval = false
timeout = 60  # presumably seconds — confirm against the Hand runtime
max_concurrent = 5  # maximum simultaneous invocations of this Hand
tags = ["quiz", "test", "assessment", "education", "learning", "evaluation"]
[hand.config]
# Question types this Hand knows how to generate.
supported_question_types = [
    "multiple_choice",
    "true_false",
    "fill_blank",
    "short_answer",
    "matching",
    "ordering",
    "essay",
]
# Default difficulty; one of: easy, medium, hard, adaptive.
default_difficulty = "medium"
# Number of questions produced per generation request.
default_question_count = 5
# Whether each question carries an explanation.
show_explanation = true
# Whether the correct answer is revealed.
show_correct_answer = true
# When feedback is shown: immediate, after_submit, after_all.
feedback_mode = "immediate"
# Grading strategy: exact, partial, rubric.
grading_mode = "exact"
# Pass threshold, as a percentage.
passing_score = 60
[hand.triggers]
# The Hand can be invoked manually; scheduled and webhook triggers are off.
manual = true
schedule = false
webhook = false
# Chat-intent trigger: fires when a chat message matches the pattern below.
[[hand.triggers.events]]
type = "chat.intent"
# Regex matched against chat messages (Chinese and English quiz keywords).
pattern = "测验|测试|题目|考核|quiz|test|question|exam"
# NOTE(review): relative trigger priority — whether higher or lower wins
# is not visible from this file; confirm against the trigger dispatcher.
priority = 5
[hand.permissions]
# Capabilities this Hand must be granted before it can run.
requires = [
    "quiz.generate",
    "quiz.grade",
    "quiz.analyze",
]
# Operator roles allowed to invoke the Hand.
roles = ["operator.read", "operator.write"]
[hand.rate_limit]
# At most 50 requests per 3600-second (1 hour) window.
max_requests = 50
window_seconds = 3600
[hand.audit]
# Record both the inputs and the outputs of every invocation.
log_inputs = true
log_outputs = true
# Audit records are kept for 30 days.
retention_days = 30
# Quiz action definitions. Each action's `params` table maps a parameter
# name to its type string; a trailing "?" appears to mark the parameter
# as optional — confirm against the Hand runtime's parameter schema.

[[hand.actions]]
id = "generate"
name = "生成测验"
description = "根据主题或内容生成测验题目"
params.topic = "string"
params.content = "string?"
params.question_type = "string?"
params.count = "number?"
params.difficulty = "string?"

[[hand.actions]]
id = "grade"
name = "评估答案"
description = "评估用户提交的答案"
params.quiz_id = "string"
params.answers = "array"

[[hand.actions]]
id = "analyze"
name = "分析表现"
description = "分析用户的测验表现和学习进度"
params.quiz_id = "string"
params.user_id = "string?"

[[hand.actions]]
id = "hint"
name = "提供提示"
description = "为当前题目提供提示"
params.question_id = "string"
params.hint_level = "number?"

[[hand.actions]]
id = "explain"
name = "解释答案"
description = "提供题目的详细解析"
params.question_id = "string"

[[hand.actions]]
id = "adaptive_next"
name = "自适应下一题"
description = "根据当前表现推荐下一题难度"
params.current_score = "number"
params.questions_answered = "number"

[[hand.actions]]
id = "generate_report"
name = "生成报告"
description = "生成测验结果报告"
params.quiz_id = "string"
params.format = "string?"