Compare commits

...

5 Commits

Author SHA1 Message Date
iven
5a5a4b322d docs: update CLAUDE.md with stabilization rules, security audit, and production checklist
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
2026-04-03 00:29:21 +08:00
iven
d8e2954d73 docs: stabilization directive + TRUTH document + AI session prompts + dockerignore
- STABILIZATION_DIRECTIVE.md: feature freeze rules, banned actions, priorities
- TRUTH.md: single source of truth for system state (crate counts, store counts)
- AI_SESSION_PROMPTS.md: three-layer prompt system for AI sessions
- Industry agent delivery design spec
- Stabilization test suite for regression prevention
- Delete stale ISSUE-TRACKER.md
- Add .dockerignore for container builds
- Add brainstorm session artifacts
2026-04-03 00:29:16 +08:00
iven
5c74e74f2a fix(desktop): component cleanup + dead code removal + DeerFlow ai-elements
- ChatArea: DeerFlow ai-elements annotations for accessibility
- Conversation: remove unused Context, simplify message rendering
- Delete dead modules: audit-logger.ts, gateway-reconnect.ts
- Replace console.log with structured logger across components
- Add idb dependency for IndexedDB persistence
- Fix kernel-skills type safety improvements
2026-04-03 00:28:58 +08:00
iven
15d578c5bc fix(tauri): replace silent let _ = with structured logging across 20 modules
Replace error-swallowing let _ = patterns with tracing::warn! in browser,
classroom, gateway, intelligence, memory, pipeline, secure_storage, and
viking command handlers. Ensures errors are observable in production logs.
2026-04-03 00:28:39 +08:00
iven
52bdafa633 refactor(crates): kernel/generation module split + DeerFlow optimizations + middleware + dead code cleanup
- Split zclaw-kernel/kernel.rs (1486 lines) into 9 domain modules
- Split zclaw-kernel/generation.rs (1080 lines) into 3 modules
- Add DeerFlow-inspired middleware: DanglingTool, SubagentLimit, ToolError, ToolOutputGuard
- Add PromptBuilder for structured system prompt assembly
- Add FactStore (zclaw-memory) for persistent fact extraction
- Add task builtin tool for agent task management
- Driver improvements: Anthropic/OpenAI extended thinking, Gemini safety settings
- Replace let _ = with proper log::warn! across SaaS handlers
- Remove unused dependency (url) from zclaw-hands
2026-04-03 00:28:03 +08:00
117 changed files with 6432 additions and 2691 deletions

42
.dockerignore Normal file
View File

@@ -0,0 +1,42 @@
# Build artifacts
target/
node_modules/
# Environment and secrets
.env
.env.*
*.pem
*.key
# IDE and OS
.vscode/
.idea/
.DS_Store
Thumbs.db
# Git
.git/
.gitignore
# Logs
*.log
# Docker
docker-compose.yml
Dockerfile
# Documentation (not needed in image)
docs/
*.md
!README.md
# Test files
tests/
tests/e2e/
admin-v2/tests/
# Claude/development tools
.claude/
.planning/
.superpowers/
plans/

View File

@@ -0,0 +1 @@
{"reason":"owner process exited","timestamp":1774933144596}

View File

@@ -0,0 +1 @@
1454

View File

@@ -0,0 +1,151 @@
<h2>Admin 管理后台的设计方向</h2>
<p class="subtitle">选择一个整体设计风格方向,后续所有页面都将基于此展开</p>
<div class="cards">
<div class="card" data-choice="modern-minimal" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); padding: 24px; min-height: 180px; display: flex; flex-direction: column; gap: 12px;">
<div style="display: flex; gap: 12px; align-items: center;">
<div style="width: 40px; height: 40px; border-radius: 10px; background: #6366f1;"></div>
<div>
<div style="font-weight: 700; color: #1e293b; font-size: 14px;">ZCLAW Admin</div>
<div style="color: #94a3b8; font-size: 12px;">现代极简</div>
</div>
</div>
<div style="display: flex; gap: 8px;">
<div style="flex: 1; height: 8px; border-radius: 4px; background: #6366f1; opacity: 0.2;"></div>
<div style="flex: 2; height: 8px; border-radius: 4px; background: #6366f1; opacity: 0.1;"></div>
<div style="flex: 1; height: 8px; border-radius: 4px; background: #6366f1; opacity: 0.15;"></div>
</div>
<div style="display: flex; gap: 8px; margin-top: 4px;">
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e2e8f0;"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e2e8f0;"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e2e8f0;"></div>
</div>
<div style="display: flex; gap: 4px; margin-top: auto;">
<div style="width: 20px; height: 20px; border-radius: 4px; background: #6366f1;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #8b5cf6;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #a78bfa;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #c4b5fd;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #e0e7ff;"></div>
</div>
</div>
</div>
<div class="card-body">
<h3>A. 现代极简 (Modern Minimal)</h3>
<p>大量留白Indigo/Purple 主色调,圆角卡片,轻量阴影。类似 Linear、Vercel Dashboard 风格。</p>
</div>
</div>
<div class="card" data-choice="tech-dark" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%); padding: 24px; min-height: 180px; display: flex; flex-direction: column; gap: 12px;">
<div style="display: flex; gap: 12px; align-items: center;">
<div style="width: 40px; height: 40px; border-radius: 10px; background: linear-gradient(135deg, #06b6d4, #3b82f6);"></div>
<div>
<div style="font-weight: 700; color: #f1f5f9; font-size: 14px;">ZCLAW Admin</div>
<div style="color: #64748b; font-size: 12px;">科技暗色</div>
</div>
</div>
<div style="display: flex; gap: 8px;">
<div style="flex: 1; height: 8px; border-radius: 4px; background: #06b6d4; opacity: 0.3;"></div>
<div style="flex: 2; height: 8px; border-radius: 4px; background: #06b6d4; opacity: 0.15;"></div>
<div style="flex: 1; height: 8px; border-radius: 4px; background: #06b6d4; opacity: 0.2;"></div>
</div>
<div style="display: flex; gap: 8px; margin-top: 4px;">
<div style="flex: 1; height: 60px; border-radius: 8px; background: #1e293b; border: 1px solid #334155;"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: #1e293b; border: 1px solid #334155;"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: #1e293b; border: 1px solid #334155;"></div>
</div>
<div style="display: flex; gap: 4px; margin-top: auto;">
<div style="width: 20px; height: 20px; border-radius: 4px; background: #06b6d4;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #3b82f6;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #8b5cf6;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #22d3ee;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #1e293b; border: 1px solid #334155;"></div>
</div>
</div>
</div>
<div class="card-body">
<h3>B. 科技暗色 (Tech Dark)</h3>
<p>深色基底Cyan/Blue 渐变高亮,发光边框,数据密集感。类似 Grafana、DataDog 风格。</p>
</div>
</div>
<div class="card" data-choice="warm-professional" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background: linear-gradient(135deg, #fffbeb 0%, #fef3c7 50%, #f5f5f4 100%); padding: 24px; min-height: 180px; display: flex; flex-direction: column; gap: 12px;">
<div style="display: flex; gap: 12px; align-items: center;">
<div style="width: 40px; height: 40px; border-radius: 10px; background: linear-gradient(135deg, #f59e0b, #ef4444);"></div>
<div>
<div style="font-weight: 700; color: #292524; font-size: 14px;">ZCLAW Admin</div>
<div style="color: #a8a29e; font-size: 12px;">温暖专业</div>
</div>
</div>
<div style="display: flex; gap: 8px;">
<div style="flex: 1; height: 8px; border-radius: 4px; background: #f59e0b; opacity: 0.3;"></div>
<div style="flex: 2; height: 8px; border-radius: 4px; background: #f59e0b; opacity: 0.15;"></div>
<div style="flex: 1; height: 8px; border-radius: 4px; background: #f59e0b; opacity: 0.2;"></div>
</div>
<div style="display: flex; gap: 8px; margin-top: 4px;">
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e7e5e4; box-shadow: 0 1px 3px rgba(0,0,0,0.05);"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e7e5e4; box-shadow: 0 1px 3px rgba(0,0,0,0.05);"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e7e5e4; box-shadow: 0 1px 3px rgba(0,0,0,0.05);"></div>
</div>
<div style="display: flex; gap: 4px; margin-top: auto;">
<div style="width: 20px; height: 20px; border-radius: 4px; background: #f59e0b;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #ef4444;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #f97316;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #d97706;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #fef3c7;"></div>
</div>
</div>
</div>
<div class="card-body">
<h3>C. 温暖专业 (Warm Professional)</h3>
<p>暖白底色Amber/Orange 主色调,圆润设计,亲切感。类似 Notion、Stripe Dashboard 风格。</p>
</div>
</div>
<div class="card" data-choice="brand-zclaw" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background: linear-gradient(135deg, #faf5ff 0%, #ede9fe 50%, #f5f3ff 100%); padding: 24px; min-height: 180px; display: flex; flex-direction: column; gap: 12px;">
<div style="display: flex; gap: 12px; align-items: center;">
<div style="width: 40px; height: 40px; border-radius: 10px; background: linear-gradient(135deg, #863bff, #47bfff);"></div>
<div>
<div style="font-weight: 700; color: #1e1b4b; font-size: 14px;">ZCLAW Admin</div>
<div style="color: #a78bfa; font-size: 12px;">品牌紫蓝</div>
</div>
</div>
<div style="display: flex; gap: 8px;">
<div style="flex: 1; height: 8px; border-radius: 4px; background: #863bff; opacity: 0.3;"></div>
<div style="flex: 2; height: 8px; border-radius: 4px; background: #863bff; opacity: 0.15;"></div>
<div style="flex: 1; height: 8px; border-radius: 4px; background: #47bfff; opacity: 0.2;"></div>
</div>
<div style="display: flex; gap: 8px; margin-top: 4px;">
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e9d5ff; box-shadow: 0 1px 3px rgba(134,59,255,0.08);"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e9d5ff; box-shadow: 0 1px 3px rgba(134,59,255,0.08);"></div>
<div style="flex: 1; height: 60px; border-radius: 8px; background: white; border: 1px solid #e9d5ff; box-shadow: 0 1px 3px rgba(134,59,255,0.08);"></div>
</div>
<div style="display: flex; gap: 4px; margin-top: auto;">
<div style="width: 20px; height: 20px; border-radius: 4px; background: #863bff;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #47bfff;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #a78bfa;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #67e8f9;"></div>
<div style="width: 20px; height: 20px; border-radius: 4px; background: #ede9fe;"></div>
</div>
</div>
</div>
<div class="card-body">
<h3>D. 品牌紫蓝 (Brand ZCLAW)</h3>
<p>延续 ZCLAW 品牌色(紫色 #863bff + 蓝色 #47bfff渐变点缀现代感与品牌一致性。</p>
</div>
</div>
</div>
<div class="section" style="margin-top: 24px; padding: 16px; background: rgba(99,102,241,0.05); border-radius: 8px;">
<p style="margin: 0; color: #64748b; font-size: 14px;">
<strong>提示:</strong>点击卡片选择你偏好的设计方向。这个选择将影响配色方案、组件风格、以及整体视觉语言。
后续的暗色模式将基于所选方向的暗色变体。
</p>
</div>

View File

@@ -0,0 +1 @@
{"reason":"owner process exited","timestamp":1775026601420}

View File

@@ -0,0 +1 @@
1627

View File

@@ -0,0 +1,68 @@
<h2>ZCLAW 功能优先级矩阵</h2>
<p class="subtitle">哪些功能能让用户"啊"的一声觉得值?点击选择你认为的杀手级功能(可多选)</p>
<div class="options" data-multiselect>
<div class="option" data-choice="smart-chat" onclick="toggleSelect(this)">
<div class="letter">A</div>
<div class="content">
<h3>智能对话(深度优化)</h3>
<p>多模型无缝切换、流式响应、上下文记忆闭环、Tool Call 可视化。<br><strong>现状:</strong>基础已好,需打磨体验细节(消息虚拟化、搜索、导出)</p>
</div>
</div>
<div class="option" data-choice="hands" onclick="toggleSelect(this)">
<div class="letter">B</div>
<div class="content">
<h3>自主 Hands数字员工</h3>
<p>Browser 自动化、深度研究、数据采集、Twitter 运营——让 AI 真正干活。<br><strong>现状:</strong>9个 Hand 有实现,但需真实场景验证 + 可视化执行流程</p>
</div>
</div>
<div class="option" data-choice="pipeline" onclick="toggleSelect(this)">
<div class="letter">C</div>
<div class="content">
<h3>Pipeline 工作流</h3>
<p>拖拽式自动化编排:多步骤、多模型、并行/条件分支、定时触发。<br><strong>现状:</strong>引擎完成、UI 有基础版,需完善可视化编辑器 + 模板市场</p>
</div>
</div>
<div class="option" data-choice="memory" onclick="toggleSelect(this)">
<div class="letter">D</div>
<div class="content">
<h3>记忆与成长系统</h3>
<p>跨会话记忆、事实提取、偏好学习、知识图谱——AI 越用越懂你。<br><strong>现状:</strong>Growth 系统完成Fact 提取可用,需增强检索质量和可视化</p>
</div>
</div>
<div class="option" data-choice="skills" onclick="toggleSelect(this)">
<div class="letter">E</div>
<div class="content">
<h3>技能市场</h3>
<p>75+ 预置技能 + 社区技能分享 + 一键安装——AI 能力的 App Store。<br><strong>现状:</strong>SKILL.md 体系完成需技能发现UI + 安装/卸载流程</p>
</div>
</div>
<div class="option" data-choice="gateway" onclick="toggleSelect(this)">
<div class="letter">F</div>
<div class="content">
<h3>LLM 网关SaaS 变现核心)</h3>
<p>Key Pool 代理、用量计费、配额管理、组织级 API Key 管理——企业买单的理由。<br><strong>现状:</strong>Relay+Key Pool 完成,缺计费/配额/支付闭环</p>
</div>
</div>
<div class="option" data-choice="multi-agent" onclick="toggleSelect(this)">
<div class="letter">G</div>
<div class="content">
<h3>多 Agent 协作</h3>
<p>Director 编排、A2A 协议、角色分配——多个 AI 角色协同解决复杂问题。<br><strong>现状:</strong>代码完成但 feature-gated未接入桌面端</p>
</div>
</div>
<div class="option" data-choice="admin" onclick="toggleSelect(this)">
<div class="letter">H</div>
<div class="content">
<h3>Admin V2 管理面板</h3>
<p>用户管理、模型配置、用量统计、操作审计——SaaS 运维必备。<br><strong>现状:</strong>10个页面完成需测试 + 告警 + 数据看板</p>
</div>
</div>
</div>

View File

@@ -0,0 +1,123 @@
<h2>ZCLAW 系统现状全景</h2>
<p class="subtitle">基于代码库深度扫描2026-04-01</p>
<div class="section">
<h3>技术架构成熟度</h3>
<div style="display:grid; grid-template-columns: 1fr 1fr; gap: 12px; margin-top: 12px;">
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">核心类型 (zclaw-types)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">95%</div>
<div style="font-size: 12px; color: #64748b;">ID/Message/Event/Capability/Error 全套</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">存储层 (zclaw-memory)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">90%</div>
<div style="font-size: 12px; color: #64748b;">SQLite + Fact提取 + KV Store</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">运行时 (zclaw-runtime)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">90%</div>
<div style="font-size: 12px; color: #64748b;">4驱动 + 11中间件 + Agent Loop</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #eab308;">
<div style="font-size: 13px; color: #94a3b8;">协调层 (zclaw-kernel)</div>
<div style="font-size: 20px; font-weight: 700; color: #eab308;">85%</div>
<div style="font-size: 12px; color: #64748b;">注册/调度/事件/Director(feature-gated)</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">SaaS 后端 (zclaw-saas)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">95%</div>
<div style="font-size: 12px; color: #64748b;">76+ API / 17表 / Relay代理 / Key Pool</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">桌面端 (Tauri+React)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">85%</div>
<div style="font-size: 12px; color: #64748b;">60+组件 / 13 Store / 3连接模式</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">技能系统 (75 SKILL.md)</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">80%</div>
<div style="font-size: 12px; color: #64748b;">PromptOnly可执行 / Wasm+Native未完成</div>
</div>
<div style="background: #1a2332; border-radius: 8px; padding: 16px; border-left: 4px solid #22c55e;">
<div style="font-size: 13px; color: #94a3b8;">安全体系</div>
<div style="font-size: 20px; font-weight: 700; color: #22c55e;">HIGH</div>
<div style="font-size: 12px; color: #64748b;">16层防御 / 渗透测试15项修复完成</div>
</div>
</div>
</div>
<div class="section">
<h3>商业基础设施 vs 商业能力</h3>
<div style="display:grid; grid-template-columns: 1fr 1fr; gap: 16px; margin-top: 12px;">
<div style="background: #0c1a0c; border: 1px solid #22c55e33; border-radius: 8px; padding: 16px;">
<h4 style="color: #22c55e; margin:0 0 10px 0;">已建成的基础设施</h4>
<ul style="margin:0; padding-left: 18px; color: #cbd5e1; font-size: 14px; line-height: 1.8;">
<li>LLM Relay 代理 (Key Pool + 429处理 + RPM/TPM)</li>
<li>每模型定价元数据 (input/output pricing)</li>
<li>用量追踪 (per-account/per-model token)</li>
<li>账户路由 (relay vs local 模式)</li>
<li>RBAC 权限体系 (3角色 + 细粒度权限)</li>
<li>Admin V2 管理面板 (10页面)</li>
<li>Docker + Nginx 部署方案</li>
<li>Admin V2 前端 (Ant Design Pro)</li>
</ul>
</div>
<div style="background: #1a0c0c; border: 1px solid #ef444433; border-radius: 8px; padding: 16px;">
<h4 style="color: #ef4444; margin:0 0 10px 0;">缺失的商业能力</h4>
<ul style="margin:0; padding-left: 18px; color: #cbd5e1; font-size: 14px; line-height: 1.8;">
<li><strong>无订阅/计费系统</strong> — 无Stripe/支付宝/微信支付</li>
<li><strong>无配额管理</strong> — quota字段已被移除</li>
<li><strong>无计划/层级定义</strong> — 无 free/pro/enterprise</li>
<li><strong>无发票/账单</strong> — 无成本计算逻辑</li>
<li><strong>无支付集成</strong> — 无任何支付网关代码</li>
</ul>
</div>
</div>
</div>
<div class="section">
<h3>核心差异化竞争力</h3>
<div style="display:grid; grid-template-columns: repeat(3, 1fr); gap: 12px; margin-top: 12px;">
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;"></div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">Rust 原生性能</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">~40MB RAM / &lt;200ms 冷启动<br>vs Electron 400MB+</div>
</div>
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;">🤖</div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">9个自主 Hands</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">Browser/Researcher/Twitter<br>预置数字员工</div>
</div>
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;">🧩</div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">75+ 技能 + Pipeline</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">SKILL.md 声明式定义<br>12种 Pipeline Action</div>
</div>
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;">🇨🇳</div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">中文市场原生</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">GLM/Qwen/Kimi/DeepSeek<br>27+ LLM Provider</div>
</div>
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;">☁️</div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">自托管 SaaS 网关</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">Key Pool 代理 / 用量追踪<br>组织级 LLM 管理</div>
</div>
<div style="background: linear-gradient(135deg, #1e293b, #0f172a); border-radius: 8px; padding: 16px; text-align: center;">
<div style="font-size: 28px; margin-bottom: 6px;">🔒</div>
<div style="font-size: 14px; font-weight: 600; color: #e2e8f0;">16层安全防护</div>
<div style="font-size: 12px; color: #64748b; margin-top: 4px;">渗透测试通过<br>企业级安全合规</div>
</div>
</div>
</div>
<div class="section" style="margin-top: 20px; padding: 16px; background: #1e293b; border-radius: 8px;">
<h3 style="margin: 0 0 8px 0;">战略定位一句话</h3>
<p style="color: #f59e0b; font-size: 16px; margin: 0; font-weight: 600;">
ZCLAW = 中文市场的 AI Agent OS不是另一个 ChatGPT 套壳。
</p>
<p style="color: #94a3b8; font-size: 13px; margin: 8px 0 0 0;">
核心问题:技术基础设施已建成 ~90%,但商业变现路径从 0 → 1 尚未打通。
</p>
</div>

View File

@@ -0,0 +1 @@
{"reason":"owner process exited","timestamp":1775055441855}

View File

@@ -0,0 +1 @@
1917

View File

@@ -0,0 +1,166 @@
<h2>知识库管理 - UI 布局方案</h2>
<p class="subtitle">三种页面布局方案,请选择最适合的方案</p>
<div class="cards">
<div class="card" data-choice="layout-a" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background:#f8f9fa;border-radius:8px;padding:16px;font-size:12px;">
<div class="mock-nav" style="background:#1a1a2e;color:#fff;padding:8px;margin:-8px -8px 8px;border-radius:4px;">
知识库管理
</div>
<div style="display:flex;gap:8px;">
<div style="width:200px;background:#fff;border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<div style="font-weight:bold;margin-bottom:8px;color:#1890ff;">📁 行业分类</div>
<div style="padding:4px 8px;background:#e6f7ff;border-radius:2px;margin-bottom:4px;">🏭 制造业</div>
<div style="padding:4px 8px;margin-bottom:4px;">🏥 医疗健康</div>
<div style="padding:4px 8px;margin-bottom:4px;">🎓 教育培训</div>
<div style="padding:4px 8px;margin-bottom:4px;">👔 企业管理</div>
<div style="padding:4px 8px;color:#999;">+ 新增分类</div>
</div>
<div style="flex:1;background:#fff;border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<div style="display:flex;justify-content:space-between;margin-bottom:8px;">
<span style="font-weight:bold;">🏭 制造业 (24条)</span>
<div>
<span style="background:#1890ff;color:#fff;padding:2px 8px;border-radius:2px;font-size:11px;">+ 新增</span>
<span style="background:#f0f0f0;padding:2px 8px;border-radius:2px;font-size:11px;margin-left:4px;">导入</span>
</div>
</div>
<div style="border:1px solid #f0f0f0;border-radius:2px;padding:6px;margin-bottom:4px;">
<b>注塑成型工艺参数指南</b><br>
<span style="font-size:10px;color:#999;">关键词: 注塑, 工艺参数, 温度控制 | 更新于 2小时前</span>
</div>
<div style="border:1px solid #f0f0f0;border-radius:2px;padding:6px;margin-bottom:4px;">
<b>模具设计常见问题集</b><br>
<span style="font-size:10px;color:#999;">关键词: 模具, 设计, FAQ | 更新于 1天前</span>
</div>
<div style="border:1px solid #f0f0f0;border-radius:2px;padding:6px;">
<b>QC 质检标准流程</b><br>
<span style="font-size:10px;color:#999;">关键词: 质检, QC, 流程 | 更新于 3天前</span>
</div>
</div>
</div>
</div>
</div>
<div class="card-body">
<h3>A: 左树右表(经典管理布局)</h3>
<p>左侧分类树 + 右侧条目列表。空间利用率高,浏览效率好。适合分类层级清晰的场景。</p>
</div>
</div>
<div class="card" data-choice="layout-b" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background:#f8f9fa;border-radius:8px;padding:16px;font-size:12px;">
<div class="mock-nav" style="background:#1a1a2e;color:#fff;padding:8px;margin:-8px -8px 8px;border-radius:4px;">
知识库管理
</div>
<div style="display:flex;gap:8px;margin-bottom:8px;">
<span style="background:#1890ff;color:#fff;padding:4px 12px;border-radius:12px;font-size:11px;">全部 (68)</span>
<span style="background:#f0f0f0;padding:4px 12px;border-radius:12px;font-size:11px;">🏭 制造业 (24)</span>
<span style="background:#f0f0f0;padding:4px 12px;border-radius:12px;font-size:11px;">🏥 医疗健康 (18)</span>
<span style="background:#f0f0f0;padding:4px 12px;border-radius:12px;font-size:11px;">🎓 教育培训 (15)</span>
<span style="background:#f0f0f0;padding:4px 12px;border-radius:12px;font-size:11px;">👔 企业管理 (11)</span>
</div>
<div style="display:grid;grid-template-columns:1fr 1fr;gap:8px;">
<div style="border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<b>注塑成型工艺参数指南</b>
<p style="font-size:10px;color:#666;margin:4px 0;">详细描述注塑成型的温度、压力、冷却时间等关键参数...</p>
<span style="font-size:10px;color:#1890ff;">🏭 制造业</span>
<span style="font-size:10px;color:#999;margin-left:8px;">引用 42 次</span>
</div>
<div style="border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<b>药品 GMP 合规检查清单</b>
<p style="font-size:10px;color:#666;margin:4px 0;">涵盖药品生产质量管理的完整合规要求...</p>
<span style="font-size:10px;color:#52c41a;">🏥 医疗健康</span>
<span style="font-size:10px;color:#999;margin-left:8px;">引用 38 次</span>
</div>
<div style="border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<b>模具设计常见问题集</b>
<p style="font-size:10px;color:#666;margin:4px 0;">汇总模具设计过程中的常见技术问题和解决方案...</p>
<span style="font-size:10px;color:#1890ff;">🏭 制造业</span>
<span style="font-size:10px;color:#999;margin-left:8px;">引用 27 次</span>
</div>
<div style="border:1px solid #e0e0e0;border-radius:4px;padding:8px;">
<b>在线课程设计方法论</b>
<p style="font-size:10px;color:#666;margin:4px 0;">系统化的在线教育课程设计和评估方法...</p>
<span style="font-size:10px;color:#fa8c16;">🎓 教育培训</span>
<span style="font-size:10px;color:#999;margin-left:8px;">引用 19 次</span>
</div>
</div>
</div>
</div>
<div class="card-body">
<h3>B: 卡片网格(标签筛选)</h3>
<p>顶部标签切换 + 卡片网格展示。视觉友好,快速浏览内容概要。适合知识条目不多且偏内容展示的场景。</p>
</div>
</div>
<div class="card" data-choice="layout-c" onclick="toggleSelect(this)">
<div class="card-image">
<div style="background:#f8f9fa;border-radius:8px;padding:16px;font-size:12px;">
<div class="mock-nav" style="background:#1a1a2e;color:#fff;padding:8px;margin:-8px -8px 8px;border-radius:4px;">
知识库管理
</div>
<div style="display:flex;gap:8px;margin-bottom:8px;">
<div style="background:#1890ff;color:#fff;padding:4px 12px;border-radius:4px;font-size:11px;">📋 知识条目</div>
<div style="background:#f0f0f0;padding:4px 12px;border-radius:4px;font-size:11px;">📂 分类管理</div>
<div style="background:#f0f0f0;padding:4px 12px;border-radius:4px;font-size:11px;">📊 分析看板</div>
</div>
<div style="margin-bottom:8px;display:flex;gap:4px;">
<input style="flex:1;padding:4px 8px;border:1px solid #d9d9d9;border-radius:4px;font-size:11px;" placeholder="搜索知识条目...">
<select style="padding:4px 8px;border:1px solid #d9d9d9;border-radius:4px;font-size:11px;">
<option>全部分类</option><option>制造业</option><option>医疗健康</option>
</select>
<select style="padding:4px 8px;border:1px solid #d9d9d9;border-radius:4px;font-size:11px;">
<option>状态</option><option>活跃</option><option>已归档</option>
</select>
</div>
<div style="border-collapse:collapse;width:100%;">
<div style="display:flex;background:#fafafa;padding:6px;border:1px solid #f0f0f0;font-size:10px;font-weight:bold;">
<span style="width:30px;"></span>
<span style="flex:2;">标题</span>
<span style="flex:1;">分类</span>
<span style="flex:1;">关键词</span>
<span style="width:60px;">引用</span>
<span style="width:60px;">状态</span>
<span style="width:80px;">更新时间</span>
<span style="width:60px;">操作</span>
</div>
<div style="display:flex;padding:6px;border:1px solid #f0f0f0;border-top:0;font-size:10px;">
<span style="width:30px;"></span>
<span style="flex:2;font-weight:bold;">注塑成型工艺参数指南</span>
<span style="flex:1;color:#1890ff;">🏭 制造业</span>
<span style="flex:1;color:#999;">注塑, 工艺</span>
<span style="width:60px;">42</span>
<span style="width:60px;color:#52c41a;">活跃</span>
<span style="width:80px;color:#999;">2h 前</span>
<span style="width:60px;color:#1890ff;">编辑</span>
</div>
<div style="display:flex;padding:6px;border:1px solid #f0f0f0;border-top:0;font-size:10px;">
<span style="width:30px;"></span>
<span style="flex:2;font-weight:bold;">药品 GMP 合规检查清单</span>
<span style="flex:1;color:#52c41a;">🏥 医疗</span>
<span style="flex:1;color:#999;">GMP, 合规</span>
<span style="width:60px;">38</span>
<span style="width:60px;color:#52c41a;">活跃</span>
<span style="width:80px;color:#999;">1d 前</span>
<span style="width:60px;color:#1890ff;">编辑</span>
</div>
<div style="display:flex;padding:6px;border:1px solid #f0f0f0;border-top:0;font-size:10px;">
<span style="width:30px;"></span>
<span style="flex:2;font-weight:bold;">模具设计常见问题集</span>
<span style="flex:1;color:#1890ff;">🏭 制造业</span>
<span style="flex:1;color:#999;">模具, FAQ</span>
<span style="width:60px;">27</span>
<span style="width:60px;color:#52c41a;">活跃</span>
<span style="width:80px;color:#999;">3d 前</span>
<span style="width:60px;color:#1890ff;">编辑</span>
</div>
</div>
</div>
</div>
<div class="card-body">
<h3>C: 标签页表格Ant Design 风格)</h3>
<p>顶部标签页切换模块 + 标准表格。最符合现有 Admin V2 风格,信息密度高,适合批量操作。与现有页面一致。</p>
</div>
</div>
</div>

View File

@@ -0,0 +1,3 @@
<div style="display:flex;align-items:center;justify-content:center;min-height:60vh">
<p class="subtitle">Continuing in terminal...</p>
</div>

View File

@@ -0,0 +1,3 @@
<div style="display:flex;align-items:center;justify-content:center;min-height:60vh">
<p class="subtitle">正在准备知识库 UI 布局方案...</p>
</div>

View File

@@ -0,0 +1 @@
{"reason":"owner process exited","timestamp":1775043250470}

View File

@@ -0,0 +1 @@
237

View File

@@ -0,0 +1,68 @@
<h2>ZCLAW 功能优先级矩阵</h2>
<p class="subtitle">哪些功能能让用户"啊"的一声觉得值?点击选择你认为的杀手级功能(可多选)</p>
<div class="options" data-multiselect>
<div class="option" data-choice="smart-chat" onclick="toggleSelect(this)">
<div class="letter">A</div>
<div class="content">
<h3>智能对话(深度优化)</h3>
<p>多模型无缝切换、流式响应、上下文记忆闭环、Tool Call 可视化。<br><strong>现状:</strong>基础已好,需打磨体验细节(消息虚拟化、搜索、导出)</p>
</div>
</div>
<div class="option" data-choice="hands" onclick="toggleSelect(this)">
<div class="letter">B</div>
<div class="content">
<h3>自主 Hands数字员工</h3>
<p>Browser 自动化、深度研究、数据采集、Twitter 运营——让 AI 真正干活。<br><strong>现状:</strong>9个 Hand 有实现,但需真实场景验证 + 可视化执行流程</p>
</div>
</div>
<div class="option" data-choice="pipeline" onclick="toggleSelect(this)">
<div class="letter">C</div>
<div class="content">
<h3>Pipeline 工作流</h3>
<p>拖拽式自动化编排:多步骤、多模型、并行/条件分支、定时触发。<br><strong>现状:</strong>引擎完成、UI 有基础版,需完善可视化编辑器 + 模板市场</p>
</div>
</div>
<div class="option" data-choice="memory" onclick="toggleSelect(this)">
<div class="letter">D</div>
<div class="content">
<h3>记忆与成长系统</h3>
<p>跨会话记忆、事实提取、偏好学习、知识图谱——AI 越用越懂你。<br><strong>现状:</strong>Growth 系统完成Fact 提取可用,需增强检索质量和可视化</p>
</div>
</div>
<div class="option" data-choice="skills" onclick="toggleSelect(this)">
<div class="letter">E</div>
<div class="content">
<h3>技能市场</h3>
<p>75+ 预置技能 + 社区技能分享 + 一键安装——AI 能力的 App Store。<br><strong>现状:</strong>SKILL.md 体系完成需技能发现UI + 安装/卸载流程</p>
</div>
</div>
<div class="option" data-choice="gateway" onclick="toggleSelect(this)">
<div class="letter">F</div>
<div class="content">
<h3>LLM 网关SaaS 变现核心)</h3>
<p>Key Pool 代理、用量计费、配额管理、组织级 API Key 管理——企业买单的理由。<br><strong>现状:</strong>Relay+Key Pool 完成,缺计费/配额/支付闭环</p>
</div>
</div>
<div class="option" data-choice="multi-agent" onclick="toggleSelect(this)">
<div class="letter">G</div>
<div class="content">
<h3>多 Agent 协作</h3>
<p>Director 编排、A2A 协议、角色分配——多个 AI 角色协同解决复杂问题。<br><strong>现状:</strong>代码完成但 feature-gated未接入桌面端</p>
</div>
</div>
<div class="option" data-choice="admin" onclick="toggleSelect(this)">
<div class="letter">H</div>
<div class="content">
<h3>Admin V2 管理面板</h3>
<p>用户管理、模型配置、用量统计、操作审计——SaaS 运维必备。<br><strong>现状:</strong>10个页面完成需测试 + 告警 + 数据看板</p>
</div>
</div>
</div>

View File

@@ -2,43 +2,65 @@
> **ZCLAW 是一个独立成熟的 AI Agent 桌面客户端**,专注于提供真实可用的 AI 能力,而不是演示 UI。
> **当前阶段: 稳定化。** 参见 [docs/STABILIZATION_DIRECTIVE.md](docs/STABILIZATION_DIRECTIVE.md)
> 在 P0 缺陷修复完成前,不接受任何新功能。所有 AI 会话必须先确认稳定化状态。
## 1. 项目定位
### 1.1 ZCLAW 是什么
ZCLAW 是面向中文用户的 AI Agent 桌面端,核心能力包括:
- **智能对话** - 多模型支持、流式响应、上下文管理
- **自主能力** - 11 个 Hands9 启用 + 2 禁用: Predictor, Lead
- **技能系统** - 可扩展的 SKILL.md 技能定义
- **工作流编排** - 多步骤自动化任务
- **智能对话** - 多模型支持8 Provider、流式响应、上下文管理
- **自主能力** - 9 个启用的 Hands另有 Predictor/Lead 已禁用
- **技能系统** - 75 个 SKILL.md 技能定义
- **工作流编排** - Pipeline DSL + 10 行业模板
- **安全审计** - 完整的操作日志和权限控制
### 1.2 决策原则
**任何改动都要问:这对 ZCLAW 有用吗?对 ZCLAW 有影响吗?**
**任何改动都要问:这对 ZCLAW 用户今天能产生价值吗?**
-对 ZCLAW 用户有价值的功能 → 优先实现
-提升 ZCLAW 稳定性和可用性 → 必须做
- ❌ 只为兼容其他系统的妥协 → 谨慎评估
-增加复杂度但无实际价值 → 不做
- ✅解决问题要寻找根因,从源头解决问题。不要为了消除问题而选择折中办法,从而导致系统架构、代码安全性、代码质量出现问题
-修复已知的 P0/P1 缺陷 → 最高优先
-接通"写了没接"的断链 → 高优先
- ✅ 清理死代码和孤立文件 → 应该做
-新增功能/页面/端点 → 稳定化完成前禁止
- ❌ 增加复杂度但无实际价值 → 永远不做
- ❌ 折中方案掩盖根因 → 永远不做
### 1.3 稳定化铁律
**在 [STABILIZATION_DIRECTIVE.md](docs/STABILIZATION_DIRECTIVE.md) 完成标准达标前,以下行为被禁止:**
| 禁止行为 | 原因 |
|----------|------|
| 新增 SaaS API 端点 | 已有 93 个(含 2 个 dev-only前端未全部接通 |
| 新增 SKILL.md 文件 | 已有 75 个,大部分未执行验证 |
| 新增 Tauri 命令 | 已有 171 个24 个无前端调用 |
| 新增中间件/Store | 已有 11 层中间件 + 18 个 Store |
| 新增 admin 页面 | 已有 13 页 |
### 1.4 系统真实状态
参见 [docs/TRUTH.md](docs/TRUTH.md) — 这是唯一的真相源,所有其他文档中的数字如果与此冲突,以 TRUTH.md 为准。
***
## 2. 项目结构
```text
ZCLAW/
├── crates/ # Rust Workspace (核心能力)
├── crates/ # Rust Workspace (10 crates)
│ ├── zclaw-types/ # L1: 基础类型 (AgentId, Message, Error)
│ ├── zclaw-memory/ # L2: 存储层 (SQLite, KV, 会话管理)
│ ├── zclaw-runtime/ # L3: 运行时 (LLM驱动, 工具, Agent循环)
│ ├── zclaw-kernel/ # L4: 核心协调 (注册, 调度, 事件, 工作流)
│ ├── zclaw-skills/ # 技能系统 (SKILL.md解析, 执行器)
│ ├── zclaw-hands/ # 自主能力 (Hand/Trigger 注册管理)
│ ├── zclaw-protocols/ # 协议支持 (MCP, A2A)
── zclaw-saas/ # SaaS 后端 (账号, 模型配置, 中转, 配置同步)
├── admin/ # Next.js 管理后台
│ ├── zclaw-runtime/ # L3: 运行时 (4 Driver, 7 工具, 11 层中间件)
│ ├── zclaw-kernel/ # L4: 核心协调 (171 Tauri 命令)
│ ├── zclaw-skills/ # 技能系统 (76 SKILL.md 解析, 语义路由)
│ ├── zclaw-hands/ # 自主能力 (9 启用, 155 Rust 测试)
│ ├── zclaw-protocols/ # 协议支持 (MCP 完整, A2A feature-gated)
── zclaw-pipeline/ # Pipeline DSL (v1/v2, 10 行业模板)
│ ├── zclaw-growth/ # 记忆增长 (FTS5 + TF-IDF)
│ └── zclaw-saas/ # SaaS 后端 (93 API, Axum + PostgreSQL)
├── admin-v2/ # 管理后台 (Vite + Ant Design Pro, 13 页)
├── desktop/ # Tauri 桌面应用
│ ├── src/
│ │ ├── components/ # React UI 组件 (含 SaaS 集成)
@@ -64,14 +86,14 @@ ZCLAW/
| 层级 | 技术 |
| ---- | --------------------- |
| 前端框架 | React 18 + TypeScript |
| 状态管理 | Zustand |
| 前端框架 | React 19 + TypeScript |
| 状态管理 | Zustand 5 |
| 桌面框架 | Tauri 2.x |
| 样式方案 | Tailwind CSS |
| 样式方案 | Tailwind 4 |
| 配置格式 | TOML |
| 后端核心 | Rust Workspace (10 crates) |
| 后端核心 | Rust Workspace (10 crates, ~66K 行) |
| SaaS 后端 | Axum + PostgreSQL (zclaw-saas) |
| 管理后台 | Next.js (admin/) |
| 管理后台 | Vite + Ant Design Pro (admin-v2/) |
### 2.3 Crate 依赖关系
@@ -130,18 +152,28 @@ desktop/src-tauri (→ kernel, skills, hands, protocols)
**禁止**在组件内直接创建 WebSocket 或拼装 HTTP 请求。
### 4.2 発能层客户端
### 4.2 分层职责
````
```
UI 组件 → 只负责展示和交互
Store → 负责状态组织和流程编排
Client → 负责网络通信和```
Client → 负责网络通信和协议转换
```
### 4.3 代码自检规则
**每次修改代码前必须检查:**
1. **是否已有相同能力的代码?** — 先搜索再写,避免重复
2. **前端是否有人调用?** — 没有 Rust 调用者的 Tauri 命令,先标注 `@reserved`
3. **错误是否静默吞掉?**`let _ =` 必须替换为 `log::warn!` 或更高级别处理
4. **文档数字是否需要更新?** — 改了数量就要改文档
---
### 4.3 代码规范
### 4.4 代码规范
**TypeScript:**
- 避免 `any`,优先 `unknown + 类型守卫`
@@ -188,7 +220,7 @@ Client → 负责网络通信和```
## 6. 自主能力系统 (Hands)
ZCLAW 提供 11 个自主能力包:
ZCLAW 提供 11 个自主能力包9 启用 + 2 禁用)
| Hand | 功能 | 状态 |
|------|------|------|

View File

@@ -291,6 +291,27 @@ impl sqlx::FromRow<'_, SqliteRow> for MemoryRow {
/// Private helper methods on SqliteStorage (NOT in impl VikingStorage block)
impl SqliteStorage {
/// Sanitize a user query for FTS5 MATCH syntax.
///
/// FTS5 treats several characters as operators (`+`, `-`, `*`, `"`, `(`, `)`, `:`).
/// Strips these and keeps only alphanumeric + CJK tokens with length > 1,
/// then joins them with `OR` for broad matching.
fn sanitize_fts_query(query: &str) -> String {
let terms: Vec<String> = query
.to_lowercase()
.split(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty() && s.len() > 1)
.map(|s| s.to_string())
.collect();
if terms.is_empty() {
return String::new();
}
// Join with OR so any term can match (broad recall, then rerank by similarity)
terms.join(" OR ")
}
/// Fetch memories by scope with importance-based ordering.
/// Used internally by find() for scope-based queries.
pub(crate) async fn fetch_by_scope_priv(&self, scope: Option<&str>, limit: usize) -> Result<Vec<MemoryRow>> {
@@ -363,7 +384,10 @@ impl VikingStorage for SqliteStorage {
let _ = sqlx::query("DELETE FROM memories_fts WHERE uri = ?")
.bind(&entry.uri)
.execute(&self.pool)
.await;
.await
.map_err(|e| {
tracing::warn!("[SqliteStorage] Failed to delete old FTS entry: {}", e);
});
let keywords_text = entry.keywords.join(" ");
let _ = sqlx::query(
@@ -376,7 +400,10 @@ impl VikingStorage for SqliteStorage {
.bind(&entry.content)
.bind(&keywords_text)
.execute(&self.pool)
.await;
.await
.map_err(|e| {
tracing::warn!("[SqliteStorage] Failed to insert FTS entry: {}", e);
});
// Update semantic scorer (use embedding when available)
let mut scorer = self.scorer.write().await;
@@ -416,8 +443,21 @@ impl VikingStorage for SqliteStorage {
// Strategy: use FTS5 for initial filtering when query is non-empty,
// then score candidates with TF-IDF / embedding for precise ranking.
// Fallback to scope-only scan when query is empty (e.g., "list all").
// When FTS5 returns nothing, we return empty — do NOT fall back to
// scope scan (that returns irrelevant high-importance memories).
let rows = if !query.is_empty() {
// Sanitize query for FTS5: strip operators that cause syntax errors
let sanitized = Self::sanitize_fts_query(query);
if sanitized.is_empty() {
// Query had no meaningful terms after sanitization (e.g., "1+2")
tracing::debug!(
"[SqliteStorage] Query '{}' produced no FTS5-searchable terms, skipping",
query.chars().take(50).collect::<String>()
);
return Ok(Vec::new());
}
// FTS5-powered candidate retrieval (fast, index-based)
let fts_candidates = if let Some(ref scope) = options.scope {
sqlx::query_as::<_, MemoryRow>(
@@ -432,7 +472,7 @@ impl VikingStorage for SqliteStorage {
LIMIT ?
"#
)
.bind(query)
.bind(&sanitized)
.bind(format!("{}%", scope))
.bind(limit as i64)
.fetch_all(&self.pool)
@@ -449,7 +489,7 @@ impl VikingStorage for SqliteStorage {
LIMIT ?
"#
)
.bind(query)
.bind(&sanitized)
.bind(limit as i64)
.fetch_all(&self.pool)
.await
@@ -457,11 +497,25 @@ impl VikingStorage for SqliteStorage {
match fts_candidates {
Ok(rows) if !rows.is_empty() => rows,
Ok(_) | Err(_) => {
// FTS5 returned nothing or query syntax was invalid —
// fallback to scope-based scan (no full table scan unless no scope)
tracing::debug!("[SqliteStorage] FTS5 returned no results, falling back to scope scan");
self.fetch_by_scope_priv(options.scope.as_deref(), limit).await?
Ok(_) => {
// FTS5 returned no results — memories are genuinely irrelevant.
// Do NOT fall back to scope scan (that was the root cause of
// injecting "广东光华" memories into "1+9" queries).
tracing::debug!(
"[SqliteStorage] FTS5 returned no results for query: '{}'",
query.chars().take(50).collect::<String>()
);
return Ok(Vec::new());
}
Err(e) => {
// FTS5 syntax error after sanitization — return empty rather
// than falling back to irrelevant scope-based results.
tracing::debug!(
"[SqliteStorage] FTS5 query failed for '{}': {}",
query.chars().take(50).collect::<String>(),
e
);
return Ok(Vec::new());
}
}
} else {
@@ -557,7 +611,10 @@ impl VikingStorage for SqliteStorage {
let _ = sqlx::query("DELETE FROM memories_fts WHERE uri = ?")
.bind(uri)
.execute(&self.pool)
.await;
.await
.map_err(|e| {
tracing::warn!("[SqliteStorage] Failed to delete FTS entry: {}", e);
});
// Remove from in-memory scorer
let mut scorer = self.scorer.write().await;

View File

@@ -134,18 +134,18 @@ impl Default for InMemoryStorage {
#[async_trait]
impl VikingStorage for InMemoryStorage {
async fn store(&self, entry: &MemoryEntry) -> Result<()> {
let mut memories = self.memories.write().unwrap();
let mut memories = self.memories.write().expect("InMemoryStorage lock poisoned");
memories.insert(entry.uri.clone(), entry.clone());
Ok(())
}
async fn get(&self, uri: &str) -> Result<Option<MemoryEntry>> {
let memories = self.memories.read().unwrap();
let memories = self.memories.read().expect("InMemoryStorage lock poisoned");
Ok(memories.get(uri).cloned())
}
async fn find(&self, query: &str, options: FindOptions) -> Result<Vec<MemoryEntry>> {
let memories = self.memories.read().unwrap();
let memories = self.memories.read().expect("InMemoryStorage lock poisoned");
let mut results: Vec<MemoryEntry> = memories
.values()
@@ -187,7 +187,7 @@ impl VikingStorage for InMemoryStorage {
}
async fn find_by_prefix(&self, prefix: &str) -> Result<Vec<MemoryEntry>> {
let memories = self.memories.read().unwrap();
let memories = self.memories.read().expect("InMemoryStorage lock poisoned");
let results: Vec<MemoryEntry> = memories
.values()
@@ -199,19 +199,19 @@ impl VikingStorage for InMemoryStorage {
}
async fn delete(&self, uri: &str) -> Result<()> {
let mut memories = self.memories.write().unwrap();
let mut memories = self.memories.write().expect("InMemoryStorage lock poisoned");
memories.remove(uri);
Ok(())
}
async fn store_metadata_json(&self, key: &str, json: &str) -> Result<()> {
let mut metadata = self.metadata.write().unwrap();
let mut metadata = self.metadata.write().expect("InMemoryStorage lock poisoned");
metadata.insert(key.to_string(), json.to_string());
Ok(())
}
async fn get_metadata_json(&self, key: &str) -> Result<Option<String>> {
let metadata = self.metadata.read().unwrap();
let metadata = self.metadata.read().expect("InMemoryStorage lock poisoned");
Ok(metadata.get(key).cloned())
}
}

View File

@@ -20,6 +20,4 @@ thiserror = { workspace = true }
tracing = { workspace = true }
async-trait = { workspace = true }
reqwest = { workspace = true }
hmac = "0.12"
sha1 = "0.10"
base64 = { workspace = true }

View File

@@ -182,6 +182,9 @@ impl QuizGenerator for LlmQuizGenerator {
temperature: Some(0.7),
stop: Vec::new(),
stream: false,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
};
let response = self.driver.complete(request).await.map_err(|e| {

View File

@@ -96,7 +96,8 @@ pub struct SlideContent {
pub background: Option<String>,
}
/// Content block types
/// Presentation/slideshow rendering content block. Domain-specific for slide content.
/// Distinct from zclaw_types::ContentBlock (LLM messages) and zclaw_protocols::ContentBlock (MCP).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContentBlock {

View File

@@ -311,7 +311,7 @@ impl KernelConfig {
}
/// Find the config file path.
fn find_config_path() -> Option<PathBuf> {
pub fn find_config_path() -> Option<PathBuf> {
// 1. Environment variable override
if let Ok(path) = std::env::var("ZCLAW_CONFIG") {
return Some(PathBuf::from(path));

View File

@@ -755,6 +755,7 @@ mod tests {
order: 0,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -563,6 +563,7 @@ mod tests {
order: 1,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -601,6 +601,7 @@ mod tests {
order: 0,
},
],
agents: vec![],
metadata: ClassroomMetadata::default(),
}
}

View File

@@ -0,0 +1,345 @@
//! Agent Profile Generation for Interactive Classroom
//!
//! Generates multi-agent classroom roles (Teacher, Assistant, Students)
//! with distinct personas, avatars, and action permissions.
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Agent role in the classroom.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AgentRole {
    /// Leads the session; highest speaking priority.
    Teacher,
    /// Supports the teacher with examples and follow-ups.
    Assistant,
    /// Simulated learner persona.
    Student,
}

impl Default for AgentRole {
    /// The teacher is the natural default speaker for a classroom.
    fn default() -> Self {
        AgentRole::Teacher
    }
}

impl std::fmt::Display for AgentRole {
    /// Lowercase role name; matches the `snake_case` serde representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            AgentRole::Teacher => "teacher",
            AgentRole::Assistant => "assistant",
            AgentRole::Student => "student",
        };
        f.write_str(label)
    }
}
/// Agent profile for classroom participants.
///
/// Serialized with camelCase keys for the TypeScript frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AgentProfile {
    /// Unique ID for this agent (format: "agent_<role>_<uuid>")
    pub id: String,
    /// Display name (e.g., "陈老师", "小助手", "张伟")
    pub name: String,
    /// Role type
    pub role: AgentRole,
    /// Persona description (system prompt for this agent)
    pub persona: String,
    /// Avatar emoji or URL
    pub avatar: String,
    /// Theme color (hex)
    pub color: String,
    /// Actions this agent is allowed to perform (e.g. "speech", "whiteboard_draw")
    pub allowed_actions: Vec<String>,
    /// Speaking priority (higher = speaks first in multi-agent)
    pub priority: u8,
}
/// Request for generating agent profiles.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentProfileRequest {
    /// Topic for context-aware persona generation
    pub topic: String,
    /// Teaching style hint ("discussion", "pbl", "socratic"; anything else
    /// selects the generic style)
    pub style: String,
    /// Difficulty level hint
    /// NOTE(review): not read by generate_agent_profiles — confirm intended use.
    pub level: String,
    /// Total agent count (default: 5; two slots go to teacher + assistant)
    pub agent_count: Option<usize>,
    /// Language code (default: "zh-CN"; any "zh" prefix selects Chinese personas)
    pub language: Option<String>,
}

impl Default for AgentProfileRequest {
    /// Empty topic, lecture style, intermediate level, default count, zh-CN.
    fn default() -> Self {
        Self {
            topic: String::new(),
            style: "lecture".to_string(),
            level: "intermediate".to_string(),
            agent_count: None,
            language: Some("zh-CN".to_string()),
        }
    }
}
/// Generate agent profiles for a classroom session.
///
/// Returns hardcoded defaults that match the OpenMAIC experience.
/// Future: optionally use LLM for dynamic persona generation.
///
/// NOTE(review): two slots are always reserved for teacher + assistant and at
/// least one student is kept, so requesting fewer than 3 agents still yields 3.
pub fn generate_agent_profiles(request: &AgentProfileRequest) -> Vec<AgentProfile> {
    let language = request.language.as_deref().unwrap_or("zh-CN");
    let total = request.agent_count.unwrap_or(5);
    // Teacher and assistant occupy two slots; the remainder are students (min 1).
    let students = total.saturating_sub(2).max(1);
    if language.starts_with("zh") {
        generate_chinese_profiles(&request.topic, &request.style, students)
    } else {
        generate_english_profiles(&request.topic, &request.style, students)
    }
}
/// Build the Chinese-language roster: one teacher, one assistant, and
/// `student_count` students cycled from three fixed personality templates.
///
/// Fixed priorities: teacher 10, assistant 7, students descending from 5
/// (clamped to a minimum of 1).
fn generate_chinese_profiles(topic: &str, style: &str, student_count: usize) -> Vec<AgentProfile> {
    let style_desc = match style {
        "discussion" => "善于引导讨论的",
        "pbl" => "注重项目实践的",
        "socratic" => "擅长提问式教学的",
        _ => "经验丰富的",
    };
    let mut agents = Vec::with_capacity(student_count + 2);
    // Teacher
    agents.push(AgentProfile {
        id: format!("agent_teacher_{}", Uuid::new_v4()),
        name: "陈老师".to_string(),
        role: AgentRole::Teacher,
        persona: format!(
            "你是一位{}教师,正在教授「{}」这个主题。你的教学风格清晰有条理,\
            善于使用生活中的比喻和类比帮助学生理解抽象概念。你注重核心原理的透彻理解,\
            会用通俗易懂的语言解释复杂概念。",
            style_desc, topic
        ),
        avatar: "👩‍🏫".to_string(),
        color: "#4F46E5".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
            "slideshow_control".into(),
            "quiz_create".into(),
        ],
        priority: 10,
    });
    // Assistant
    agents.push(AgentProfile {
        id: format!("agent_assistant_{}", Uuid::new_v4()),
        name: "小助手".to_string(),
        role: AgentRole::Assistant,
        persona: format!(
            "你是一位耐心的助教,正在协助教授「{}」。你擅长用代码示例和图表辅助讲解,\
            善于回答学生问题补充老师遗漏的知识点。你说话简洁明了喜欢用emoji点缀语气。",
            topic
        ),
        avatar: "🤝".to_string(),
        color: "#10B981".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
        ],
        priority: 7,
    });
    // Students — 3 distinct personalities; templates repeat beyond that
    let student_templates = [
        (
            "李思",
            "你是一个好奇且活跃的学生,正在学习「{topic}」。你有一定编程基础,但概念理解上容易混淆。\
            你经常问'为什么'和'如果...呢'这类深入问题,喜欢和老师互动。",
            "🤔",
            "#EF4444",
        ),
        (
            "王明",
            "你是一个认真笔记的学生,正在学习「{topic}」。你学习态度端正,善于总结和归纳要点。\
            你经常复述和确认自己的理解,喜欢有条理的讲解方式。",
            "📝",
            "#F59E0B",
        ),
        (
            "张伟",
            "你是一个思维跳跃的学生,正在学习「{topic}」。你经常联想到其他概念和实际应用场景,\
            善于举一反三但有时会跑题。你喜欢动手实践和探索。",
            "💡",
            "#8B5CF6",
        ),
    ];
    for i in 0..student_count {
        let (name, persona_tmpl, avatar, color) = &student_templates[i % student_templates.len()];
        agents.push(AgentProfile {
            id: format!("agent_student_{}_{}", i + 1, Uuid::new_v4()),
            name: name.to_string(),
            role: AgentRole::Student,
            persona: persona_tmpl.replace("{topic}", topic),
            avatar: avatar.to_string(),
            color: color.to_string(),
            allowed_actions: vec!["speech".into(), "ask_question".into()],
            // Fix: `(5 - i as u8)` underflowed (debug panic / release wrap)
            // whenever more than 5 students were requested; saturate then clamp.
            priority: 5u8.saturating_sub(i as u8).max(1),
        });
    }
    agents
}
/// Build the English-language roster: one teacher, one assistant, and
/// `student_count` students cycled from three fixed personality templates.
///
/// Fixed priorities: teacher 10, assistant 7, students descending from 5
/// (clamped to a minimum of 1).
fn generate_english_profiles(topic: &str, style: &str, student_count: usize) -> Vec<AgentProfile> {
    let style_desc = match style {
        "discussion" => "discussion-oriented",
        "pbl" => "project-based",
        "socratic" => "Socratic method",
        _ => "experienced",
    };
    let mut agents = Vec::with_capacity(student_count + 2);
    // Teacher
    agents.push(AgentProfile {
        id: format!("agent_teacher_{}", Uuid::new_v4()),
        name: "Prof. Chen".to_string(),
        role: AgentRole::Teacher,
        persona: format!(
            "You are a {} instructor teaching 「{}」. Your teaching style is clear and organized, \
            skilled at using metaphors and analogies to explain complex concepts in accessible language. \
            You focus on thorough understanding of core principles.",
            style_desc, topic
        ),
        avatar: "👩‍🏫".to_string(),
        color: "#4F46E5".to_string(),
        allowed_actions: vec![
            "speech".into(),
            "whiteboard_draw".into(),
            "slideshow_control".into(),
            "quiz_create".into(),
        ],
        priority: 10,
    });
    // Assistant
    agents.push(AgentProfile {
        id: format!("agent_assistant_{}", Uuid::new_v4()),
        name: "TA Alex".to_string(),
        role: AgentRole::Assistant,
        persona: format!(
            "You are a patient teaching assistant helping with 「{}」. \
            You provide code examples, diagrams, and fill in gaps. You are concise and friendly.",
            topic
        ),
        avatar: "🤝".to_string(),
        color: "#10B981".to_string(),
        allowed_actions: vec!["speech".into(), "whiteboard_draw".into()],
        priority: 7,
    });
    // Students — 3 distinct personalities; templates repeat beyond that
    let student_templates = [
        (
            "Sam",
            "A curious and active student learning 「{topic}」. Has some programming background \
            but gets confused on concepts. Often asks 'why?' and 'what if?'",
            "🤔",
            "#EF4444",
        ),
        (
            "Jordan",
            "A diligent note-taking student learning 「{topic}」. Methodical learner, \
            good at summarizing key points. Prefers structured explanations.",
            "📝",
            "#F59E0B",
        ),
        (
            "Alex",
            "A creative thinker learning 「{topic}」. Connects concepts to real-world applications. \
            Good at lateral thinking but sometimes goes off-topic.",
            "💡",
            "#8B5CF6",
        ),
    ];
    for i in 0..student_count {
        let (name, persona_tmpl, avatar, color) = &student_templates[i % student_templates.len()];
        agents.push(AgentProfile {
            id: format!("agent_student_{}_{}", i + 1, Uuid::new_v4()),
            name: name.to_string(),
            role: AgentRole::Student,
            persona: persona_tmpl.replace("{topic}", topic),
            avatar: avatar.to_string(),
            color: color.to_string(),
            allowed_actions: vec!["speech".into(), "ask_question".into()],
            // Fix: `(5 - i as u8)` underflowed (debug panic / release wrap)
            // whenever more than 5 students were requested; saturate then clamp.
            priority: 5u8.saturating_sub(i as u8).max(1),
        });
    }
    agents
}
#[cfg(test)]
mod tests {
    use super::*;

    // Full Chinese roster: roles, names, persona topic injection, and the
    // teacher > assistant > student priority ordering.
    #[test]
    fn test_generate_chinese_profiles() {
        let req = AgentProfileRequest {
            topic: "Rust 所有权".to_string(),
            style: "lecture".to_string(),
            level: "intermediate".to_string(),
            agent_count: Some(5),
            language: Some("zh-CN".to_string()),
        };
        let agents = generate_agent_profiles(&req);
        assert_eq!(agents.len(), 5);
        assert_eq!(agents[0].role, AgentRole::Teacher);
        assert!(agents[0].name.contains("陈老师"));
        assert!(agents[0].persona.contains("Rust 所有权"));
        assert_eq!(agents[1].role, AgentRole::Assistant);
        assert!(agents[1].name.contains("小助手"));
        assert_eq!(agents[2].role, AgentRole::Student);
        assert_eq!(agents[3].role, AgentRole::Student);
        assert_eq!(agents[4].role, AgentRole::Student);
        // Priority ordering
        assert!(agents[0].priority > agents[1].priority);
        assert!(agents[1].priority > agents[2].priority);
    }

    // English roster honors agent_count (4 = teacher + assistant + 2 students)
    // and injects the style descriptor into the teacher persona.
    #[test]
    fn test_generate_english_profiles() {
        let req = AgentProfileRequest {
            topic: "Python Basics".to_string(),
            style: "discussion".to_string(),
            level: "beginner".to_string(),
            agent_count: Some(4),
            language: Some("en-US".to_string()),
        };
        let agents = generate_agent_profiles(&req);
        assert_eq!(agents.len(), 4); // 1 teacher + 1 assistant + 2 students
        assert_eq!(agents[0].role, AgentRole::Teacher);
        assert!(agents[0].persona.contains("discussion-oriented"));
    }

    // Display must stay in sync with the snake_case serde representation.
    #[test]
    fn test_agent_role_display() {
        assert_eq!(format!("{}", AgentRole::Teacher), "teacher");
        assert_eq!(format!("{}", AgentRole::Assistant), "assistant");
        assert_eq!(format!("{}", AgentRole::Student), "student");
    }

    // Default request: empty topic, no explicit agent count.
    #[test]
    fn test_default_request() {
        let req = AgentProfileRequest::default();
        assert!(req.topic.is_empty());
        assert_eq!(req.agent_count, None);
    }
}

View File

@@ -0,0 +1,337 @@
//! Classroom Multi-Agent Chat
//!
//! Handles multi-agent conversation within the classroom context.
//! A single LLM call generates responses from multiple agent perspectives.
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use super::agents::AgentProfile;
/// A single chat message in the classroom.
///
/// Sender identity (name/avatar/color) is denormalized onto the message so the
/// frontend can render without a profile lookup. Serialized camelCase.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatMessage {
    /// Unique message ID (format: "msg_<uuid>")
    pub id: String,
    /// Agent profile ID of the sender ("user" for human messages)
    pub agent_id: String,
    /// Display name of the sender
    pub agent_name: String,
    /// Avatar of the sender
    pub agent_avatar: String,
    /// Message content
    pub content: String,
    /// Unix timestamp (milliseconds)
    pub timestamp: i64,
    /// Role of the sender ("user", "teacher", "assistant", or "student")
    pub role: String,
    /// Theme color of the sender
    pub color: String,
}
/// Chat state for a classroom session.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatState {
    /// All chat messages
    pub messages: Vec<ClassroomChatMessage>,
    /// Whether chat is active
    /// NOTE(review): not read or written anywhere in this module — confirm the
    /// command/frontend layer actually uses it.
    pub active: bool,
}
/// Request for generating a chat response.
///
/// NOTE(review): unlike the sibling chat types, this struct has no
/// `rename_all = "camelCase"` attribute, so its fields serialize/deserialize
/// as snake_case — confirm the frontend sends matching keys.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassroomChatRequest {
    /// Classroom ID
    pub classroom_id: String,
    /// User's message (becomes the "学生最新问题" section of the prompt)
    pub user_message: String,
    /// Available agents
    pub agents: Vec<AgentProfile>,
    /// Current scene context (optional, for contextual responses)
    pub scene_context: Option<String>,
    /// Chat history for context
    pub history: Vec<ClassroomChatMessage>,
}
/// Response from multi-agent chat generation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ClassroomChatResponse {
    /// Agent responses (may be 1-3 agents responding)
    pub responses: Vec<ClassroomChatMessage>,
}
impl ClassroomChatMessage {
    /// Create a user message.
    pub fn user_message(content: &str) -> Self {
        Self::build("user", "You", "👤", "#6B7280", "user", content)
    }

    /// Create an agent message, copying the sender's display identity.
    pub fn agent_message(agent: &AgentProfile, content: &str) -> Self {
        Self::build(
            &agent.id,
            &agent.name,
            &agent.avatar,
            &agent.color,
            &agent.role.to_string(),
            content,
        )
    }

    /// Shared constructor: stamps a fresh uuid-based id and the current
    /// wall-clock timestamp in milliseconds.
    fn build(
        agent_id: &str,
        agent_name: &str,
        agent_avatar: &str,
        color: &str,
        role: &str,
        content: &str,
    ) -> Self {
        Self {
            id: format!("msg_{}", Uuid::new_v4()),
            agent_id: agent_id.to_string(),
            agent_name: agent_name.to_string(),
            agent_avatar: agent_avatar.to_string(),
            content: content.to_string(),
            timestamp: current_timestamp_millis(),
            role: role.to_string(),
            color: color.to_string(),
        }
    }
}
/// Build the LLM prompt for multi-agent chat response generation.
///
/// This function constructs a prompt that instructs the LLM to generate
/// responses from multiple agent perspectives in a structured JSON format
/// (an array of {"agentName", "content"} objects consumed by
/// `parse_chat_responses`).
pub fn build_chat_prompt(request: &ClassroomChatRequest) -> String {
    // One markdown bullet per agent: name, role (Display → lowercase), persona.
    let agent_descriptions: Vec<String> = request.agents.iter()
        .map(|a| format!(
            "- **{}** ({}): {}",
            a.name, a.role, a.persona
        ))
        .collect();
    // Transcript as "**name**: content" lines; explicit placeholder when empty.
    let history_text = if request.history.is_empty() {
        "No previous messages.".to_string()
    } else {
        request.history.iter()
            .map(|m| format!("**{}**: {}", m.agent_name, m.content))
            .collect::<Vec<_>>()
            .join("\n")
    };
    // Optional scene line; empty string (not "None") when absent.
    let scene_hint = request.scene_context.as_deref()
        .map(|ctx| format!("\n当前场景上下文:{}", ctx))
        .unwrap_or_default();
    // `{{`/`}}` inside the template are literal braces in the JSON example.
    format!(
        r#"你是一个课堂多智能体讨论的协调器。根据学生的问题选择1-3个合适的角色来回复。
## 可用角色
{agents}
## 对话历史
{history}
{scene_hint}
## 学生最新问题
{question}
## 回复规则
1. 选择最合适的1-3个角色来回复
2. 老师角色应该给出权威、清晰的解释
3. 助教角色可以补充代码示例或图表说明
4. 学生角色可以表达理解、提出追问或分享自己的理解
5. 每个角色的回复应该符合其个性设定
6. 回复应该自然、有教育意义
## 输出格式
你必须返回合法的JSON数组每个元素包含
```json
[
{{
"agentName": "角色名",
"content": "回复内容"
}}
]
```
只返回JSON数组不要包含其他文字。"#,
        agents = agent_descriptions.join("\n"),
        history = history_text,
        scene_hint = scene_hint,
        question = request.user_message,
    )
}
/// Parse multi-agent responses from LLM output.
///
/// Extracts agent messages from the LLM's JSON response. Entries whose
/// `agentName` matches no known agent are silently dropped. Falls back to a
/// single response — teacher preferred, otherwise the first agent — when
/// parsing yields nothing; returns an empty vec when there are no agents.
pub fn parse_chat_responses(
    llm_output: &str,
    agents: &[AgentProfile],
) -> Vec<ClassroomChatMessage> {
    // Pull the JSON array out of markdown fences / surrounding prose.
    let json_text = extract_json_array(llm_output);
    // Try parsing as a JSON array of {agentName, content} objects.
    if let Ok(parsed) = serde_json::from_str::<Vec<serde_json::Value>>(&json_text) {
        let mut messages = Vec::new();
        for item in &parsed {
            if let (Some(name), Some(content)) = (
                item.get("agentName").and_then(|v| v.as_str()),
                item.get("content").and_then(|v| v.as_str()),
            ) {
                // Only accept responses attributed to a known agent.
                if let Some(agent) = agents.iter().find(|a| a.name == name) {
                    messages.push(ClassroomChatMessage::agent_message(agent, content));
                }
            }
        }
        if !messages.is_empty() {
            return messages;
        }
    }
    // Fallback: attribute the raw output to one agent.
    // Fix: clean_fallback_response() now runs on BOTH fallback paths —
    // previously the first-agent path leaked raw JSON artifacts to the UI
    // while the teacher path cleaned them.
    let fallback = clean_fallback_response(llm_output);
    if let Some(teacher) = agents.iter().find(|a| a.role == super::agents::AgentRole::Teacher) {
        vec![ClassroomChatMessage::agent_message(teacher, &fallback)]
    } else if let Some(first) = agents.first() {
        vec![ClassroomChatMessage::agent_message(first, &fallback)]
    } else {
        vec![]
    }
}
/// Extract a JSON array from free-form LLM text.
///
/// Preference order: the contents of a ```json fenced block, then the widest
/// `[...]` span, then the text unchanged.
fn extract_json_array(text: &str) -> String {
    // 1) Fenced markdown block: everything between "```json" and the next "```".
    if let Some(fence) = text.find("```json") {
        let body = &text[fence + 7..];
        if let Some(close) = body.find("```") {
            return body[..close].trim().to_string();
        }
    }
    // 2) Bare array: widest span from the first '[' to the last ']'.
    match (text.find('['), text.rfind(']')) {
        (Some(open), Some(close)) if close > open => text[open..=close].to_string(),
        // 3) Nothing usable — hand back the input untouched.
        _ => text.to_string(),
    }
}
/// Clean up a fallback response: when the LLM half-followed the JSON format,
/// recover the first element's "content" field; otherwise return the trimmed
/// text as-is.
fn clean_fallback_response(text: &str) -> String {
    let trimmed = text.trim();
    // Only attempt JSON recovery when the text plausibly starts a JSON value.
    let looks_like_json = trimmed.starts_with('[') || trimmed.starts_with('{');
    if looks_like_json {
        if let Ok(values) = serde_json::from_str::<Vec<serde_json::Value>>(trimmed) {
            let recovered = values
                .first()
                .and_then(|v| v.get("content"))
                .and_then(|v| v.as_str());
            if let Some(content) = recovered {
                return content.to_string();
            }
        }
    }
    trimmed.to_string()
}
/// Current wall-clock time as Unix milliseconds.
///
/// Fix: the previous `.unwrap()` panicked if the system clock was set before
/// the Unix epoch; now degrades to 0 instead of crashing the chat path.
fn current_timestamp_millis() -> i64 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_millis() as i64)
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::generation::agents::{AgentProfile, AgentRole};

    /// Minimal two-agent roster (teacher + student) shared by the tests below.
    fn test_agents() -> Vec<AgentProfile> {
        vec![
            AgentProfile {
                id: "t1".into(),
                name: "陈老师".into(),
                role: AgentRole::Teacher,
                persona: "Test teacher".into(),
                avatar: "👩‍🏫".into(),
                color: "#4F46E5".into(),
                allowed_actions: vec![],
                priority: 10,
            },
            AgentProfile {
                id: "s1".into(),
                name: "李思".into(),
                role: AgentRole::Student,
                persona: "Curious student".into(),
                avatar: "🤔".into(),
                color: "#EF4444".into(),
                allowed_actions: vec![],
                priority: 5,
            },
        ]
    }

    // Well-formed fenced JSON maps each entry to its named agent, in order.
    #[test]
    fn test_parse_chat_responses_valid_json() {
        let agents = test_agents();
        let llm_output = r#"```json
[
{"agentName": "陈老师", "content": "好问题!让我来解释一下..."},
{"agentName": "李思", "content": "原来如此,那如果..."}
]
```"#;
        let messages = parse_chat_responses(llm_output, &agents);
        assert_eq!(messages.len(), 2);
        assert_eq!(messages[0].agent_name, "陈老师");
        assert_eq!(messages[1].agent_name, "李思");
    }

    // Non-JSON output is attributed to the teacher as a single message.
    #[test]
    fn test_parse_chat_responses_fallback() {
        let agents = test_agents();
        let llm_output = "这是一个关于Rust的好问题。所有权意味着每个值只有一个主人。";
        let messages = parse_chat_responses(llm_output, &agents);
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0].agent_name, "陈老师"); // Falls back to teacher
    }

    // Prompt interpolates agent names, the user question, and scene context.
    #[test]
    fn test_build_chat_prompt() {
        let agents = test_agents();
        let request = ClassroomChatRequest {
            classroom_id: "test".into(),
            user_message: "什么是所有权?".into(),
            agents,
            scene_context: Some("Rust 所有权核心规则".into()),
            history: vec![],
        };
        let prompt = build_chat_prompt(&request);
        assert!(prompt.contains("陈老师"));
        assert!(prompt.contains("什么是所有权?"));
        assert!(prompt.contains("Rust 所有权核心规则"));
    }

    // User constructor stamps the fixed "user"/"You" identity.
    #[test]
    fn test_user_message() {
        let msg = ClassroomChatMessage::user_message("Hello");
        assert_eq!(msg.agent_name, "You");
        assert_eq!(msg.role, "user");
    }

    // Agent constructor copies identity from the profile; role via Display.
    #[test]
    fn test_agent_message() {
        let agent = &test_agents()[0];
        let msg = ClassroomChatMessage::agent_message(agent, "Test");
        assert_eq!(msg.agent_name, "陈老师");
        assert_eq!(msg.role, "teacher");
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,268 @@
//! A2A (Agent-to-Agent) messaging
//!
//! All items in this module are gated by the `multi-agent` feature flag.
#[cfg(feature = "multi-agent")]
use zclaw_types::{AgentId, Capability, Event, Result};
#[cfg(feature = "multi-agent")]
use zclaw_protocols::{A2aAgentProfile, A2aCapability, A2aEnvelope, A2aMessageType, A2aRecipient};
#[cfg(feature = "multi-agent")]
use super::Kernel;
#[cfg(feature = "multi-agent")]
impl Kernel {
// ============================================================
// A2A (Agent-to-Agent) Messaging
// ============================================================
/// Derive an A2A agent profile from an AgentConfig.
///
/// Each configured tool is advertised as one A2A capability; the agent is
/// exposed as a "worker" speaking the "a2a" protocol with default priority 5.
pub(super) fn agent_config_to_a2a_profile(config: &zclaw_types::AgentConfig) -> A2aAgentProfile {
    let mut caps = Vec::with_capacity(config.tools.len());
    for tool_name in &config.tools {
        caps.push(A2aCapability {
            name: tool_name.clone(),
            description: format!("Tool: {}", tool_name),
            input_schema: None,
            output_schema: None,
            requires_approval: false,
            version: "1.0.0".to_string(),
            tags: vec![],
        });
    }
    A2aAgentProfile {
        id: config.id,
        name: config.name.clone(),
        description: config.description.clone().unwrap_or_default(),
        capabilities: caps,
        protocols: vec!["a2a".to_string()],
        role: "worker".to_string(),
        priority: 5,
        metadata: std::collections::HashMap::new(),
        groups: vec![],
        last_seen: 0,
    }
}
/// Check if an agent is authorized to send messages to a target.
///
/// Authorization requires an `AgentMessage` capability whose pattern is `"*"`
/// or a string prefix of the target's id; agents with no registered
/// capabilities are denied by default.
pub(super) fn check_a2a_permission(&self, from: &AgentId, to: &AgentId) -> Result<()> {
    let cap_set = match self.capabilities.get(from) {
        Some(set) => set,
        // No capabilities registered — deny by default
        None => {
            return Err(zclaw_types::ZclawError::PermissionDenied(
                format!("Agent {} has no capabilities registered", from)
            ));
        }
    };
    let target = to.to_string();
    let authorized = cap_set.capabilities.iter().any(|cap| match cap {
        Capability::AgentMessage { pattern } => {
            pattern == "*" || target.starts_with(pattern.as_str())
        }
        _ => false,
    });
    if authorized {
        Ok(())
    } else {
        Err(zclaw_types::ZclawError::PermissionDenied(
            format!("Agent {} does not have AgentMessage capability for {}", from, to)
        ))
    }
}
/// Send a direct A2A message from one agent to another.
///
/// Order is contract-relevant: both endpoints are checked for existence
/// first (NotFound), then the sender's AgentMessage capability
/// (PermissionDenied), and the A2aMessageSent event is published only after
/// routing succeeds. `message_type` defaults to Notification.
pub async fn a2a_send(
    &self,
    from: &AgentId,
    to: &AgentId,
    payload: serde_json::Value,
    message_type: Option<A2aMessageType>,
) -> Result<()> {
    // Validate sender exists
    self.registry.get(from)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(
            format!("Sender agent not found: {}", from)
        ))?;
    // Validate receiver exists and is running
    // NOTE(review): only registry presence is checked here; "running" state
    // is not actually verified — confirm whether that matters to callers.
    self.registry.get(to)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(
            format!("Target agent not found: {}", to)
        ))?;
    // Check capability permission
    self.check_a2a_permission(from, to)?;
    // Build and route envelope
    let envelope = A2aEnvelope::new(
        *from,
        A2aRecipient::Direct { agent_id: *to },
        message_type.unwrap_or(A2aMessageType::Notification),
        payload,
    );
    self.a2a_router.route(envelope).await?;
    // Emit event (only reached on successful routing)
    self.events.publish(Event::A2aMessageSent {
        from: *from,
        to: format!("{}", to),
        message_type: "direct".to_string(),
    });
    Ok(())
}
/// Broadcast a message from one agent to all other agents.
///
/// The sender must be registered; the payload is routed as a Notification
/// envelope addressed to every agent, and an A2aMessageSent event with target
/// "broadcast" is published on success.
pub async fn a2a_broadcast(
    &self,
    from: &AgentId,
    payload: serde_json::Value,
) -> Result<()> {
    // The sender must be a registered agent.
    if self.registry.get(from).is_none() {
        return Err(zclaw_types::ZclawError::NotFound(
            format!("Sender agent not found: {}", from)
        ));
    }
    // Route a broadcast notification envelope.
    self.a2a_router
        .route(A2aEnvelope::new(
            *from,
            A2aRecipient::Broadcast,
            A2aMessageType::Notification,
            payload,
        ))
        .await?;
    // Observability: record the broadcast on the event bus.
    self.events.publish(Event::A2aMessageSent {
        from: *from,
        to: "broadcast".to_string(),
        message_type: "broadcast".to_string(),
    });
    Ok(())
}
/// Discover agents that have a specific capability.
///
/// Delegates to the A2A router and returns the matching profiles.
pub async fn a2a_discover(&self, capability: &str) -> Result<Vec<A2aAgentProfile>> {
    let result = self.a2a_router.discover(capability).await?;
    // NOTE(review): the event carries a freshly generated AgentId rather than
    // the ids of the agents that were actually discovered, so subscribers
    // cannot correlate this event with any real agent. Confirm whether the
    // event should instead be emitted per discovered profile.
    self.events.publish(Event::A2aAgentDiscovered {
        agent_id: AgentId::new(),
        capabilities: vec![capability.to_string()],
    });
    Ok(result)
}
/// Try to receive a pending A2A message for an agent (non-blocking).
///
/// Returns `Ok(None)` when the inbox exists but is currently empty.
pub async fn a2a_receive(&self, agent_id: &AgentId) -> Result<Option<A2aEnvelope>> {
    let inbox_entry = self.a2a_inboxes.get(agent_id)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(
            format!("No A2A inbox for agent: {}", agent_id),
        ))?;
    let mut guard = inbox_entry.lock().await;
    match guard.try_recv() {
        // An empty inbox is not an error for a non-blocking poll.
        Err(_) => Ok(None),
        Ok(envelope) => {
            // Surface the delivery to observers before handing it back.
            self.events.publish(Event::A2aMessageReceived {
                from: envelope.from,
                to: agent_id.to_string(),
                message_type: "direct".to_string(),
            });
            Ok(Some(envelope))
        }
    }
}
/// Delegate a task to another agent and wait for the response with a timeout.
///
/// Sends a `Task` envelope tagged with a fresh task id, then waits on the
/// sender's own inbox for a `Response` whose `reply_to` matches the envelope
/// id. Non-matching messages are parked in the inbox's pending queue (not
/// dropped) so `a2a_receive` can pick them up later.
///
/// NOTE: the sender's inbox mutex is held for the whole wait, so concurrent
/// `a2a_receive` calls for the same agent block until this returns.
pub async fn a2a_delegate_task(
    &self,
    from: &AgentId,
    to: &AgentId,
    task_description: String,
    timeout_ms: u64,
) -> Result<serde_json::Value> {
    // Validate both agents exist
    self.registry.get(from)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(
            format!("Sender agent not found: {}", from)
        ))?;
    self.registry.get(to)
        .ok_or_else(|| zclaw_types::ZclawError::NotFound(
            format!("Target agent not found: {}", to)
        ))?;
    // Check capability permission
    self.check_a2a_permission(from, to)?;
    // Send task request
    let task_id = uuid::Uuid::new_v4().to_string();
    let envelope = A2aEnvelope::new(
        *from,
        A2aRecipient::Direct { agent_id: *to },
        A2aMessageType::Task,
        serde_json::json!({
            "task_id": task_id,
            "description": task_description,
        }),
    ).with_conversation(task_id.clone());
    let envelope_id = envelope.id.clone();
    self.a2a_router.route(envelope).await?;
    self.events.publish(Event::A2aMessageSent {
        from: *from,
        to: format!("{}", to),
        message_type: "task".to_string(),
    });
    // Wait for response with timeout
    let timeout = tokio::time::Duration::from_millis(timeout_ms);
    let result = tokio::time::timeout(timeout, async {
        let inbox_entry = self.a2a_inboxes.get(from)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(
                format!("No A2A inbox for agent: {}", from)
            ))?;
        let mut inbox = inbox_entry.lock().await;
        // First scan messages parked in the pending queue: a concurrent
        // delegation may already have requeued our response there.
        let parked = std::mem::take(&mut inbox.pending);
        let mut matched: Option<serde_json::Value> = None;
        for msg in parked {
            if matched.is_none()
                && msg.message_type == A2aMessageType::Response
                && msg.reply_to.as_deref() == Some(&envelope_id)
            {
                matched = Some(msg.payload);
            } else {
                // Preserve order of everything that is not our response.
                inbox.requeue(msg);
            }
        }
        if let Some(payload) = matched {
            return Ok::<_, zclaw_types::ZclawError>(payload);
        }
        // Then wait on the channel itself. Reading `rx` directly — instead of
        // the pending-first `recv()` — is deliberate: the previous code popped
        // a requeued message straight back out of `pending` on the next
        // iteration, busy-spinning at 100% CPU until the timeout fired.
        loop {
            match inbox.rx.recv().await {
                Some(msg) => {
                    // Check if this is the response to our task.
                    if msg.message_type == A2aMessageType::Response
                        && msg.reply_to.as_deref() == Some(&envelope_id) {
                        return Ok::<_, zclaw_types::ZclawError>(msg.payload);
                    }
                    // Not our response — park it for later processing.
                    tracing::debug!("Re-queuing non-matching A2A message: {}", msg.id);
                    inbox.requeue(msg);
                }
                None => {
                    return Err(zclaw_types::ZclawError::Internal(
                        "A2A inbox channel closed".to_string()
                    ));
                }
            }
        }
    }).await;
    match result {
        Ok(Ok(payload)) => Ok(payload),
        Ok(Err(e)) => Err(e),
        Err(_) => Err(zclaw_types::ZclawError::Timeout(
            format!("A2A task delegation timed out after {}ms", timeout_ms)
        )),
    }
}
/// Get all online agents via A2A profiles.
pub async fn a2a_get_online_agents(&self) -> Result<Vec<A2aAgentProfile>> {
    let profiles = self.a2a_router.list_profiles().await;
    Ok(profiles)
}
}

View File

@@ -0,0 +1,138 @@
//! Adapter types bridging runtime interfaces
use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
use serde_json::Value;
use zclaw_runtime::{LlmDriver, tool::SkillExecutor};
use zclaw_skills::{SkillRegistry, LlmCompleter};
use zclaw_types::Result;
/// Adapter that bridges `zclaw_runtime::LlmDriver` -> `zclaw_skills::LlmCompleter`
pub(crate) struct LlmDriverAdapter {
    // Underlying driver that performs the actual completion calls.
    pub(crate) driver: Arc<dyn LlmDriver>,
    // Token cap forwarded on every completion request.
    pub(crate) max_tokens: u32,
    // Sampling temperature forwarded on every completion request.
    pub(crate) temperature: f32,
}
impl LlmCompleter for LlmDriverAdapter {
    /// Run a plain-prompt completion through the wrapped driver, returning
    /// the concatenated text blocks of the response (non-text blocks are
    /// ignored). Errors are flattened into a `String` per the trait contract.
    fn complete(
        &self,
        prompt: &str,
    ) -> Pin<Box<dyn std::future::Future<Output = std::result::Result<String, String>> + Send + '_>> {
        // Hoist everything the future needs so the async block owns its data.
        let driver = self.driver.clone();
        let max_tokens = self.max_tokens;
        let temperature = self.temperature;
        let prompt_owned = prompt.to_string();
        Box::pin(async move {
            // A bare prompt becomes a single user message.
            let request = zclaw_runtime::CompletionRequest {
                messages: vec![zclaw_types::Message::user(prompt_owned)],
                max_tokens: Some(max_tokens),
                temperature: Some(temperature),
                ..Default::default()
            };
            let response = match driver.complete(request).await {
                Ok(resp) => resp,
                Err(e) => return Err(format!("LLM completion error: {}", e)),
            };
            // Concatenate all text blocks in order.
            let mut text = String::new();
            for block in &response.content {
                if let zclaw_runtime::ContentBlock::Text { text: chunk } = block {
                    text.push_str(chunk);
                }
            }
            Ok(text)
        })
    }
}
/// Skill executor implementation for Kernel
///
/// Bridges the runtime's skill-execution interface onto the skill registry,
/// carrying a shared LLM completer so skills can make model calls.
pub struct KernelSkillExecutor {
    // Registry that skill invocations are dispatched to.
    pub(crate) skills: Arc<SkillRegistry>,
    // Shared completer injected into every skill's execution context.
    pub(crate) llm: Arc<dyn LlmCompleter>,
}
impl KernelSkillExecutor {
    /// Build an executor over `skills`, wrapping `driver` in an
    /// `LlmDriverAdapter` with default generation settings
    /// (4096 max tokens, temperature 0.7).
    pub fn new(skills: Arc<SkillRegistry>, driver: Arc<dyn LlmDriver>) -> Self {
        let adapter = LlmDriverAdapter {
            driver,
            max_tokens: 4096,
            temperature: 0.7,
        };
        let llm: Arc<dyn zclaw_skills::LlmCompleter> = Arc::new(adapter);
        Self { skills, llm }
    }
}
#[async_trait]
impl SkillExecutor for KernelSkillExecutor {
    /// Execute a skill by id with an LLM-enabled context, returning its output.
    async fn execute_skill(
        &self,
        skill_id: &str,
        agent_id: &str,
        session_id: &str,
        input: Value,
    ) -> Result<Value> {
        // Fresh per-call context; the shared completer lets skills call the LLM.
        let ctx = zclaw_skills::SkillContext {
            agent_id: agent_id.to_string(),
            session_id: session_id.to_string(),
            llm: Some(self.llm.clone()),
            ..Default::default()
        };
        let sid = zclaw_types::SkillId::new(skill_id);
        let outcome = self.skills.execute(&sid, &ctx, input).await?;
        Ok(outcome.output)
    }

    /// Look up full metadata for a single skill, if registered.
    fn get_skill_detail(&self, skill_id: &str) -> Option<zclaw_runtime::tool::SkillDetail> {
        let snapshot = self.skills.manifests_snapshot();
        snapshot
            .get(&zclaw_types::SkillId::new(skill_id))
            .map(|manifest| zclaw_runtime::tool::SkillDetail {
                id: manifest.id.as_str().to_string(),
                name: manifest.name.clone(),
                description: manifest.description.clone(),
                category: manifest.category.clone(),
                input_schema: manifest.input_schema.clone(),
                triggers: manifest.triggers.clone(),
                capabilities: manifest.capabilities.clone(),
            })
    }

    /// Compact index (id, description, triggers) of all enabled skills.
    fn list_skill_index(&self) -> Vec<zclaw_runtime::tool::SkillIndexEntry> {
        let snapshot = self.skills.manifests_snapshot();
        let mut entries = Vec::new();
        for manifest in snapshot.values() {
            if manifest.enabled {
                entries.push(zclaw_runtime::tool::SkillIndexEntry {
                    id: manifest.id.as_str().to_string(),
                    description: manifest.description.clone(),
                    triggers: manifest.triggers.clone(),
                });
            }
        }
        entries
    }
}
/// Inbox wrapper for A2A message receivers that supports re-queuing
/// non-matching messages instead of dropping them.
#[cfg(feature = "multi-agent")]
pub(crate) struct AgentInbox {
    // Channel end the A2A router delivers envelopes into.
    pub(crate) rx: tokio::sync::mpsc::Receiver<zclaw_protocols::A2aEnvelope>,
    // Messages popped but not consumed; served before `rx` on the next receive.
    pub(crate) pending: std::collections::VecDeque<zclaw_protocols::A2aEnvelope>,
}
#[cfg(feature = "multi-agent")]
impl AgentInbox {
    /// Wrap a raw receiver with an empty pending queue.
    pub(crate) fn new(rx: tokio::sync::mpsc::Receiver<zclaw_protocols::A2aEnvelope>) -> Self {
        Self {
            rx,
            pending: std::collections::VecDeque::new(),
        }
    }

    /// Non-blocking receive: requeued messages are drained before the channel.
    pub(crate) fn try_recv(&mut self) -> std::result::Result<zclaw_protocols::A2aEnvelope, tokio::sync::mpsc::error::TryRecvError> {
        match self.pending.pop_front() {
            Some(envelope) => Ok(envelope),
            None => self.rx.try_recv(),
        }
    }

    /// Blocking receive with the same pending-first ordering as `try_recv`.
    pub(crate) async fn recv(&mut self) -> Option<zclaw_protocols::A2aEnvelope> {
        match self.pending.pop_front() {
            Some(envelope) => Some(envelope),
            None => self.rx.recv().await,
        }
    }

    /// Park an envelope at the back of the pending queue for a later receive.
    pub(crate) fn requeue(&mut self, envelope: zclaw_protocols::A2aEnvelope) {
        self.pending.push_back(envelope);
    }
}

View File

@@ -0,0 +1,113 @@
//! Agent CRUD operations
use zclaw_types::{AgentConfig, AgentId, AgentInfo, Event, Result};
#[cfg(feature = "multi-agent")]
use std::sync::Arc;
#[cfg(feature = "multi-agent")]
use tokio::sync::Mutex;
#[cfg(feature = "multi-agent")]
use super::adapters::AgentInbox;
use super::Kernel;
impl Kernel {
    /// Spawn a new agent.
    ///
    /// Validates the requested capabilities, persists the config, wires the
    /// agent into the A2A router (when the `multi-agent` feature is enabled),
    /// registers it in the in-memory registry, and emits `AgentSpawned`.
    pub async fn spawn_agent(&self, config: AgentConfig) -> Result<AgentId> {
        let id = config.id;
        // Validate capabilities
        self.capabilities.validate(&config.capabilities)?;
        // Register in memory
        self.memory.save_agent(&config).await?;
        // Register with A2A router for multi-agent messaging (before config is moved)
        #[cfg(feature = "multi-agent")]
        {
            let profile = Self::agent_config_to_a2a_profile(&config);
            let rx = self.a2a_router.register_agent(profile).await;
            self.a2a_inboxes.insert(id, Arc::new(Mutex::new(AgentInbox::new(rx))));
        }
        // Register in registry (consumes config)
        let name = config.name.clone();
        self.registry.register(config);
        // Emit event
        self.events.publish(Event::AgentSpawned {
            agent_id: id,
            name,
        });
        Ok(id)
    }

    /// Kill an agent: remove it from the registry, memory store, and A2A
    /// router/inboxes, then emit `AgentTerminated`.
    ///
    /// NOTE(review): no existence check is performed — killing an unknown id
    /// still emits the event and returns Ok. Confirm this idempotent behavior
    /// is intended.
    pub async fn kill_agent(&self, id: &AgentId) -> Result<()> {
        // Remove from registry
        self.registry.unregister(id);
        // Remove from memory
        self.memory.delete_agent(id).await?;
        // Unregister from A2A router
        #[cfg(feature = "multi-agent")]
        {
            self.a2a_router.unregister_agent(id).await;
            self.a2a_inboxes.remove(id);
        }
        // Emit event
        self.events.publish(Event::AgentTerminated {
            agent_id: *id,
            reason: "killed".to_string(),
        });
        Ok(())
    }

    /// Update an existing agent's configuration.
    ///
    /// Fails with `NotFound` for unknown agents; otherwise validates
    /// capabilities, persists the new config, updates the registry entry
    /// (preserving runtime state), and emits `AgentConfigUpdated`.
    pub async fn update_agent(&self, config: AgentConfig) -> Result<()> {
        let id = config.id;
        // Validate the agent exists
        if self.registry.get(&id).is_none() {
            return Err(zclaw_types::ZclawError::NotFound(
                format!("Agent not found: {}", id)
            ));
        }
        // Validate capabilities
        self.capabilities.validate(&config.capabilities)?;
        // Save updated config to memory
        self.memory.save_agent(&config).await?;
        // Update in registry (preserves state and message count)
        self.registry.update(config.clone());
        // Emit event
        self.events.publish(Event::AgentConfigUpdated {
            agent_id: id,
            name: config.name.clone(),
        });
        Ok(())
    }

    /// List all agents currently in the registry.
    pub fn list_agents(&self) -> Vec<AgentInfo> {
        self.registry.list()
    }

    /// Get agent info (runtime view) by id.
    pub fn get_agent(&self, id: &AgentId) -> Option<AgentInfo> {
        self.registry.get_info(id)
    }

    /// Get agent config (for export) by id.
    pub fn get_agent_config(&self, id: &AgentId) -> Option<AgentConfig> {
        self.registry.get(id)
    }
}

View File

@@ -0,0 +1,155 @@
//! Approval management
use std::sync::Arc;
use serde_json::Value;
use zclaw_types::{Result, HandRun, HandRunId, HandRunStatus, TriggerSource};
use zclaw_hands::HandContext;
use super::Kernel;
impl Kernel {
    // ============================================================
    // Approval Management
    // ============================================================
    /// List pending approvals (entries whose status is still "pending").
    pub async fn list_approvals(&self) -> Vec<super::ApprovalEntry> {
        let approvals = self.pending_approvals.lock().await;
        approvals.iter().filter(|a| a.status == "pending").cloned().collect()
    }
    /// Get a single approval by ID (any status, not just pending)
    ///
    /// Returns None if no approval with the given ID exists.
    pub async fn get_approval(&self, id: &str) -> Option<super::ApprovalEntry> {
        let approvals = self.pending_approvals.lock().await;
        approvals.iter().find(|a| a.id == id).cloned()
    }
    /// Create a pending approval (called when a needs_approval hand is triggered)
    pub async fn create_approval(&self, hand_id: String, input: serde_json::Value) -> super::ApprovalEntry {
        let entry = super::ApprovalEntry {
            id: uuid::Uuid::new_v4().to_string(),
            hand_id,
            status: "pending".to_string(),
            created_at: chrono::Utc::now(),
            input,
            reject_reason: None,
        };
        let mut approvals = self.pending_approvals.lock().await;
        approvals.push(entry.clone());
        entry
    }
    /// Respond to an approval.
    ///
    /// Marks the entry "approved" or "rejected". On approval the hand is
    /// executed on a background task with HandRun tracking; that task later
    /// updates the entry's status to "completed" or "failed".
    ///
    /// NOTE(review): `reject_reason` is populated whenever `reason` is given,
    /// even when `approved == true` — confirm that is intended.
    pub async fn respond_to_approval(
        &self,
        id: &str,
        approved: bool,
        reason: Option<String>,
    ) -> Result<()> {
        let mut approvals = self.pending_approvals.lock().await;
        // Only entries still in "pending" state can be responded to.
        let entry = approvals.iter_mut().find(|a| a.id == id && a.status == "pending")
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id)))?;
        entry.status = if approved { "approved".to_string() } else { "rejected".to_string() };
        if let Some(r) = reason {
            entry.reject_reason = Some(r);
        }
        if approved {
            let hand_id = entry.hand_id.clone();
            let input = entry.input.clone();
            drop(approvals); // Release lock before async hand execution
            // Execute the hand in background with HandRun tracking.
            // Clone the shared handles the spawned task needs.
            let hands = self.hands.clone();
            let approvals = self.pending_approvals.clone();
            let memory = self.memory.clone();
            let running_hand_runs = self.running_hand_runs.clone();
            let id_owned = id.to_string();
            tokio::spawn(async move {
                // Create HandRun record for tracking
                let run_id = HandRunId::new();
                let now = chrono::Utc::now().to_rfc3339();
                let mut run = HandRun {
                    id: run_id,
                    hand_name: hand_id.clone(),
                    trigger_source: TriggerSource::Manual,
                    params: input.clone(),
                    status: HandRunStatus::Pending,
                    result: None,
                    error: None,
                    duration_ms: None,
                    created_at: now.clone(),
                    started_at: None,
                    completed_at: None,
                };
                // Persistence failures are logged but do not abort execution.
                let _ = memory.save_hand_run(&run).await.map_err(|e| {
                    tracing::warn!("[Approval] Failed to save hand run: {}", e);
                });
                run.status = HandRunStatus::Running;
                run.started_at = Some(chrono::Utc::now().to_rfc3339());
                let _ = memory.update_hand_run(&run).await.map_err(|e| {
                    tracing::warn!("[Approval] Failed to update hand run (running): {}", e);
                });
                // Register cancellation flag so cancel_hand_run can reach this run.
                let cancel_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
                running_hand_runs.insert(run.id, cancel_flag.clone());
                let context = HandContext::default();
                let start = std::time::Instant::now();
                let result = hands.execute(&hand_id, &context, input).await;
                let duration = start.elapsed();
                // Remove from running map
                running_hand_runs.remove(&run.id);
                // Update HandRun with result
                let completed_at = chrono::Utc::now().to_rfc3339();
                match &result {
                    Ok(res) => {
                        run.status = HandRunStatus::Completed;
                        run.result = Some(res.output.clone());
                        run.error = res.error.clone();
                    }
                    Err(e) => {
                        run.status = HandRunStatus::Failed;
                        run.error = Some(e.to_string());
                    }
                }
                run.duration_ms = Some(duration.as_millis() as u64);
                run.completed_at = Some(completed_at);
                let _ = memory.update_hand_run(&run).await.map_err(|e| {
                    tracing::warn!("[Approval] Failed to update hand run (completed): {}", e);
                });
                // Update approval status based on execution result; on failure
                // the error text is attached to the entry's input object.
                let mut approvals = approvals.lock().await;
                if let Some(entry) = approvals.iter_mut().find(|a| a.id == id_owned) {
                    match result {
                        Ok(_) => entry.status = "completed".to_string(),
                        Err(e) => {
                            entry.status = "failed".to_string();
                            if let Some(obj) = entry.input.as_object_mut() {
                                obj.insert("error".to_string(), Value::String(format!("{}", e)));
                            }
                        }
                    }
                }
            });
        }
        Ok(())
    }
    /// Cancel a pending approval (sets status to "cancelled"; fails with
    /// `NotFound` if no matching pending entry exists).
    pub async fn cancel_approval(&self, id: &str) -> Result<()> {
        let mut approvals = self.pending_approvals.lock().await;
        let entry = approvals.iter_mut().find(|a| a.id == id && a.status == "pending")
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id)))?;
        entry.status = "cancelled".to_string();
        Ok(())
    }
}

View File

@@ -0,0 +1,209 @@
//! Hand execution and run tracking
use std::sync::Arc;
use zclaw_types::{Result, HandRun, HandRunId, HandRunStatus, HandRunFilter, TriggerSource};
use zclaw_hands::{HandContext, HandResult};
use super::Kernel;
impl Kernel {
/// Get the hands registry
pub fn hands(&self) -> &Arc<zclaw_hands::HandRegistry> {
&self.hands
}
/// List all registered hands
pub async fn list_hands(&self) -> Vec<zclaw_hands::HandConfig> {
self.hands.list().await
}
/// Execute a hand with the given input, tracking the run
pub async fn execute_hand(
&self,
hand_id: &str,
input: serde_json::Value,
) -> Result<(HandResult, HandRunId)> {
let run_id = HandRunId::new();
let now = chrono::Utc::now().to_rfc3339();
// Create the initial HandRun record
let mut run = HandRun {
id: run_id,
hand_name: hand_id.to_string(),
trigger_source: TriggerSource::Manual,
params: input.clone(),
status: HandRunStatus::Pending,
result: None,
error: None,
duration_ms: None,
created_at: now.clone(),
started_at: None,
completed_at: None,
};
self.memory.save_hand_run(&run).await?;
// Transition to Running
run.status = HandRunStatus::Running;
run.started_at = Some(chrono::Utc::now().to_rfc3339());
self.memory.update_hand_run(&run).await?;
// Register cancellation flag
let cancel_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
self.running_hand_runs.insert(run_id, cancel_flag.clone());
// Execute the hand
let context = HandContext::default();
let start = std::time::Instant::now();
let hand_result = self.hands.execute(hand_id, &context, input).await;
let duration = start.elapsed();
// Check if cancelled during execution
if cancel_flag.load(std::sync::atomic::Ordering::Relaxed) {
let mut run_update = run.clone();
run_update.status = HandRunStatus::Cancelled;
run_update.completed_at = Some(chrono::Utc::now().to_rfc3339());
run_update.duration_ms = Some(duration.as_millis() as u64);
self.memory.update_hand_run(&run_update).await?;
self.running_hand_runs.remove(&run_id);
return Err(zclaw_types::ZclawError::Internal("Hand execution cancelled".to_string()));
}
// Remove from running map
self.running_hand_runs.remove(&run_id);
// Update HandRun with result
let completed_at = chrono::Utc::now().to_rfc3339();
match &hand_result {
Ok(res) => {
run.status = HandRunStatus::Completed;
run.result = Some(res.output.clone());
run.error = res.error.clone();
}
Err(e) => {
run.status = HandRunStatus::Failed;
run.error = Some(e.to_string());
}
}
run.duration_ms = Some(duration.as_millis() as u64);
run.completed_at = Some(completed_at);
self.memory.update_hand_run(&run).await?;
hand_result.map(|res| (res, run_id))
}
/// Execute a hand with a specific trigger source (for scheduled/event triggers)
pub async fn execute_hand_with_source(
&self,
hand_id: &str,
input: serde_json::Value,
trigger_source: TriggerSource,
) -> Result<(HandResult, HandRunId)> {
let run_id = HandRunId::new();
let now = chrono::Utc::now().to_rfc3339();
let mut run = HandRun {
id: run_id,
hand_name: hand_id.to_string(),
trigger_source,
params: input.clone(),
status: HandRunStatus::Pending,
result: None,
error: None,
duration_ms: None,
created_at: now,
started_at: None,
completed_at: None,
};
self.memory.save_hand_run(&run).await?;
run.status = HandRunStatus::Running;
run.started_at = Some(chrono::Utc::now().to_rfc3339());
self.memory.update_hand_run(&run).await?;
let cancel_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
self.running_hand_runs.insert(run_id, cancel_flag.clone());
let context = HandContext::default();
let start = std::time::Instant::now();
let hand_result = self.hands.execute(hand_id, &context, input).await;
let duration = start.elapsed();
// Check if cancelled during execution
if cancel_flag.load(std::sync::atomic::Ordering::Relaxed) {
run.status = HandRunStatus::Cancelled;
run.completed_at = Some(chrono::Utc::now().to_rfc3339());
run.duration_ms = Some(duration.as_millis() as u64);
self.memory.update_hand_run(&run).await?;
self.running_hand_runs.remove(&run_id);
return Err(zclaw_types::ZclawError::Internal("Hand execution cancelled".to_string()));
}
self.running_hand_runs.remove(&run_id);
let completed_at = chrono::Utc::now().to_rfc3339();
match &hand_result {
Ok(res) => {
run.status = HandRunStatus::Completed;
run.result = Some(res.output.clone());
run.error = res.error.clone();
}
Err(e) => {
run.status = HandRunStatus::Failed;
run.error = Some(e.to_string());
}
}
run.duration_ms = Some(duration.as_millis() as u64);
run.completed_at = Some(completed_at);
self.memory.update_hand_run(&run).await?;
hand_result.map(|res| (res, run_id))
}
// ============================================================
// Hand Run Tracking
// ============================================================
/// Get a hand run by ID
pub async fn get_hand_run(&self, id: &HandRunId) -> Result<Option<HandRun>> {
self.memory.get_hand_run(id).await
}
/// List hand runs with filter
pub async fn list_hand_runs(&self, filter: &HandRunFilter) -> Result<Vec<HandRun>> {
self.memory.list_hand_runs(filter).await
}
/// Count hand runs matching filter
pub async fn count_hand_runs(&self, filter: &HandRunFilter) -> Result<u32> {
self.memory.count_hand_runs(filter).await
}
/// Cancel a running hand execution
pub async fn cancel_hand_run(&self, id: &HandRunId) -> Result<()> {
if let Some((_, flag)) = self.running_hand_runs.remove(id) {
flag.store(true, std::sync::atomic::Ordering::Relaxed);
// Note: the actual status update happens in execute_hand_with_source
// when it detects the cancel flag
Ok(())
} else {
// Not currently running — check if exists at all
let run = self.memory.get_hand_run(id).await?;
match run {
Some(r) if r.status == HandRunStatus::Pending => {
let mut updated = r;
updated.status = HandRunStatus::Cancelled;
updated.completed_at = Some(chrono::Utc::now().to_rfc3339());
self.memory.update_hand_run(&updated).await?;
Ok(())
}
Some(r) => Err(zclaw_types::ZclawError::InvalidInput(
format!("Cannot cancel hand run {} with status {}", id, r.status)
)),
None => Err(zclaw_types::ZclawError::NotFound(
format!("Hand run {} not found", id)
)),
}
}
}
}

View File

@@ -0,0 +1,314 @@
//! Message sending (non-streaming, streaming, system prompt building)
use tokio::sync::mpsc;
use zclaw_types::{AgentId, Result};
/// Chat mode configuration passed from the frontend.
/// Controls thinking, reasoning, and plan mode behavior.
#[derive(Debug, Clone)]
pub struct ChatModeConfig {
    // Some(true) enables extended thinking on the agent loop; None/Some(false) leaves it off.
    pub thinking_enabled: Option<bool>,
    // Reasoning effort level forwarded verbatim to the agent loop
    // (string values defined by the frontend — TODO confirm the accepted set).
    pub reasoning_effort: Option<String>,
    // Some(true) enables plan mode on the agent loop; None/Some(false) leaves it off.
    pub plan_mode: Option<bool>,
}
use zclaw_runtime::{AgentLoop, tool::builtin::PathValidator};
use super::Kernel;
use super::super::MessageResponse;
impl Kernel {
    /// Send a message to an agent.
    ///
    /// Convenience wrapper over `send_message_with_chat_mode` with no
    /// chat-mode overrides.
    pub async fn send_message(
        &self,
        agent_id: &AgentId,
        message: String,
    ) -> Result<MessageResponse> {
        self.send_message_with_chat_mode(agent_id, message, None).await
    }
    /// Send a message to an agent with optional chat mode configuration.
    ///
    /// Builds an `AgentLoop` from the kernel's current model settings plus the
    /// agent's own overrides, runs one non-streaming turn, and returns the
    /// response together with token usage.
    ///
    /// NOTE(review): a fresh session is created on every call — unlike the
    /// streaming variant there is no session-reuse parameter here. Confirm
    /// this is intended.
    pub async fn send_message_with_chat_mode(
        &self,
        agent_id: &AgentId,
        message: String,
        chat_mode: Option<ChatModeConfig>,
    ) -> Result<MessageResponse> {
        let agent_config = self.registry.get(agent_id)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
        // Create or get session
        let session_id = self.memory.create_session(agent_id).await?;
        // Always use Kernel's current model configuration.
        // This ensures the user's "Model & API" settings are respected.
        let model = self.config.model().to_string();
        // Create agent loop with model configuration; agent-level overrides
        // (max_tokens, temperature, compaction threshold) win over kernel defaults.
        let tools = self.create_tool_registry();
        let mut loop_runner = AgentLoop::new(
            *agent_id,
            self.driver.clone(),
            tools,
            self.memory.clone(),
        )
        .with_model(&model)
        .with_skill_executor(self.skill_executor.clone())
        .with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
        .with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
        .with_compaction_threshold(
            agent_config.compaction_threshold
                .map(|t| t as usize)
                .unwrap_or_else(|| self.config.compaction_threshold()),
        );
        // Set path validator from agent's workspace directory (if configured)
        if let Some(ref workspace) = agent_config.workspace {
            let path_validator = PathValidator::new().with_workspace(workspace.clone());
            tracing::info!(
                "[Kernel] Setting path_validator with workspace: {} for agent {}",
                workspace.display(),
                agent_id
            );
            loop_runner = loop_runner.with_path_validator(path_validator);
        }
        // Inject middleware chain if available
        if let Some(chain) = self.create_middleware_chain() {
            loop_runner = loop_runner.with_middleware_chain(chain);
        }
        // Apply chat mode configuration (thinking/reasoning/plan mode)
        if let Some(ref mode) = chat_mode {
            if mode.thinking_enabled.unwrap_or(false) {
                loop_runner = loop_runner.with_thinking_enabled(true);
            }
            if let Some(ref effort) = mode.reasoning_effort {
                loop_runner = loop_runner.with_reasoning_effort(effort.clone());
            }
            if mode.plan_mode.unwrap_or(false) {
                loop_runner = loop_runner.with_plan_mode(true);
            }
        }
        // Build system prompt with skill information injected
        let system_prompt = self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await;
        let loop_runner = loop_runner.with_system_prompt(&system_prompt);
        // Run the loop
        let result = loop_runner.run(session_id, message).await?;
        // Track message count
        self.registry.increment_message_count(agent_id);
        Ok(MessageResponse {
            content: result.response,
            input_tokens: result.input_tokens,
            output_tokens: result.output_tokens,
        })
    }
    /// Send a message with streaming.
    ///
    /// Convenience wrapper over `send_message_stream_with_prompt` with no
    /// prompt override, no session reuse, and no chat-mode overrides.
    pub async fn send_message_stream(
        &self,
        agent_id: &AgentId,
        message: String,
    ) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
        self.send_message_stream_with_prompt(agent_id, message, None, None, None).await
    }
    /// Send a message with streaming, optional system prompt, optional session reuse,
    /// and optional chat mode configuration (thinking/reasoning/plan mode).
    ///
    /// Returns a channel of `LoopEvent`s produced by the agent loop.
    pub async fn send_message_stream_with_prompt(
        &self,
        agent_id: &AgentId,
        message: String,
        system_prompt_override: Option<String>,
        session_id_override: Option<zclaw_types::SessionId>,
        chat_mode: Option<ChatModeConfig>,
    ) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
        let agent_config = self.registry.get(agent_id)
            .ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
        // Reuse existing session or create new one
        let session_id = match session_id_override {
            Some(id) => {
                // Use get_or_create to ensure the frontend's session ID is persisted.
                // This is the critical bridge: without it, the kernel generates a
                // different UUID each turn, so conversation history is never found.
                tracing::debug!("Reusing frontend session ID: {}", id);
                self.memory.get_or_create_session(&id, agent_id).await?
            }
            None => self.memory.create_session(agent_id).await?,
        };
        // Always use Kernel's current model configuration.
        // This ensures the user's "Model & API" settings are respected.
        let model = self.config.model().to_string();
        // Create agent loop with model configuration; agent-level overrides
        // win over kernel defaults (same setup as the non-streaming path).
        let tools = self.create_tool_registry();
        let mut loop_runner = AgentLoop::new(
            *agent_id,
            self.driver.clone(),
            tools,
            self.memory.clone(),
        )
        .with_model(&model)
        .with_skill_executor(self.skill_executor.clone())
        .with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
        .with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
        .with_compaction_threshold(
            agent_config.compaction_threshold
                .map(|t| t as usize)
                .unwrap_or_else(|| self.config.compaction_threshold()),
        );
        // Set path validator from agent's workspace directory (if configured)
        // This enables file_read / file_write tools to access the workspace
        if let Some(ref workspace) = agent_config.workspace {
            let path_validator = PathValidator::new().with_workspace(workspace.clone());
            tracing::info!(
                "[Kernel] Setting path_validator with workspace: {} for agent {}",
                workspace.display(),
                agent_id
            );
            loop_runner = loop_runner.with_path_validator(path_validator);
        }
        // Inject middleware chain if available
        if let Some(chain) = self.create_middleware_chain() {
            loop_runner = loop_runner.with_middleware_chain(chain);
        }
        // Apply chat mode configuration (thinking/reasoning/plan mode from frontend)
        if let Some(ref mode) = chat_mode {
            if mode.thinking_enabled.unwrap_or(false) {
                loop_runner = loop_runner.with_thinking_enabled(true);
            }
            if let Some(ref effort) = mode.reasoning_effort {
                loop_runner = loop_runner.with_reasoning_effort(effort.clone());
            }
            if mode.plan_mode.unwrap_or(false) {
                loop_runner = loop_runner.with_plan_mode(true);
            }
        }
        // Use external prompt if provided, otherwise build default
        let system_prompt = match system_prompt_override {
            Some(prompt) => prompt,
            None => self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await,
        };
        let loop_runner = loop_runner.with_system_prompt(&system_prompt);
        // Run with streaming. NOTE: the message count is incremented before
        // the stream completes, so it counts attempts, not successes.
        self.registry.increment_message_count(agent_id);
        loop_runner.run_streaming(session_id, message).await
    }
    /// Build a system prompt with skill information injected.
    ///
    /// Starts from `base_prompt` (or a generic assistant prompt) and, when
    /// skills are registered, appends a categorized skill catalog plus
    /// guidance for autonomous `execute_skill` invocation.
    pub(super) async fn build_system_prompt_with_skills(&self, base_prompt: Option<&String>) -> String {
        // Get skill list asynchronously
        let skills = self.skills.list().await;
        let mut prompt = base_prompt
            .map(|p| p.clone())
            .unwrap_or_else(|| "You are a helpful AI assistant.".to_string());
        // Inject skill information with categories
        if !skills.is_empty() {
            prompt.push_str("\n\n## Available Skills\n\n");
            prompt.push_str("You have access to specialized skills. Analyze user intent and autonomously call `execute_skill` with the appropriate skill_id.\n\n");
            // Group skills by category based on their ID patterns
            let categories = self.categorize_skills(&skills);
            for (category, category_skills) in categories {
                prompt.push_str(&format!("### {}\n", category));
                for skill in category_skills {
                    prompt.push_str(&format!(
                        "- **{}**: {}",
                        skill.id.as_str(),
                        skill.description
                    ));
                    prompt.push('\n');
                }
                prompt.push('\n');
            }
            prompt.push_str("### When to use skills:\n");
            prompt.push_str("- **IMPORTANT**: You should autonomously decide when to use skills based on your understanding of the user's intent.\n");
            prompt.push_str("- Do not wait for explicit skill names - recognize the need and act.\n");
            prompt.push_str("- Match user's request to the most appropriate skill's domain.\n");
            prompt.push_str("- If multiple skills could apply, choose the most specialized one.\n\n");
            prompt.push_str("### Example:\n");
            prompt.push_str("User: \"分析腾讯财报\" → Intent: Financial analysis → Call: execute_skill(\"finance-tracker\", {...})\n");
        }
        prompt
    }
    /// Categorize skills into logical groups
    ///
    /// Priority:
    /// 1. Use skill's `category` field if defined in SKILL.md
    /// 2. Fall back to pattern matching for backward compatibility
    ///
    /// Returns (category name, skills) pairs sorted by a predefined category
    /// order; unknown categories sort last.
    pub(super) fn categorize_skills<'a>(&self, skills: &'a [zclaw_skills::SkillManifest]) -> Vec<(String, Vec<&'a zclaw_skills::SkillManifest>)> {
        let mut categories: std::collections::HashMap<String, Vec<&zclaw_skills::SkillManifest>> = std::collections::HashMap::new();
        // Fallback category patterns for skills without explicit category.
        // Category names are runtime strings surfaced in the system prompt.
        let fallback_patterns = [
            ("开发工程", vec!["senior-developer", "frontend-developer", "backend-architect", "ai-engineer", "devops-automator", "rapid-prototyper", "lsp-index-engineer"]),
            ("测试质量", vec!["api-tester", "evidence-collector", "reality-checker", "performance-benchmarker", "test-results-analyzer", "accessibility-auditor", "code-review"]),
            ("安全合规", vec!["security-engineer", "legal-compliance-checker", "agentic-identity-trust"]),
            ("数据分析", vec!["analytics-reporter", "finance-tracker", "data-analysis", "sales-data-extraction-agent", "data-consolidation-agent", "report-distribution-agent"]),
            ("项目管理", vec!["senior-pm", "project-shepherd", "sprint-prioritizer", "experiment-tracker", "feedback-synthesizer", "trend-researcher", "agents-orchestrator"]),
            ("设计UX", vec!["ui-designer", "ux-architect", "ux-researcher", "visual-storyteller", "image-prompt-engineer", "whimsy-injector", "brand-guardian"]),
            ("内容营销", vec!["content-creator", "chinese-writing", "executive-summary-generator", "social-media-strategist"]),
            ("社交平台", vec!["twitter-engager", "instagram-curator", "tiktok-strategist", "reddit-community-builder", "zhihu-strategist", "xiaohongshu-specialist", "wechat-official-account", "growth-hacker", "app-store-optimizer"]),
            ("运营支持", vec!["studio-operations", "studio-producer", "support-responder", "workflow-optimizer", "infrastructure-maintainer", "tool-evaluator"]),
            ("XR/空间计算", vec!["visionos-spatial-engineer", "macos-spatial-metal-engineer", "xr-immersive-developer", "xr-interface-architect", "xr-cockpit-interaction-specialist", "terminal-integration-specialist"]),
            ("基础工具", vec!["web-search", "file-operations", "shell-command", "git", "translation", "feishu-docs"]),
        ];
        // Categorize each skill
        for skill in skills {
            // Priority 1: Use skill's explicit category
            if let Some(ref category) = skill.category {
                if !category.is_empty() {
                    categories.entry(category.clone()).or_default().push(skill);
                    continue;
                }
            }
            // Priority 2: Fallback to pattern matching.
            // NOTE(review): `contains` is a substring match, so a short pattern
            // like "git" can match unrelated ids that embed it — confirm this
            // fuzziness is acceptable.
            let skill_id = skill.id.as_str();
            let mut categorized = false;
            for (category, patterns) in &fallback_patterns {
                if patterns.iter().any(|p| skill_id.contains(p) || *p == skill_id) {
                    categories.entry(category.to_string()).or_default().push(skill);
                    categorized = true;
                    break;
                }
            }
            // Put uncategorized skills in "其他" ("Other")
            if !categorized {
                categories.entry("其他".to_string()).or_default().push(skill);
            }
        }
        // Convert to ordered vector
        let mut result: Vec<(String, Vec<_>)> = categories.into_iter().collect();
        result.sort_by(|a, b| {
            // Sort by predefined order; categories not listed sort last (index 99).
            let order = ["开发工程", "测试质量", "安全合规", "数据分析", "项目管理", "设计UX", "内容营销", "社交平台", "运营支持", "XR/空间计算", "基础工具", "其他"];
            let a_idx = order.iter().position(|&x| x == a.0).unwrap_or(99);
            let b_idx = order.iter().position(|&x| x == b.0).unwrap_or(99);
            a_idx.cmp(&b_idx)
        });
        result
    }
}

View File

@@ -0,0 +1,345 @@
//! Kernel - central coordinator
mod adapters;
mod agents;
mod messaging;
mod skills;
mod hands;
mod triggers;
mod approvals;
#[cfg(feature = "multi-agent")]
mod a2a;
use std::sync::Arc;
use tokio::sync::{broadcast, Mutex};
use zclaw_types::{Event, Result};
#[cfg(feature = "multi-agent")]
use zclaw_types::AgentId;
#[cfg(feature = "multi-agent")]
use zclaw_protocols::A2aRouter;
use crate::registry::AgentRegistry;
use crate::capabilities::CapabilityManager;
use crate::events::EventBus;
use crate::config::KernelConfig;
use zclaw_memory::MemoryStore;
use zclaw_runtime::{LlmDriver, ToolRegistry, tool::SkillExecutor};
use zclaw_skills::SkillRegistry;
use zclaw_hands::{HandRegistry, hands::{BrowserHand, SlideshowHand, SpeechHand, QuizHand, WhiteboardHand, ResearcherHand, CollectorHand, ClipHand, TwitterHand, quiz::LlmQuizGenerator}};
pub use adapters::KernelSkillExecutor;
pub use messaging::ChatModeConfig;
/// The ZCLAW Kernel — central coordinator that owns every subsystem
/// (agents, skills, hands, triggers, memory, LLM drivers) and wires
/// them together at boot time.
pub struct Kernel {
    /// Kernel configuration (database URL, skills dir, model parameters).
    config: KernelConfig,
    /// In-memory registry of agents; repopulated from the memory store at boot.
    registry: AgentRegistry,
    /// Capability grants for agents.
    capabilities: CapabilityManager,
    /// Kernel-wide broadcast event bus.
    events: EventBus,
    /// Database-backed memory store (created from `config.database_url`).
    memory: Arc<MemoryStore>,
    /// Primary LLM driver selected by the configuration.
    driver: Arc<dyn LlmDriver>,
    /// LLM completer injected into skill contexts for PromptOnly skills.
    llm_completer: Arc<dyn zclaw_skills::LlmCompleter>,
    /// Registry of discovered skills.
    skills: Arc<SkillRegistry>,
    /// Executor bridging the skill registry to the LLM driver.
    skill_executor: Arc<KernelSkillExecutor>,
    /// Registry of built-in hands (browser, slideshow, quiz, ...).
    hands: Arc<HandRegistry>,
    /// Manager owning trigger state, CRUD and execution.
    trigger_manager: crate::trigger_manager::TriggerManager,
    /// Hand-approval requests awaiting a decision.
    pending_approvals: Arc<Mutex<Vec<ApprovalEntry>>>,
    /// Running hand runs that can be cancelled (run_id -> cancelled flag)
    running_hand_runs: Arc<dashmap::DashMap<zclaw_types::HandRunId, Arc<std::sync::atomic::AtomicBool>>>,
    /// Shared memory storage backend for Growth system
    viking: Arc<zclaw_runtime::VikingAdapter>,
    /// Optional LLM driver for memory extraction (set by Tauri desktop layer)
    extraction_driver: Option<Arc<dyn zclaw_runtime::LlmDriverForExtraction>>,
    /// A2A router for inter-agent messaging (gated by multi-agent feature)
    #[cfg(feature = "multi-agent")]
    a2a_router: Arc<A2aRouter>,
    /// Per-agent A2A inbox receivers (supports re-queuing non-matching messages)
    #[cfg(feature = "multi-agent")]
    a2a_inboxes: Arc<dashmap::DashMap<AgentId, Arc<Mutex<adapters::AgentInbox>>>>,
}
impl Kernel {
    /// Boot the kernel with the given configuration.
    ///
    /// Initialization order matters: the memory store and LLM driver are
    /// created first because nearly every later subsystem (quiz generator,
    /// skill executor, LLM completer) holds a handle to one of them.
    pub async fn boot(config: KernelConfig) -> Result<Self> {
        // Initialize memory store from the configured database URL.
        let memory = Arc::new(MemoryStore::new(&config.database_url).await?);
        // Initialize the LLM driver selected by the configuration.
        let driver = config.create_driver()?;
        // Core subsystems.
        let registry = AgentRegistry::new();
        let capabilities = CapabilityManager::new();
        let events = EventBus::new();
        // Skill registry; scan the configured skills directory if it exists.
        let skills = Arc::new(SkillRegistry::new());
        if let Some(ref skills_dir) = config.skills_dir {
            if skills_dir.exists() {
                skills.add_skill_dir(skills_dir.clone()).await?;
            }
        }
        // Hand registry with built-in hands. The quiz hand needs an
        // LLM-backed generator, so it is constructed with the driver.
        let hands = Arc::new(HandRegistry::new());
        let quiz_model = config.model().to_string();
        let quiz_generator = Arc::new(LlmQuizGenerator::new(driver.clone(), quiz_model));
        hands.register(Arc::new(BrowserHand::new())).await;
        hands.register(Arc::new(SlideshowHand::new())).await;
        hands.register(Arc::new(SpeechHand::new())).await;
        hands.register(Arc::new(QuizHand::with_generator(quiz_generator))).await;
        hands.register(Arc::new(WhiteboardHand::new())).await;
        hands.register(Arc::new(ResearcherHand::new())).await;
        hands.register(Arc::new(CollectorHand::new())).await;
        hands.register(Arc::new(ClipHand::new())).await;
        hands.register(Arc::new(TwitterHand::new())).await;
        // Skill executor bridges the skill registry to the LLM driver.
        let skill_executor = Arc::new(KernelSkillExecutor::new(skills.clone(), driver.clone()));
        // LLM completer for the skill system (shares the driver with skill_executor).
        let llm_completer: Arc<dyn zclaw_skills::LlmCompleter> =
            Arc::new(adapters::LlmDriverAdapter {
                driver: driver.clone(),
                max_tokens: config.max_tokens(),
                temperature: config.temperature(),
            });
        // Trigger manager drives trigger CRUD and hand execution.
        let trigger_manager = crate::trigger_manager::TriggerManager::new(hands.clone());
        // Growth system — starts with an in-memory VikingAdapter; the
        // desktop layer may swap in persistent storage via `set_viking`.
        let viking = Arc::new(zclaw_runtime::VikingAdapter::in_memory());
        // Restore agents persisted in the memory store.
        let persisted = memory.list_agents().await?;
        for agent in persisted {
            registry.register(agent);
        }
        // A2A router for inter-agent messaging (multi-agent builds only).
        #[cfg(feature = "multi-agent")]
        let a2a_router = {
            let kernel_agent_id = AgentId::new();
            Arc::new(A2aRouter::new(kernel_agent_id))
        };
        Ok(Self {
            config,
            registry,
            capabilities,
            events,
            memory,
            driver,
            llm_completer,
            skills,
            skill_executor,
            hands,
            trigger_manager,
            pending_approvals: Arc::new(Mutex::new(Vec::new())),
            running_hand_runs: Arc::new(dashmap::DashMap::new()),
            viking,
            extraction_driver: None,
            #[cfg(feature = "multi-agent")]
            a2a_router,
            #[cfg(feature = "multi-agent")]
            a2a_inboxes: Arc::new(dashmap::DashMap::new()),
        })
    }

    /// Build a `GrowthIntegration` over the shared VikingAdapter, attaching
    /// the extraction driver when one has been configured. Shared by the
    /// compaction and memory middleware setup below, which previously
    /// duplicated this construction inline.
    fn make_growth(&self) -> zclaw_runtime::GrowthIntegration {
        let mut growth = zclaw_runtime::GrowthIntegration::new(self.viking.clone());
        if let Some(ref driver) = self.extraction_driver {
            growth = growth.with_llm_driver(driver.clone());
        }
        growth
    }

    /// Create a tool registry with built-in tools.
    ///
    /// Also registers the `TaskTool`, which needs the driver, memory store
    /// and model name to spawn delegated sub-agents.
    pub(crate) fn create_tool_registry(&self) -> ToolRegistry {
        let mut tools = ToolRegistry::new();
        zclaw_runtime::tool::builtin::register_builtin_tools(&mut tools);
        let task_tool = zclaw_runtime::tool::builtin::TaskTool::new(
            self.driver.clone(),
            self.memory.clone(),
            self.config.model(),
        );
        tools.register(Box::new(task_tool));
        tools
    }

    /// Create the middleware chain for the agent loop.
    ///
    /// When middleware is configured, cross-cutting concerns (compaction, loop guard,
    /// token calibration, etc.) are delegated to the chain. When no middleware is
    /// registered, the legacy inline path in `AgentLoop` is used instead.
    ///
    /// NOTE: registration order is significant — it is preserved exactly as
    /// the chain executes middlewares in registration order.
    pub(crate) fn create_middleware_chain(&self) -> Option<zclaw_runtime::middleware::MiddlewareChain> {
        let mut chain = zclaw_runtime::middleware::MiddlewareChain::new();
        // Compaction middleware — only register when threshold > 0.
        let threshold = self.config.compaction_threshold();
        if threshold > 0 {
            let mw = zclaw_runtime::middleware::compaction::CompactionMiddleware::new(
                threshold,
                zclaw_runtime::CompactionConfig::default(),
                Some(self.driver.clone()),
                Some(self.make_growth()),
            );
            chain.register(Arc::new(mw));
        }
        // Memory middleware — auto-extract memories after conversations.
        chain.register(Arc::new(
            zclaw_runtime::middleware::memory::MemoryMiddleware::new(self.make_growth()),
        ));
        // Loop guard middleware.
        chain.register(Arc::new(
            zclaw_runtime::middleware::loop_guard::LoopGuardMiddleware::with_defaults(),
        ));
        // Token calibration middleware.
        chain.register(Arc::new(
            zclaw_runtime::middleware::token_calibration::TokenCalibrationMiddleware::new(),
        ));
        // Skill index middleware — inject lightweight index instead of full
        // descriptions; skipped entirely when no skills are registered.
        let entries = self.skill_executor.list_skill_index();
        if !entries.is_empty() {
            chain.register(Arc::new(
                zclaw_runtime::middleware::skill_index::SkillIndexMiddleware::new(entries),
            ));
        }
        // Title middleware — auto-generate conversation titles after first exchange.
        chain.register(Arc::new(zclaw_runtime::middleware::title::TitleMiddleware::new()));
        // Dangling tool repair — patch missing tool results before LLM calls.
        chain.register(Arc::new(
            zclaw_runtime::middleware::dangling_tool::DanglingToolMiddleware::new(),
        ));
        // Tool error middleware — format tool errors for LLM recovery.
        chain.register(Arc::new(
            zclaw_runtime::middleware::tool_error::ToolErrorMiddleware::new(),
        ));
        // Tool output guard — post-execution output sanitization checks.
        chain.register(Arc::new(
            zclaw_runtime::middleware::tool_output_guard::ToolOutputGuardMiddleware::new(),
        ));
        // Guardrail middleware — safety rules for tool calls.
        chain.register(Arc::new(
            zclaw_runtime::middleware::guardrail::GuardrailMiddleware::new(true)
                .with_builtin_rules(),
        ));
        // Sub-agent limit — cap concurrent sub-agent spawning.
        chain.register(Arc::new(
            zclaw_runtime::middleware::subagent_limit::SubagentLimitMiddleware::new(),
        ));
        // Only return Some if we actually registered middleware.
        if chain.is_empty() {
            None
        } else {
            tracing::info!("[Kernel] Middleware chain created with {} middlewares", chain.len());
            Some(chain)
        }
    }

    /// Subscribe to events published on the kernel event bus.
    pub fn subscribe(&self) -> broadcast::Receiver<Event> {
        self.events.subscribe()
    }

    /// Shutdown the kernel by broadcasting `KernelShutdown` to subscribers.
    pub async fn shutdown(&self) -> Result<()> {
        self.events.publish(Event::KernelShutdown);
        Ok(())
    }

    /// Get the kernel configuration.
    pub fn config(&self) -> &KernelConfig {
        &self.config
    }

    /// Get a cloned handle to the LLM driver.
    pub fn driver(&self) -> Arc<dyn LlmDriver> {
        self.driver.clone()
    }

    /// Replace the default in-memory VikingAdapter with a persistent one.
    ///
    /// Called by the Tauri desktop layer after `Kernel::boot()` to bridge
    /// the kernel's Growth system to the same SqliteStorage used by
    /// viking_commands and intelligence_hooks.
    pub fn set_viking(&mut self, viking: Arc<zclaw_runtime::VikingAdapter>) {
        tracing::info!("[Kernel] Replacing in-memory VikingAdapter with persistent storage");
        self.viking = viking;
    }

    /// Get a reference to the shared VikingAdapter.
    pub fn viking(&self) -> Arc<zclaw_runtime::VikingAdapter> {
        self.viking.clone()
    }

    /// Set the LLM extraction driver for the Growth system.
    ///
    /// Required for `MemoryMiddleware` to extract memories from conversations
    /// via LLM analysis. If not set, memory extraction is silently skipped.
    pub fn set_extraction_driver(&mut self, driver: Arc<dyn zclaw_runtime::LlmDriverForExtraction>) {
        tracing::info!("[Kernel] Extraction driver configured for Growth system");
        self.extraction_driver = Some(driver);
    }
}
/// A pending approval request for a hand execution, held in the kernel's
/// `pending_approvals` queue until a decision is made.
#[derive(Debug, Clone)]
pub struct ApprovalEntry {
    /// Unique approval ID.
    pub id: String,
    /// ID of the hand this approval concerns.
    pub hand_id: String,
    /// Current status as a free-form string (e.g. pending/approved/rejected
    /// — exact values set by the approval workflow; not visible here).
    pub status: String,
    /// When the approval request was created (UTC).
    pub created_at: chrono::DateTime<chrono::Utc>,
    /// JSON input payload the hand would run with.
    pub input: serde_json::Value,
    /// Reason supplied when the request was rejected, if any.
    pub reject_reason: Option<String>,
}
/// Response from sending a message
#[derive(Debug, Clone)]
pub struct MessageResponse {
    /// Assistant response text.
    pub content: String,
    /// Token count consumed by the prompt.
    pub input_tokens: u32,
    /// Token count generated in the completion.
    pub output_tokens: u32,
}

View File

@@ -0,0 +1,79 @@
//! Skills management methods
use std::sync::Arc;
use zclaw_types::Result;
use super::Kernel;
impl Kernel {
/// Get the skills registry
pub fn skills(&self) -> &Arc<zclaw_skills::SkillRegistry> {
&self.skills
}
/// List all discovered skills
pub async fn list_skills(&self) -> Vec<zclaw_skills::SkillManifest> {
self.skills.list().await
}
/// Refresh skills from a directory
pub async fn refresh_skills(&self, dir: Option<std::path::PathBuf>) -> Result<()> {
if let Some(path) = dir {
self.skills.add_skill_dir(path).await?;
} else if let Some(ref skills_dir) = self.config.skills_dir {
self.skills.add_skill_dir(skills_dir.clone()).await?;
}
Ok(())
}
/// Get the configured skills directory
pub fn skills_dir(&self) -> Option<&std::path::PathBuf> {
self.config.skills_dir.as_ref()
}
/// Create a new skill in the skills directory
pub async fn create_skill(&self, manifest: zclaw_skills::SkillManifest) -> Result<()> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.create_skill(skills_dir, manifest).await
}
/// Update an existing skill
pub async fn update_skill(
&self,
id: &zclaw_types::SkillId,
manifest: zclaw_skills::SkillManifest,
) -> Result<zclaw_skills::SkillManifest> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.update_skill(skills_dir, id, manifest).await
}
/// Delete a skill
pub async fn delete_skill(&self, id: &zclaw_types::SkillId) -> Result<()> {
let skills_dir = self.config.skills_dir.as_ref()
.ok_or_else(|| zclaw_types::ZclawError::InvalidInput(
"Skills directory not configured".into()
))?;
self.skills.delete_skill(skills_dir, id).await
}
/// Execute a skill with the given ID and input
pub async fn execute_skill(
&self,
id: &str,
context: zclaw_skills::SkillContext,
input: serde_json::Value,
) -> Result<zclaw_skills::SkillResult> {
// Inject LLM completer into context for PromptOnly skills
let mut ctx = context;
if ctx.llm.is_none() {
ctx.llm = Some(self.llm_completer.clone());
}
self.skills.execute(&zclaw_types::SkillId::new(id), &ctx, input).await
}
}

View File

@@ -0,0 +1,52 @@
//! Trigger CRUD operations
use zclaw_types::Result;
use super::Kernel;
impl Kernel {
    // ============================================================
    // Trigger Management
    // ============================================================
    // Every method here is a thin delegation to the kernel's
    // TriggerManager, which owns trigger state and execution.

    /// List all triggers
    pub async fn list_triggers(&self) -> Vec<crate::trigger_manager::TriggerEntry> {
        self.trigger_manager.list_triggers().await
    }

    /// Look up a single trigger by ID; `None` if it does not exist.
    pub async fn get_trigger(&self, id: &str) -> Option<crate::trigger_manager::TriggerEntry> {
        self.trigger_manager.get_trigger(id).await
    }

    /// Create a new trigger from `config` and return the stored entry.
    pub async fn create_trigger(
        &self,
        config: zclaw_hands::TriggerConfig,
    ) -> Result<crate::trigger_manager::TriggerEntry> {
        self.trigger_manager.create_trigger(config).await
    }

    /// Apply a partial update to an existing trigger and return the
    /// updated entry.
    pub async fn update_trigger(
        &self,
        id: &str,
        updates: crate::trigger_manager::TriggerUpdateRequest,
    ) -> Result<crate::trigger_manager::TriggerEntry> {
        self.trigger_manager.update_trigger(id, updates).await
    }

    /// Delete a trigger by ID.
    pub async fn delete_trigger(&self, id: &str) -> Result<()> {
        self.trigger_manager.delete_trigger(id).await
    }

    /// Execute a trigger immediately with the given JSON input.
    pub async fn execute_trigger(
        &self,
        id: &str,
        input: serde_json::Value,
    ) -> Result<zclaw_hands::TriggerResult> {
        self.trigger_manager.execute_trigger(id, input).await
    }
}

View File

@@ -24,3 +24,6 @@ libsqlite3-sys = { workspace = true }
# Async utilities
futures = { workspace = true }
async-trait = { workspace = true }
anyhow = { workspace = true }

View File

@@ -0,0 +1,202 @@
//! Structured fact extraction and storage.
//!
//! Inspired by DeerFlow's LLM-driven fact extraction with deduplication
//! and confidence scoring. Facts are natural language statements extracted
//! from conversations, categorized and scored for retrieval quality.
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};
/// Monotonic process-local counter used to mint unique fact IDs without
/// pulling in a uuid dependency.
static FACT_COUNTER: AtomicU64 = AtomicU64::new(0);

/// Seconds since the Unix epoch; yields 0 if the system clock reads
/// earlier than the epoch (same fallback as `unwrap_or_default`).
fn now_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}

/// Mint a fresh fact ID of the form `fact-<unix_secs>-<seq>`, where the
/// sequence number comes from the global counter.
fn next_fact_id() -> String {
    let seq = FACT_COUNTER.fetch_add(1, Ordering::Relaxed);
    format!("fact-{}-{}", now_secs(), seq)
}
/// A structured fact extracted from conversation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Fact {
    /// Unique identifier, minted by `next_fact_id` (`fact-<unix_secs>-<seq>`).
    pub id: String,
    /// The fact content as a natural-language statement.
    pub content: String,
    /// Category of the fact (preference, knowledge, behavior, ...).
    pub category: FactCategory,
    /// Confidence score; clamped into 0.0 - 1.0 by `Fact::new`.
    pub confidence: f64,
    /// When this fact was extracted (unix timestamp in seconds)
    pub created_at: u64,
    /// Source session ID, set via `with_source` when known.
    pub source: Option<String>,
}
/// Categories for structured facts.
///
/// Serialized in snake_case (e.g. `task_context`) per the serde attribute.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FactCategory {
    /// User preference (language, style, format)
    Preference,
    /// Domain knowledge or context
    Knowledge,
    /// Behavioral pattern or habit
    Behavior,
    /// Task-specific context
    TaskContext,
    /// General information
    General,
}
impl Fact {
    /// Construct a fact with a freshly minted ID and the current timestamp.
    /// The confidence score is clamped into `[0.0, 1.0]`.
    pub fn new(content: impl Into<String>, category: FactCategory, confidence: f64) -> Self {
        let bounded = confidence.clamp(0.0, 1.0);
        Self {
            id: next_fact_id(),
            created_at: now_secs(),
            content: content.into(),
            category,
            confidence: bounded,
            source: None,
        }
    }

    /// Builder-style setter attaching the originating session ID.
    pub fn with_source(mut self, source: impl Into<String>) -> Self {
        self.source = Some(source.into());
        self
    }
}
/// Result of a fact extraction batch.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractedFactBatch {
    /// Extracted facts, in extraction order.
    pub facts: Vec<Fact>,
    /// Agent the facts belong to.
    pub agent_id: String,
    /// Session the facts were extracted from.
    pub session_id: String,
}
impl ExtractedFactBatch {
    /// Deduplicate facts by trimmed, lowercased content comparison.
    ///
    /// When duplicates are found, the fact with the higher confidence is
    /// kept; an equal-confidence duplicate keeps the earliest occurrence.
    /// Relative order of the surviving facts is preserved. Runs in O(n)
    /// using a winner map + `retain`, instead of the previous
    /// remove-by-index loop where each `Vec::remove` is O(n) (O(n^2) total).
    pub fn deduplicate(mut self) -> Self {
        // Map: normalized content -> index of the best candidate so far.
        let mut best_index: HashMap<String, usize> = HashMap::new();
        for (i, fact) in self.facts.iter().enumerate() {
            let key = fact.content.trim().to_lowercase();
            match best_index.get(&key) {
                // Strict `>` so an equal-confidence duplicate does not
                // displace the earlier fact (matches the old `>=` keep-prev).
                Some(&prev) if fact.confidence > self.facts[prev].confidence => {
                    best_index.insert(key, i);
                }
                Some(_) => {}
                None => {
                    best_index.insert(key, i);
                }
            }
        }
        // Keep only the winning index per key, preserving original order.
        let winners: std::collections::HashSet<usize> = best_index.into_values().collect();
        let mut idx = 0usize;
        self.facts.retain(|_| {
            let keep = winners.contains(&idx);
            idx += 1;
            keep
        });
        self
    }

    /// Drop every fact whose confidence is strictly below `min_confidence`.
    pub fn filter_by_confidence(mut self, min_confidence: f64) -> Self {
        self.facts.retain(|f| f.confidence >= min_confidence);
        self
    }

    /// Returns true if there are no facts in the batch.
    pub fn is_empty(&self) -> bool {
        self.facts.is_empty()
    }

    /// Returns the number of facts in the batch.
    pub fn len(&self) -> usize {
        self.facts.len()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Confidence above 1.0 must clamp down to exactly 1.0.
    #[test]
    fn test_fact_new_clamps_confidence() {
        let f = Fact::new("hello", FactCategory::General, 1.5);
        assert!((f.confidence - 1.0).abs() < f64::EPSILON);
    }

    // `with_source` stores the session ID in `source`.
    #[test]
    fn test_fact_with_source() {
        let f = Fact::new("prefers dark mode", FactCategory::Preference, 0.9)
            .with_source("sess-123");
        assert_eq!(f.source.as_deref(), Some("sess-123"));
    }

    // Dedup compares trimmed, lowercased content, so "likes Python" and
    // "Likes Python" collide; the 0.95-confidence copy must be the survivor.
    #[test]
    fn test_deduplicate_keeps_higher_confidence() {
        let batch = ExtractedFactBatch {
            facts: vec![
                Fact::new("likes Python", FactCategory::Preference, 0.8),
                Fact::new("Likes Python", FactCategory::Preference, 0.95),
                Fact::new("uses VSCode", FactCategory::Behavior, 0.7),
            ],
            agent_id: "agent-1".into(),
            session_id: "sess-1".into(),
        };
        let deduped = batch.deduplicate();
        assert_eq!(deduped.facts.len(), 2);
        // The "likes Python" fact with 0.95 confidence should survive
        let python_fact = deduped
            .facts
            .iter()
            .find(|f| f.content.contains("Python"))
            .unwrap();
        assert!((python_fact.confidence - 0.95).abs() < f64::EPSILON);
    }

    // Filtering keeps facts at or above the threshold (>= comparison).
    #[test]
    fn test_filter_by_confidence() {
        let batch = ExtractedFactBatch {
            facts: vec![
                Fact::new("high", FactCategory::General, 0.9),
                Fact::new("medium", FactCategory::General, 0.75),
                Fact::new("low", FactCategory::General, 0.3),
            ],
            agent_id: "agent-1".into(),
            session_id: "sess-1".into(),
        };
        let filtered = batch.filter_by_confidence(0.7);
        assert_eq!(filtered.facts.len(), 2);
    }

    // An empty batch reports is_empty() and a length of zero.
    #[test]
    fn test_is_empty_and_len() {
        let batch = ExtractedFactBatch {
            facts: vec![],
            agent_id: "agent-1".into(),
            session_id: "sess-1".into(),
        };
        assert!(batch.is_empty());
        assert_eq!(batch.len(), 0);
    }
}

View File

@@ -5,7 +5,9 @@
mod store;
mod session;
mod schema;
pub mod fact;
pub use store::*;
pub use session::*;
pub use schema::*;
pub use fact::{Fact, FactCategory, ExtractedFactBatch};

View File

@@ -278,7 +278,8 @@ pub struct PromptMessage {
// === Content Blocks ===
/// Content block for tool results and messages
/// MCP protocol wire format content block. Used for Model Context Protocol resource responses.
/// Distinct from zclaw_types::ContentBlock (LLM messages) and zclaw_hands::ContentBlock (presentations).
#[derive(Debug, Clone, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContentBlock {

View File

@@ -454,6 +454,9 @@ async fn generate_llm_summary(
temperature: Some(0.3),
stop: Vec::new(),
stream: false,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
};
let response = driver

View File

@@ -181,8 +181,12 @@ impl LlmDriver for AnthropicDriver {
}
}
"error" => {
let error_msg = serde_json::from_str::<serde_json::Value>(&data)
.ok()
.and_then(|v| v.get("error").and_then(|e| e.get("message")).and_then(|m| m.as_str().map(String::from)))
.unwrap_or_else(|| format!("Stream error: {}", &data[..data.len().min(200)]));
yield Ok(StreamChunk::Error {
message: "Stream error".to_string(),
message: error_msg,
});
}
_ => {}
@@ -251,15 +255,42 @@ impl AnthropicDriver {
})
.collect();
let requested_max = request.max_tokens.unwrap_or(4096);
let (thinking, budget) = if request.thinking_enabled {
let budget = match request.reasoning_effort.as_deref() {
Some("low") => 2000,
Some("medium") => 10000,
Some("high") => 32000,
_ => 10000, // default
};
(Some(AnthropicThinking {
r#type: "enabled".to_string(),
budget_tokens: budget,
}), budget)
} else {
(None, 0)
};
// When thinking is enabled, max_tokens is the TOTAL budget (thinking + text).
// Use the maximum output limit (65536) so thinking can consume whatever it
// needs without starving the text response. We only pay for tokens actually
// generated, so a high limit costs nothing extra.
let effective_max = if budget > 0 {
65536
} else {
requested_max
};
AnthropicRequest {
model: request.model.clone(),
max_tokens: request.max_tokens.unwrap_or(4096),
max_tokens: effective_max,
system: request.system.clone(),
messages,
tools: if tools.is_empty() { None } else { Some(tools) },
temperature: request.temperature,
stop_sequences: if request.stop.is_empty() { None } else { Some(request.stop.clone()) },
stream: request.stream,
thinking,
}
}
@@ -313,6 +344,14 @@ struct AnthropicRequest {
stop_sequences: Option<Vec<String>>,
#[serde(default)]
stream: bool,
#[serde(skip_serializing_if = "Option::is_none")]
thinking: Option<AnthropicThinking>,
}
/// Wire-format `thinking` object for the Anthropic Messages API request.
#[derive(Serialize)]
struct AnthropicThinking {
    /// Set to "enabled" by `build_api_request` when thinking is requested.
    r#type: String,
    /// Token budget allocated to extended thinking.
    budget_tokens: u32,
}
#[derive(Serialize)]

View File

@@ -265,6 +265,10 @@ impl GeminiDriver {
/// - Tool definitions use `functionDeclarations`
/// - Tool results are sent as `functionResponse` parts in `user` messages
fn build_api_request(&self, request: &CompletionRequest) -> GeminiRequest {
if request.thinking_enabled {
tracing::debug!("[GeminiDriver] thinking_enabled=true but Gemini does not support native thinking mode; ignoring");
}
let mut contents: Vec<GeminiContent> = Vec::new();
for msg in &request.messages {

View File

@@ -58,6 +58,10 @@ impl LocalDriver {
// ----------------------------------------------------------------
fn build_api_request(&self, request: &CompletionRequest) -> LocalApiRequest {
if request.thinking_enabled {
tracing::debug!("[LocalDriver] thinking_enabled=true but local driver does not support native thinking mode; ignoring");
}
let messages: Vec<LocalApiMessage> = request
.messages
.iter()
@@ -183,7 +187,7 @@ impl LocalDriver {
.unwrap_or(false);
let blocks = if has_tool_calls {
let tool_calls = c.message.tool_calls.as_ref().unwrap();
let tool_calls = c.message.tool_calls.as_deref().unwrap_or_default();
tool_calls
.iter()
.map(|tc| {
@@ -199,7 +203,7 @@ impl LocalDriver {
.collect()
} else if has_content {
vec![ContentBlock::Text {
text: c.message.content.clone().unwrap(),
text: c.message.content.clone().unwrap_or_default(),
}]
} else {
vec![ContentBlock::Text {

View File

@@ -60,6 +60,15 @@ pub struct CompletionRequest {
pub stop: Vec<String>,
/// Enable streaming
pub stream: bool,
/// Enable extended thinking/reasoning
#[serde(default)]
pub thinking_enabled: bool,
/// Reasoning effort level (for providers that support it)
#[serde(default)]
pub reasoning_effort: Option<String>,
/// Enable plan mode
#[serde(default)]
pub plan_mode: bool,
}
impl Default for CompletionRequest {
@@ -73,27 +82,16 @@ impl Default for CompletionRequest {
temperature: Some(0.7),
stop: Vec::new(),
stream: false,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
}
}
}
/// Tool definition for LLM
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolDefinition {
pub name: String,
pub description: String,
pub input_schema: serde_json::Value,
}
impl ToolDefinition {
pub fn new(name: impl Into<String>, description: impl Into<String>, schema: serde_json::Value) -> Self {
Self {
name: name.into(),
description: description.into(),
input_schema: schema,
}
}
}
/// Tool definition for LLM function calling.
/// Re-exported from `zclaw_types::tool::ToolDefinition` (canonical definition).
pub use zclaw_types::tool::ToolDefinition;
/// Completion response
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -110,7 +108,8 @@ pub struct CompletionResponse {
pub stop_reason: StopReason,
}
/// Content block in response
/// LLM driver response content block (subset of canonical zclaw_types::ContentBlock).
/// Used internally by Anthropic/OpenAI/Gemini/Local drivers for API response parsing.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContentBlock {

View File

@@ -130,8 +130,8 @@ impl LlmDriver for OpenAiDriver {
let api_key = self.api_key.expose_secret().to_string();
Box::pin(stream! {
println!("[OpenAI:stream] POST to {}/chat/completions", base_url);
println!("[OpenAI:stream] Request model={}, stream={}", stream_request.model, stream_request.stream);
tracing::debug!("[OpenAI:stream] POST to {}/chat/completions", base_url);
tracing::debug!("[OpenAI:stream] Request model={}, stream={}", stream_request.model, stream_request.stream);
let response = match self.client
.post(format!("{}/chat/completions", base_url))
.header("Authorization", format!("Bearer {}", api_key))
@@ -142,11 +142,11 @@ impl LlmDriver for OpenAiDriver {
.await
{
Ok(r) => {
println!("[OpenAI:stream] Response status: {}, content-type: {:?}", r.status(), r.headers().get("content-type"));
tracing::debug!("[OpenAI:stream] Response status: {}, content-type: {:?}", r.status(), r.headers().get("content-type"));
r
},
Err(e) => {
println!("[OpenAI:stream] HTTP request FAILED: {:?}", e);
tracing::debug!("[OpenAI:stream] HTTP request FAILED: {:?}", e);
yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
return;
}
@@ -155,7 +155,7 @@ impl LlmDriver for OpenAiDriver {
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
println!("[OpenAI:stream] API error {}: {}", status, &body[..body.len().min(500)]);
tracing::debug!("[OpenAI:stream] API error {}: {}", status, &body[..body.len().min(500)]);
yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
return;
}
@@ -170,7 +170,7 @@ impl LlmDriver for OpenAiDriver {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
println!("[OpenAI:stream] Byte stream error: {:?}", e);
tracing::debug!("[OpenAI:stream] Byte stream error: {:?}", e);
yield Err(ZclawError::LlmError(format!("Stream error: {}", e)));
continue;
}
@@ -180,7 +180,7 @@ impl LlmDriver for OpenAiDriver {
let text = String::from_utf8_lossy(&chunk);
// Log first 500 bytes of raw data for debugging SSE format
if raw_bytes_total <= 600 {
println!("[OpenAI:stream] RAW chunk ({} bytes): {:?}", text.len(), &text[..text.len().min(500)]);
tracing::debug!("[OpenAI:stream] RAW chunk ({} bytes): {:?}", text.len(), &text[..text.len().min(500)]);
}
for line in text.lines() {
let trimmed = line.trim();
@@ -198,10 +198,10 @@ impl LlmDriver for OpenAiDriver {
if let Some(data) = data {
sse_event_count += 1;
if sse_event_count <= 3 || data == "[DONE]" {
println!("[OpenAI:stream] SSE #{}: {}", sse_event_count, &data[..data.len().min(300)]);
tracing::debug!("[OpenAI:stream] SSE #{}: {}", sse_event_count, &data[..data.len().min(300)]);
}
if data == "[DONE]" {
println!("[OpenAI:stream] Received [DONE], total SSE events: {}, raw bytes: {}", sse_event_count, raw_bytes_total);
tracing::debug!("[OpenAI:stream] Received [DONE], total SSE events: {}, raw bytes: {}", sse_event_count, raw_bytes_total);
// Emit ToolUseEnd for all accumulated tool calls (skip invalid ones with empty name)
for (id, (name, args)) in &accumulated_tool_calls {
@@ -319,7 +319,7 @@ impl LlmDriver for OpenAiDriver {
}
}
}
println!("[OpenAI:stream] Byte stream ended. Total: {} SSE events, {} raw bytes", sse_event_count, raw_bytes_total);
tracing::debug!("[OpenAI:stream] Byte stream ended. Total: {} SSE events, {} raw bytes", sse_event_count, raw_bytes_total);
})
}
}
@@ -496,6 +496,7 @@ impl OpenAiDriver {
stop: if request.stop.is_empty() { None } else { Some(request.stop.clone()) },
stream: request.stream,
tools: if tools.is_empty() { None } else { Some(tools) },
reasoning_effort: request.reasoning_effort.clone(),
};
// Pre-send payload size validation
@@ -581,8 +582,8 @@ impl OpenAiDriver {
let has_reasoning = c.message.reasoning_content.as_ref().map(|t| !t.is_empty()).unwrap_or(false);
let blocks = if has_tool_calls {
// Tool calls take priority
let tool_calls = c.message.tool_calls.as_ref().unwrap();
// Tool calls take priority — safe to unwrap after has_tool_calls check
let tool_calls = c.message.tool_calls.as_ref().cloned().unwrap_or_default();
tracing::debug!("[OpenAiDriver:convert_response] Using tool_calls: {} calls", tool_calls.len());
tool_calls.iter().map(|tc| ContentBlock::ToolUse {
id: tc.id.clone(),
@@ -590,15 +591,15 @@ impl OpenAiDriver {
input: serde_json::from_str(&tc.function.arguments).unwrap_or(serde_json::Value::Null),
}).collect()
} else if has_content {
// Non-empty content
let text = c.message.content.as_ref().unwrap();
// Non-empty content — safe to unwrap after has_content check
let text = c.message.content.as_deref().unwrap_or("");
tracing::debug!("[OpenAiDriver:convert_response] Using text content: {} chars", text.len());
vec![ContentBlock::Text { text: text.clone() }]
vec![ContentBlock::Text { text: text.to_string() }]
} else if has_reasoning {
// Content empty but reasoning_content present (Kimi, Qwen, DeepSeek)
let reasoning = c.message.reasoning_content.as_ref().unwrap();
let reasoning = c.message.reasoning_content.as_deref().unwrap_or("");
tracing::debug!("[OpenAiDriver:convert_response] Using reasoning_content: {} chars", reasoning.len());
vec![ContentBlock::Text { text: reasoning.clone() }]
vec![ContentBlock::Text { text: reasoning.to_string() }]
} else {
// No content or tool_calls
tracing::debug!("[OpenAiDriver:convert_response] No content or tool_calls, using empty text");
@@ -771,6 +772,8 @@ struct OpenAiRequest {
stream: bool,
#[serde(skip_serializing_if = "Option::is_none")]
tools: Option<Vec<OpenAiTool>>,
#[serde(skip_serializing_if = "Option::is_none")]
reasoning_effort: Option<String>,
}
#[derive(Serialize)]
@@ -833,7 +836,7 @@ struct OpenAiResponse {
usage: Option<OpenAiUsage>,
}
#[derive(Deserialize, Default)]
#[derive(Deserialize, Default, Clone)]
struct OpenAiChoice {
#[serde(default)]
message: OpenAiResponseMessage,
@@ -841,7 +844,7 @@ struct OpenAiChoice {
finish_reason: Option<String>,
}
#[derive(Deserialize, Default)]
#[derive(Deserialize, Default, Clone)]
struct OpenAiResponseMessage {
#[serde(default)]
content: Option<String>,
@@ -851,7 +854,7 @@ struct OpenAiResponseMessage {
tool_calls: Option<Vec<OpenAiToolCallResponse>>,
}
#[derive(Deserialize, Default)]
#[derive(Deserialize, Default, Clone)]
struct OpenAiToolCallResponse {
#[serde(default)]
id: String,
@@ -859,7 +862,7 @@ struct OpenAiToolCallResponse {
function: FunctionCallResponse,
}
#[derive(Deserialize, Default)]
#[derive(Deserialize, Default, Clone)]
struct FunctionCallResponse {
#[serde(default)]
name: String,

View File

@@ -16,6 +16,7 @@ use zclaw_growth::{
MemoryExtractor, MemoryRetriever, PromptInjector, RetrievalResult,
VikingAdapter,
};
use zclaw_memory::{ExtractedFactBatch, Fact, FactCategory};
use zclaw_types::{AgentId, Message, Result, SessionId};
/// Growth system integration for AgentLoop
@@ -212,6 +213,80 @@ impl GrowthIntegration {
Ok(count)
}
/// Combined extraction: single LLM call that produces both stored memories
/// and structured facts, avoiding double extraction overhead.
///
/// Flow: one `extractor.extract` LLM call → store raw memories →
/// record a learning event → convert the same extracted memories into
/// `Fact`s (no second LLM call) → deduplicate and drop facts below
/// confidence 0.7.
///
/// Returns `Ok(None)` when growth/auto-extract is disabled or nothing was
/// extracted (extraction errors are logged and treated as empty), otherwise
/// `Ok(Some((memory_count, batch)))` — `batch` may be empty when every fact
/// was removed by dedup/confidence filtering.
pub async fn extract_combined(
    &self,
    agent_id: &AgentId,
    messages: &[Message],
    session_id: &SessionId,
) -> Result<Option<(usize, ExtractedFactBatch)>> {
    if !self.config.enabled || !self.config.auto_extract {
        return Ok(None);
    }
    // Single LLM extraction call. Failure is non-fatal: log a warning and
    // treat it as "nothing extracted".
    let extracted = self
        .extractor
        .extract(messages, session_id.clone())
        .await
        .unwrap_or_else(|e| {
            tracing::warn!("[GrowthIntegration] Combined extraction failed: {}", e);
            Vec::new()
        });
    if extracted.is_empty() {
        return Ok(None);
    }
    let mem_count = extracted.len();
    // Store raw memories (storage errors DO propagate, unlike extraction errors).
    self.extractor
        .store_memories(&agent_id.to_string(), &extracted)
        .await?;
    // Track learning event
    self.tracker
        .record_learning(agent_id, &session_id.to_string(), mem_count)
        .await?;
    // Convert same extracted memories to structured facts (no extra LLM call).
    // Memory types without a direct mapping fall through to FactCategory::General.
    let facts: Vec<Fact> = extracted
        .into_iter()
        .map(|m| {
            let category = match m.memory_type {
                zclaw_growth::types::MemoryType::Preference => FactCategory::Preference,
                zclaw_growth::types::MemoryType::Knowledge => FactCategory::Knowledge,
                zclaw_growth::types::MemoryType::Experience => FactCategory::Behavior,
                _ => FactCategory::General,
            };
            Fact::new(m.content, category, f64::from(m.confidence))
                .with_source(session_id.to_string())
        })
        .collect();
    let batch = ExtractedFactBatch {
        facts,
        agent_id: agent_id.to_string(),
        session_id: session_id.to_string(),
    }
    .deduplicate()
    .filter_by_confidence(0.7);
    if batch.is_empty() {
        // Memories were stored but no fact survived filtering — still report
        // the memory count, with an explicitly empty batch.
        return Ok(Some((mem_count, ExtractedFactBatch {
            facts: vec![],
            agent_id: agent_id.to_string(),
            session_id: session_id.to_string(),
        })));
    }
    Ok(Some((mem_count, batch)))
}
/// Retrieve memories for a query without injection
pub async fn retrieve_memories(
&self,

View File

@@ -16,6 +16,7 @@ pub mod stream;
pub mod growth;
pub mod compaction;
pub mod middleware;
pub mod prompt;
// Re-export main types
pub use driver::{
@@ -31,3 +32,4 @@ pub use zclaw_growth::VikingAdapter;
pub use zclaw_growth::EmbeddingClient;
pub use zclaw_growth::LlmDriverForExtraction;
pub use compaction::{CompactionConfig, CompactionOutcome};
pub use prompt::{PromptBuilder, PromptContext, PromptSection};

View File

@@ -14,6 +14,7 @@ use crate::loop_guard::{LoopGuard, LoopGuardResult};
use crate::growth::GrowthIntegration;
use crate::compaction::{self, CompactionConfig};
use crate::middleware::{self, MiddlewareChain};
use crate::prompt::{PromptBuilder, PromptContext};
use zclaw_memory::MemoryStore;
/// Agent loop runner
@@ -25,6 +26,8 @@ pub struct AgentLoop {
loop_guard: Mutex<LoopGuard>,
model: String,
system_prompt: Option<String>,
/// Custom agent personality for prompt assembly
soul: Option<String>,
max_tokens: u32,
temperature: f32,
skill_executor: Option<Arc<dyn SkillExecutor>>,
@@ -39,6 +42,12 @@ pub struct AgentLoop {
/// delegated to the chain instead of the inline code below.
/// When `None`, the legacy inline path is used (100% backward compatible).
middleware_chain: Option<MiddlewareChain>,
/// Chat mode: extended thinking enabled
thinking_enabled: bool,
/// Chat mode: reasoning effort level
reasoning_effort: Option<String>,
/// Chat mode: plan mode
plan_mode: bool,
}
impl AgentLoop {
@@ -56,7 +65,8 @@ impl AgentLoop {
loop_guard: Mutex::new(LoopGuard::default()),
model: String::new(), // Must be set via with_model()
system_prompt: None,
max_tokens: 4096,
soul: None,
max_tokens: 16384,
temperature: 0.7,
skill_executor: None,
path_validator: None,
@@ -64,6 +74,9 @@ impl AgentLoop {
compaction_threshold: 0,
compaction_config: CompactionConfig::default(),
middleware_chain: None,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
}
}
@@ -91,6 +104,30 @@ impl AgentLoop {
self
}
/// Set the agent personality (SOUL.md equivalent).
///
/// The text is carried in the `soul` field of `PromptContext` and rendered
/// as an "## Agent Personality" section of the assembled system prompt.
pub fn with_soul(mut self, soul: impl Into<String>) -> Self {
    self.soul = Some(soul.into());
    self
}
/// Enable extended thinking/reasoning mode.
///
/// Forwarded to the LLM request as `thinking_enabled` and, on the
/// middleware prompt path, rendered as a "## Reasoning Mode" section.
pub fn with_thinking_enabled(mut self, enabled: bool) -> Self {
    self.thinking_enabled = enabled;
    self
}
/// Set reasoning effort level (low/medium/high).
///
/// Forwarded to the LLM request; OpenAI-style drivers serialize it as the
/// optional `reasoning_effort` field (omitted from the JSON when unset).
pub fn with_reasoning_effort(mut self, effort: impl Into<String>) -> Self {
    self.reasoning_effort = Some(effort.into());
    self
}
/// Enable plan mode.
///
/// Forwarded to the LLM request as `plan_mode` and rendered as a
/// "## Plan Mode" prompt section instructing the model to present a plan
/// for approval before executing actions.
pub fn with_plan_mode(mut self, enabled: bool) -> Self {
    self.plan_mode = enabled;
    self
}
/// Set max tokens
pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
self.max_tokens = max_tokens;
@@ -214,7 +251,15 @@ impl AgentLoop {
// Enhance system prompt — skip when middleware chain handles it
let mut enhanced_prompt = if use_middleware {
self.system_prompt.clone().unwrap_or_default()
let prompt_ctx = PromptContext {
base_prompt: self.system_prompt.clone(),
soul: self.soul.clone(),
thinking_enabled: self.thinking_enabled,
plan_mode: self.plan_mode,
tool_definitions: self.tools.definitions(),
agent_name: None,
};
PromptBuilder::new().build(&prompt_ctx)
} else if let Some(ref growth) = self.growth {
let base = self.system_prompt.as_deref().unwrap_or("");
growth.enhance_prompt(&self.agent_id, base, &input).await?
@@ -279,6 +324,9 @@ impl AgentLoop {
temperature: Some(self.temperature),
stop: Vec::new(),
stream: false,
thinking_enabled: self.thinking_enabled,
reasoning_effort: self.reasoning_effort.clone(),
plan_mode: self.plan_mode,
};
// Call LLM
@@ -352,7 +400,12 @@ impl AgentLoop {
// Create tool context and execute all tools
let tool_context = self.create_tool_context(session_id.clone());
let mut circuit_breaker_triggered = false;
let mut abort_result: Option<AgentLoopResult> = None;
for (id, name, input) in tool_calls {
// Check if loop was already aborted
if abort_result.is_some() {
break;
}
// Check tool call safety — via middleware chain or inline loop guard
if let Some(ref chain) = self.middleware_chain {
let mw_ctx_ref = middleware::MiddlewareContext {
@@ -382,6 +435,17 @@ impl AgentLoop {
messages.push(Message::tool_result(id, zclaw_types::ToolId::new(&name), tool_result, false));
continue;
}
middleware::ToolCallDecision::AbortLoop(reason) => {
tracing::warn!("[AgentLoop] Loop aborted by middleware: {}", reason);
let msg = format!("{}\n已自动终止", reason);
self.memory.append_message(&session_id, &Message::assistant(&msg)).await?;
abort_result = Some(AgentLoopResult {
response: msg,
input_tokens: total_input_tokens,
output_tokens: total_output_tokens,
iterations,
});
}
}
} else {
// Legacy inline path
@@ -421,6 +485,11 @@ impl AgentLoop {
// Continue the loop - LLM will process tool results and generate final response
// If middleware aborted the loop, return immediately
if let Some(result) = abort_result {
break result;
}
// If circuit breaker was triggered, terminate immediately
if circuit_breaker_triggered {
let msg = "检测到工具调用循环,已自动终止";
@@ -502,7 +571,15 @@ impl AgentLoop {
// Enhance system prompt — skip when middleware chain handles it
let mut enhanced_prompt = if use_middleware {
self.system_prompt.clone().unwrap_or_default()
let prompt_ctx = PromptContext {
base_prompt: self.system_prompt.clone(),
soul: self.soul.clone(),
thinking_enabled: self.thinking_enabled,
plan_mode: self.plan_mode,
tool_definitions: self.tools.definitions(),
agent_name: None,
};
PromptBuilder::new().build(&prompt_ctx)
} else if let Some(ref growth) = self.growth {
let base = self.system_prompt.as_deref().unwrap_or("");
growth.enhance_prompt(&self.agent_id, base, &input).await?
@@ -552,6 +629,9 @@ impl AgentLoop {
let model = self.model.clone();
let max_tokens = self.max_tokens;
let temperature = self.temperature;
let thinking_enabled = self.thinking_enabled;
let reasoning_effort = self.reasoning_effort.clone();
let plan_mode = self.plan_mode;
tokio::spawn(async move {
let mut messages = messages;
@@ -584,6 +664,9 @@ impl AgentLoop {
temperature: Some(temperature),
stop: Vec::new(),
stream: true,
thinking_enabled,
reasoning_effort: reasoning_effort.clone(),
plan_mode,
};
let mut stream = driver.stream(request);
@@ -596,9 +679,12 @@ impl AgentLoop {
let mut chunk_count: usize = 0;
let mut text_delta_count: usize = 0;
let mut thinking_delta_count: usize = 0;
while let Some(chunk_result) = stream.next().await {
match chunk_result {
Ok(chunk) => {
let mut stream_errored = false;
let chunk_timeout = std::time::Duration::from_secs(60);
loop {
match tokio::time::timeout(chunk_timeout, stream.next()).await {
Ok(Some(Ok(chunk))) => {
chunk_count += 1;
match &chunk {
StreamChunk::TextDelta { delta } => {
@@ -610,8 +696,8 @@ impl AgentLoop {
StreamChunk::ThinkingDelta { delta } => {
thinking_delta_count += 1;
tracing::debug!("[AgentLoop] ThinkingDelta #{}: {} chars", thinking_delta_count, delta.len());
// Accumulate reasoning separately — not mixed into iteration_text
reasoning_text.push_str(delta);
let _ = tx.send(LoopEvent::ThinkingDelta(delta.clone())).await;
}
StreamChunk::ToolUseStart { id, name } => {
tracing::debug!("[AgentLoop] ToolUseStart: id={}, name={}", id, name);
@@ -651,21 +737,43 @@ impl AgentLoop {
StreamChunk::Error { message } => {
tracing::error!("[AgentLoop] Stream error: {}", message);
let _ = tx.send(LoopEvent::Error(message.clone())).await;
stream_errored = true;
}
}
}
Err(e) => {
Ok(Some(Err(e))) => {
tracing::error!("[AgentLoop] Chunk error: {}", e);
let _ = tx.send(LoopEvent::Error(e.to_string())).await;
let _ = tx.send(LoopEvent::Error(format!("LLM 锥应错误: {}", e.to_string()))).await;
stream_errored = true;
}
Ok(None) => break, // Stream ended normally
Err(_) => {
tracing::error!("[AgentLoop] Stream chunk timeout ({}s)", chunk_timeout.as_secs());
let _ = tx.send(LoopEvent::Error("LLM 响应超时,请重试".to_string())).await;
stream_errored = true;
}
}
if stream_errored {
break;
}
}
tracing::info!("[AgentLoop] Stream ended: {} total chunks (text={}, thinking={}, tools={}), iteration_text={} chars",
chunk_count, text_delta_count, thinking_delta_count, pending_tool_calls.len(),
iteration_text.len());
if iteration_text.is_empty() {
tracing::warn!("[AgentLoop] WARNING: iteration_text is EMPTY after {} chunks! text_delta={}, thinking_delta={}",
chunk_count, text_delta_count, thinking_delta_count);
// Fallback: if model generated reasoning but no text content,
// use reasoning as text response. This happens with some thinking models
// (DeepSeek R1, QWQ) that put the answer in reasoning_content instead of content.
// Safe now because: (1) context is clean (no stale user_profile/memory injection),
// (2) max_tokens=16384 prevents truncation, (3) reasoning is about the correct topic.
if iteration_text.is_empty() && !reasoning_text.is_empty() {
tracing::info!("[AgentLoop] Model generated {} chars of reasoning but no text — using reasoning as response",
reasoning_text.len());
let _ = tx.send(LoopEvent::Delta(reasoning_text.clone())).await;
iteration_text = reasoning_text.clone();
} else if iteration_text.is_empty() {
tracing::warn!("[AgentLoop] No text content after {} chunks (thinking_delta={})",
chunk_count, thinking_delta_count);
}
// If no tool calls, we have the final response
@@ -706,6 +814,12 @@ impl AgentLoop {
break 'outer;
}
// Skip tool processing if stream errored or timed out
if stream_errored {
tracing::debug!("[AgentLoop] Stream errored, skipping tool processing and breaking");
break 'outer;
}
tracing::debug!("[AgentLoop] Processing {} tool calls (reasoning: {} chars)", pending_tool_calls.len(), reasoning_text.len());
// Push assistant message with reasoning before tool calls (required by Kimi and other thinking-enabled APIs)
@@ -745,6 +859,11 @@ impl AgentLoop {
messages.push(Message::tool_result(id, zclaw_types::ToolId::new(&name), error_output, true));
continue;
}
Ok(middleware::ToolCallDecision::AbortLoop(reason)) => {
tracing::warn!("[AgentLoop] Loop aborted by middleware: {}", reason);
let _ = tx.send(LoopEvent::Error(reason)).await;
break 'outer;
}
Ok(middleware::ToolCallDecision::ReplaceInput(new_input)) => {
// Execute with replaced input (same path_validator logic below)
let pv = path_validator.clone().unwrap_or_else(|| {
@@ -883,6 +1002,8 @@ pub struct AgentLoopResult {
pub enum LoopEvent {
/// Text delta from LLM
Delta(String),
/// Thinking/reasoning delta from LLM (extended thinking)
ThinkingDelta(String),
/// Tool execution started
ToolStart { name: String, input: serde_json::Value },
/// Tool execution completed

View File

@@ -41,6 +41,8 @@ pub enum ToolCallDecision {
Block(String),
/// Allow the call but replace the tool input with *new_input*.
ReplaceInput(Value),
/// Terminate the entire agent loop immediately (e.g. circuit breaker).
AbortLoop(String),
}
// ---------------------------------------------------------------------------
@@ -194,6 +196,25 @@ impl MiddlewareChain {
Ok(ToolCallDecision::Allow)
}
/// Run all `before_tool_call` hooks with mutable context.
///
/// Mirrors `run_before_tool_call` but borrows the context mutably so
/// callers already holding `&mut MiddlewareContext` can use it directly.
/// Hooks run in chain order; the first decision other than `Allow`
/// short-circuits and is returned, otherwise `Allow`.
///
/// NOTE(review): `ctx` is only reborrowed immutably for each hook here —
/// the mutable borrow appears reserved for future mutating hooks; confirm.
pub async fn run_before_tool_call_mut(
    &self,
    ctx: &mut MiddlewareContext,
    tool_name: &str,
    tool_input: &Value,
) -> Result<ToolCallDecision> {
    for mw in &self.middlewares {
        match mw.before_tool_call(ctx, tool_name, tool_input).await? {
            ToolCallDecision::Allow => {}
            other => {
                tracing::info!("[MiddlewareChain] '{}' decided {:?} for tool '{}'", mw.name(), other, tool_name);
                return Ok(other);
            }
        }
    }
    Ok(ToolCallDecision::Allow)
}
/// Run all `after_tool_call` hooks in order.
pub async fn run_after_tool_call(
&self,
@@ -245,8 +266,13 @@ impl Default for MiddlewareChain {
// ---------------------------------------------------------------------------
pub mod compaction;
pub mod dangling_tool;
pub mod guardrail;
pub mod loop_guard;
pub mod memory;
pub mod skill_index;
pub mod subagent_limit;
pub mod title;
pub mod token_calibration;
pub mod tool_error;
pub mod tool_output_guard;

View File

@@ -0,0 +1,125 @@
//! Dangling tool-call repair middleware — detects and patches missing tool-result
//! messages that would cause LLM API errors.
//!
//! When the LLM produces a `ToolUse` content block but the agent loop fails to
//! produce a corresponding `ToolResult` message (e.g. due to a crash or timeout),
//! the conversation history becomes inconsistent. The next LLM call would fail with
//! an API error because ToolUse messages must be followed by ToolResult messages.
//!
//! This middleware inspects the message history before each completion and appends
//! placeholder ToolResult messages for any dangling ToolUse entries.
use std::collections::HashSet;
use async_trait::async_trait;
use zclaw_types::{Message, Result};
use crate::middleware::{AgentMiddleware, MiddlewareContext, MiddlewareDecision};
/// Middleware that repairs dangling tool-use blocks in conversation history.
///
/// Priority 300 — runs before tool error middleware (350) and guardrail (400).
///
/// Stateless unit struct.
pub struct DanglingToolMiddleware;
impl DanglingToolMiddleware {
    /// Construct the middleware; no configuration is needed.
    pub fn new() -> Self {
        Self
    }
}
impl Default for DanglingToolMiddleware {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl AgentMiddleware for DanglingToolMiddleware {
    fn name(&self) -> &str { "dangling_tool" }

    fn priority(&self) -> i32 { 300 }

    /// Repair the message history before each LLM completion: any `ToolUse`
    /// without a matching `ToolResult` gets a placeholder error result
    /// appended directly after it, so the next API call does not fail.
    async fn before_completion(&self, ctx: &mut MiddlewareContext) -> Result<MiddlewareDecision> {
        // Step 1: Collect every ToolUse id and every ToolResult id across the
        // entire message list (not just adjacent pairs). Any existing result —
        // original or previously patched — counts as matched, which also
        // prevents double-patching if this middleware runs twice.
        let mut tool_use_ids: Vec<String> = Vec::new();
        let mut tool_result_ids: HashSet<String> = HashSet::new();
        for msg in &ctx.messages {
            match msg {
                Message::ToolUse { ref id, .. } => {
                    tool_use_ids.push(id.clone());
                }
                Message::ToolResult { ref tool_call_id, .. } => {
                    tool_result_ids.insert(tool_call_id.clone());
                }
                _ => {}
            }
        }

        // Step 2: A ToolUse with no matching ToolResult is dangling.
        let dangling_ids: HashSet<String> = tool_use_ids
            .into_iter()
            .filter(|id| !tool_result_ids.contains(id))
            .collect();
        if dangling_ids.is_empty() {
            return Ok(MiddlewareDecision::Continue);
        }

        // Step 3: Rebuild the message list, inserting a placeholder error
        // ToolResult immediately after each dangling ToolUse.
        let mut patched_messages: Vec<Message> =
            Vec::with_capacity(ctx.messages.len() + dangling_ids.len());
        let mut patched_count = 0usize;
        for msg in &ctx.messages {
            patched_messages.push(msg.clone());
            if let Message::ToolUse { ref id, ref tool, .. } = msg {
                if dangling_ids.contains(id) {
                    tracing::warn!(
                        "[DanglingToolMiddleware] Patching dangling ToolUse: tool={}, id={}",
                        tool.as_str(), id
                    );
                    let placeholder = Message::tool_result(
                        id.clone(),
                        tool.clone(),
                        serde_json::json!({
                            "error": "Tool execution was interrupted. Please retry or use an alternative approach.",
                            "tool_patch": true,
                        }),
                        true, // is_error
                    );
                    patched_messages.push(placeholder);
                    patched_count += 1;
                }
            }
        }

        // Step 4: Detect streaming interrupt — if the last message is an
        // Assistant response while dangling tools existed, the user likely
        // interrupted a streaming response mid-tool-execution. Informational
        // only: the placeholders above already prevent API errors.
        if let Some(Message::Assistant { .. }) = patched_messages.last() {
            tracing::debug!(
                "[DanglingToolMiddleware] Streaming interrupt detected with {} dangling tools",
                patched_count
            );
        }

        // dangling_ids is non-empty, so at least one message was patched.
        tracing::info!(
            "[DanglingToolMiddleware] Patched {} dangling tool-use blocks",
            patched_count
        );
        ctx.messages = patched_messages;
        Ok(MiddlewareDecision::Continue)
    }
}

View File

@@ -41,7 +41,7 @@ impl AgentMiddleware for LoopGuardMiddleware {
match result {
LoopGuardResult::CircuitBreaker => {
tracing::warn!("[LoopGuardMiddleware] Circuit breaker triggered by tool '{}'", tool_name);
Ok(ToolCallDecision::Block("检测到工具调用循环,已自动终止".to_string()))
Ok(ToolCallDecision::AbortLoop("检测到工具调用循环,已自动终止".to_string()))
}
LoopGuardResult::Blocked => {
tracing::warn!("[LoopGuardMiddleware] Tool '{}' blocked", tool_name);

View File

@@ -60,34 +60,39 @@ impl AgentMiddleware for MemoryMiddleware {
fn priority(&self) -> i32 { 150 }
async fn before_completion(&self, ctx: &mut MiddlewareContext) -> Result<MiddlewareDecision> {
// Skip memory injection for very short queries.
// Short queries (e.g., "1+6", "hi", "好") don't benefit from memory context.
// Worse, the retriever's scope-based fallback may return high-importance but
// irrelevant old memories, causing the model to think about past conversations
// instead of answering the current question.
// Use char count (not byte count) so CJK queries are handled correctly:
// a single Chinese char is 3 UTF-8 bytes but 1 meaningful character.
let query = ctx.user_input.trim();
if query.chars().count() < 2 {
tracing::debug!(
"[MemoryMiddleware] Skipping enhancement for short query ({:?}): no memory context needed",
query
"[MemoryMiddleware] before_completion for query: {:?}",
ctx.user_input.chars().take(50).collect::<String>()
);
return Ok(MiddlewareDecision::Continue);
}
match self.growth.enhance_prompt(
&ctx.agent_id,
&ctx.system_prompt,
&ctx.user_input,
).await {
// Retrieve relevant memories and inject into system prompt.
// The SqliteStorage retriever now uses FTS5-only matching — if FTS5 finds
// no relevant results, no memories are returned (no scope-based fallback).
// This prevents irrelevant high-importance memories from leaking into
// unrelated conversations.
let base = &ctx.system_prompt;
match self.growth.enhance_prompt(&ctx.agent_id, base, &ctx.user_input).await {
Ok(enhanced) => {
if enhanced != *base {
tracing::info!(
"[MemoryMiddleware] Injected memories into system prompt for agent {}",
ctx.agent_id
);
ctx.system_prompt = enhanced;
} else {
tracing::debug!(
"[MemoryMiddleware] No relevant memories found for query: {:?}",
ctx.user_input.chars().take(50).collect::<String>()
);
}
Ok(MiddlewareDecision::Continue)
}
Err(e) => {
// Non-fatal: memory retrieval failure should not block the loop
tracing::warn!("[MemoryMiddleware] Prompt enhancement failed: {}", e);
// Non-fatal: retrieval failure should not block the conversation
tracing::warn!(
"[MemoryMiddleware] Memory retrieval failed (non-fatal): {}",
e
);
Ok(MiddlewareDecision::Continue)
}
}

View File

@@ -0,0 +1,87 @@
//! Sub-agent limit middleware — enforces limits on sub-agent spawning.
//!
//! Prevents runaway sub-agent spawning by enforcing a per-turn total cap.
//! The `running` counter was removed because it leaked when subsequent
//! middleware blocked the tool call (before_tool_call increments but
//! after_tool_call never fires for blocked tools).
use async_trait::async_trait;
use serde_json::Value;
use zclaw_types::Result;
use crate::middleware::{AgentMiddleware, MiddlewareContext, ToolCallDecision};
/// Default maximum total sub-agents per conversation turn.
const DEFAULT_MAX_TOTAL: usize = 10;
/// Middleware that limits total sub-agent spawn count per turn.
///
/// Priority 550 — runs after loop guard (500).
pub struct SubagentLimitMiddleware {
    /// Maximum total sub-agents per conversation turn.
    max_total: usize,
    /// Total sub-agents spawned in this turn.
    /// Reset to 0 by `after_completion` when the turn ends.
    total_spawned: std::sync::atomic::AtomicUsize,
}
impl SubagentLimitMiddleware {
    /// Create with the default per-turn cap (`DEFAULT_MAX_TOTAL`).
    pub fn new() -> Self {
        Self {
            max_total: DEFAULT_MAX_TOTAL,
            total_spawned: std::sync::atomic::AtomicUsize::new(0),
        }
    }
    /// Override the per-turn cap (builder style).
    pub fn with_max_total(mut self, n: usize) -> Self {
        self.max_total = n;
        self
    }
    /// Check if a tool call is a sub-agent spawn request.
    /// Matched by exact tool name — extend this list when new spawn tools are added.
    fn is_subagent_tool(tool_name: &str) -> bool {
        matches!(tool_name, "task" | "delegate" | "spawn_agent" | "subagent")
    }
}
impl Default for SubagentLimitMiddleware {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl AgentMiddleware for SubagentLimitMiddleware {
    fn name(&self) -> &str { "subagent_limit" }

    fn priority(&self) -> i32 { 550 }

    /// Gate sub-agent spawning tools behind the per-turn total cap.
    ///
    /// Non-spawn tools pass straight through. For spawn tools, a slot is
    /// claimed atomically; once the cap is reached the call is blocked with
    /// a user-facing message.
    async fn before_tool_call(
        &self,
        _ctx: &MiddlewareContext,
        tool_name: &str,
        _tool_input: &Value,
    ) -> Result<ToolCallDecision> {
        use std::sync::atomic::Ordering;

        if !Self::is_subagent_tool(tool_name) {
            return Ok(ToolCallDecision::Allow);
        }

        // Atomically claim a spawn slot: increment only while below the cap.
        // fetch_update avoids the transient overshoot of increment-then-undo.
        let claimed = self
            .total_spawned
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |n| {
                if n >= self.max_total { None } else { Some(n + 1) }
            })
            .is_ok();

        if claimed {
            return Ok(ToolCallDecision::Allow);
        }

        tracing::warn!(
            "[SubagentLimitMiddleware] Total sub-agent limit ({}) reached — blocking spawn",
            self.max_total
        );
        Ok(ToolCallDecision::Block(format!(
            "子Agent总数量已达上限 ({}),请优先完成现有任务后再发起新任务。",
            self.max_total
        )))
    }

    /// Reset the per-turn counter once the agent loop turn completes.
    async fn after_completion(&self, _ctx: &MiddlewareContext) -> Result<()> {
        self.total_spawned
            .store(0, std::sync::atomic::Ordering::SeqCst);
        Ok(())
    }
}

View File

@@ -5,22 +5,29 @@
//! "新对话" or truncating the user's first message.
//!
//! Priority 180 — runs after compaction (100) and memory (150), before skill index (200).
//!
//! NOTE: This is a structural placeholder. Full implementation requires an LLM driver
//! reference to generate titles asynchronously, which will be wired through the
//! middleware context in a future iteration. For now it simply passes through.
use async_trait::async_trait;
use zclaw_types::Result;
use crate::middleware::{AgentMiddleware, MiddlewareContext};
use crate::middleware::{AgentMiddleware, MiddlewareDecision};
/// Middleware that auto-generates conversation titles after the first exchange.
///
/// When fully implemented, this will:
/// 1. Detect the first user-assistant exchange (via message count)
/// 2. Call the LLM with a short prompt to generate a descriptive title
/// 3. Update the session title via the middleware context
///
/// For now, it serves as a registered placeholder in the middleware chain.
pub struct TitleMiddleware {
/// Whether a title has been generated for the current session.
titled: std::sync::atomic::AtomicBool,
_reserved: (),
}
impl TitleMiddleware {
pub fn new() -> Self {
Self {
titled: std::sync::atomic::AtomicBool::new(false),
}
Self { _reserved: () }
}
}
@@ -34,4 +41,9 @@ impl Default for TitleMiddleware {
impl AgentMiddleware for TitleMiddleware {
fn name(&self) -> &str { "title" }
fn priority(&self) -> i32 { 180 }
// All hooks default to Continue — placeholder until LLM driver is wired in.
async fn before_completion(&self, _ctx: &mut crate::middleware::MiddlewareContext) -> zclaw_types::Result<MiddlewareDecision> {
Ok(MiddlewareDecision::Continue)
}
}

View File

@@ -0,0 +1,111 @@
//! Tool error middleware — catches tool execution errors and converts them
//! into well-formed tool-result messages for the LLM to recover from.
//!
//! Inspired by DeerFlow's ToolErrorMiddleware: instead of propagating raw errors
//! that crash the agent loop, this middleware wraps tool errors into a structured
//! format that the LLM can use to self-correct.
use async_trait::async_trait;
use serde_json::Value;
use zclaw_types::Result;
use crate::driver::ContentBlock;
use crate::middleware::{AgentMiddleware, MiddlewareContext, ToolCallDecision};
/// Middleware that intercepts tool call errors and formats recovery messages.
///
/// Priority 350 — runs after dangling tool repair (300) and before guardrail (400).
pub struct ToolErrorMiddleware {
    /// Maximum error message length before truncation.
    /// Measured in UTF-8 bytes (`str::len`), not characters.
    max_error_length: usize,
}
impl ToolErrorMiddleware {
    /// Create with the default 500-byte truncation limit.
    pub fn new() -> Self {
        Self {
            max_error_length: 500,
        }
    }
    /// Create with a custom max error length.
    pub fn with_max_error_length(mut self, len: usize) -> Self {
        self.max_error_length = len;
        self
    }
    /// Format a tool error into a guided recovery message for the LLM.
    ///
    /// The caller is responsible for truncation before passing `error`.
    /// The message (in Chinese) asks the model to analyze the failure and
    /// retry with corrected parameters or an alternative approach.
    fn format_tool_error(&self, tool_name: &str, error: &str) -> String {
        format!(
            "工具 '{}' 执行失败。错误信息: {}\n请分析错误原因,尝试修正参数后重试,或使用其他方法完成任务。",
            tool_name, error
        )
    }
}
impl Default for ToolErrorMiddleware {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl AgentMiddleware for ToolErrorMiddleware {
    fn name(&self) -> &str { "tool_error" }

    fn priority(&self) -> i32 { 350 }

    /// Pre-validate tool input structure for common issues.
    ///
    /// Catches malformed `null` inputs before they reach the tool executor
    /// and replaces them with an empty JSON object.
    async fn before_tool_call(
        &self,
        _ctx: &MiddlewareContext,
        tool_name: &str,
        tool_input: &Value,
    ) -> Result<ToolCallDecision> {
        if tool_input.is_null() {
            tracing::warn!(
                "[ToolErrorMiddleware] Tool '{}' received null input — replacing with empty object",
                tool_name
            );
            return Ok(ToolCallDecision::ReplaceInput(serde_json::json!({})));
        }
        Ok(ToolCallDecision::Allow)
    }

    /// Inspect a tool result; when it carries an `error` field, log it and
    /// inject a guided recovery message so the LLM can self-correct.
    async fn after_tool_call(
        &self,
        ctx: &mut MiddlewareContext,
        tool_name: &str,
        result: &Value,
    ) -> Result<()> {
        if let Some(error) = result.get("error") {
            let error_msg = match error {
                Value::String(s) => s.clone(),
                other => other.to_string(),
            };
            let truncated = if error_msg.len() > self.max_error_length {
                // Char-boundary-safe truncation without the nightly-only
                // `str::floor_char_boundary`: walk back from the byte limit to
                // the nearest UTF-8 boundary. `is_char_boundary(0)` is always
                // true, so the loop terminates.
                let mut end = self.max_error_length;
                while !error_msg.is_char_boundary(end) {
                    end -= 1;
                }
                format!("{}...(truncated)", &error_msg[..end])
            } else {
                error_msg.clone()
            };
            tracing::warn!(
                "[ToolErrorMiddleware] Tool '{}' failed: {}",
                tool_name, truncated
            );
            // Build a guided recovery message so the LLM can self-correct.
            let guided_message = self.format_tool_error(tool_name, &truncated);
            // Inject into response_content so the agent loop feeds this back
            // to the LLM alongside the raw tool result.
            ctx.response_content.push(ContentBlock::Text {
                text: guided_message,
            });
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,132 @@
//! Tool output sanitization middleware — inspects tool results for risky content
//! before they flow back into the LLM context.
//!
//! Inspired by DeerFlow's missing "Toxic Output Loop" defense — ZCLAW proactively
//! implements post-execution output checking.
//!
//! Rules:
//! - Output length cap: warns when tool output exceeds threshold
//! - Sensitive pattern detection: flags API keys, tokens, passwords
//! - Injection marker detection: flags common prompt-injection patterns
//!
//! This middleware does NOT modify content. It only logs warnings at appropriate levels.
use async_trait::async_trait;
use serde_json::Value;
use zclaw_types::Result;
use crate::middleware::{AgentMiddleware, MiddlewareContext, ToolCallDecision};
/// Maximum safe output length in characters.
/// (Compared against the byte length of the JSON serialization in
/// `after_tool_call`; for ASCII-heavy output the two are equivalent.)
const MAX_OUTPUT_LENGTH: usize = 50_000;
/// Patterns that indicate sensitive information in tool output.
///
/// NOTE(review): `after_tool_call` lowercases only the OUTPUT before
/// matching, so the mixed/upper-case entries here ("-----BEGIN RSA",
/// "AKIA", …) can never match unless the comparison also lowercases the
/// pattern — confirm the matching side handles this.
const SENSITIVE_PATTERNS: &[&str] = &[
    "api_key",
    "apikey",
    "api-key",
    "secret_key",
    "secretkey",
    "access_token",
    "auth_token",
    "password",
    "private_key",
    "-----BEGIN RSA",
    "-----BEGIN PRIVATE",
    "sk-", // OpenAI API keys
    "sk_live_", // Stripe keys
    "AKIA", // AWS access keys
];
/// Patterns that may indicate prompt injection in tool output.
/// All entries are lowercase and matched against lowercased output.
const INJECTION_PATTERNS: &[&str] = &[
    "ignore previous instructions",
    "ignore all previous",
    "disregard your instructions",
    "you are now",
    "new instructions:",
    "system:",
    "[INST]",
    "</scratchpad>",
    "think step by step about",
];
/// Tool output sanitization middleware.
///
/// Priority 360 — runs after ToolErrorMiddleware (350), before GuardrailMiddleware (400).
pub struct ToolOutputGuardMiddleware {
    /// Warn when a tool result's serialized length exceeds this many bytes.
    max_output_length: usize,
}
impl ToolOutputGuardMiddleware {
    /// Create a guard with the default length cap (`MAX_OUTPUT_LENGTH`).
    pub fn new() -> Self {
        Self {
            max_output_length: MAX_OUTPUT_LENGTH,
        }
    }
}
impl Default for ToolOutputGuardMiddleware {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl AgentMiddleware for ToolOutputGuardMiddleware {
    fn name(&self) -> &str { "tool_output_guard" }

    fn priority(&self) -> i32 { 360 }

    /// No pre-execution checks — this middleware only inspects output.
    async fn before_tool_call(
        &self,
        _ctx: &MiddlewareContext,
        _tool_name: &str,
        _tool_input: &Value,
    ) -> Result<ToolCallDecision> {
        Ok(ToolCallDecision::Allow)
    }

    /// Inspect a tool result and log warnings for risky content.
    /// Never modifies the result — logging only.
    async fn after_tool_call(
        &self,
        _ctx: &mut MiddlewareContext,
        tool_name: &str,
        result: &Value,
    ) -> Result<()> {
        let output_str = serde_json::to_string(result).unwrap_or_default();
        let output_len = output_str.len();

        // Rule 1: Output length check (bytes of the JSON serialization)
        if output_len > self.max_output_length {
            tracing::warn!(
                "[ToolOutputGuard] Tool '{}' returned oversized output: {} chars (limit: {})",
                tool_name, output_len, self.max_output_length
            );
        }

        // Matching is case-insensitive: the output is lowercased once and
        // each pattern is lowercased before comparison. (Previously the
        // mixed-case patterns — "AKIA", "-----BEGIN RSA", "[INST]" — were
        // compared as-is against the lowercased output and could never match.)
        let output_lower = output_str.to_lowercase();

        // Rule 2: Sensitive information detection
        for pattern in SENSITIVE_PATTERNS {
            if output_lower.contains(&pattern.to_lowercase()) {
                tracing::warn!(
                    "[ToolOutputGuard] Tool '{}' output contains sensitive pattern: '{}'",
                    tool_name, pattern
                );
                break; // Only warn once per tool call
            }
        }

        // Rule 3: Injection marker detection
        for pattern in INJECTION_PATTERNS {
            if output_lower.contains(&pattern.to_lowercase()) {
                tracing::warn!(
                    "[ToolOutputGuard] Tool '{}' output contains potential injection marker: '{}'",
                    tool_name, pattern
                );
                break; // Only warn once per tool call
            }
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,120 @@
use std::fmt::Write;
use crate::driver::ToolDefinition;
/// Runtime context that determines which prompt sections are included.
///
/// Consumed by `PromptBuilder::build`; each field maps to one conditional
/// section of the assembled system prompt.
pub struct PromptContext {
    /// Base system prompt from AgentConfig.
    /// Falls back to "You are a helpful AI assistant." when `None`.
    pub base_prompt: Option<String>,
    /// Custom agent personality (SOUL.md equivalent) — emitted as an
    /// "## Agent Personality" section when present.
    pub soul: Option<String>,
    /// Whether thinking/extended reasoning is enabled ("## Reasoning Mode" section).
    pub thinking_enabled: bool,
    /// Whether plan mode is active ("## Plan Mode" section).
    pub plan_mode: bool,
    /// Tool definitions available for dynamic injection ("## Available Tools" list).
    pub tool_definitions: Vec<ToolDefinition>,
    /// Agent name for personalization (adds a "You are known as …" line).
    pub agent_name: Option<String>,
}
/// A single section in the assembled prompt.
pub struct PromptSection {
    /// Stable identifier for the section. Not currently read by
    /// `PromptBuilder::build` — it is not emitted into the rendered prompt.
    pub name: &'static str,
    /// Raw section text, appended verbatim after a blank-line separator.
    pub template: String,
    /// Sort key: lower values render earlier among registered sections.
    pub priority: u32,
}
/// Builds structured system prompts from conditional sections.
pub struct PromptBuilder {
    /// Sections registered via `add_section`; sorted by `priority` at build time.
    sections: Vec<PromptSection>,
}
impl PromptBuilder {
    /// Create a builder with no registered sections.
    pub fn new() -> Self {
        Self { sections: Vec::new() }
    }

    /// Register a section that is always appended to the assembled prompt,
    /// ordered by ascending `priority` relative to other registered sections.
    pub fn add_section(
        mut self,
        name: &'static str,
        template: impl Into<String>,
        priority: u32,
    ) -> Self {
        let section = PromptSection {
            name,
            template: template.into(),
            priority,
        };
        self.sections.push(section);
        self
    }

    /// Assemble the final system prompt based on runtime context.
    ///
    /// Fixed sections driven by `ctx` (base prompt, personality, agent name,
    /// tools, reasoning and plan-mode guidance) come first, in that order;
    /// registered sections follow, sorted by ascending priority.
    pub fn build(&self, ctx: &PromptContext) -> String {
        let mut prompt = String::with_capacity(4096);

        // Base prompt (always included; generic fallback when unset).
        match ctx.base_prompt {
            Some(ref base) => prompt.push_str(base),
            None => prompt.push_str("You are a helpful AI assistant."),
        }

        // Soul/personality section.
        if let Some(ref soul) = ctx.soul {
            prompt.push_str("\n\n## Agent Personality\n\n");
            prompt.push_str(soul);
        }

        // Agent name personalization. Writing to a String cannot fail, so the
        // write! result is deliberately discarded.
        if let Some(ref name) = ctx.agent_name {
            let _ = write!(prompt, "\n\nYou are known as \"{name}\". Respond in character.");
        }

        // Dynamic tool descriptions.
        if !ctx.tool_definitions.is_empty() {
            prompt.push_str("\n\n## Available Tools\n\n");
            for tool in &ctx.tool_definitions {
                let _ = writeln!(prompt, "- **{}**: {}", tool.name, tool.description);
            }
        }

        // Thinking style guidance.
        if ctx.thinking_enabled {
            prompt.push_str("\n\n## Reasoning Mode\n\n");
            prompt.push_str(
                "Extended reasoning is enabled. Think step-by-step before responding. \
                 Show your reasoning process, then provide the final answer.",
            );
        }

        // Plan mode instructions.
        if ctx.plan_mode {
            prompt.push_str("\n\n## Plan Mode\n\n");
            prompt.push_str(
                "You are in plan mode. Before executing any actions, create a detailed plan. \
                 Present the plan to the user for approval before proceeding.",
            );
        }

        // Registered sections, lowest priority first (stable for equal keys).
        let mut ordered: Vec<&PromptSection> = self.sections.iter().collect();
        ordered.sort_by_key(|s| s.priority);
        for section in ordered {
            prompt.push_str("\n\n");
            prompt.push_str(&section.template);
        }

        prompt
    }
}
impl Default for PromptBuilder {
    /// Equivalent to [`PromptBuilder::new`]: an empty builder with no sections.
    fn default() -> Self {
        Self::new()
    }
}

View File

@@ -0,0 +1,9 @@
//! Dynamic prompt assembly module.
//!
//! Inspired by DeerFlow's conditional section-based prompt composition.
//! The `PromptBuilder` assembles a structured system prompt from multiple
//! conditional sections before the middleware chain further modifies it.
mod builder;
pub use builder::{PromptBuilder, PromptContext, PromptSection};

View File

@@ -7,6 +7,7 @@ mod web_fetch;
mod execute_skill;
mod skill_load;
mod path_validator;
mod task;
pub use file_read::FileReadTool;
pub use file_write::FileWriteTool;
@@ -15,6 +16,7 @@ pub use web_fetch::WebFetchTool;
pub use execute_skill::ExecuteSkillTool;
pub use skill_load::SkillLoadTool;
pub use path_validator::{PathValidator, PathValidatorConfig};
pub use task::TaskTool;
use crate::tool::ToolRegistry;

View File

@@ -0,0 +1,179 @@
//! Task tool — delegates sub-tasks to a nested AgentLoop.
//!
//! Inspired by DeerFlow's `task_tool`: the lead agent can spawn sub-agent tasks
//! to parallelise complex work. Each sub-task runs its own AgentLoop with a
//! fresh session, isolated context, and a configurable maximum iteration count.
use async_trait::async_trait;
use serde_json::{json, Value};
use zclaw_types::{AgentId, Result, ZclawError};
use zclaw_memory::MemoryStore;
use crate::driver::LlmDriver;
use crate::loop_runner::AgentLoop;
use crate::tool::{Tool, ToolContext, ToolRegistry};
use crate::tool::builtin::register_builtin_tools;
use std::sync::Arc;
/// Default max iterations for a sub-agent task.
/// Mirrors the `max_iterations` default (5) advertised in `TaskTool::input_schema`.
const DEFAULT_MAX_ITERATIONS: usize = 5;
/// Tool that delegates sub-tasks to a nested AgentLoop.
pub struct TaskTool {
    /// LLM driver shared with every spawned sub-agent loop.
    driver: Arc<dyn LlmDriver>,
    /// Memory store used to create a fresh session per sub-task.
    memory: Arc<MemoryStore>,
    /// Model identifier forwarded to the sub-agent loop.
    model: String,
    /// Token budget per sub-agent completion (4096 unless overridden via
    /// `with_max_tokens`).
    max_tokens: u32,
    /// Sampling temperature for the sub-agent (0.7 unless overridden via
    /// `with_temperature`).
    temperature: f32,
}
impl TaskTool {
pub fn new(
driver: Arc<dyn LlmDriver>,
memory: Arc<MemoryStore>,
model: impl Into<String>,
) -> Self {
Self {
driver,
memory,
model: model.into(),
max_tokens: 4096,
temperature: 0.7,
}
}
pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
self.max_tokens = max_tokens;
self
}
pub fn with_temperature(mut self, temperature: f32) -> Self {
self.temperature = temperature;
self
}
}
#[async_trait]
impl Tool for TaskTool {
    /// Tool identifier exposed to the LLM.
    fn name(&self) -> &str {
        "task"
    }

    /// LLM-facing guidance on when to delegate work to a sub-agent.
    fn description(&self) -> &str {
        "Delegate a sub-task to a sub-agent. The sub-agent will work independently \
         with its own context and tools. Use this to break complex tasks into \
         parallel or sequential sub-tasks. Each sub-task runs in its own session \
         with a focused system prompt."
    }

    /// JSON schema for the tool input: `description` and `prompt` are
    /// required; `max_iterations` is optional with an advertised range of
    /// 1..=10 and a default of 5.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "description": {
                    "type": "string",
                    "description": "Short description of the sub-task (shown in progress UI)"
                },
                "prompt": {
                    "type": "string",
                    "description": "Detailed instructions for the sub-agent"
                },
                "max_iterations": {
                    "type": "integer",
                    "description": "Maximum tool-call iterations for the sub-agent (default: 5)",
                    "minimum": 1,
                    "maximum": 10
                }
            },
            "required": ["description", "prompt"]
        })
    }

    /// Run the sub-task: spins up a nested AgentLoop with a fresh session and
    /// an isolated tool registry, then returns a JSON summary of the outcome.
    ///
    /// # Errors
    /// Returns `ZclawError::InvalidInput` when `description` or `prompt` is
    /// missing; session-creation failures propagate from the memory store.
    /// Sub-loop *execution* failures are NOT propagated — they are reported in
    /// the returned JSON with `"status": "failed"`.
    async fn execute(&self, input: Value, context: &ToolContext) -> Result<Value> {
        let description = input["description"].as_str()
            .ok_or_else(|| ZclawError::InvalidInput("Missing 'description' parameter".into()))?;
        let prompt = input["prompt"].as_str()
            .ok_or_else(|| ZclawError::InvalidInput("Missing 'prompt' parameter".into()))?;
        // NOTE(review): `max_iterations` is parsed (unclamped — the 1..=10 range
        // in the schema is not enforced at runtime here) and logged below, but it
        // is NEVER installed on the sub-loop — there is no
        // `.with_max_iterations(...)` call in the builder chain. Confirm whether
        // AgentLoop applies its own internal cap; otherwise the advertised limit
        // is silently ignored.
        let max_iterations = input["max_iterations"].as_u64()
            .unwrap_or(DEFAULT_MAX_ITERATIONS as u64) as usize;

        tracing::info!(
            "[TaskTool] Starting sub-agent task: {:?} (max_iterations={})",
            description, max_iterations
        );

        // Create a sub-agent with its own ID.
        let sub_agent_id = AgentId::new();

        // Create a fresh session for the sub-agent (isolated conversation state).
        let session_id = self.memory.create_session(&sub_agent_id).await?;

        // Build a system prompt focused on the sub-task. (Prompt text is
        // Chinese: "You are a focused sub-agent responsible for this task...".)
        let system_prompt = format!(
            "你是一个专注的子Agent负责完成以下任务{}\n\n\
             要求:\n\
             - 专注完成分配给你的任务\n\
             - 使用可用的工具来完成任务\n\
             - 完成后提供简洁的结果摘要\n\
             - 如果遇到无法解决的问题,请说明原因",
            description
        );

        // Create a tool registry with builtin tools.
        // (TaskTool itself is NOT included to prevent infinite nesting.)
        let mut tools = ToolRegistry::new();
        register_builtin_tools(&mut tools);

        // Build a lightweight AgentLoop for the sub-agent, inheriting the
        // parent's driver, model, and sampling configuration.
        let mut sub_loop = AgentLoop::new(
            sub_agent_id,
            self.driver.clone(),
            tools,
            self.memory.clone(),
        )
        .with_model(&self.model)
        .with_system_prompt(&system_prompt)
        .with_max_tokens(self.max_tokens)
        .with_temperature(self.temperature);

        // Optionally inject skill executor and path validator from the parent
        // context, so the sub-agent honors the same sandbox restrictions.
        if let Some(ref executor) = context.skill_executor {
            sub_loop = sub_loop.with_skill_executor(executor.clone());
        }
        if let Some(ref validator) = context.path_validator {
            sub_loop = sub_loop.with_path_validator(validator.clone());
        }

        // Execute the sub-agent loop (non-streaming — collect full result).
        // Failures become a "failed" status payload rather than an Err.
        let result = match sub_loop.run(session_id.clone(), prompt.to_string()).await {
            Ok(loop_result) => {
                tracing::info!(
                    "[TaskTool] Sub-agent completed: {} iterations, {} input tokens, {} output tokens",
                    loop_result.iterations, loop_result.input_tokens, loop_result.output_tokens
                );
                json!({
                    "status": "completed",
                    "description": description,
                    "result": loop_result.response,
                    "iterations": loop_result.iterations,
                    "input_tokens": loop_result.input_tokens,
                    "output_tokens": loop_result.output_tokens,
                })
            }
            Err(e) => {
                tracing::warn!("[TaskTool] Sub-agent failed: {}", e);
                json!({
                    "status": "failed",
                    "description": description,
                    "error": e.to_string(),
                })
            }
        };
        Ok(result)
    }
}

View File

@@ -185,8 +185,8 @@ pub async fn increment_usage(
input_tokens: i64,
output_tokens: i64,
) -> SaasResult<()> {
// 确保 quota 行存在(幂等)
let _ = get_or_create_usage(pool, account_id).await?;
// 确保 quota 行存在(幂等)— 返回值仅用于确认行存在,无需绑定
get_or_create_usage(pool, account_id).await?;
// 直接用 account_id + period 原子更新,无需 SELECT 获取 ID
let now = chrono::Utc::now();

View File

@@ -887,7 +887,7 @@ async fn fix_seed_data(pool: &PgPool) -> SaasResult<()> {
}
// 也更新 api_tokens 表的 account_id
let _ = sqlx::query("UPDATE api_tokens SET account_id = $1 WHERE account_id != $1")
sqlx::query("UPDATE api_tokens SET account_id = $1 WHERE account_id != $1")
.bind(primary_admin).execute(pool).await?;
tracing::info!("Seed data fix completed");

View File

@@ -231,13 +231,12 @@ pub async fn batch_create_items(
}
match service::create_item(&state.db, &ctx.account_id, req).await {
Ok(item) => {
let _ = state.worker_dispatcher.dispatch(
if let Err(e) = state.worker_dispatcher.dispatch(
"generate_embedding",
serde_json::json!({ "item_id": item.id }),
).await.map_err(|e| {
).await {
tracing::warn!("[Knowledge] Failed to dispatch embedding for item {}: {}", item.id, e);
e
});
}
created.push(item.id);
}
Err(e) => {
@@ -563,13 +562,12 @@ pub async fn import_items(
match service::create_item(&state.db, &ctx.account_id, &item_req).await {
Ok(item) => {
let _ = state.worker_dispatcher.dispatch(
if let Err(e) = state.worker_dispatcher.dispatch(
"generate_embedding",
serde_json::json!({ "item_id": item.id }),
).await.map_err(|e| {
).await {
tracing::warn!("[Knowledge] Failed to dispatch embedding for item {}: {}", item.id, e);
e
});
}
created.push(item.id);
}
Err(e) => {

View File

@@ -259,7 +259,9 @@ pub async fn execute_relay(
}
}
let key_id = current_key_id.as_ref().unwrap().clone();
let key_id = current_key_id.as_ref()
.ok_or_else(|| SaasError::Internal("Key pool selection failed: no key_id".into()))?
.clone();
let api_key = current_api_key.clone();
let mut req_builder = client.post(&url)
@@ -309,7 +311,10 @@ pub async fn execute_relay(
}
}
Err(e) => {
let _ = tx.send(Err(std::io::Error::other(e))).await;
let err_msg = e.to_string();
if tx.send(Err(std::io::Error::other(e))).await.is_err() {
tracing::debug!("SSE relay: client disconnected before error sent: {}", err_msg);
}
break;
}
}
@@ -372,12 +377,12 @@ pub async fn execute_relay(
let (input_tokens, output_tokens) = extract_token_usage(&body);
update_task_status(db, task_id, "completed",
Some(input_tokens), Some(output_tokens), None).await?;
// 记录 Key 使用量
let _ = super::key_pool::record_key_usage(
// 记录 Key 使用量(失败仅记录,不阻塞响应)
if let Err(e) = super::key_pool::record_key_usage(
db, &key_id, Some(input_tokens + output_tokens),
).await.map_err(|e| {
).await {
tracing::warn!("[Relay] Failed to record key usage for billing: {}", e);
});
}
return Ok(RelayResponse::Json(body));
}
}
@@ -557,7 +562,10 @@ fn hash_request(body: &str) -> String {
fn extract_token_usage(body: &str) -> (i64, i64) {
let parsed: serde_json::Value = match serde_json::from_str(body) {
Ok(v) => v,
Err(_) => return (0, 0),
Err(e) => {
tracing::debug!("extract_token_usage: JSON parse failed (body len={}): {}", body.len(), e);
return (0, 0);
}
};
let usage = parsed.get("usage");

View File

@@ -1,5 +1,9 @@
//! WASM skill runner — executes WASM modules in a wasmtime sandbox.
//!
//! **Status**: Active module — fully implemented with real wasmtime integration.
//! Unlike Director/A2A (feature-gated off), this module is compiled by default
//! but only invoked when a `.wasm` skill is loaded. No feature gate needed.
//!
//! Guest modules target `wasm32-wasi` and communicate via stdin/stdout JSON.
//! Host provides optional functions: `zclaw_log`, `zclaw_http_fetch`, `zclaw_file_read`.

View File

@@ -20,6 +20,9 @@ pub struct AgentConfig {
/// System prompt
#[serde(default)]
pub system_prompt: Option<String>,
/// Custom agent personality (SOUL.md equivalent from DeerFlow)
#[serde(default)]
pub soul: Option<String>,
/// Capabilities granted to this agent
#[serde(default)]
pub capabilities: Vec<Capability>,
@@ -56,6 +59,7 @@ impl Default for AgentConfig {
description: None,
model: ModelConfig::default(),
system_prompt: None,
soul: None,
capabilities: Vec::new(),
tools: Vec::new(),
max_tokens: None,
@@ -91,6 +95,11 @@ impl AgentConfig {
self
}
pub fn with_soul(mut self, soul: impl Into<String>) -> Self {
self.soul = Some(soul.into());
self
}
pub fn with_model(mut self, model: ModelConfig) -> Self {
self.model = model;
self

View File

@@ -24,6 +24,8 @@ pub enum Capability {
AgentMessage { pattern: String },
/// Kill agents matching pattern
AgentKill { pattern: String },
/// OpenFang Protocol capabilities (reserved for future A2A mesh networking).
/// Currently defined but not consumed - no implementation or grant path exists.
/// Discover remote peers via OFP
OfpDiscover,
/// Connect to specific OFP peers
@@ -58,7 +60,16 @@ impl Capability {
match self {
Capability::ToolAll => true,
Capability::ToolInvoke { name } => name == tool_name,
_ => false,
Capability::MemoryRead { .. }
| Capability::MemoryWrite { .. }
| Capability::NetConnect { .. }
| Capability::ShellExec { .. }
| Capability::AgentSpawn
| Capability::AgentMessage { .. }
| Capability::AgentKill { .. }
| Capability::OfpDiscover
| Capability::OfpConnect { .. }
| Capability::OfpAdvertise => false,
}
}
@@ -68,7 +79,17 @@ impl Capability {
Capability::MemoryRead { scope: s } => {
s == "*" || s == scope || scope.starts_with(&format!("{}.", s))
}
_ => false,
Capability::ToolAll
| Capability::ToolInvoke { .. }
| Capability::MemoryWrite { .. }
| Capability::NetConnect { .. }
| Capability::ShellExec { .. }
| Capability::AgentSpawn
| Capability::AgentMessage { .. }
| Capability::AgentKill { .. }
| Capability::OfpDiscover
| Capability::OfpConnect { .. }
| Capability::OfpAdvertise => false,
}
}
@@ -78,7 +99,17 @@ impl Capability {
Capability::MemoryWrite { scope: s } => {
s == "*" || s == scope || scope.starts_with(&format!("{}.", s))
}
_ => false,
Capability::ToolAll
| Capability::ToolInvoke { .. }
| Capability::MemoryRead { .. }
| Capability::NetConnect { .. }
| Capability::ShellExec { .. }
| Capability::AgentSpawn
| Capability::AgentMessage { .. }
| Capability::AgentKill { .. }
| Capability::OfpDiscover
| Capability::OfpConnect { .. }
| Capability::OfpAdvertise => false,
}
}
}
@@ -152,6 +183,10 @@ impl Capability {
(Capability::NetConnect { host: a }, Capability::NetConnect { host: b }) => {
a == "*" || a == b
}
// Exhaustive fallback: all remaining (self, other) combinations
// return false. Kept as wildcard because enumerating 12×12
// combinations is impractical; new variants should add explicit
// arms above when they introduce new grant rules.
_ => false,
}
}

View File

@@ -114,7 +114,10 @@ impl Message {
}
}
/// Content block for structured responses
/// Canonical LLM message content block. Used for agent conversation messages.
/// See also: zclaw_runtime::driver::ContentBlock (LLM driver response subset),
/// zclaw_hands::slideshow::ContentBlock (presentation rendering),
/// zclaw_protocols::mcp_types::ContentBlock (MCP protocol wire format).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContentBlock {

View File

@@ -43,6 +43,7 @@
"clsx": "^2.1.1",
"dompurify": "^3.3.3",
"framer-motion": "^12.38.0",
"idb": "^8.0.3",
"lucide-react": "^0.577.0",
"react": "^19.2.4",
"react-dom": "^19.2.4",

View File

@@ -29,6 +29,9 @@ importers:
framer-motion:
specifier: ^12.38.0
version: 12.38.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
idb:
specifier: ^8.0.3
version: 8.0.3
lucide-react:
specifier: ^0.577.0
version: 0.577.0(react@19.2.4)
@@ -2075,6 +2078,9 @@ packages:
resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==}
engines: {node: '>=0.10.0'}
idb@8.0.3:
resolution: {integrity: sha512-LtwtVyVYO5BqRvcsKuB2iUMnHwPVByPCXFXOpuU96IZPPoPN6xjOGxZQ74pgSVVLQWtUOYgyeL4GE98BY5D3wg==}
ignore@5.3.2:
resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==}
engines: {node: '>= 4'}
@@ -5323,6 +5329,8 @@ snapshots:
dependencies:
safer-buffer: 2.1.2
idb@8.0.3: {}
ignore@5.3.2: {}
ignore@7.0.5: {}

View File

@@ -43,6 +43,7 @@ impl Clone for BrowserState {
// ============================================================================
/// Create a new browser session
// @connected
#[tauri::command]
pub async fn browser_create_session(
state: State<'_, BrowserState>,
@@ -74,6 +75,7 @@ pub async fn browser_create_session(
}
/// Close a browser session
// @connected
#[tauri::command]
pub async fn browser_close_session(
state: State<'_, BrowserState>,
@@ -84,6 +86,7 @@ pub async fn browser_close_session(
}
/// List all browser sessions
// @connected
#[tauri::command]
pub async fn browser_list_sessions(
state: State<'_, BrowserState>,
@@ -106,6 +109,7 @@ pub async fn browser_list_sessions(
}
/// Get session info
// @connected
#[tauri::command]
pub async fn browser_get_session(
state: State<'_, BrowserState>,
@@ -130,6 +134,7 @@ pub async fn browser_get_session(
// ============================================================================
/// Navigate to URL
// @connected
#[tauri::command]
pub async fn browser_navigate(
state: State<'_, BrowserState>,
@@ -146,6 +151,7 @@ pub async fn browser_navigate(
}
/// Go back
// @connected
#[tauri::command]
pub async fn browser_back(
state: State<'_, BrowserState>,
@@ -156,6 +162,7 @@ pub async fn browser_back(
}
/// Go forward
// @connected
#[tauri::command]
pub async fn browser_forward(
state: State<'_, BrowserState>,
@@ -166,6 +173,7 @@ pub async fn browser_forward(
}
/// Refresh page
// @connected
#[tauri::command]
pub async fn browser_refresh(
state: State<'_, BrowserState>,
@@ -176,6 +184,7 @@ pub async fn browser_refresh(
}
/// Get current URL
// @connected
#[tauri::command]
pub async fn browser_get_url(
state: State<'_, BrowserState>,
@@ -186,6 +195,7 @@ pub async fn browser_get_url(
}
/// Get page title
// @connected
#[tauri::command]
pub async fn browser_get_title(
state: State<'_, BrowserState>,
@@ -200,6 +210,7 @@ pub async fn browser_get_title(
// ============================================================================
/// Find element
// @connected
#[tauri::command]
pub async fn browser_find_element(
state: State<'_, BrowserState>,
@@ -225,6 +236,7 @@ pub async fn browser_find_element(
}
/// Find multiple elements
// @connected
#[tauri::command]
pub async fn browser_find_elements(
state: State<'_, BrowserState>,
@@ -253,6 +265,7 @@ pub async fn browser_find_elements(
}
/// Click element
// @connected
#[tauri::command]
pub async fn browser_click(
state: State<'_, BrowserState>,
@@ -264,6 +277,7 @@ pub async fn browser_click(
}
/// Type text into element
// @connected
#[tauri::command]
pub async fn browser_type(
state: State<'_, BrowserState>,
@@ -288,6 +302,7 @@ pub async fn browser_type(
}
/// Get element text
// @connected
#[tauri::command]
pub async fn browser_get_text(
state: State<'_, BrowserState>,
@@ -299,6 +314,7 @@ pub async fn browser_get_text(
}
/// Get element attribute
// @connected
#[tauri::command]
pub async fn browser_get_attribute(
state: State<'_, BrowserState>,
@@ -314,6 +330,7 @@ pub async fn browser_get_attribute(
}
/// Wait for element
// @connected
#[tauri::command]
pub async fn browser_wait_for_element(
state: State<'_, BrowserState>,
@@ -347,6 +364,7 @@ pub async fn browser_wait_for_element(
// ============================================================================
/// Execute JavaScript
// @connected
#[tauri::command]
pub async fn browser_execute_script(
state: State<'_, BrowserState>,
@@ -362,6 +380,7 @@ pub async fn browser_execute_script(
}
/// Take screenshot
// @connected
#[tauri::command]
pub async fn browser_screenshot(
state: State<'_, BrowserState>,
@@ -377,6 +396,7 @@ pub async fn browser_screenshot(
}
/// Take element screenshot
// @connected
#[tauri::command]
pub async fn browser_element_screenshot(
state: State<'_, BrowserState>,
@@ -396,6 +416,7 @@ pub async fn browser_element_screenshot(
}
/// Get page source
// @connected
#[tauri::command]
pub async fn browser_get_source(
state: State<'_, BrowserState>,
@@ -410,6 +431,7 @@ pub async fn browser_get_source(
// ============================================================================
/// Scrape page content
// @connected
#[tauri::command]
pub async fn browser_scrape_page(
state: State<'_, BrowserState>,
@@ -442,6 +464,7 @@ pub async fn browser_scrape_page(
}
/// Fill form
// @connected
#[tauri::command]
pub async fn browser_fill_form(
state: State<'_, BrowserState>,

View File

@@ -47,6 +47,7 @@ pub struct ClassroomChatCmdRequest {
// ---------------------------------------------------------------------------
/// Send a message in the classroom chat and get multi-agent responses.
// @connected
#[tauri::command]
pub async fn classroom_chat(
store: State<'_, ClassroomStore>,
@@ -107,6 +108,7 @@ pub async fn classroom_chat(
}
/// Retrieve chat history for a classroom
// @connected
#[tauri::command]
pub async fn classroom_chat_history(
chat_store: State<'_, ChatStore>,

View File

@@ -32,6 +32,7 @@ pub struct ClassroomExportResponse {
// Command
// ---------------------------------------------------------------------------
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn classroom_export(
store: State<'_, ClassroomStore>,

View File

@@ -88,6 +88,7 @@ fn stage_name(stage: &GenerationStage) -> &'static str {
/// Start classroom generation (4-stage pipeline).
/// Progress events are emitted via `classroom:progress`.
/// Supports cancellation between stages by removing the task from GenerationTasks.
// @connected
#[tauri::command]
pub async fn classroom_generate(
app: AppHandle,
@@ -232,6 +233,7 @@ pub async fn classroom_generate(
}
/// Get current generation progress for a topic
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn classroom_generation_progress(
tasks: State<'_, GenerationTasks>,
@@ -248,6 +250,7 @@ pub async fn classroom_generation_progress(
}
/// Cancel an active generation
// @connected
#[tauri::command]
pub async fn classroom_cancel_generation(
tasks: State<'_, GenerationTasks>,
@@ -259,6 +262,7 @@ pub async fn classroom_cancel_generation(
}
/// Retrieve a generated classroom by ID
// @connected
#[tauri::command]
pub async fn classroom_get(
store: State<'_, ClassroomStore>,
@@ -271,6 +275,7 @@ pub async fn classroom_get(
}
/// List all generated classrooms (id + title only)
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn classroom_list(
store: State<'_, ClassroomStore>,

View File

@@ -52,12 +52,14 @@ pub(crate) struct ProcessLogsResponse {
}
/// Get ZCLAW Kernel status
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_status(app: AppHandle) -> Result<LocalGatewayStatus, String> {
read_gateway_status(&app)
}
/// Start ZCLAW Kernel
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_start(app: AppHandle) -> Result<LocalGatewayStatus, String> {
ensure_local_gateway_ready_for_tauri(&app)?;
@@ -67,6 +69,7 @@ pub fn zclaw_start(app: AppHandle) -> Result<LocalGatewayStatus, String> {
}
/// Stop ZCLAW Kernel
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_stop(app: AppHandle) -> Result<LocalGatewayStatus, String> {
run_zclaw(&app, &["gateway", "stop", "--json"])?;
@@ -75,6 +78,7 @@ pub fn zclaw_stop(app: AppHandle) -> Result<LocalGatewayStatus, String> {
}
/// Restart ZCLAW Kernel
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_restart(app: AppHandle) -> Result<LocalGatewayStatus, String> {
ensure_local_gateway_ready_for_tauri(&app)?;
@@ -84,18 +88,21 @@ pub fn zclaw_restart(app: AppHandle) -> Result<LocalGatewayStatus, String> {
}
/// Get local auth token from ZCLAW config
// @connected
#[tauri::command]
pub fn zclaw_local_auth() -> Result<LocalGatewayAuth, String> {
read_local_gateway_auth()
}
/// Prepare ZCLAW for Tauri (update allowed origins)
// @connected
#[tauri::command]
pub fn zclaw_prepare_for_tauri(app: AppHandle) -> Result<LocalGatewayPrepareResult, String> {
ensure_local_gateway_ready_for_tauri(&app)
}
/// Approve device pairing request
// @connected
#[tauri::command]
pub fn zclaw_approve_device_pairing(
app: AppHandle,
@@ -107,6 +114,7 @@ pub fn zclaw_approve_device_pairing(
}
/// Run ZCLAW doctor to diagnose issues
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_doctor(app: AppHandle) -> Result<String, String> {
let result = run_zclaw(&app, &["doctor", "--json"])?;
@@ -114,6 +122,7 @@ pub fn zclaw_doctor(app: AppHandle) -> Result<String, String> {
}
/// List ZCLAW processes
// @connected
#[tauri::command]
pub fn zclaw_process_list(app: AppHandle) -> Result<ProcessListResponse, String> {
let result = run_zclaw(&app, &["process", "list", "--json"])?;
@@ -151,6 +160,7 @@ pub fn zclaw_process_list(app: AppHandle) -> Result<ProcessListResponse, String>
}
/// Get ZCLAW process logs
// @connected
#[tauri::command]
pub fn zclaw_process_logs(
app: AppHandle,
@@ -214,6 +224,7 @@ pub fn zclaw_process_logs(
}
/// Get ZCLAW version information
// @connected
#[tauri::command]
pub fn zclaw_version(app: AppHandle) -> Result<VersionResponse, String> {
let result = run_zclaw(&app, &["--version", "--json"])?;

View File

@@ -112,6 +112,7 @@ fn get_process_uptime(status: &LocalGatewayStatus) -> Option<u64> {
}
/// Perform comprehensive health check on ZCLAW Kernel
// @connected
#[tauri::command]
pub fn zclaw_health_check(
app: AppHandle,
@@ -266,6 +267,7 @@ pub fn zclaw_health_check(
}
/// Quick ping to check if ZCLAW is alive (lightweight check)
// @reserved: 暂无前端集成
#[tauri::command]
pub fn zclaw_ping(app: AppHandle) -> Result<bool, String> {
let port_check = check_port_accessibility("127.0.0.1", ZCLAW_DEFAULT_PORT, 1000);

View File

@@ -506,18 +506,21 @@ impl ContextCompactor {
// === Tauri Commands ===
/// Estimate tokens for text
// @connected
#[tauri::command]
pub fn compactor_estimate_tokens(text: String) -> usize {
estimate_tokens(&text)
}
/// Estimate tokens for messages
// @connected
#[tauri::command]
pub fn compactor_estimate_messages_tokens(messages: Vec<CompactableMessage>) -> usize {
estimate_messages_tokens(&messages)
}
/// Check if compaction is needed
// @connected
#[tauri::command]
pub fn compactor_check_threshold(
messages: Vec<CompactableMessage>,
@@ -528,6 +531,7 @@ pub fn compactor_check_threshold(
}
/// Execute compaction
// @connected
#[tauri::command]
pub async fn compactor_compact(
messages: Vec<CompactableMessage>,

View File

@@ -708,6 +708,7 @@ pub type HeartbeatEngineState = Arc<Mutex<HashMap<String, HeartbeatEngine>>>;
///
/// Restores persisted interaction time from VikingStorage so idle-greeting
/// check works correctly across app restarts.
// @connected
#[tauri::command]
pub async fn heartbeat_init(
agent_id: String,
@@ -756,6 +757,7 @@ async fn restore_last_interaction(agent_id: &str) {
}
/// Start heartbeat engine for an agent
// @connected
#[tauri::command]
pub async fn heartbeat_start(
agent_id: String,
@@ -770,6 +772,7 @@ pub async fn heartbeat_start(
}
/// Stop heartbeat engine for an agent
// @connected
#[tauri::command]
pub async fn heartbeat_stop(
agent_id: String,
@@ -784,6 +787,7 @@ pub async fn heartbeat_stop(
}
/// Execute a single heartbeat tick
// @connected
#[tauri::command]
pub async fn heartbeat_tick(
agent_id: String,
@@ -797,6 +801,7 @@ pub async fn heartbeat_tick(
}
/// Get heartbeat configuration
// @connected
#[tauri::command]
pub async fn heartbeat_get_config(
agent_id: String,
@@ -810,6 +815,7 @@ pub async fn heartbeat_get_config(
}
/// Update heartbeat configuration
// @connected
#[tauri::command]
pub async fn heartbeat_update_config(
agent_id: String,
@@ -825,6 +831,7 @@ pub async fn heartbeat_update_config(
}
/// Get heartbeat history
// @connected
#[tauri::command]
pub async fn heartbeat_get_history(
agent_id: String,
@@ -840,6 +847,7 @@ pub async fn heartbeat_get_history(
/// Update memory stats cache for heartbeat checks
/// This should be called by the frontend after fetching memory stats
// @connected
#[tauri::command]
pub async fn heartbeat_update_memory_stats(
agent_id: String,
@@ -852,6 +860,7 @@ pub async fn heartbeat_update_memory_stats(
}
/// Record a user correction for personality improvement detection
// @connected
#[tauri::command]
pub async fn heartbeat_record_correction(
agent_id: String,
@@ -863,6 +872,7 @@ pub async fn heartbeat_record_correction(
/// Record a user interaction for idle greeting detection
/// Call this from frontend whenever user sends a message
// @connected
#[tauri::command]
pub async fn heartbeat_record_interaction(
agent_id: String,

View File

@@ -545,6 +545,7 @@ use tokio::sync::Mutex;
pub type IdentityManagerState = Arc<Mutex<AgentIdentityManager>>;
/// Initialize identity manager
// @reserved: 暂无前端集成
#[tauri::command]
#[allow(dead_code)] // NOT registered in invoke_handler — identity state is initialized lazily via identity_get
pub async fn identity_init() -> Result<IdentityManagerState, String> {
@@ -552,6 +553,7 @@ pub async fn identity_init() -> Result<IdentityManagerState, String> {
}
/// Get identity files for an agent
// @connected
#[tauri::command]
pub async fn identity_get(
agent_id: String,
@@ -562,6 +564,7 @@ pub async fn identity_get(
}
/// Get a specific file
// @connected
#[tauri::command]
pub async fn identity_get_file(
agent_id: String,
@@ -578,6 +581,7 @@ pub async fn identity_get_file(
}
/// Build system prompt
// @connected
#[tauri::command]
pub async fn identity_build_prompt(
agent_id: String,
@@ -589,6 +593,7 @@ pub async fn identity_build_prompt(
}
/// Update user profile (auto)
// @connected
#[tauri::command]
pub async fn identity_update_user_profile(
agent_id: String,
@@ -601,6 +606,7 @@ pub async fn identity_update_user_profile(
}
/// Append to user profile
// @connected
#[tauri::command]
pub async fn identity_append_user_profile(
agent_id: String,
@@ -613,6 +619,7 @@ pub async fn identity_append_user_profile(
}
/// Propose a change
// @connected
#[tauri::command]
pub async fn identity_propose_change(
agent_id: String,
@@ -631,6 +638,7 @@ pub async fn identity_propose_change(
}
/// Approve a proposal
// @connected
#[tauri::command]
pub async fn identity_approve_proposal(
proposal_id: String,
@@ -641,6 +649,7 @@ pub async fn identity_approve_proposal(
}
/// Reject a proposal
// @connected
#[tauri::command]
pub async fn identity_reject_proposal(
proposal_id: String,
@@ -651,6 +660,7 @@ pub async fn identity_reject_proposal(
}
/// Get pending proposals
// @connected
#[tauri::command]
pub async fn identity_get_pending_proposals(
agent_id: Option<String>,
@@ -665,6 +675,7 @@ pub async fn identity_get_pending_proposals(
}
/// Update file directly
// @connected
#[tauri::command]
pub async fn identity_update_file(
agent_id: String,
@@ -677,6 +688,7 @@ pub async fn identity_update_file(
}
/// Get snapshots
// @connected
#[tauri::command]
pub async fn identity_get_snapshots(
agent_id: String,
@@ -692,6 +704,7 @@ pub async fn identity_get_snapshots(
}
/// Restore snapshot
// @connected
#[tauri::command]
pub async fn identity_restore_snapshot(
agent_id: String,
@@ -703,6 +716,7 @@ pub async fn identity_restore_snapshot(
}
/// List agents
// @connected
#[tauri::command]
pub async fn identity_list_agents(
state: tauri::State<'_, IdentityManagerState>,
@@ -712,6 +726,7 @@ pub async fn identity_list_agents(
}
/// Delete agent identity
// @connected
#[tauri::command]
pub async fn identity_delete_agent(
agent_id: String,

View File

@@ -719,6 +719,7 @@ pub type ReflectionEngineState = Arc<Mutex<ReflectionEngine>>;
/// Initialize reflection engine with config
/// Updates the shared state with new configuration
// @connected
#[tauri::command]
pub async fn reflection_init(
config: Option<ReflectionConfig>,
@@ -732,6 +733,7 @@ pub async fn reflection_init(
}
/// Record a conversation
// @connected
#[tauri::command]
pub async fn reflection_record_conversation(
state: tauri::State<'_, ReflectionEngineState>,
@@ -742,6 +744,7 @@ pub async fn reflection_record_conversation(
}
/// Check if reflection should run
// @connected
#[tauri::command]
pub async fn reflection_should_reflect(
state: tauri::State<'_, ReflectionEngineState>,
@@ -751,6 +754,7 @@ pub async fn reflection_should_reflect(
}
/// Execute reflection
// @connected
#[tauri::command]
pub async fn reflection_reflect(
agent_id: String,
@@ -766,6 +770,7 @@ pub async fn reflection_reflect(
/// Returns in-memory history first. If empty and an agent_id is provided,
/// falls back to the persisted history array from VikingStorage metadata,
/// then to the single latest result for backward compatibility.
// @connected
#[tauri::command]
pub async fn reflection_get_history(
limit: Option<usize>,
@@ -815,6 +820,7 @@ pub async fn reflection_get_history(
}
/// Get reflection state
// @connected
#[tauri::command]
pub async fn reflection_get_state(
state: tauri::State<'_, ReflectionEngineState>,

View File

@@ -325,6 +325,7 @@ impl LlmClient {
// === Tauri Commands ===
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn llm_complete(
provider: String,
@@ -452,6 +453,7 @@ impl EmbeddingClient {
}
}
// @connected
#[tauri::command]
pub async fn embedding_create(
provider: String,
@@ -471,6 +473,7 @@ pub async fn embedding_create(
client.embed(&text).await
}
// @connected
#[tauri::command]
pub async fn embedding_providers() -> Result<Vec<(String, String, String, usize)>, String> {
let configs = get_embedding_provider_configs();

View File

@@ -482,6 +482,7 @@ pub struct FindResult {
// === Tauri Commands ===
// @reserved: 暂无前端集成
#[tauri::command]
pub fn estimate_content_tokens(content: String) -> u32 {
estimate_tokens(&content)

View File

@@ -473,6 +473,7 @@ If no significant memories found, return empty array: []"#,
// === Tauri Commands ===
// @connected
#[tauri::command]
pub async fn extract_session_memories(
messages: Vec<ChatMessage>,
@@ -489,6 +490,7 @@ pub async fn extract_session_memories(
/// Extract memories from session and store to SqliteStorage
/// This combines extraction and storage in one command
// @connected
#[tauri::command]
pub async fn extract_and_store_memories(
messages: Vec<ChatMessage>,

View File

@@ -45,6 +45,7 @@ pub struct MemorySearchOptions {
///
/// Now a no-op for storage (VikingStorage initializes itself in viking_commands).
/// Only initializes PersistentMemoryStore for backward-compatible embedding config.
// @connected
#[tauri::command]
pub async fn memory_init(
app_handle: AppHandle,
@@ -60,6 +61,7 @@ pub async fn memory_init(
/// Store a new memory
///
/// Writes to VikingStorage (SqliteStorage) with FTS5 + TF-IDF indexing.
// @connected
#[tauri::command]
pub async fn memory_store(
entry: MemoryEntryInput,
@@ -125,6 +127,7 @@ fn to_persistent(entry: &zclaw_growth::MemoryEntry) -> PersistentMemory {
}
/// Get a memory by ID (URI)
// @connected
#[tauri::command]
pub async fn memory_get(
id: String,
@@ -141,6 +144,7 @@ pub async fn memory_get(
/// Search memories
///
/// Uses VikingStorage::find() for FTS5 + TF-IDF + optional embedding search.
// @connected
#[tauri::command]
pub async fn memory_search(
options: MemorySearchOptions,
@@ -182,6 +186,7 @@ pub async fn memory_search(
/// Delete a memory by ID (URI)
///
/// Deletes from VikingStorage only (PersistentMemoryStore is no longer primary).
// @connected
#[tauri::command]
pub async fn memory_delete(
id: String,
@@ -195,6 +200,7 @@ pub async fn memory_delete(
}
/// Delete all memories for an agent
// @connected
#[tauri::command]
pub async fn memory_delete_all(
agent_id: String,
@@ -222,6 +228,7 @@ pub async fn memory_delete_all(
}
/// Get memory statistics
// @connected
#[tauri::command]
pub async fn memory_stats(
_state: State<'_, MemoryStoreState>,
@@ -278,6 +285,7 @@ pub async fn memory_stats(
}
/// Export all memories for backup
// @connected
#[tauri::command]
pub async fn memory_export(
_state: State<'_, MemoryStoreState>,
@@ -299,6 +307,7 @@ pub async fn memory_export(
/// Import memories from backup
///
/// Converts PersistentMemory entries to VikingStorage MemoryEntry and stores them.
// @connected
#[tauri::command]
pub async fn memory_import(
memories: Vec<PersistentMemory>,
@@ -343,6 +352,7 @@ pub async fn memory_import(
/// Get the database path
///
/// Now returns the VikingStorage (SqliteStorage) path.
// @connected
#[tauri::command]
pub async fn memory_db_path(
_state: State<'_, MemoryStoreState>,
@@ -354,6 +364,7 @@ pub async fn memory_db_path(
/// Configure embedding for PersistentMemoryStore (chat memory search)
/// This is called alongside viking_configure_embedding to enable vector search in chat flow
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn memory_configure_embedding(
provider: String,
@@ -388,6 +399,7 @@ pub async fn memory_configure_embedding(
}
/// Check if embedding is configured for PersistentMemoryStore
// @reserved: 暂无前端集成
#[tauri::command]
pub fn memory_is_embedding_configured() -> bool {
is_embedding_configured()
@@ -396,6 +408,7 @@ pub fn memory_is_embedding_configured() -> bool {
/// Build layered memory context for chat prompt injection
///
/// Uses VikingStorage (SqliteStorage) with FTS5 + TF-IDF + optional Embedding.
// @connected
#[tauri::command]
pub async fn memory_build_context(
agent_id: String,

View File

@@ -47,6 +47,7 @@ pub struct WorkflowStepInput {
}
/// Create a new pipeline as a YAML file
// @connected
#[tauri::command]
pub async fn pipeline_create(
state: State<'_, Arc<PipelineState>>,
@@ -132,6 +133,7 @@ pub async fn pipeline_create(
}
/// Update an existing pipeline
// @connected
#[tauri::command]
pub async fn pipeline_update(
state: State<'_, Arc<PipelineState>>,
@@ -201,6 +203,7 @@ pub async fn pipeline_update(
}
/// Delete a pipeline
// @connected
#[tauri::command]
pub async fn pipeline_delete(
state: State<'_, Arc<PipelineState>>,

View File

@@ -20,6 +20,7 @@ use super::helpers::{get_pipelines_directory, scan_pipelines_with_paths, scan_pi
use crate::kernel_commands::KernelState;
/// Discover and list all available pipelines
// @connected
#[tauri::command]
pub async fn pipeline_list(
state: State<'_, Arc<PipelineState>>,
@@ -67,6 +68,7 @@ pub async fn pipeline_list(
}
/// Get pipeline details
// @connected
#[tauri::command]
pub async fn pipeline_get(
state: State<'_, Arc<PipelineState>>,
@@ -81,6 +83,7 @@ pub async fn pipeline_get(
}
/// Run a pipeline
// @connected
#[tauri::command]
pub async fn pipeline_run(
app: AppHandle,
@@ -192,6 +195,7 @@ pub async fn pipeline_run(
}
/// Get pipeline run progress
// @connected
#[tauri::command]
pub async fn pipeline_progress(
state: State<'_, Arc<PipelineState>>,
@@ -217,6 +221,7 @@ pub async fn pipeline_progress(
}
/// Cancel a pipeline run
// @connected
#[tauri::command]
pub async fn pipeline_cancel(
state: State<'_, Arc<PipelineState>>,
@@ -227,6 +232,7 @@ pub async fn pipeline_cancel(
}
/// Get pipeline run result
// @connected
#[tauri::command]
pub async fn pipeline_result(
state: State<'_, Arc<PipelineState>>,
@@ -253,6 +259,7 @@ pub async fn pipeline_result(
}
/// List all runs
// @connected
#[tauri::command]
pub async fn pipeline_runs(
state: State<'_, Arc<PipelineState>>,
@@ -278,6 +285,7 @@ pub async fn pipeline_runs(
}
/// Refresh pipeline discovery
// @connected
#[tauri::command]
pub async fn pipeline_refresh(
state: State<'_, Arc<PipelineState>>,

View File

@@ -62,6 +62,7 @@ pub struct PipelineCandidateInfo {
}
/// Route user input to matching pipeline
// @connected
#[tauri::command]
pub async fn route_intent(
state: State<'_, Arc<PipelineState>>,

View File

@@ -9,6 +9,7 @@ use super::types::PipelineInputInfo;
use super::PipelineState;
/// Analyze presentation data
// @connected
#[tauri::command]
pub async fn analyze_presentation(
data: Value,
@@ -43,6 +44,7 @@ pub struct PipelineTemplateInfo {
/// Templates are pipeline YAML files that users can browse and instantiate.
/// They live in `pipelines/_templates/` and are not directly runnable
/// (they serve as blueprints).
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn pipeline_templates(
state: State<'_, Arc<PipelineState>>,

View File

@@ -9,6 +9,7 @@ use keyring::Entry;
const SERVICE_NAME: &str = "zclaw";
/// Store a value securely in the OS keyring
// @connected
#[tauri::command]
pub fn secure_store_set(key: String, value: String) -> Result<(), String> {
let entry = Entry::new(SERVICE_NAME, &key).map_err(|e| {
@@ -31,6 +32,7 @@ pub fn secure_store_set(key: String, value: String) -> Result<(), String> {
}
/// Retrieve a value from the OS keyring
// @connected
#[tauri::command]
pub fn secure_store_get(key: String) -> Result<String, String> {
let entry = Entry::new(SERVICE_NAME, &key).map_err(|e| {
@@ -53,6 +55,7 @@ pub fn secure_store_get(key: String) -> Result<String, String> {
}
/// Delete a value from the OS keyring
// @connected
#[tauri::command]
pub fn secure_store_delete(key: String) -> Result<(), String> {
let entry = Entry::new(SERVICE_NAME, &key).map_err(|e| {
@@ -78,6 +81,7 @@ pub fn secure_store_delete(key: String) -> Result<(), String> {
}
/// Check if secure storage is available on this platform
// @connected
#[tauri::command]
pub fn secure_store_is_available() -> bool {
// Try to create a test entry to verify keyring is working

View File

@@ -131,6 +131,7 @@ fn get_data_dir_string() -> Option<String> {
// === Tauri Commands ===
/// Check if memory storage is available
// @connected
#[tauri::command]
pub async fn viking_status() -> Result<VikingStatus, String> {
match get_storage().await {
@@ -158,6 +159,7 @@ pub async fn viking_status() -> Result<VikingStatus, String> {
}
/// Add a memory entry
// @connected
#[tauri::command]
pub async fn viking_add(uri: String, content: String) -> Result<VikingAddResult, String> {
let storage = get_storage().await?;
@@ -180,6 +182,7 @@ pub async fn viking_add(uri: String, content: String) -> Result<VikingAddResult,
}
/// Add a memory with metadata
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn viking_add_with_metadata(
uri: String,
@@ -210,6 +213,7 @@ pub async fn viking_add_with_metadata(
}
/// Find memories by semantic search
// @connected
#[tauri::command]
pub async fn viking_find(
query: String,
@@ -255,6 +259,7 @@ pub async fn viking_find(
}
/// Grep memories by pattern (uses FTS5)
// @connected
#[tauri::command]
pub async fn viking_grep(
pattern: String,
@@ -308,6 +313,7 @@ pub async fn viking_grep(
}
/// List memories at a path
// @connected
#[tauri::command]
pub async fn viking_ls(path: String) -> Result<Vec<VikingResource>, String> {
let storage = get_storage().await?;
@@ -335,6 +341,7 @@ pub async fn viking_ls(path: String) -> Result<Vec<VikingResource>, String> {
}
/// Read memory content
// @connected
#[tauri::command]
pub async fn viking_read(uri: String, level: Option<String>) -> Result<String, String> {
let storage = get_storage().await?;
@@ -378,6 +385,7 @@ pub async fn viking_read(uri: String, level: Option<String>) -> Result<String, S
}
/// Remove a memory
// @connected
#[tauri::command]
pub async fn viking_remove(uri: String) -> Result<(), String> {
let storage = get_storage().await?;
@@ -391,6 +399,7 @@ pub async fn viking_remove(uri: String) -> Result<(), String> {
}
/// Get memory tree
// @connected
#[tauri::command]
pub async fn viking_tree(path: String, depth: Option<usize>) -> Result<serde_json::Value, String> {
let max_depth = depth.unwrap_or(5);
@@ -441,6 +450,7 @@ pub async fn viking_tree(path: String, depth: Option<usize>) -> Result<serde_jso
}
/// Inject memories into prompt (for agent loop integration)
// @connected
#[tauri::command]
pub async fn viking_inject_prompt(
agent_id: String,
@@ -533,6 +543,7 @@ fn parse_uri(uri: &str) -> Result<(String, MemoryType, String), String> {
/// Configure embedding for semantic memory search
/// Configures both SqliteStorage (VikingPanel) and PersistentMemoryStore (chat flow)
// @connected
#[tauri::command]
pub async fn viking_configure_embedding(
provider: String,
@@ -590,6 +601,7 @@ pub async fn viking_configure_embedding(
}
/// Configure summary driver for L0/L1 auto-generation
// @connected
#[tauri::command]
pub async fn viking_configure_summary_driver(
endpoint: String,
@@ -604,6 +616,7 @@ pub async fn viking_configure_summary_driver(
}
/// Store a memory and optionally generate L0/L1 summaries in the background
// @reserved: 暂无前端集成
#[tauri::command]
pub async fn viking_store_with_summaries(
uri: String,

View File

@@ -1,11 +1,13 @@
import { useState, useEffect, useRef, useCallback, useMemo, type MutableRefObject, type RefObject, type CSSProperties } from 'react';
import { motion, AnimatePresence } from 'framer-motion';
import { List, type ListImperativeAPI } from 'react-window';
import { useChatStore, Message } from '../store/chatStore';
import { useChatStore, type Message } from '../store/chatStore';
import { useConversationStore } from '../store/chat/conversationStore';
import { useArtifactStore } from '../store/chat/artifactStore';
import { useConnectionStore } from '../store/connectionStore';
import { useAgentStore } from '../store/agentStore';
import { useConfigStore } from '../store/configStore';
import { listen, type UnlistenFn } from '@tauri-apps/api/event';
import { Paperclip, SquarePen, ArrowUp, MessageSquare, Download, X, FileText, Image as ImageIcon } from 'lucide-react';
import { Button, EmptyState, MessageListSkeleton, LoadingDots } from './ui';
import { ResizableChatLayout } from './ai/ResizableChatLayout';
@@ -45,11 +47,14 @@ const VIRTUALIZATION_THRESHOLD = 100;
export function ChatArea() {
const {
messages, currentAgent, isStreaming, isLoading, currentModel,
sendMessage: sendToGateway, setCurrentModel, initStreamListener,
messages, isStreaming, isLoading,
sendMessage: sendToGateway, initStreamListener,
newConversation, chatMode, setChatMode, suggestions,
totalInputTokens, totalOutputTokens,
} = useChatStore();
const currentAgent = useConversationStore((s) => s.currentAgent);
const currentModel = useConversationStore((s) => s.currentModel);
const setCurrentModel = useConversationStore((s) => s.setCurrentModel);
const {
artifacts, selectedArtifactId, artifactPanelOpen,
selectArtifact, setArtifactPanelOpen,
@@ -152,6 +157,29 @@ export function ChatArea() {
return unsub;
}, []);
// Listen for hand-execution-complete Tauri events
useEffect(() => {
let unlisten: UnlistenFn | undefined;
listen<{ approvalId: string; handId: string; success: boolean; error?: string | null }>(
'hand-execution-complete',
(event) => {
const { handId, success, error } = event.payload;
useChatStore.getState().addMessage({
id: crypto.randomUUID(),
role: 'hand',
content: success
? `Hand ${handId} 执行完成`
: `Hand ${handId} 执行失败: ${error || '未知错误'}`,
timestamp: new Date(),
handName: handId,
handStatus: success ? 'completed' : 'failed',
handResult: event.payload,
});
},
).then((fn) => { unlisten = fn; });
return () => { unlisten?.(); };
}, []);
// Auto-scroll to bottom on new messages
useEffect(() => {
if (scrollRef.current && !useVirtualization) {

View File

@@ -3,6 +3,7 @@ import { useAgentStore } from '../store/agentStore';
import { useConnectionStore } from '../store/connectionStore';
import { useConfigStore } from '../store/configStore';
import { toChatAgent, useChatStore } from '../store/chatStore';
import { useConversationStore } from '../store/chat/conversationStore';
import { Bot, Plus, X, Globe, Cat, Search, BarChart2, Sparkles } from 'lucide-react';
import { AgentOnboardingWizard } from './AgentOnboardingWizard';
import type { Clone } from '../store/agentStore';
@@ -13,7 +14,8 @@ export function CloneManager() {
const deleteClone = useAgentStore((s) => s.deleteClone);
const connectionState = useConnectionStore((s) => s.connectionState);
const quickConfig = useConfigStore((s) => s.quickConfig);
const { agents, currentAgent, setCurrentAgent } = useChatStore();
const { agents, currentAgent } = useConversationStore();
const setCurrentAgent = useChatStore((s) => s.setCurrentAgent);
const [showWizard, setShowWizard] = useState(false);
const connected = connectionState === 'connected';

View File

@@ -1,4 +1,5 @@
import { useState, useRef, useEffect } from 'react';
import { useConversationStore } from '../store/chat/conversationStore';
import { useChatStore } from '../store/chatStore';
import { MessageSquare, Trash2, SquarePen, Download, Check, X } from 'lucide-react';
import { EmptyConversations } from './ui';
@@ -171,15 +172,14 @@ function ConversationItem({
}
export function ConversationList() {
const {
conversations,
currentConversationId,
switchConversation,
deleteConversation,
} = useChatStore();
const conversations = useConversationStore((s) => s.conversations);
const currentConversationId = useConversationStore((s) => s.currentConversationId);
const { switchConversation, deleteConversation } = useChatStore();
// suppress unused-var lint — these facade actions are needed
void switchConversation; void deleteConversation;
const handleRename = (id: string, newTitle: string) => {
useChatStore.setState((state) => ({
useConversationStore.setState((state) => ({
conversations: state.conversations.map((c) =>
c.id === id ? { ...c, title: newTitle, updatedAt: new Date() } : c
),

View File

@@ -27,7 +27,7 @@ import {
type IdentityChangeProposal as Proposal,
type IdentitySnapshot,
} from '../lib/intelligence-client';
import { useChatStore } from '../store/chatStore';
import { useConversationStore } from '../store/chat/conversationStore';
import { Button, Badge } from './ui';
// === Error Parsing Utility ===
@@ -306,7 +306,7 @@ function HistoryItem({
// === Main Component ===
export function IdentityChangeProposalPanel() {
const { currentAgent } = useChatStore();
const currentAgent = useConversationStore((s) => s.currentAgent);
const [proposals, setProposals] = useState<Proposal[]>([]);
const [snapshots, setSnapshots] = useState<IdentitySnapshot[]>([]);
const [loading, setLoading] = useState(true);

View File

@@ -33,7 +33,7 @@ import {
type GraphEdge,
type MemoryType,
} from '../store/memoryGraphStore';
import { useChatStore } from '../store/chatStore';
import { useConversationStore } from '../store/chat/conversationStore';
import { cardHover, defaultTransition } from '../lib/animations';
// Mark as intentionally unused for future use
@@ -157,7 +157,7 @@ export function MemoryGraph({ className = '' }: MemoryGraphProps) {
const [showFilters, setShowFilters] = useState(false);
const [searchQuery, setSearchQuery] = useState('');
const { currentAgent } = useChatStore();
const currentAgent = useConversationStore((s) => s.currentAgent);
const agentId = currentAgent?.id || 'zclaw-main';
const {

View File

@@ -12,7 +12,7 @@ import {
type MemoryType,
type MemoryStats,
} from '../lib/intelligence-client';
import { useChatStore } from '../store/chatStore';
import { useConversationStore } from '../store/chat/conversationStore';
const TYPE_LABELS: Record<MemoryType, { label: string; emoji: string; color: string }> = {
fact: { label: '事实', emoji: '📋', color: 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-300' },
@@ -23,7 +23,7 @@ const TYPE_LABELS: Record<MemoryType, { label: string; emoji: string; color: str
};
export function MemoryPanel() {
const { currentAgent } = useChatStore();
const currentAgent = useConversationStore((s) => s.currentAgent);
const agentId = currentAgent?.id || 'zclaw-main';
const [memories, setMemories] = useState<MemoryEntry[]>([]);

Some files were not shown because too many files have changed in this diff Show More