refactor: 统一项目名称从OpenFang到ZCLAW
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

重构所有代码和文档中的项目名称,将OpenFang统一更新为ZCLAW。包括:
- 配置文件中的项目名称
- 代码注释和文档引用
- 环境变量和路径
- 类型定义和接口名称
- 测试用例和模拟数据

同时优化部分代码结构,移除未使用的模块,并更新相关依赖项。
This commit is contained in:
iven
2026-03-27 07:36:03 +08:00
parent 4b08804aa9
commit 0d4fa96b82
226 changed files with 7288 additions and 5788 deletions

87
Cargo.lock generated
View File

@@ -975,6 +975,7 @@ dependencies = [
"fantoccini",
"futures",
"keyring",
"libsqlite3-sys",
"rand 0.8.5",
"regex",
"reqwest 0.12.28",
@@ -1149,9 +1150,9 @@ dependencies = [
[[package]]
name = "embed-resource"
version = "3.0.7"
version = "3.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47ec73ddcf6b7f23173d5c3c5a32b5507dc0a734de7730aa14abc5d5e296bb5f"
checksum = "63a1d0de4f2249aa0ff5884d7080814f446bb241a559af6c170a41e878ed2d45"
dependencies = [
"cc",
"memchr",
@@ -2300,9 +2301,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
[[package]]
name = "iri-string"
version = "0.7.10"
version = "0.7.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb"
dependencies = [
"memchr",
"serde",
@@ -2365,7 +2366,7 @@ dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"jni-sys 0.3.1",
"log",
"thiserror 1.0.69",
"walkdir",
@@ -2374,9 +2375,31 @@ dependencies = [
[[package]]
name = "jni-sys"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258"
dependencies = [
"jni-sys 0.4.1",
]
[[package]]
name = "jni-sys"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2"
dependencies = [
"jni-sys-macros",
]
[[package]]
name = "jni-sys-macros"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264"
dependencies = [
"quote",
"syn 2.0.117",
]
[[package]]
name = "js-sys"
@@ -2506,9 +2529,9 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981"
[[package]]
name = "libredox"
version = "0.1.14"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08"
dependencies = [
"bitflags 2.11.0",
"libc",
@@ -2717,7 +2740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4"
dependencies = [
"bitflags 2.11.0",
"jni-sys",
"jni-sys 0.3.1",
"log",
"ndk-sys",
"num_enum",
@@ -2737,7 +2760,7 @@ version = "0.6.0+11769913"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee6cda3051665f1fb8d9e08fc35c96d5a244fb1be711a03b71118828afc9a873"
dependencies = [
"jni-sys",
"jni-sys 0.3.1",
]
[[package]]
@@ -2780,9 +2803,9 @@ dependencies = [
[[package]]
name = "num-conv"
version = "0.2.0"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
[[package]]
name = "num-integer"
@@ -3485,7 +3508,7 @@ version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f"
dependencies = [
"toml_edit 0.25.5+spec-1.1.0",
"toml_edit 0.25.8+spec-1.1.0",
]
[[package]]
@@ -4244,9 +4267,9 @@ dependencies = [
[[package]]
name = "serde_spanned"
version = "1.0.4"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98"
dependencies = [
"serde_core",
]
@@ -4858,9 +4881,9 @@ dependencies = [
[[package]]
name = "tao"
version = "0.34.6"
version = "0.34.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e06d52c379e63da659a483a958110bbde891695a0ecb53e48cc7786d5eda7bb"
checksum = "9103edf55f2da3c82aea4c7fab7c4241032bfeea0e71fa557d98e00e7ce7cc20"
dependencies = [
"bitflags 2.11.0",
"block2",
@@ -5397,7 +5420,7 @@ checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863"
dependencies = [
"indexmap 2.13.0",
"serde_core",
"serde_spanned 1.0.4",
"serde_spanned 1.1.0",
"toml_datetime 0.7.5+spec-1.1.0",
"toml_parser",
"toml_writer",
@@ -5424,9 +5447,9 @@ dependencies = [
[[package]]
name = "toml_datetime"
version = "1.0.1+spec-1.1.0"
version = "1.1.0+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f"
dependencies = [
"serde_core",
]
@@ -5457,30 +5480,30 @@ dependencies = [
[[package]]
name = "toml_edit"
version = "0.25.5+spec-1.1.0"
version = "0.25.8+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ca1a40644a28bce036923f6a431df0b34236949d111cc07cb6dca830c9ef2e1"
checksum = "16bff38f1d86c47f9ff0647e6838d7bb362522bdf44006c7068c2b1e606f1f3c"
dependencies = [
"indexmap 2.13.0",
"toml_datetime 1.0.1+spec-1.1.0",
"toml_datetime 1.1.0+spec-1.1.0",
"toml_parser",
"winnow 1.0.0",
]
[[package]]
name = "toml_parser"
version = "1.0.10+spec-1.1.0"
version = "1.1.0+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011"
dependencies = [
"winnow 1.0.0",
]
[[package]]
name = "toml_writer"
version = "1.0.7+spec-1.1.0"
version = "1.1.0+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed"
[[package]]
name = "tower"
@@ -5697,9 +5720,9 @@ checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
[[package]]
name = "unicode-segmentation"
version = "1.12.0"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
checksum = "da36089a805484bcccfffe0739803392c8298778a2d2f09febf76fac5ad9025b"
[[package]]
name = "unicode-xid"
@@ -6957,6 +6980,7 @@ dependencies = [
"async-trait",
"chrono",
"futures",
"libsqlite3-sys",
"serde",
"serde_json",
"sqlx",
@@ -6981,6 +7005,7 @@ dependencies = [
"tokio",
"tracing",
"uuid",
"zclaw-runtime",
"zclaw-types",
]
@@ -7000,6 +7025,7 @@ dependencies = [
"thiserror 2.0.18",
"tokio",
"tokio-stream",
"toml 0.8.2",
"tracing",
"uuid",
"zclaw-hands",
@@ -7017,6 +7043,7 @@ version = "0.1.0"
dependencies = [
"chrono",
"futures",
"libsqlite3-sys",
"serde",
"serde_json",
"sqlx",

View File

@@ -56,6 +56,7 @@ uuid = { version = "1", features = ["v4", "v5", "serde"] }
# Database
sqlx = { version = "0.7", features = ["runtime-tokio", "sqlite"] }
libsqlite3-sys = { version = "0.27", features = ["bundled"] }
# HTTP client (for LLM drivers)
reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls"] }

35
LICENSE Normal file
View File

@@ -0,0 +1,35 @@
MIT License
Copyright (c) 2026 ZCLAW Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---
Attribution Notice
==================
This software is based on and incorporates code from the OpenFang project
(https://github.com/nicepkg/openfang), which is licensed under the MIT License.
Original OpenFang Copyright:
Copyright (c) nicepkg
The OpenFang project provided the foundational architecture, security framework,
and agent runtime concepts that were adapted and extended to create ZCLAW.

View File

@@ -4,7 +4,7 @@
.PHONY: help start start-dev start-no-browser desktop desktop-build setup test clean
help: ## Show this help message
@echo "ZCLAW - OpenFang Desktop Client"
@echo "ZCLAW - AI Agent Desktop Client"
@echo ""
@echo "Usage: make [target]"
@echo ""

View File

@@ -1,11 +1,11 @@
# ZCLAW 🦞 — OpenFang 定制版 (Tauri Desktop)
# ZCLAW 🦞 — AI Agent 桌面客户端 (Tauri Desktop)
基于 [OpenFang](https://openfang.sh/) —— 用 Rust 构建的 Agent 操作系统,打造中文优先的 Tauri 桌面 AI 助手。
基于 [ZCLAW](https://zclaw.sh/) —— 用 Rust 构建的 Agent 操作系统,打造中文优先的 Tauri 桌面 AI 助手。
## 核心定位
```
OpenFang Kernel (Rust 执行引擎)
ZCLAW Kernel (Rust 执行引擎)
↕ WebSocket / HTTP API
ZCLAW Tauri App (桌面 UI)
+ 中文模型 Provider (GLM/Qwen/Kimi/MiniMax/DeepSeek)
@@ -16,11 +16,11 @@ ZCLAW Tauri App (桌面 UI)
+ 自定义 Skills
```
## 为什么选择 OpenFang?
## 为什么选择 ZCLAW?
相比 OpenClawOpenFang 提供了更强的性能和更丰富的功能:
相比 OpenClaw,ZCLAW 提供了更强的性能和更丰富的功能:
| 特性 | OpenFang | OpenClaw |
| 特性 | ZCLAW | OpenClaw |
|------|----------|----------|
| **开发语言** | Rust | TypeScript |
| **冷启动** | < 200ms | ~6s |
@@ -30,11 +30,11 @@ ZCLAW Tauri App (桌面 UI)
| **渠道适配器** | 40 | 13 |
| **LLM 提供商** | 27 | ~10 |
**详细对比**[OpenFang 架构概览](https://wurang.net/posts/openfang-intro/)
**详细对比**[ZCLAW 架构概览](https://wurang.net/posts/zclaw-intro/)
## 功能特色
- **基于 OpenFang**: 生产级 Agent 操作系统16 层安全防护WASM 沙箱
- **基于 ZCLAW**: 生产级 Agent 操作系统16 层安全防护WASM 沙箱
- **7 个自主 Hands**: Browser/Researcher/Collector/Predictor/Lead/Clip/Twitter - 预构建的"数字员工"
- **中文模型**: 智谱 GLM-4通义千问KimiMiniMaxDeepSeek (OpenAI 兼容 API)
- **40+ 渠道**: 飞书钉钉TelegramDiscordSlack微信等
@@ -47,10 +47,10 @@ ZCLAW Tauri App (桌面 UI)
| 层级 | 技术 |
|------|------|
| **执行引擎** | OpenFang Kernel (Rust, http://127.0.0.1:50051) |
| **执行引擎** | ZCLAW Kernel (Rust, http://127.0.0.1:50051) |
| **桌面壳** | Tauri 2.0 (Rust + React 19) |
| **前端** | React 19 + TailwindCSS + Zustand + Lucide Icons |
| **通信协议** | OpenFang API (REST/WS/SSE) + OpenAI 兼容 API |
| **通信协议** | ZCLAW API (REST/WS/SSE) + OpenAI 兼容 API |
| **安全** | WASM 沙箱 + Merkle 审计追踪 + Ed25519 签名 |
## 项目结构
@@ -61,7 +61,7 @@ ZClaw/
│ ├── src/
│ │ ├── components/ # UI 组件
│ │ ├── store/ # Zustand 状态管理
│ │ └── lib/gateway-client.ts # OpenFang API 客户端
│ │ └── lib/gateway-client.ts # ZCLAW API 客户端
│ └── src-tauri/ # Rust 后端
├── skills/ # 自定义技能 (SKILL.md)
@@ -71,14 +71,14 @@ ZClaw/
├── hands/ # 自定义 Hands (HAND.toml)
│ └── custom-automation/ # 自定义自动化任务
├── config/ # OpenFang 默认配置
├── config/ # ZCLAW 默认配置
│ ├── config.toml # 主配置文件
│ ├── SOUL.md # Agent 人格
│ └── AGENTS.md # Agent 指令
├── docs/
│ ├── setup/ # 设置指南
│ │ ├── OPENFANG-SETUP.md # OpenFang 配置指南
│ │ ├── ZCLAW-SETUP.md # ZCLAW 配置指南
│ │ └── chinese-models.md # 中文模型配置
│ ├── architecture-v2.md # 架构设计
│ └── deviation-analysis.md # 偏离分析报告
@@ -88,20 +88,20 @@ ZClaw/
## 快速开始
### 1. 安装 OpenFang
### 1. 安装 ZCLAW
```bash
# Windows (PowerShell)
iwr -useb https://openfang.sh/install.ps1 | iex
iwr -useb https://zclaw.sh/install.ps1 | iex
# macOS / Linux
curl -fsSL https://openfang.sh/install.sh | bash
curl -fsSL https://zclaw.sh/install.sh | bash
```
### 2. 初始化配置
```bash
openfang init
zclaw init
```
### 3. 配置 API Key
@@ -121,8 +121,8 @@ export DEEPSEEK_API_KEY="your-deepseek-key" # DeepSeek
### 4. 启动服务
```bash
# 启动 OpenFang Kernel
openfang start
# 启动 ZCLAW Kernel
zclaw start
# 在另一个终端启动 ZCLAW 桌面应用
git clone https://github.com/xxx/ZClaw.git
@@ -134,16 +134,16 @@ cd desktop && pnpm tauri dev
### 5. 验证安装
```bash
# 检查 OpenFang 状态
openfang status
# 检查 ZCLAW 状态
zclaw status
# 运行健康检查
openfang doctor
zclaw doctor
```
## OpenFang Hands (自主能力)
## ZCLAW Hands (自主能力)
OpenFang 内置 7 个预构建的自主能力包每个 Hand 都是一个具备完整工作流的"数字员工"
ZCLAW 内置 7 个预构建的自主能力包每个 Hand 都是一个具备完整工作流的"数字员工"
| Hand | 功能 | 状态 |
|------|------|------|
@@ -170,36 +170,36 @@ OpenFang 内置 7 个预构建的自主能力包,每个 Hand 都是一个具
## 文档
### 设置指南
- [OpenFang Kernel 配置指南](docs/setup/OPENFANG-SETUP.md) - 安装配置常见问题
- [ZCLAW Kernel 配置指南](docs/setup/ZCLAW-SETUP.md) - 安装配置常见问题
- [中文模型配置指南](docs/setup/chinese-models.md) - API Key 获取模型选择多模型配置
### 架构设计
- [架构设计](docs/architecture-v2.md) 完整的 v2 架构方案
- [偏离分析](docs/deviation-analysis.md) QClaw/AutoClaw/OpenClaw 对标分析
- [偏离分析](docs/deviation-analysis.md) QClaw/AutoClaw/OpenClaw 对标分析
### 外部资源
- [OpenFang 官方文档](https://openfang.sh/)
- [OpenFang GitHub](https://github.com/RightNow-AI/openfang)
- [OpenFang 架构概览](https://wurang.net/posts/openfang-intro/)
- [ZCLAW 官方文档](https://zclaw.sh/)
- [ZCLAW GitHub](https://github.com/RightNow-AI/zclaw)
- [ZCLAW 架构概览](https://wurang.net/posts/zclaw-intro/)
## 对标参考
| 产品 | 基于 | IM 渠道 | 桌面框架 | 安全层数 |
|------|------|---------|----------|----------|
| **QClaw** (腾讯) | OpenClaw | 微信 + QQ | Electron | 3 |
| **AutoClaw** (智谱) | OpenClaw | 飞书 | 自研 | 3 |
| **ZCLAW** (本项目) | OpenFang | 飞书 + 钉钉 + 40+ | Tauri 2.0 | 16 |
| **QClaw** (腾讯) | OpenClaw | 微信 + QQ | Electron | 3 |
| **AutoClaw** (智谱) | OpenClaw | 飞书 | 自研 | 3 |
| **ZCLAW** (本项目) | ZCLAW | 飞书 + 钉钉 + 40+ | Tauri 2.0 | 16 |
## 从 OpenClaw 迁移
## 从 OpenClaw 迁移
如果你之前使用 OpenClaw可以一键迁移
如果你之前使用 OpenClaw,可以一键迁移:
```bash
# 迁移所有内容:代理、记忆、技能、配置
openfang migrate --from openclaw
zclaw migrate --from openclaw
# 先试运行查看变更
openfang migrate --from openclaw --dry-run
zclaw migrate --from openclaw --dry-run
```
## License

View File

@@ -1,9 +1,9 @@
# ZClaw Chinese LLM Providers Configuration
# OpenFang TOML 格式的中文模型提供商配置
# ZCLAW Chinese LLM Providers Configuration
# ZCLAW TOML 格式的中文模型提供商配置
#
# 使用方法:
# 1. 复制此文件到 ~/.openfang/config.d/ 目录
# 2. 或者将内容追加到 ~/.openfang/config.toml
# 1. 复制此文件到 ~/.zclaw/config.d/ 目录
# 2. 或者将内容追加到 ~/.zclaw/config.toml
# 3. 设置环境变量: ZHIPU_API_KEY, QWEN_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY
# ============================================================

View File

@@ -1,10 +1,10 @@
# ============================================================
# ZClaw OpenFang Main Configuration
# OpenFang TOML format configuration file
# ZCLAW Main Configuration
# ZCLAW TOML format configuration file
# ============================================================
#
# Usage:
# 1. Copy this file to ~/.openfang/config.toml
# 1. Copy this file to ~/.zclaw/config.toml
# 2. Set environment variables for API keys
# 3. Import chinese-providers.toml for Chinese LLM support
#
@@ -38,7 +38,7 @@ api_version = "v1"
[agent.defaults]
# Default workspace for agent operations
workspace = "~/.openfang/zclaw-workspace"
workspace = "~/.zclaw/zclaw-workspace"
# Default model for new sessions
default_model = "zhipu/glm-4-plus"
@@ -57,7 +57,7 @@ max_sessions = 10
[agent.defaults.sandbox]
# Sandbox root directory
workspace_root = "~/.openfang/zclaw-workspace"
workspace_root = "~/.zclaw/zclaw-workspace"
# Allowed shell commands (empty = all allowed)
# allowed_commands = ["git", "npm", "pnpm", "cargo"]
@@ -104,7 +104,7 @@ execution_timeout = "30m"
# Audit settings
audit_enabled = true
audit_log_path = "~/.openfang/logs/hands-audit.log"
audit_log_path = "~/.zclaw/logs/hands-audit.log"
# ============================================================
# LLM Provider Configuration
@@ -166,7 +166,7 @@ burst_size = 20
# Audit logging
[security.audit]
enabled = true
log_path = "~/.openfang/logs/audit.log"
log_path = "~/.zclaw/logs/audit.log"
log_format = "json"
# ============================================================
@@ -183,7 +183,7 @@ format = "pretty"
# Log file settings
[logging.file]
enabled = true
path = "~/.openfang/logs/openfang.log"
path = "~/.zclaw/logs/zclaw.log"
max_size = "10MB"
max_files = 5
compress = true
@@ -228,7 +228,7 @@ max_results = 10
# File system tool
[tools.fs]
allowed_paths = ["~/.openfang/zclaw-workspace"]
allowed_paths = ["~/.zclaw/zclaw-workspace"]
max_file_size = "10MB"
# ============================================================
@@ -237,7 +237,7 @@ max_file_size = "10MB"
[workflow]
# Workflow storage
storage_path = "~/.openfang/workflows"
storage_path = "~/.zclaw/workflows"
# Execution settings
max_steps = 100

View File

@@ -32,6 +32,7 @@ uuid = { workspace = true }
# Database
sqlx = { workspace = true }
libsqlite3-sys = { workspace = true }
# Internal crates
zclaw-types = { workspace = true }

View File

@@ -388,6 +388,8 @@ mod tests {
access_count: 0,
created_at: Utc::now(),
last_accessed: Utc::now(),
overview: None,
abstract_summary: None,
}
}

View File

@@ -63,6 +63,7 @@ pub mod tracker;
pub mod viking_adapter;
pub mod storage;
pub mod retrieval;
pub mod summarizer;
// Re-export main types for convenience
pub use types::{
@@ -82,7 +83,8 @@ pub use injector::{InjectionFormat, PromptInjector};
pub use tracker::{AgentMetadata, GrowthTracker, LearningEvent};
pub use viking_adapter::{FindOptions, VikingAdapter, VikingLevel, VikingStorage};
pub use storage::SqliteStorage;
pub use retrieval::{MemoryCache, QueryAnalyzer, SemanticScorer};
pub use retrieval::{EmbeddingClient, MemoryCache, QueryAnalyzer, SemanticScorer};
pub use summarizer::SummaryLlmDriver;
/// Growth system configuration
#[derive(Debug, Clone)]

View File

@@ -18,7 +18,8 @@ struct CacheEntry {
access_count: u32,
}
/// Cache key for efficient lookups
/// Cache key for efficient lookups (reserved for future cache optimization)
#[allow(dead_code)]
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
struct CacheKey {
agent_id: String,

View File

@@ -9,6 +9,6 @@ pub mod semantic;
pub mod query;
pub mod cache;
pub use semantic::SemanticScorer;
pub use semantic::{EmbeddingClient, SemanticScorer};
pub use query::QueryAnalyzer;
pub use cache::MemoryCache;

View File

@@ -253,8 +253,13 @@ impl SemanticScorer {
}
}
/// Get pre-computed embedding for an entry
pub fn get_entry_embedding(&self, uri: &str) -> Option<Vec<f32>> {
self.entry_embeddings.get(uri).cloned()
}
/// Compute cosine similarity between two embedding vectors
fn cosine_similarity_embedding(v1: &[f32], v2: &[f32]) -> f32 {
pub fn cosine_similarity_embedding(v1: &[f32], v2: &[f32]) -> f32 {
if v1.is_empty() || v2.is_empty() || v1.len() != v2.len() {
return 0.0;
}

View File

@@ -3,7 +3,7 @@
//! Persistent storage backend using SQLite for production use.
//! Provides efficient querying and full-text search capabilities.
use crate::retrieval::semantic::SemanticScorer;
use crate::retrieval::semantic::{EmbeddingClient, SemanticScorer};
use crate::types::MemoryEntry;
use crate::viking_adapter::{FindOptions, VikingStorage};
use async_trait::async_trait;
@@ -36,6 +36,8 @@ struct MemoryRow {
access_count: i32,
created_at: String,
last_accessed: String,
overview: Option<String>,
abstract_summary: Option<String>,
}
impl SqliteStorage {
@@ -83,6 +85,26 @@ impl SqliteStorage {
Self::new(":memory:").await.expect("Failed to create in-memory database")
}
/// Configure embedding client for semantic search
/// Replaces the current scorer with a new one that has embedding support
pub async fn configure_embedding(
&self,
client: Arc<dyn EmbeddingClient>,
) -> Result<()> {
let new_scorer = SemanticScorer::with_embedding(client);
let mut scorer = self.scorer.write().await;
*scorer = new_scorer;
tracing::info!("[SqliteStorage] Embedding client configured, re-indexing with embeddings...");
self.warmup_scorer_with_embedding().await
}
/// Check if embedding is available
pub async fn is_embedding_available(&self) -> bool {
let scorer = self.scorer.read().await;
scorer.is_embedding_available()
}
/// Initialize database schema with FTS5
async fn initialize_schema(&self) -> Result<()> {
// Create main memories table
@@ -131,6 +153,16 @@ impl SqliteStorage {
.await
.map_err(|e| ZclawError::StorageError(format!("Failed to create importance index: {}", e)))?;
// Migration: add overview column (L1 summary)
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT")
.execute(&self.pool)
.await;
// Migration: add abstract_summary column (L0 keywords)
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN abstract_summary TEXT")
.execute(&self.pool)
.await;
// Create metadata table
sqlx::query(
r#"
@@ -151,7 +183,7 @@ impl SqliteStorage {
/// Warmup semantic scorer with existing entries
async fn warmup_scorer(&self) -> Result<()> {
let rows = sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed FROM memories"
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories"
)
.fetch_all(&self.pool)
.await
@@ -173,6 +205,32 @@ impl SqliteStorage {
Ok(())
}
/// Warmup semantic scorer with embedding support for existing entries
async fn warmup_scorer_with_embedding(&self) -> Result<()> {
let rows = sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories"
)
.fetch_all(&self.pool)
.await
.map_err(|e| ZclawError::StorageError(format!("Failed to load memories for warmup: {}", e)))?;
let mut scorer = self.scorer.write().await;
for row in rows {
let entry = self.row_to_entry(&row);
scorer.index_entry_with_embedding(&entry).await;
}
let stats = scorer.stats();
tracing::info!(
"[SqliteStorage] Warmed up scorer with {} entries ({} with embeddings), {} terms",
stats.indexed_entries,
stats.embedding_entries,
stats.unique_terms
);
Ok(())
}
/// Convert database row to MemoryEntry
fn row_to_entry(&self, row: &MemoryRow) -> MemoryEntry {
let memory_type = crate::types::MemoryType::parse(&row.memory_type);
@@ -193,6 +251,8 @@ impl SqliteStorage {
access_count: row.access_count as u32,
created_at,
last_accessed,
overview: row.overview.clone(),
abstract_summary: row.abstract_summary.clone(),
}
}
@@ -223,6 +283,8 @@ impl sqlx::FromRow<'_, SqliteRow> for MemoryRow {
access_count: row.try_get("access_count")?,
created_at: row.try_get("created_at")?,
last_accessed: row.try_get("last_accessed")?,
overview: row.try_get("overview").ok(),
abstract_summary: row.try_get("abstract_summary").ok(),
})
}
}
@@ -241,8 +303,8 @@ impl VikingStorage for SqliteStorage {
sqlx::query(
r#"
INSERT OR REPLACE INTO memories
(uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
(uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
"#,
)
.bind(&entry.uri)
@@ -253,6 +315,8 @@ impl VikingStorage for SqliteStorage {
.bind(entry.access_count as i32)
.bind(&created_at)
.bind(&last_accessed)
.bind(&entry.overview)
.bind(&entry.abstract_summary)
.execute(&self.pool)
.await
.map_err(|e| ZclawError::StorageError(format!("Failed to store memory: {}", e)))?;
@@ -276,9 +340,13 @@ impl VikingStorage for SqliteStorage {
.execute(&self.pool)
.await;
// Update semantic scorer
// Update semantic scorer (use embedding when available)
let mut scorer = self.scorer.write().await;
scorer.index_entry(entry);
if scorer.is_embedding_available() {
scorer.index_entry_with_embedding(entry).await;
} else {
scorer.index_entry(entry);
}
tracing::debug!("[SqliteStorage] Stored memory: {}", entry.uri);
Ok(())
@@ -286,7 +354,7 @@ impl VikingStorage for SqliteStorage {
async fn get(&self, uri: &str) -> Result<Option<MemoryEntry>> {
let row = sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed FROM memories WHERE uri = ?"
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories WHERE uri = ?"
)
.bind(uri)
.fetch_optional(&self.pool)
@@ -309,7 +377,7 @@ impl VikingStorage for SqliteStorage {
// Get all matching entries
let rows = if let Some(ref scope) = options.scope {
sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed FROM memories WHERE uri LIKE ?"
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories WHERE uri LIKE ?"
)
.bind(format!("{}%", scope))
.fetch_all(&self.pool)
@@ -317,7 +385,7 @@ impl VikingStorage for SqliteStorage {
.map_err(|e| ZclawError::StorageError(format!("Failed to find memories: {}", e)))?
} else {
sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed FROM memories"
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories"
)
.fetch_all(&self.pool)
.await
@@ -325,14 +393,49 @@ impl VikingStorage for SqliteStorage {
};
// Convert to entries and compute semantic scores
let scorer = self.scorer.read().await;
let use_embedding = {
let scorer = self.scorer.read().await;
scorer.is_embedding_available()
};
let mut scored_entries: Vec<(f32, MemoryEntry)> = Vec::new();
for row in rows {
let entry = self.row_to_entry(&row);
// Compute semantic score using TF-IDF
let semantic_score = scorer.score_similarity(query, &entry);
// Compute semantic score: use embedding when available, fallback to TF-IDF
let semantic_score = if use_embedding {
let scorer = self.scorer.read().await;
let tfidf_score = scorer.score_similarity(query, &entry);
let entry_embedding = scorer.get_entry_embedding(&entry.uri);
drop(scorer);
match entry_embedding {
Some(entry_emb) => {
// Try embedding the query for hybrid scoring
let embedding_client = {
let scorer2 = self.scorer.read().await;
scorer2.get_embedding_client()
};
match embedding_client.embed(query).await {
Ok(query_emb) => {
let emb_score = SemanticScorer::cosine_similarity_embedding(&query_emb, &entry_emb);
// Hybrid: 70% embedding + 30% TF-IDF
emb_score * 0.7 + tfidf_score * 0.3
}
Err(_) => {
tracing::debug!("[SqliteStorage] Query embedding failed, using TF-IDF only");
tfidf_score
}
}
}
None => tfidf_score,
}
} else {
let scorer = self.scorer.read().await;
scorer.score_similarity(query, &entry)
};
// Apply similarity threshold
if let Some(min_similarity) = options.min_similarity {
@@ -362,7 +465,7 @@ impl VikingStorage for SqliteStorage {
async fn find_by_prefix(&self, prefix: &str) -> Result<Vec<MemoryEntry>> {
let rows = sqlx::query_as::<_, MemoryRow>(
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed FROM memories WHERE uri LIKE ?"
"SELECT uri, memory_type, content, keywords, importance, access_count, created_at, last_accessed, overview, abstract_summary FROM memories WHERE uri LIKE ?"
)
.bind(format!("{}%", prefix))
.fetch_all(&self.pool)

View File

@@ -0,0 +1,192 @@
//! Memory Summarizer - L0/L1 Summary Generation
//!
//! Provides trait and functions for generating layered summaries of memory entries:
//! - L1 Overview: 1-2 sentence summary (~200 tokens)
//! - L0 Abstract: 3-5 keywords (~100 tokens)
//!
//! The trait-based design allows zclaw-growth to remain decoupled from any
//! specific LLM implementation. The Tauri layer provides a concrete implementation.
use crate::types::MemoryEntry;
/// LLM driver for summary generation.
///
/// Implementations call an LLM API to produce concise summaries. Errors are
/// stringly-typed (`Result<_, String>`) so implementors outside this crate do
/// not need a shared error enum; `generate_summaries` treats any error as
/// "no summary available" and logs it at debug level.
#[async_trait::async_trait]
pub trait SummaryLlmDriver: Send + Sync {
    /// Generate a short summary (1-2 sentences, ~200 tokens) for a memory entry.
    async fn generate_overview(&self, entry: &MemoryEntry) -> Result<String, String>;

    /// Generate keyword extraction (3-5 keywords, ~100 tokens) for a memory entry.
    async fn generate_abstract(&self, entry: &MemoryEntry) -> Result<String, String>;
}
/// Build the L1 overview prompt sent to the LLM for a memory entry.
///
/// The prompt instructs the model to answer in the entry's own language and
/// to emit the summary with no preamble, so the response can be stored as-is
/// (after `clean_summary`).
pub fn overview_prompt(entry: &MemoryEntry) -> String {
    // The last URI segment doubles as the entry's category label.
    let category = entry.uri.rsplit('/').next().unwrap_or("unknown");
    format!(
        r#"Summarize the following memory entry in 1-2 concise sentences (in the same language as the content).
Focus on the key information. Do not add any preamble or explanation.
Memory type: {}
Category: {}
Content: {}"#,
        entry.memory_type, category, entry.content,
    )
}
/// Build the L0 abstract prompt sent to the LLM for a memory entry.
///
/// Asks for 3-5 comma-separated keywords in the entry's own language, with no
/// preamble or numbering, so the raw response can be stored after
/// `clean_summary`.
pub fn abstract_prompt(entry: &MemoryEntry) -> String {
    format!(
        r#"Extract 3-5 keywords or key phrases from the following memory entry.
Output ONLY the keywords, comma-separated, in the same language as the content.
Do not add any preamble, explanation, or numbering.
Memory type: {}
Content: {}"#,
        entry.memory_type, entry.content
    )
}
/// Generate both L1 overview and L0 abstract for a memory entry.
///
/// Summary generation is best-effort: a driver failure or an empty/cleaned-out
/// response yields `None` for that slot (logged at debug level) instead of
/// propagating an error, so memory storage never blocks on an unavailable LLM.
///
/// Returns an `(overview, abstract_summary)` tuple.
pub async fn generate_summaries(
    driver: &dyn SummaryLlmDriver,
    entry: &MemoryEntry,
) -> (Option<String>, Option<String>) {
    // L1 overview (1-2 sentences)
    let overview = accept_summary(driver.generate_overview(entry).await, &entry.uri, "overview");
    // L0 abstract (3-5 keywords)
    let abstract_summary =
        accept_summary(driver.generate_abstract(entry).await, &entry.uri, "abstract");
    (overview, abstract_summary)
}

/// Normalize a single LLM summary result: clean successful text (an empty
/// cleaned string becomes `None`), log and discard failures.
fn accept_summary(result: Result<String, String>, uri: &str, label: &str) -> Option<String> {
    match result {
        Ok(text) => {
            let cleaned = clean_summary(&text);
            if cleaned.is_empty() {
                None
            } else {
                Some(cleaned)
            }
        }
        Err(e) => {
            // Same log line as before: "Failed to generate overview/abstract for <uri>: <err>"
            tracing::debug!("[Summarizer] Failed to generate {} for {}: {}", label, uri, e);
            None
        }
    }
}
/// Clean an LLM summary response: strip surrounding whitespace and quotes,
/// then drop boilerplate prefixes that models commonly prepend despite the
/// prompt's "no preamble" instruction.
///
/// Covers the Chinese labels (fullwidth and ASCII colon) as well as the
/// English labels matching the prompts this module sends ("Overview:",
/// "Summary:", "Keywords:", "Abstract:").
fn clean_summary(text: &str) -> String {
    // Boilerplate label prefixes to strip; each is removed where present.
    const PREFIXES: [&str; 12] = [
        "摘要:",
        "摘要:",
        "关键词:",
        "关键词:",
        "Overview:",
        "overview:",
        "Summary:",
        "summary:",
        "Keywords:",
        "keywords:",
        "Abstract:",
        "abstract:",
    ];
    let mut cleaned = text
        .trim()
        .trim_start_matches('"')
        .trim_end_matches('"')
        .trim_start_matches('\'')
        .trim_end_matches('\'');
    for prefix in PREFIXES {
        cleaned = cleaned.trim_start_matches(prefix);
    }
    cleaned.trim().to_string()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::MemoryType;

    /// Driver stub that always succeeds with predictable output, so tests can
    /// assert on the exact strings flowing through `generate_summaries`.
    struct MockSummaryDriver;

    #[async_trait::async_trait]
    impl SummaryLlmDriver for MockSummaryDriver {
        async fn generate_overview(&self, entry: &MemoryEntry) -> Result<String, String> {
            // Echo a truncated slice of the content so assertions can match it.
            // NOTE(review): byte-index truncation would panic on a multi-byte
            // UTF-8 boundary — fine for these ASCII fixtures only.
            Ok(format!("Summary of: {}", &entry.content[..entry.content.len().min(30)]))
        }
        async fn generate_abstract(&self, _entry: &MemoryEntry) -> Result<String, String> {
            Ok("keyword1, keyword2, keyword3".to_string())
        }
    }

    /// Convenience constructor: a Knowledge entry with the given content.
    fn make_entry(content: &str) -> MemoryEntry {
        MemoryEntry::new("test-agent", MemoryType::Knowledge, "test", content.to_string())
    }

    // Happy path: both L1 overview and L0 abstract are produced and cleaned.
    #[tokio::test]
    async fn test_generate_summaries() {
        let driver = MockSummaryDriver;
        let entry = make_entry("This is a test memory entry about Rust programming.");
        let (overview, abstract_summary) = generate_summaries(&driver, &entry).await;
        assert!(overview.is_some());
        assert!(abstract_summary.is_some());
        assert!(overview.unwrap().contains("Summary of"));
        assert!(abstract_summary.unwrap().contains("keyword1"));
    }

    // Driver errors must degrade to (None, None), never propagate or panic.
    #[tokio::test]
    async fn test_generate_summaries_handles_error() {
        struct FailingDriver;
        #[async_trait::async_trait]
        impl SummaryLlmDriver for FailingDriver {
            async fn generate_overview(&self, _entry: &MemoryEntry) -> Result<String, String> {
                Err("LLM unavailable".to_string())
            }
            async fn generate_abstract(&self, _entry: &MemoryEntry) -> Result<String, String> {
                Err("LLM unavailable".to_string())
            }
        }
        let driver = FailingDriver;
        let entry = make_entry("test content");
        let (overview, abstract_summary) = generate_summaries(&driver, &entry).await;
        assert!(overview.is_none());
        assert!(abstract_summary.is_none());
    }

    // Quote stripping, Chinese label prefixes, whitespace, English prefixes.
    #[test]
    fn test_clean_summary() {
        assert_eq!(clean_summary("\"hello world\""), "hello world");
        assert_eq!(clean_summary("摘要:你好"), "你好");
        assert_eq!(clean_summary("  keyword1, keyword2  "), "keyword1, keyword2");
        assert_eq!(clean_summary("Overview: something"), "something");
    }

    // Prompt must carry the instruction, the content, and the memory type.
    #[test]
    fn test_overview_prompt() {
        let entry = make_entry("User prefers dark mode and compact UI");
        let prompt = overview_prompt(&entry);
        assert!(prompt.contains("1-2 concise sentences"));
        assert!(prompt.contains("User prefers dark mode"));
        assert!(prompt.contains("knowledge"));
    }

    // Abstract prompt must ask for keywords and include the content.
    #[test]
    fn test_abstract_prompt() {
        let entry = make_entry("Rust is a systems programming language");
        let prompt = abstract_prompt(&entry);
        assert!(prompt.contains("3-5 keywords"));
        assert!(prompt.contains("Rust is a systems"));
    }
}

View File

@@ -72,6 +72,10 @@ pub struct MemoryEntry {
pub created_at: DateTime<Utc>,
/// Last access timestamp
pub last_accessed: DateTime<Utc>,
/// L1 overview: 1-2 sentence summary (~200 tokens)
pub overview: Option<String>,
/// L0 abstract: 3-5 keywords (~100 tokens)
pub abstract_summary: Option<String>,
}
impl MemoryEntry {
@@ -92,6 +96,8 @@ impl MemoryEntry {
access_count: 0,
created_at: Utc::now(),
last_accessed: Utc::now(),
overview: None,
abstract_summary: None,
}
}
@@ -107,6 +113,18 @@ impl MemoryEntry {
self
}
/// Set L1 overview summary
pub fn with_overview(mut self, overview: impl Into<String>) -> Self {
self.overview = Some(overview.into());
self
}
/// Set L0 abstract summary
pub fn with_abstract_summary(mut self, abstract_summary: impl Into<String>) -> Self {
self.abstract_summary = Some(abstract_summary.into());
self
}
/// Mark as accessed
pub fn touch(&mut self) {
self.access_count += 1;

View File

@@ -9,6 +9,7 @@ description = "ZCLAW Hands - autonomous capabilities"
[dependencies]
zclaw-types = { workspace = true }
zclaw-runtime = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }

View File

@@ -14,7 +14,7 @@
mod whiteboard;
mod slideshow;
mod speech;
mod quiz;
pub mod quiz;
mod browser;
mod researcher;
mod collector;

View File

@@ -14,6 +14,7 @@ use std::sync::Arc;
use tokio::sync::RwLock;
use uuid::Uuid;
use zclaw_types::Result;
use zclaw_runtime::driver::{LlmDriver, CompletionRequest};
use crate::{Hand, HandConfig, HandContext, HandResult, HandStatus};
@@ -44,29 +45,242 @@ impl QuizGenerator for DefaultQuizGenerator {
difficulty: &DifficultyLevel,
_question_types: &[QuestionType],
) -> Result<Vec<QuizQuestion>> {
// Generate placeholder questions
// Generate placeholder questions with randomized correct answers
let options_pool: Vec<Vec<String>> = vec![
vec!["光合作用".into(), "呼吸作用".into(), "蒸腾作用".into(), "运输作用".into()],
vec!["牛顿".into(), "爱因斯坦".into(), "伽利略".into(), "开普勒".into()],
vec!["太平洋".into(), "大西洋".into(), "印度洋".into(), "北冰洋".into()],
vec!["DNA".into(), "RNA".into(), "蛋白质".into(), "碳水化合物".into()],
vec!["引力".into(), "电磁力".into(), "强力".into(), "弱力".into()],
];
Ok((0..count)
.map(|i| QuizQuestion {
id: uuid_v4(),
question_type: QuestionType::MultipleChoice,
question: format!("Question {} about {}", i + 1, topic),
options: Some(vec![
"Option A".to_string(),
"Option B".to_string(),
"Option C".to_string(),
"Option D".to_string(),
]),
correct_answer: Answer::Single("Option A".to_string()),
explanation: Some(format!("Explanation for question {}", i + 1)),
hints: Some(vec![format!("Hint 1 for question {}", i + 1)]),
points: 10.0,
difficulty: difficulty.clone(),
tags: vec![topic.to_string()],
.map(|i| {
let pool_idx = i % options_pool.len();
let mut opts = options_pool[pool_idx].clone();
// Shuffle options to randomize correct answer position
let correct_idx = (i * 3 + 1) % opts.len();
opts.swap(0, correct_idx);
let correct = opts[0].clone();
QuizQuestion {
id: uuid_v4(),
question_type: QuestionType::MultipleChoice,
question: format!("关于{}的第{}题({}难度)", topic, i + 1, match difficulty {
DifficultyLevel::Easy => "简单",
DifficultyLevel::Medium => "中等",
DifficultyLevel::Hard => "困难",
DifficultyLevel::Adaptive => "自适应",
}),
options: Some(opts),
correct_answer: Answer::Single(correct),
explanation: Some(format!("{}题的详细解释", i + 1)),
hints: Some(vec![format!("提示:仔细阅读关于{}的内容", topic)]),
points: 10.0,
difficulty: difficulty.clone(),
tags: vec![topic.to_string()],
}
})
.collect())
}
}
/// LLM-powered quiz generator that produces real questions via an LLM driver.
pub struct LlmQuizGenerator {
    /// Driver used to issue completion requests to the model.
    driver: Arc<dyn LlmDriver>,
    /// Model identifier passed to the driver on every request.
    model: String,
}
impl LlmQuizGenerator {
    /// Create a generator bound to the given driver and model name.
    pub fn new(driver: Arc<dyn LlmDriver>, model: String) -> Self {
        Self { driver, model }
    }
}
#[async_trait]
impl QuizGenerator for LlmQuizGenerator {
    /// Generate `count` quiz questions about `topic` by prompting the LLM
    /// and parsing its JSON response.
    ///
    /// Falls back to `DefaultQuizGenerator` when the LLM output yields no
    /// parseable questions, so callers always get something usable.
    async fn generate_questions(
        &self,
        topic: &str,
        content: Option<&str>,
        count: usize,
        difficulty: &DifficultyLevel,
        question_types: &[QuestionType],
    ) -> Result<Vec<QuizQuestion>> {
        let difficulty_str = match difficulty {
            DifficultyLevel::Easy => "简单",
            DifficultyLevel::Medium => "中等",
            DifficultyLevel::Hard => "困难",
            // Adaptive difficulty is prompted as "medium" — the prompt has
            // no notion of per-user adaptation.
            DifficultyLevel::Adaptive => "中等",
        };
        let type_str = if question_types.is_empty() {
            String::from("选择题(multiple_choice)")
        } else {
            question_types
                .iter()
                .map(|t| match t {
                    QuestionType::MultipleChoice => "选择题",
                    QuestionType::TrueFalse => "判断题",
                    QuestionType::FillBlank => "填空题",
                    QuestionType::ShortAnswer => "简答题",
                    QuestionType::Essay => "论述题",
                    _ => "选择题",
                })
                .collect::<Vec<_>>()
                .join(",")
        };
        // Truncate reference content by characters, not bytes: a byte slice
        // like `&c[..3000]` can panic on a UTF-8 boundary in CJK text.
        let content_section = match content {
            Some(c) if !c.is_empty() => {
                let snippet: String = c.chars().take(3000).collect();
                format!("\n\n参考内容:\n{}", snippet)
            }
            _ => String::new(),
        };
        let content_note = if content.map_or(false, |c| !c.is_empty()) {
            "(基于提供的参考内容出题)"
        } else {
            ""
        };
        let prompt = format!(
            r#"你是一个专业的出题专家。请根据以下要求生成测验题目:
主题: {}
难度: {}
题目类型: {}
数量: {}{}
{}
请严格按照以下 JSON 格式输出,不要添加任何其他文字:
```json
[
{{
"question": "题目内容",
"options": ["选项A", "选项B", "选项C", "选项D"],
"correct_answer": "正确答案与options中某项完全一致",
"explanation": "答案解释",
"hint": "提示信息"
}}
]
```
要求:
1. 题目要有实际内容,不要使用占位符
2. 正确答案必须随机分布(不要总在第一个选项)
3. 每道题的选项要有区分度,干扰项要合理
4. 解释要清晰准确
5. 直接输出 JSON不要有 markdown 包裹"#,
            topic, difficulty_str, type_str, count, content_section, content_note,
        );
        let request = CompletionRequest {
            model: self.model.clone(),
            system: Some("你是一个专业的出题专家只输出纯JSON格式。".to_string()),
            messages: vec![zclaw_types::Message::user(&prompt)],
            tools: Vec::new(),
            max_tokens: Some(4096),
            temperature: Some(0.7),
            stop: Vec::new(),
            stream: false,
        };
        let response = self.driver.complete(request).await.map_err(|e| {
            zclaw_types::ZclawError::Internal(format!("LLM quiz generation failed: {}", e))
        })?;
        // Concatenate all text blocks from the LLM response.
        let text: String = response
            .content
            .iter()
            .filter_map(|block| match block {
                zclaw_runtime::driver::ContentBlock::Text { text } => Some(text.clone()),
                _ => None,
            })
            .collect::<Vec<_>>()
            .join("");
        // Parse JSON from the response (tolerating markdown code fences).
        let json_str = extract_json(&text);
        let raw_questions: Vec<serde_json::Value> =
            serde_json::from_str(json_str).map_err(|e| {
                // Char-based preview: `&text[..200]` could panic inside a
                // multi-byte character of the model's CJK output.
                let preview: String = text.chars().take(200).collect();
                zclaw_types::ZclawError::Internal(format!(
                    "Failed to parse quiz JSON: {}. Raw: {}",
                    e, preview
                ))
            })?;
        // Map loosely-typed JSON objects into QuizQuestion values; missing
        // fields degrade to defaults rather than failing the whole batch.
        let questions: Vec<QuizQuestion> = raw_questions
            .into_iter()
            .take(count)
            .map(|q| {
                let options: Vec<String> = q["options"]
                    .as_array()
                    .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect())
                    .unwrap_or_default();
                let correct = q["correct_answer"].as_str().unwrap_or("").to_string();
                QuizQuestion {
                    id: uuid_v4(),
                    question_type: QuestionType::MultipleChoice,
                    question: q["question"].as_str().unwrap_or("未知题目").to_string(),
                    options: if options.is_empty() { None } else { Some(options) },
                    correct_answer: Answer::Single(correct),
                    explanation: q["explanation"].as_str().map(String::from),
                    hints: q["hint"].as_str().map(|h| vec![h.to_string()]),
                    points: 10.0,
                    difficulty: difficulty.clone(),
                    tags: vec![topic.to_string()],
                }
            })
            .collect();
        if questions.is_empty() {
            // Fallback to default if LLM returns nothing parseable.
            return DefaultQuizGenerator
                .generate_questions(topic, content, count, difficulty, question_types)
                .await;
        }
        Ok(questions)
    }
}
/// Extract JSON from a string that may be wrapped in markdown code fences.
///
/// Preference order: an explicit ```json fence, any ``` fence, then a raw
/// `[...]` array span; otherwise the trimmed input is returned unchanged.
fn extract_json(text: &str) -> &str {
    let src = text.trim();
    // Look for a fenced block, trying the explicit json fence first.
    for marker in ["```json", "```"] {
        if let Some(open) = src.find(marker) {
            let body = &src[open + marker.len()..];
            if let Some(close) = body.find("```") {
                return body[..close].trim();
            }
        }
    }
    // No closed fence: fall back to the outermost bracketed array span.
    if let (Some(open), Some(close)) = (src.find('['), src.rfind(']')) {
        return &src[open..=close];
    }
    src
}
/// Quiz action types
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "action", rename_all = "snake_case")]

View File

@@ -20,6 +20,7 @@ tokio-stream = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
toml = { workspace = true }
thiserror = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }

View File

@@ -252,10 +252,78 @@ fn default_skills_dir() -> Option<std::path::PathBuf> {
}
impl KernelConfig {
/// Load configuration from file
/// Load configuration from file.
///
/// Search order:
/// 1. Path from `ZCLAW_CONFIG` environment variable
/// 2. `~/.zclaw/config.toml`
/// 3. Fallback to `Self::default()`
///
/// Supports `${VAR_NAME}` environment variable interpolation in string values.
pub async fn load() -> Result<Self> {
// TODO: Load from ~/.zclaw/config.toml
Ok(Self::default())
let config_path = Self::find_config_path();
match config_path {
Some(path) => {
if !path.exists() {
tracing::debug!(target: "kernel_config", "Config file not found: {:?}, using defaults", path);
return Ok(Self::default());
}
tracing::info!(target: "kernel_config", "Loading config from: {:?}", path);
let content = std::fs::read_to_string(&path).map_err(|e| {
zclaw_types::ZclawError::Internal(format!("Failed to read config {}: {}", path.display(), e))
})?;
let interpolated = interpolate_env_vars(&content);
let mut config: KernelConfig = toml::from_str(&interpolated).map_err(|e| {
zclaw_types::ZclawError::Internal(format!("Failed to parse config {}: {}", path.display(), e))
})?;
// Resolve skills_dir if not explicitly set
if config.skills_dir.is_none() {
config.skills_dir = default_skills_dir();
}
tracing::info!(
target: "kernel_config",
model = %config.llm.model,
base_url = %config.llm.base_url,
has_api_key = !config.llm.api_key.is_empty(),
"Config loaded successfully"
);
Ok(config)
}
None => Ok(Self::default()),
}
}
/// Find the config file path.
///
/// Resolution order:
/// 1. `ZCLAW_CONFIG` environment variable — returned even if the file does
///    not exist yet (the caller handles the missing-file case);
/// 2. `~/.zclaw/config.toml`, only if it exists;
/// 3. `<cwd>/config/config.toml` (development convenience), only if it exists.
///
/// Returns `None` when no candidate is found.
fn find_config_path() -> Option<PathBuf> {
    // 1. Environment variable override
    if let Ok(path) = std::env::var("ZCLAW_CONFIG") {
        return Some(PathBuf::from(path));
    }
    // 2. ~/.zclaw/config.toml
    if let Some(home) = dirs::home_dir() {
        let path = home.join(".zclaw").join("config.toml");
        if path.exists() {
            return Some(path);
        }
    }
    // 3. Project root config/config.toml (for development)
    // `?` short-circuits to None when the current working directory is
    // unavailable (e.g. deleted or permission-denied).
    let project_config = std::env::current_dir()
        .ok()
        .map(|cwd| cwd.join("config").join("config.toml"))?;
    if project_config.exists() {
        return Some(project_config);
    }
    None
}
/// Create the LLM driver
@@ -439,3 +507,81 @@ impl LlmConfig {
self
}
}
// === Environment variable interpolation ===
/// Replace `${VAR_NAME}` patterns in a string with environment variable values.
///
/// If a referenced variable is not set, the `${...}` pattern is left as-is.
/// An unterminated `${...` at the end of the input is preserved verbatim
/// (the previous implementation silently dropped it, and its end-of-string
/// check indexed `content` with `result`'s length, which could panic when an
/// expanded value was longer than its reference).
fn interpolate_env_vars(content: &str) -> String {
    let mut result = String::with_capacity(content.len());
    let mut chars = content.chars().peekable();
    while let Some(ch) = chars.next() {
        if ch == '$' && chars.peek() == Some(&'{') {
            chars.next(); // consume '{'
            let mut var_name = String::new();
            let mut closed = false;
            // Accumulate the variable name until the closing brace.
            while let Some(&c) = chars.peek() {
                chars.next();
                if c == '}' {
                    closed = true;
                    break;
                }
                var_name.push(c);
            }
            if closed {
                match std::env::var(&var_name) {
                    Ok(value) => result.push_str(&value),
                    // Unset variable: keep the pattern untouched.
                    Err(_) => {
                        result.push_str("${");
                        result.push_str(&var_name);
                        result.push('}');
                    }
                }
            } else {
                // Unterminated `${...` at end of input: emit the literal
                // text instead of dropping it.
                result.push_str("${");
                result.push_str(&var_name);
            }
        } else {
            result.push(ch);
        }
    }
    result
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): these tests mutate process-wide environment state via
    // `std::env::set_var`; they may interfere with other env-dependent tests
    // if the harness runs tests in parallel threads.

    /// A set variable is substituted in place.
    #[test]
    fn test_interpolate_env_vars_basic() {
        std::env::set_var("ZCLAW_TEST_VAR", "hello");
        let result = interpolate_env_vars("prefix ${ZCLAW_TEST_VAR} suffix");
        assert_eq!(result, "prefix hello suffix");
    }

    /// An unset variable leaves the `${...}` pattern untouched.
    #[test]
    fn test_interpolate_env_vars_missing() {
        let result = interpolate_env_vars("${ZCLAW_NONEXISTENT_VAR_12345}");
        assert_eq!(result, "${ZCLAW_NONEXISTENT_VAR_12345}");
    }

    /// Input without any pattern passes through unchanged.
    #[test]
    fn test_interpolate_env_vars_no_vars() {
        let result = interpolate_env_vars("no variables here");
        assert_eq!(result, "no variables here");
    }

    /// Multiple references in one string are all substituted.
    #[test]
    fn test_interpolate_env_vars_multiple() {
        std::env::set_var("ZCLAW_TEST_A", "alpha");
        std::env::set_var("ZCLAW_TEST_B", "beta");
        let result = interpolate_env_vars("${ZCLAW_TEST_A}-${ZCLAW_TEST_B}");
        assert_eq!(result, "alpha-beta");
    }
}

View File

@@ -1,7 +1,7 @@
//! Kernel - central coordinator
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio::sync::{broadcast, mpsc, Mutex};
use zclaw_types::{AgentConfig, AgentId, AgentInfo, Event, Result};
use async_trait::async_trait;
use serde_json::Value;
@@ -13,7 +13,7 @@ use crate::config::KernelConfig;
use zclaw_memory::MemoryStore;
use zclaw_runtime::{AgentLoop, LlmDriver, ToolRegistry, tool::SkillExecutor};
use zclaw_skills::SkillRegistry;
use zclaw_hands::{HandRegistry, HandContext, HandResult, hands::{BrowserHand, SlideshowHand, SpeechHand, QuizHand, WhiteboardHand, ResearcherHand, CollectorHand, ClipHand, TwitterHand}};
use zclaw_hands::{HandRegistry, HandContext, HandResult, hands::{BrowserHand, SlideshowHand, SpeechHand, QuizHand, WhiteboardHand, ResearcherHand, CollectorHand, ClipHand, TwitterHand, quiz::LlmQuizGenerator}};
/// Skill executor implementation for Kernel
pub struct KernelSkillExecutor {
@@ -57,6 +57,7 @@ pub struct Kernel {
skill_executor: Arc<KernelSkillExecutor>,
hands: Arc<HandRegistry>,
trigger_manager: crate::trigger_manager::TriggerManager,
pending_approvals: Arc<Mutex<Vec<ApprovalEntry>>>,
}
impl Kernel {
@@ -85,10 +86,12 @@ impl Kernel {
// Initialize hand registry with built-in hands
let hands = Arc::new(HandRegistry::new());
let quiz_model = config.model().to_string();
let quiz_generator = Arc::new(LlmQuizGenerator::new(driver.clone(), quiz_model));
hands.register(Arc::new(BrowserHand::new())).await;
hands.register(Arc::new(SlideshowHand::new())).await;
hands.register(Arc::new(SpeechHand::new())).await;
hands.register(Arc::new(QuizHand::new())).await;
hands.register(Arc::new(QuizHand::with_generator(quiz_generator))).await;
hands.register(Arc::new(WhiteboardHand::new())).await;
hands.register(Arc::new(ResearcherHand::new())).await;
hands.register(Arc::new(CollectorHand::new())).await;
@@ -118,6 +121,7 @@ impl Kernel {
skill_executor,
hands,
trigger_manager,
pending_approvals: Arc::new(Mutex::new(Vec::new())),
})
}
@@ -306,7 +310,8 @@ impl Kernel {
.with_model(&model)
.with_skill_executor(self.skill_executor.clone())
.with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()));
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
.with_compaction_threshold(15_000); // Compact when context exceeds ~15k tokens
// Build system prompt with skill information injected
let system_prompt = self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await;
@@ -327,6 +332,16 @@ impl Kernel {
&self,
agent_id: &AgentId,
message: String,
) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
self.send_message_stream_with_prompt(agent_id, message, None).await
}
/// Send a message with streaming and optional external system prompt
pub async fn send_message_stream_with_prompt(
&self,
agent_id: &AgentId,
message: String,
system_prompt_override: Option<String>,
) -> Result<mpsc::Receiver<zclaw_runtime::LoopEvent>> {
let agent_config = self.registry.get(agent_id)
.ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Agent not found: {}", agent_id)))?;
@@ -349,10 +364,14 @@ impl Kernel {
.with_model(&model)
.with_skill_executor(self.skill_executor.clone())
.with_max_tokens(agent_config.max_tokens.unwrap_or_else(|| self.config.max_tokens()))
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()));
.with_temperature(agent_config.temperature.unwrap_or_else(|| self.config.temperature()))
.with_compaction_threshold(15_000); // Compact when context exceeds ~15k tokens
// Build system prompt with skill information injected
let system_prompt = self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await;
// Use external prompt if provided, otherwise build default
let system_prompt = match system_prompt_override {
Some(prompt) => prompt,
None => self.build_system_prompt_with_skills(agent_config.system_prompt.as_ref()).await,
};
let loop_runner = loop_runner.with_system_prompt(&system_prompt);
// Run with streaming
@@ -477,24 +496,82 @@ impl Kernel {
}
// ============================================================
// Approval Management (Stub Implementation)
// Approval Management
// ============================================================
/// List pending approvals
pub async fn list_approvals(&self) -> Vec<ApprovalEntry> {
// Stub: Return empty list
Vec::new()
let approvals = self.pending_approvals.lock().await;
approvals.iter().filter(|a| a.status == "pending").cloned().collect()
}
/// Create a pending approval (called when a needs_approval hand is triggered)
pub async fn create_approval(&self, hand_id: String, input: serde_json::Value) -> ApprovalEntry {
let entry = ApprovalEntry {
id: uuid::Uuid::new_v4().to_string(),
hand_id,
status: "pending".to_string(),
created_at: chrono::Utc::now(),
input,
};
let mut approvals = self.pending_approvals.lock().await;
approvals.push(entry.clone());
entry
}
/// Respond to an approval
pub async fn respond_to_approval(
&self,
_id: &str,
_approved: bool,
id: &str,
approved: bool,
_reason: Option<String>,
) -> Result<()> {
// Stub: Return error
Err(zclaw_types::ZclawError::NotFound(format!("Approval not found")))
let mut approvals = self.pending_approvals.lock().await;
let entry = approvals.iter_mut().find(|a| a.id == id && a.status == "pending")
.ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id)))?;
entry.status = if approved { "approved".to_string() } else { "rejected".to_string() };
if approved {
let hand_id = entry.hand_id.clone();
let input = entry.input.clone();
drop(approvals); // Release lock before async hand execution
// Execute the hand in background
let hands = self.hands.clone();
let approvals = self.pending_approvals.clone();
let id_owned = id.to_string();
tokio::spawn(async move {
let context = HandContext::default();
let result = hands.execute(&hand_id, &context, input).await;
// Update approval status based on execution result
let mut approvals = approvals.lock().await;
if let Some(entry) = approvals.iter_mut().find(|a| a.id == id_owned) {
match result {
Ok(_) => entry.status = "completed".to_string(),
Err(e) => {
entry.status = "failed".to_string();
// Store error in input metadata
if let Some(obj) = entry.input.as_object_mut() {
obj.insert("error".to_string(), Value::String(format!("{}", e)));
}
}
}
}
});
}
Ok(())
}
/// Cancel a pending approval
pub async fn cancel_approval(&self, id: &str) -> Result<()> {
let mut approvals = self.pending_approvals.lock().await;
let entry = approvals.iter_mut().find(|a| a.id == id && a.status == "pending")
.ok_or_else(|| zclaw_types::ZclawError::NotFound(format!("Approval not found: {}", id)))?;
entry.status = "cancelled".to_string();
Ok(())
}
}

View File

@@ -20,6 +20,7 @@ tracing = { workspace = true }
# SQLite
sqlx = { workspace = true }
libsqlite3-sys = { workspace = true }
# Async utilities
futures = { workspace = true }

View File

@@ -46,11 +46,14 @@ pub async fn export_files(
.map_err(|e| ActionError::Export(format!("Write error: {}", e)))?;
}
ExportFormat::Pptx => {
// Will integrate with zclaw-kernel export
return Err(ActionError::Export("PPTX export requires kernel integration".to_string()));
return Err(ActionError::Export(
"PPTX 导出暂不可用。桌面端可通过 Pipeline 结果面板使用 JSON 格式导出后转换。".to_string(),
));
}
ExportFormat::Pdf => {
return Err(ActionError::Export("PDF export not yet implemented".to_string()));
return Err(ActionError::Export(
"PDF 导出暂不可用。桌面端可通过 Pipeline 结果面板使用 HTML 格式导出后通过浏览器打印为 PDF。".to_string(),
));
}
}

View File

@@ -1,21 +0,0 @@
//! Hand execution action
use std::collections::HashMap;
use serde_json::Value;
use super::ActionError;
/// Execute a hand action
pub async fn execute_hand(
hand_id: &str,
action: &str,
_params: HashMap<String, Value>,
) -> Result<Value, ActionError> {
// This will be implemented by injecting the hand registry
// For now, return an error indicating it needs configuration
Err(ActionError::Hand(format!(
"Hand '{}' action '{}' requires hand registry configuration",
hand_id, action
)))
}

View File

@@ -7,8 +7,6 @@ mod parallel;
mod render;
mod export;
mod http;
mod skill;
mod hand;
mod orchestration;
pub use llm::*;
@@ -16,8 +14,6 @@ pub use parallel::*;
pub use render::*;
pub use export::*;
pub use http::*;
pub use skill::*;
pub use hand::*;
pub use orchestration::*;
use std::collections::HashMap;
@@ -256,11 +252,14 @@ impl ActionRegistry {
tokio::fs::write(&path, content).await?;
}
ExportFormat::Pptx => {
// Will integrate with pptx exporter
return Err(ActionError::Export("PPTX export not yet implemented".to_string()));
return Err(ActionError::Export(
"PPTX 导出暂不可用。桌面端可通过 Pipeline 结果面板使用 JSON 格式导出后转换。".to_string(),
));
}
ExportFormat::Pdf => {
return Err(ActionError::Export("PDF export not yet implemented".to_string()));
return Err(ActionError::Export(
"PDF 导出暂不可用。桌面端可通过 Pipeline 结果面板使用 HTML 格式导出后通过浏览器打印为 PDF。".to_string(),
));
}
}

View File

@@ -1,20 +0,0 @@
//! Skill execution action
use std::collections::HashMap;
use serde_json::Value;
use super::ActionError;
/// Execute a skill by ID
pub async fn execute_skill(
skill_id: &str,
_input: HashMap<String, Value>,
) -> Result<Value, ActionError> {
// This will be implemented by injecting the skill registry
// For now, return an error indicating it needs configuration
Err(ActionError::Skill(format!(
"Skill '{}' execution requires skill registry configuration",
skill_id
)))
}

View File

@@ -10,11 +10,9 @@
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use futures::future::join_all;
use serde_json::{Value, json};
use tokio::sync::RwLock;
use crate::types_v2::{Stage, ConditionalBranch, PresentationType};
use crate::types_v2::{Stage, ConditionalBranch};
use crate::engine::context::{ExecutionContextV2, ContextError};
/// Stage execution result
@@ -242,14 +240,6 @@ impl StageEngine {
Ok(result)
}
Err(e) => {
let result = StageResult {
stage_id: stage_id.clone(),
output: Value::Null,
status: StageStatus::Failed,
error: Some(e.to_string()),
duration_ms,
};
self.emit_event(StageEvent::Error {
stage_id,
error: e.to_string(),
@@ -312,7 +302,7 @@ impl StageEngine {
stage_id: &str,
each: &str,
stage_template: &Stage,
max_workers: usize,
_max_workers: usize,
context: &mut ExecutionContextV2,
) -> Result<Value, StageError> {
// Resolve the array to iterate over
@@ -419,7 +409,7 @@ impl StageEngine {
/// Execute compose stage
async fn execute_compose(
&self,
stage_id: &str,
_stage_id: &str,
template: &str,
context: &ExecutionContextV2,
) -> Result<Value, StageError> {
@@ -568,7 +558,8 @@ impl StageEngine {
Ok(resolved_value)
}
/// Clone with drivers
/// Clone with drivers (reserved for future use)
#[allow(dead_code)]
fn clone_with_drivers(&self) -> Self {
Self {
llm_driver: self.llm_driver.clone(),

View File

@@ -396,6 +396,7 @@ pub trait LlmIntentDriver: Send + Sync {
}
/// Default LLM driver implementation using prompt-based matching
#[allow(dead_code)]
pub struct DefaultLlmIntentDriver {
/// Model ID to use
model_id: String,

View File

@@ -57,6 +57,7 @@ pub mod intent;
pub mod engine;
pub mod presentation;
// Glob re-exports with explicit disambiguation for conflicting names
pub use types::*;
pub use types_v2::*;
pub use parser::*;
@@ -67,6 +68,14 @@ pub use trigger::*;
pub use intent::*;
pub use engine::*;
pub use presentation::*;
// Explicit re-exports: presentation::* wins for PresentationType/ExportFormat
// types_v2::* wins for InputMode, engine::* wins for LoopContext
pub use presentation::PresentationType;
pub use presentation::ExportFormat;
pub use types_v2::InputMode;
pub use engine::context::LoopContext;
pub use actions::ActionRegistry;
pub use actions::{LlmActionDriver, SkillActionDriver, HandActionDriver, OrchestrationActionDriver};

View File

@@ -13,7 +13,6 @@
//! - Better recommendations for ambiguous cases
use serde_json::Value;
use std::collections::HashMap;
use super::types::*;

View File

@@ -254,13 +254,13 @@ pub fn compile_pattern(pattern: &str) -> Result<CompiledPattern, PatternError> {
'{' => {
// Named capture group
let mut name = String::new();
let mut has_type = false;
let mut _has_type = false;
while let Some(c) = chars.next() {
match c {
'}' => break,
':' => {
has_type = true;
_has_type = true;
// Skip type part
while let Some(nc) = chars.peek() {
if *nc == '}' {

View File

@@ -0,0 +1,365 @@
//! Context compaction for the agent loop.
//!
//! Provides rule-based token estimation and message compaction to prevent
//! conversations from exceeding LLM context windows. When the estimated
//! token count exceeds the configured threshold, older messages are
//! summarized into a single system message and only recent messages are
//! retained.
use zclaw_types::Message;
/// Number of recent messages to preserve after compaction.
const DEFAULT_KEEP_RECENT: usize = 6;
/// Heuristic token count estimation.
///
/// CJK characters ≈ 1.5 tokens each, English words ≈ 1.3 tokens each.
/// Intentionally conservative (overestimates) to avoid hitting real limits.
pub fn estimate_tokens(text: &str) -> usize {
    if text.is_empty() {
        return 0;
    }
    let mut total = 0.0_f64;
    for ch in text.chars() {
        let cp = ch as u32;
        // Per-character weight, chosen by Unicode range.
        let weight = if matches!(
            cp,
            0x4E00..=0x9FFF | 0x3400..=0x4DBF | 0x20000..=0x2A6DF | 0xF900..=0xFAFF
        ) {
            // CJK ideographs — ~1.5 tokens
            1.5
        } else if matches!(cp, 0x3000..=0x303F | 0xFF00..=0xFFEF) {
            // CJK / fullwidth punctuation — ~1.0 token
            1.0
        } else if matches!(ch, ' ' | '\n' | '\t') {
            // whitespace
            0.25
        } else {
            // ASCII / Latin characters — roughly 4 chars per token
            0.3
        };
        total += weight;
    }
    total.ceil() as usize
}
/// Estimate total tokens for a list of messages (including framing overhead).
pub fn estimate_messages_tokens(messages: &[Message]) -> usize {
let mut total = 0;
for msg in messages {
match msg {
Message::User { content } => {
total += estimate_tokens(content);
total += 4;
}
Message::Assistant { content, thinking } => {
total += estimate_tokens(content);
if let Some(th) = thinking {
total += estimate_tokens(th);
}
total += 4;
}
Message::System { content } => {
total += estimate_tokens(content);
total += 4;
}
Message::ToolUse { input, .. } => {
total += estimate_tokens(&input.to_string());
total += 4;
}
Message::ToolResult { output, .. } => {
total += estimate_tokens(&output.to_string());
total += 4;
}
}
}
total
}
/// Compact a message list by summarizing old messages and keeping recent ones.
///
/// When `messages.len() > keep_recent`, the oldest messages are summarized
/// into a single system message. System messages at the beginning of the
/// conversation are always preserved.
///
/// Returns the compacted message list and the number of original messages removed.
pub fn compact_messages(messages: Vec<Message>, keep_recent: usize) -> (Vec<Message>, usize) {
    // Already within the retention window: return unchanged.
    if messages.len() <= keep_recent {
        return (messages, 0);
    }
    // Preserve leading system messages (they contain compaction summaries from prior runs)
    let leading_system_count = messages
        .iter()
        .take_while(|m| matches!(m, Message::System { .. }))
        .count();
    // Calculate split point: keep leading system + recent messages
    let keep_from_end = keep_recent.min(messages.len().saturating_sub(leading_system_count));
    let split_index = messages.len().saturating_sub(keep_from_end);
    // Ensure we keep at least the leading system messages
    let split_index = split_index.max(leading_system_count);
    if split_index == 0 {
        // Nothing precedes the retained window — no compaction possible.
        return (messages, 0);
    }
    // Everything before the split gets folded into one synthetic system
    // message; everything after it survives verbatim.
    let old_messages = &messages[..split_index];
    let recent_messages = &messages[split_index..];
    let summary = generate_summary(old_messages);
    let removed_count = old_messages.len();
    let mut compacted = Vec::with_capacity(1 + recent_messages.len());
    compacted.push(Message::system(summary));
    compacted.extend(recent_messages.iter().cloned());
    (compacted, removed_count)
}
/// Check if compaction should be triggered and perform it if needed.
///
/// Returns the (possibly compacted) message list.
pub fn maybe_compact(messages: Vec<Message>, threshold: usize) -> Vec<Message> {
    let estimated = estimate_messages_tokens(&messages);
    // Below threshold: hand the conversation back untouched.
    if estimated < threshold {
        return messages;
    }
    tracing::info!(
        "[Compaction] Triggered: {} tokens > {} threshold, {} messages",
        estimated,
        threshold,
        messages.len(),
    );
    let (kept, dropped) = compact_messages(messages, DEFAULT_KEEP_RECENT);
    tracing::info!(
        "[Compaction] Removed {} messages, {} remain",
        dropped,
        kept.len(),
    );
    kept
}
/// Generate a rule-based summary of old messages.
///
/// Produces a bracketed header, one line per notable event (system prompts,
/// tool calls), an aggregated topic list, and a closing count line.
fn generate_summary(messages: &[Message]) -> String {
    if messages.is_empty() {
        return "[对话开始]".to_string();
    }
    let mut sections: Vec<String> = vec!["[以下是之前对话的摘要]".to_string()];
    let mut user_count = 0;
    let mut assistant_count = 0;
    let mut topics: Vec<String> = Vec::new();
    for msg in messages {
        match msg {
            Message::User { content } => {
                user_count += 1;
                if let Some(t) = extract_topic(content) {
                    topics.push(t);
                }
            }
            Message::Assistant { .. } => {
                assistant_count += 1;
            }
            Message::System { content } => {
                // Skip system messages that are previous compaction summaries
                if !content.starts_with("[以下是之前对话的摘要]") {
                    sections.push(format!("系统提示: {}", truncate(content, 60)));
                }
            }
            Message::ToolUse { tool, .. } => {
                sections.push(format!("工具调用: {}", tool.as_str()));
            }
            Message::ToolResult { .. } => {
                // Skip tool results in summary
            }
        }
    }
    if !topics.is_empty() {
        let topic_list: Vec<String> = topics.iter().take(8).cloned().collect();
        sections.push(format!("讨论主题: {}", topic_list.join("; ")));
    }
    sections.push(format!(
        "(已压缩 {} 条消息,其中用户 {} 条,助手 {} 条)",
        messages.len(),
        user_count,
        assistant_count,
    ));
    let summary = sections.join("\n");
    // Enforce a maximum length. Truncate by characters, not bytes:
    // `&summary[..max_chars]` would panic when byte 800 falls inside a
    // multi-byte CJK character, which this summary routinely contains.
    let max_chars = 800;
    if summary.chars().count() > max_chars {
        let head: String = summary.chars().take(max_chars).collect();
        format!("{}...\n(摘要已截断)", head)
    } else {
        summary
    }
}
/// Extract the main topic from a user message (first sentence or first 50 chars).
///
/// Returns `None` for blank input. A sentence ends at a CJK period,
/// fullwidth exclamation/question mark, or a newline.
fn extract_topic(content: &str) -> Option<String> {
    let trimmed = content.trim();
    if trimmed.is_empty() {
        return None;
    }
    // Find sentence end markers.
    // NOTE(review): two of the original char literals were corrupted to
    // empty `''` (a compile error); restored as fullwidth '!' and '?',
    // consistent with the CJK '。' and '\n' markers alongside them.
    for (i, ch) in trimmed.char_indices() {
        if ch == '。' || ch == '!' || ch == '?' || ch == '\n' {
            let end = i + ch.len_utf8();
            // Only use the first sentence when it is reasonably short
            // (80 bytes); otherwise fall through to the char-count caps.
            if end <= 80 {
                return Some(trimmed[..end].trim().to_string());
            }
            break;
        }
    }
    if trimmed.chars().count() <= 50 {
        return Some(trimmed.to_string());
    }
    Some(format!("{}...", trimmed.chars().take(50).collect::<String>()))
}
/// Clip `text` to at most `max_chars` characters, appending "..." when
/// anything was removed. Operates on chars, so multi-byte text is safe.
fn truncate(text: &str, max_chars: usize) -> String {
    let mut clipped: String = text.chars().take(max_chars).collect();
    if clipped.chars().count() < text.chars().count() {
        clipped.push_str("...");
    }
    clipped
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_estimate_tokens_empty() {
        assert_eq!(estimate_tokens(""), 0);
    }

    #[test]
    fn test_estimate_tokens_english() {
        assert!(estimate_tokens("Hello world") > 0);
    }

    #[test]
    fn test_estimate_tokens_cjk() {
        // CJK chars are ~1.5 tokens each
        assert!(estimate_tokens("你好世界") > 3);
    }

    #[test]
    fn test_estimate_messages_tokens() {
        let history = vec![
            Message::user("Hello"),
            Message::assistant("Hi there"),
        ];
        assert!(estimate_messages_tokens(&history) > 0);
    }

    #[test]
    fn test_compact_messages_under_threshold() {
        let history = vec![
            Message::user("Hello"),
            Message::assistant("Hi"),
        ];
        let (kept, dropped) = compact_messages(history, 6);
        assert_eq!(dropped, 0);
        assert_eq!(kept.len(), 2);
    }

    #[test]
    fn test_compact_messages_over_threshold() {
        let mut history = Vec::new();
        for i in 0..10 {
            history.push(Message::user(format!("Question {}", i)));
            history.push(Message::assistant(format!("Answer {}", i)));
        }
        let (kept, dropped) = compact_messages(history, 4);
        assert!(dropped > 0);
        // Should have: 1 summary + 4 recent messages
        assert_eq!(kept.len(), 5);
        // First message should be a system summary
        assert!(matches!(&kept[0], Message::System { .. }));
    }

    #[test]
    fn test_compact_preserves_leading_system() {
        let history = vec![
            Message::system("You are helpful"),
            Message::user("Q1"),
            Message::assistant("A1"),
            Message::user("Q2"),
            Message::assistant("A2"),
            Message::user("Q3"),
            Message::assistant("A3"),
        ];
        let (kept, dropped) = compact_messages(history, 4);
        assert!(dropped > 0);
        // Should start with compaction summary, then recent messages
        assert!(matches!(&kept[0], Message::System { .. }));
    }

    #[test]
    fn test_maybe_compact_under_threshold() {
        let history = vec![
            Message::user("Short message"),
            Message::assistant("Short reply"),
        ];
        assert_eq!(maybe_compact(history, 100_000).len(), 2);
    }

    #[test]
    fn test_extract_topic_sentence() {
        let topic = extract_topic("什么是Rust的所有权系统").unwrap();
        assert!(topic.contains("所有权"));
    }

    #[test]
    fn test_extract_topic_short() {
        assert_eq!(extract_topic("Hello").unwrap(), "Hello");
    }

    #[test]
    fn test_extract_topic_long() {
        let long = "This is a very long message that exceeds fifty characters in total length";
        assert!(extract_topic(long).unwrap().ends_with("..."));
    }

    #[test]
    fn test_generate_summary() {
        let history = vec![
            Message::user("What is Rust?"),
            Message::assistant("Rust is a systems programming language"),
            Message::user("How does ownership work?"),
            Message::assistant("Ownership is Rust's memory management system"),
        ];
        let digest = generate_summary(&history);
        assert!(digest.contains("摘要"));
        assert!(digest.contains("2"));
    }
}

View File

@@ -1,9 +1,17 @@
//! Google Gemini driver implementation
//!
//! Implements the Gemini REST API v1beta with full support for:
//! - Text generation (complete and streaming)
//! - Tool / function calling
//! - System instructions
//! - Token usage reporting
use async_trait::async_trait;
use futures::Stream;
use async_stream::stream;
use futures::{Stream, StreamExt};
use secrecy::{ExposeSecret, SecretString};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::pin::Pin;
use zclaw_types::{Result, ZclawError};
@@ -11,7 +19,6 @@ use super::{CompletionRequest, CompletionResponse, ContentBlock, LlmDriver, Stop
use crate::stream::StreamChunk;
/// Google Gemini driver
#[allow(dead_code)] // TODO: Implement full Gemini API support
pub struct GeminiDriver {
client: Client,
api_key: SecretString,
@@ -21,11 +28,31 @@ pub struct GeminiDriver {
impl GeminiDriver {
pub fn new(api_key: SecretString) -> Self {
Self {
client: Client::new(),
client: Client::builder()
.user_agent(crate::USER_AGENT)
.http1_only()
.timeout(std::time::Duration::from_secs(120))
.connect_timeout(std::time::Duration::from_secs(30))
.build()
.unwrap_or_else(|_| Client::new()),
api_key,
base_url: "https://generativelanguage.googleapis.com/v1beta".to_string(),
}
}
pub fn with_base_url(api_key: SecretString, base_url: String) -> Self {
Self {
client: Client::builder()
.user_agent(crate::USER_AGENT)
.http1_only()
.timeout(std::time::Duration::from_secs(120))
.connect_timeout(std::time::Duration::from_secs(30))
.build()
.unwrap_or_else(|_| Client::new()),
api_key,
base_url,
}
}
}
#[async_trait]
@@ -39,25 +66,594 @@ impl LlmDriver for GeminiDriver {
}
async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse> {
// TODO: Implement actual API call
Ok(CompletionResponse {
content: vec![ContentBlock::Text {
text: "Gemini driver not yet implemented".to_string(),
}],
model: request.model,
input_tokens: 0,
output_tokens: 0,
stop_reason: StopReason::EndTurn,
})
let api_request = self.build_api_request(&request);
let url = format!(
"{}/models/{}:generateContent?key={}",
self.base_url,
request.model,
self.api_key.expose_secret()
);
tracing::debug!(target: "gemini_driver", "Sending request to: {}", url);
let response = self.client
.post(&url)
.header("content-type", "application/json")
.json(&api_request)
.send()
.await
.map_err(|e| ZclawError::LlmError(format!("HTTP request failed: {}", e)))?;
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
tracing::warn!(target: "gemini_driver", "API error {}: {}", status, body);
return Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
}
let api_response: GeminiResponse = response
.json()
.await
.map_err(|e| ZclawError::LlmError(format!("Failed to parse response: {}", e)))?;
Ok(self.convert_response(api_response, request.model))
}
fn stream(
&self,
_request: CompletionRequest,
request: CompletionRequest,
) -> Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send + '_>> {
// Placeholder - return error stream
Box::pin(futures::stream::once(async {
Err(ZclawError::LlmError("Gemini streaming not yet implemented".to_string()))
}))
let api_request = self.build_api_request(&request);
let url = format!(
"{}/models/{}:streamGenerateContent?alt=sse&key={}",
self.base_url,
request.model,
self.api_key.expose_secret()
);
tracing::debug!(target: "gemini_driver", "Starting stream request to: {}", url);
Box::pin(stream! {
let response = match self.client
.post(&url)
.header("content-type", "application/json")
.timeout(std::time::Duration::from_secs(120))
.json(&api_request)
.send()
.await
{
Ok(r) => {
tracing::debug!(target: "gemini_driver", "Stream response status: {}", r.status());
r
},
Err(e) => {
tracing::error!(target: "gemini_driver", "HTTP request failed: {:?}", e);
yield Err(ZclawError::LlmError(format!("HTTP request failed: {}", e)));
return;
}
};
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
return;
}
let mut byte_stream = response.bytes_stream();
let mut accumulated_tool_calls: std::collections::HashMap<usize, (String, String)> = std::collections::HashMap::new();
while let Some(chunk_result) = byte_stream.next().await {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
yield Err(ZclawError::LlmError(format!("Stream error: {}", e)));
continue;
}
};
let text = String::from_utf8_lossy(&chunk);
for line in text.lines() {
if let Some(data) = line.strip_prefix("data: ") {
match serde_json::from_str::<GeminiStreamResponse>(data) {
Ok(resp) => {
if let Some(candidate) = resp.candidates.first() {
let content = match &candidate.content {
Some(c) => c,
None => continue,
};
let parts = &content.parts;
for (idx, part) in parts.iter().enumerate() {
// Handle text content
if let Some(text) = &part.text {
if !text.is_empty() {
yield Ok(StreamChunk::TextDelta { delta: text.clone() });
}
}
// Handle function call (tool use)
if let Some(fc) = &part.function_call {
let name = fc.name.clone().unwrap_or_default();
let args = fc.args.clone().unwrap_or(serde_json::Value::Object(Default::default()));
// Emit ToolUseStart if this is a new tool call
if !accumulated_tool_calls.contains_key(&idx) {
accumulated_tool_calls.insert(idx, (name.clone(), String::new()));
yield Ok(StreamChunk::ToolUseStart {
id: format!("gemini_call_{}", idx),
name,
});
}
// Emit the function arguments as delta
let args_str = serde_json::to_string(&args).unwrap_or_default();
let call_id = format!("gemini_call_{}", idx);
yield Ok(StreamChunk::ToolUseDelta {
id: call_id.clone(),
delta: args_str.clone(),
});
// Accumulate
if let Some(entry) = accumulated_tool_calls.get_mut(&idx) {
entry.1 = args_str;
}
}
}
// When the candidate is finished, emit ToolUseEnd for all pending
if let Some(ref finish_reason) = candidate.finish_reason {
let is_final = finish_reason == "STOP" || finish_reason == "MAX_TOKENS";
if is_final {
// Emit ToolUseEnd for all accumulated tool calls
for (idx, (_name, args_str)) in &accumulated_tool_calls {
let input: serde_json::Value = if args_str.is_empty() {
serde_json::json!({})
} else {
serde_json::from_str(args_str).unwrap_or_else(|e| {
tracing::warn!(target: "gemini_driver", "Failed to parse tool args '{}': {}", args_str, e);
serde_json::json!({})
})
};
yield Ok(StreamChunk::ToolUseEnd {
id: format!("gemini_call_{}", idx),
input,
});
}
// Extract usage metadata from the response
let usage = resp.usage_metadata.as_ref();
let input_tokens = usage.map(|u| u.prompt_token_count.unwrap_or(0)).unwrap_or(0);
let output_tokens = usage.map(|u| u.candidates_token_count.unwrap_or(0)).unwrap_or(0);
let stop_reason = match finish_reason.as_str() {
"STOP" => "end_turn",
"MAX_TOKENS" => "max_tokens",
"SAFETY" => "error",
"RECITATION" => "error",
_ => "end_turn",
};
yield Ok(StreamChunk::Complete {
input_tokens,
output_tokens,
stop_reason: stop_reason.to_string(),
});
}
}
}
}
Err(e) => {
tracing::warn!(target: "gemini_driver", "Failed to parse SSE event: {} - {}", e, data);
}
}
}
}
}
})
}
}
impl GeminiDriver {
/// Convert a CompletionRequest into the Gemini API request format.
///
/// Key mapping decisions:
/// - `system` prompt maps to `systemInstruction`
/// - Messages use Gemini's `contents` array with `role`/`parts`
/// - Tool definitions use `functionDeclarations`
/// - Tool results are sent as `functionResponse` parts in `user` messages
fn build_api_request(&self, request: &CompletionRequest) -> GeminiRequest {
let mut contents: Vec<GeminiContent> = Vec::new();
for msg in &request.messages {
match msg {
zclaw_types::Message::User { content } => {
contents.push(GeminiContent {
role: "user".to_string(),
parts: vec![GeminiPart {
text: Some(content.clone()),
inline_data: None,
function_call: None,
function_response: None,
}],
});
}
zclaw_types::Message::Assistant { content, thinking } => {
let mut parts = Vec::new();
// Gemini does not have a native "thinking" field, so we prepend
// any thinking content as a text part with a marker.
if let Some(think) = thinking {
if !think.is_empty() {
parts.push(GeminiPart {
text: Some(format!("[thinking]\n{}\n[/thinking]", think)),
inline_data: None,
function_call: None,
function_response: None,
});
}
}
parts.push(GeminiPart {
text: Some(content.clone()),
inline_data: None,
function_call: None,
function_response: None,
});
contents.push(GeminiContent {
role: "model".to_string(),
parts,
});
}
zclaw_types::Message::ToolUse { id: _, tool, input } => {
// Tool use from the assistant is represented as a functionCall part
let args = if input.is_null() {
serde_json::json!({})
} else {
input.clone()
};
contents.push(GeminiContent {
role: "model".to_string(),
parts: vec![GeminiPart {
text: None,
inline_data: None,
function_call: Some(GeminiFunctionCall {
name: Some(tool.to_string()),
args: Some(args),
}),
function_response: None,
}],
});
}
zclaw_types::Message::ToolResult { tool_call_id, tool, output, is_error } => {
// Tool results are sent as functionResponse parts in a "user" role message.
// Gemini requires that function responses reference the function name
// and include the response wrapped in a "result" or "error" key.
let response_content = if *is_error {
serde_json::json!({ "error": output.to_string() })
} else {
serde_json::json!({ "result": output.clone() })
};
contents.push(GeminiContent {
role: "user".to_string(),
parts: vec![GeminiPart {
text: None,
inline_data: None,
function_call: None,
function_response: Some(GeminiFunctionResponse {
name: tool.to_string(),
response: response_content,
}),
}],
});
// Gemini ignores tool_call_id, but we log it for debugging
let _ = tool_call_id;
}
zclaw_types::Message::System { content } => {
// System messages are converted to user messages with system context.
// Note: the primary system prompt is handled via systemInstruction.
// Inline system messages in conversation history become user messages.
contents.push(GeminiContent {
role: "user".to_string(),
parts: vec![GeminiPart {
text: Some(content.clone()),
inline_data: None,
function_call: None,
function_response: None,
}],
});
}
}
}
// Build tool declarations
let function_declarations: Vec<GeminiFunctionDeclaration> = request.tools
.iter()
.map(|t| GeminiFunctionDeclaration {
name: t.name.clone(),
description: t.description.clone(),
parameters: t.input_schema.clone(),
})
.collect();
// Build generation config
let mut generation_config = GeminiGenerationConfig::default();
if let Some(temp) = request.temperature {
generation_config.temperature = Some(temp);
}
if let Some(max) = request.max_tokens {
generation_config.max_output_tokens = Some(max);
}
if !request.stop.is_empty() {
generation_config.stop_sequences = Some(request.stop.clone());
}
// Build system instruction
let system_instruction = request.system.as_ref().map(|s| GeminiSystemInstruction {
parts: vec![GeminiPart {
text: Some(s.clone()),
inline_data: None,
function_call: None,
function_response: None,
}],
});
GeminiRequest {
contents,
system_instruction,
generation_config: Some(generation_config),
tools: if function_declarations.is_empty() {
None
} else {
Some(vec![GeminiTool {
function_declarations,
}])
},
}
}
/// Convert a Gemini API response into a CompletionResponse.
fn convert_response(&self, api_response: GeminiResponse, model: String) -> CompletionResponse {
let candidate = api_response.candidates.first();
let (content, stop_reason) = match candidate {
Some(c) => {
let parts = c.content.as_ref()
.map(|content| content.parts.as_slice())
.unwrap_or(&[]);
let mut blocks: Vec<ContentBlock> = Vec::new();
let mut has_tool_use = false;
for part in parts {
// Handle text content
if let Some(text) = &part.text {
// Skip thinking markers we injected
if text.starts_with("[thinking]\n") && text.contains("[/thinking]") {
let thinking_content = text
.strip_prefix("[thinking]\n")
.and_then(|s| s.strip_suffix("\n[/thinking]"))
.unwrap_or("");
if !thinking_content.is_empty() {
blocks.push(ContentBlock::Thinking {
thinking: thinking_content.to_string(),
});
}
} else if !text.is_empty() {
blocks.push(ContentBlock::Text { text: text.clone() });
}
}
// Handle function call (tool use)
if let Some(fc) = &part.function_call {
has_tool_use = true;
blocks.push(ContentBlock::ToolUse {
id: format!("gemini_call_{}", blocks.len()),
name: fc.name.clone().unwrap_or_default(),
input: fc.args.clone().unwrap_or(serde_json::Value::Object(Default::default())),
});
}
}
// If there are no content blocks, add an empty text block
if blocks.is_empty() {
blocks.push(ContentBlock::Text { text: String::new() });
}
let stop = match c.finish_reason.as_deref() {
Some("STOP") => StopReason::EndTurn,
Some("MAX_TOKENS") => StopReason::MaxTokens,
Some("SAFETY") => StopReason::Error,
Some("RECITATION") => StopReason::Error,
Some("TOOL_USE") => StopReason::ToolUse,
_ => {
if has_tool_use {
StopReason::ToolUse
} else {
StopReason::EndTurn
}
}
};
(blocks, stop)
}
None => {
tracing::warn!(target: "gemini_driver", "No candidates in response");
(
vec![ContentBlock::Text { text: String::new() }],
StopReason::EndTurn,
)
}
};
let usage = api_response.usage_metadata.as_ref();
let input_tokens = usage.map(|u| u.prompt_token_count.unwrap_or(0)).unwrap_or(0);
let output_tokens = usage.map(|u| u.candidates_token_count.unwrap_or(0)).unwrap_or(0);
CompletionResponse {
content,
model,
input_tokens,
output_tokens,
stop_reason,
}
}
}
// ---------------------------------------------------------------------------
// Gemini API request types
// ---------------------------------------------------------------------------
/// Top-level request body for `models/*:generateContent`.
///
/// Wire names follow the documented camelCase JSON field names, matching the
/// other renamed fields in this file ("functionCall", "stopSequences", ...).
/// Google's JSON transcoding also accepts snake_case, so this is
/// backward-compatible.
#[derive(Serialize)]
struct GeminiRequest {
    contents: Vec<GeminiContent>,
    #[serde(rename = "systemInstruction", skip_serializing_if = "Option::is_none")]
    system_instruction: Option<GeminiSystemInstruction>,
    #[serde(rename = "generationConfig", skip_serializing_if = "Option::is_none")]
    generation_config: Option<GeminiGenerationConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tools: Option<Vec<GeminiTool>>,
}
#[derive(Serialize)]
struct GeminiContent {
role: String,
parts: Vec<GeminiPart>,
}
#[derive(Serialize, Clone)]
struct GeminiPart {
#[serde(skip_serializing_if = "Option::is_none")]
text: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
inline_data: Option<serde_json::Value>,
#[serde(rename = "functionCall", skip_serializing_if = "Option::is_none")]
function_call: Option<GeminiFunctionCall>,
#[serde(rename = "functionResponse", skip_serializing_if = "Option::is_none")]
function_response: Option<GeminiFunctionResponse>,
}
#[derive(Serialize)]
struct GeminiSystemInstruction {
parts: Vec<GeminiPart>,
}
/// Generation parameters (`generationConfig`) for the Gemini API.
///
/// `Default` is derived instead of hand-written (the manual impl set every
/// field to `None`, which is exactly what the derive produces).
#[derive(Serialize, Default)]
struct GeminiGenerationConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f32>,
    // Documented wire name is camelCase, consistent with "stopSequences" below.
    #[serde(rename = "maxOutputTokens", skip_serializing_if = "Option::is_none")]
    max_output_tokens: Option<u32>,
    #[serde(rename = "stopSequences", skip_serializing_if = "Option::is_none")]
    stop_sequences: Option<Vec<String>>,
}
#[derive(Serialize)]
struct GeminiTool {
#[serde(rename = "functionDeclarations")]
function_declarations: Vec<GeminiFunctionDeclaration>,
}
#[derive(Serialize)]
struct GeminiFunctionDeclaration {
name: String,
description: String,
parameters: serde_json::Value,
}
#[derive(Serialize, Clone)]
struct GeminiFunctionCall {
#[serde(skip_serializing_if = "Option::is_none")]
name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
args: Option<serde_json::Value>,
}
#[derive(Serialize, Clone)]
struct GeminiFunctionResponse {
name: String,
response: serde_json::Value,
}
// ---------------------------------------------------------------------------
// Gemini API response types
// ---------------------------------------------------------------------------
/// Response body from `generateContent`.
///
/// The API emits camelCase JSON ("usageMetadata"); without the rename the
/// field always deserialized to `None` and token counts reported as 0.
#[derive(Deserialize)]
struct GeminiResponse {
    #[serde(default)]
    candidates: Vec<GeminiCandidate>,
    #[serde(rename = "usageMetadata", default)]
    usage_metadata: Option<GeminiUsageMetadata>,
}
/// One candidate completion from the Gemini API.
///
/// "finishReason" arrives in camelCase; without the rename `finish_reason`
/// was always `None`, breaking stop-reason mapping and the stream's
/// completion / ToolUseEnd events (which fire only on a finish reason).
#[derive(Debug, Deserialize)]
struct GeminiCandidate {
    #[serde(default)]
    content: Option<GeminiResponseContent>,
    #[serde(rename = "finishReason", default)]
    finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
struct GeminiResponseContent {
#[serde(default)]
parts: Vec<GeminiResponsePart>,
#[serde(default)]
#[allow(dead_code)]
role: Option<String>,
}
#[derive(Debug, Deserialize)]
struct GeminiResponsePart {
#[serde(default)]
text: Option<String>,
#[serde(rename = "functionCall", default)]
function_call: Option<GeminiResponseFunctionCall>,
}
#[derive(Debug, Deserialize)]
struct GeminiResponseFunctionCall {
#[serde(default)]
name: Option<String>,
#[serde(default)]
args: Option<serde_json::Value>,
}
/// Token accounting block ("usageMetadata") in a Gemini response.
///
/// The API emits camelCase field names ("promptTokenCount", ...), so renames
/// are required for these fields to deserialize at all.
#[derive(Debug, Deserialize)]
struct GeminiUsageMetadata {
    #[serde(rename = "promptTokenCount", default)]
    prompt_token_count: Option<u32>,
    #[serde(rename = "candidatesTokenCount", default)]
    candidates_token_count: Option<u32>,
    #[serde(rename = "totalTokenCount", default)]
    #[allow(dead_code)]
    total_token_count: Option<u32>,
}
// ---------------------------------------------------------------------------
// Gemini streaming types
// ---------------------------------------------------------------------------
/// Streaming response from the Gemini SSE endpoint.
/// Each SSE event contains the same structure as the non-streaming response,
/// but with incremental content.
// "usageMetadata" arrives in camelCase, same as the non-streaming response;
// without the rename streamed token usage always reported as 0.
#[derive(Debug, Deserialize)]
struct GeminiStreamResponse {
    #[serde(default)]
    candidates: Vec<GeminiCandidate>,
    #[serde(rename = "usageMetadata", default)]
    usage_metadata: Option<GeminiUsageMetadata>,
}

View File

@@ -1,40 +1,250 @@
//! Local LLM driver (Ollama, LM Studio, vLLM, etc.)
//!
//! Uses the OpenAI-compatible API format. The only differences from the
//! OpenAI driver are: no API key is required, and base_url points to a
//! local server.
use async_trait::async_trait;
use futures::Stream;
use async_stream::stream;
use futures::{Stream, StreamExt};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::pin::Pin;
use zclaw_types::{Result, ZclawError};
use super::{CompletionRequest, CompletionResponse, ContentBlock, LlmDriver, StopReason};
use crate::stream::StreamChunk;
/// Local LLM driver for Ollama, LM Studio, vLLM, etc.
#[allow(dead_code)] // TODO: Implement full Local driver support
/// Local LLM driver for Ollama, LM Studio, vLLM, and other OpenAI-compatible servers.
pub struct LocalDriver {
client: Client,
base_url: String,
}
impl LocalDriver {
/// Create a driver pointing at a custom OpenAI-compatible endpoint.
///
/// The `base_url` should end with `/v1` (e.g. `http://localhost:8080/v1`).
pub fn new(base_url: impl Into<String>) -> Self {
Self {
client: Client::new(),
client: Client::builder()
.user_agent(crate::USER_AGENT)
.http1_only()
.timeout(std::time::Duration::from_secs(300)) // 5 min -- local inference can be slow
.connect_timeout(std::time::Duration::from_secs(10)) // short connect timeout
.build()
.unwrap_or_else(|_| Client::new()),
base_url: base_url.into(),
}
}
/// Ollama default endpoint (`http://localhost:11434/v1`).
pub fn ollama() -> Self {
Self::new("http://localhost:11434/v1")
}
/// LM Studio default endpoint (`http://localhost:1234/v1`).
pub fn lm_studio() -> Self {
Self::new("http://localhost:1234/v1")
}
/// vLLM default endpoint (`http://localhost:8000/v1`).
pub fn vllm() -> Self {
Self::new("http://localhost:8000/v1")
}
// ----------------------------------------------------------------
// Request / response conversion (OpenAI-compatible format)
// ----------------------------------------------------------------
fn build_api_request(&self, request: &CompletionRequest) -> LocalApiRequest {
let messages: Vec<LocalApiMessage> = request
.messages
.iter()
.filter_map(|msg| match msg {
zclaw_types::Message::User { content } => Some(LocalApiMessage {
role: "user".to_string(),
content: Some(content.clone()),
tool_calls: None,
}),
zclaw_types::Message::Assistant {
content,
thinking: _,
} => Some(LocalApiMessage {
role: "assistant".to_string(),
content: Some(content.clone()),
tool_calls: None,
}),
zclaw_types::Message::System { content } => Some(LocalApiMessage {
role: "system".to_string(),
content: Some(content.clone()),
tool_calls: None,
}),
zclaw_types::Message::ToolUse {
id, tool, input, ..
} => {
let args = if input.is_null() {
"{}".to_string()
} else {
serde_json::to_string(input).unwrap_or_else(|_| "{}".to_string())
};
Some(LocalApiMessage {
role: "assistant".to_string(),
content: None,
tool_calls: Some(vec![LocalApiToolCall {
id: id.clone(),
r#type: "function".to_string(),
function: LocalFunctionCall {
name: tool.to_string(),
arguments: args,
},
}]),
})
}
zclaw_types::Message::ToolResult {
output, is_error, ..
} => Some(LocalApiMessage {
role: "tool".to_string(),
content: Some(if *is_error {
format!("Error: {}", output)
} else {
output.to_string()
}),
tool_calls: None,
}),
})
.collect();
// Prepend system prompt when provided.
let mut messages = messages;
if let Some(system) = &request.system {
messages.insert(
0,
LocalApiMessage {
role: "system".to_string(),
content: Some(system.clone()),
tool_calls: None,
},
);
}
let tools: Vec<LocalApiTool> = request
.tools
.iter()
.map(|t| LocalApiTool {
r#type: "function".to_string(),
function: LocalFunctionDef {
name: t.name.clone(),
description: t.description.clone(),
parameters: t.input_schema.clone(),
},
})
.collect();
LocalApiRequest {
model: request.model.clone(),
messages,
max_tokens: request.max_tokens,
temperature: request.temperature,
stop: if request.stop.is_empty() {
None
} else {
Some(request.stop.clone())
},
stream: request.stream,
tools: if tools.is_empty() {
None
} else {
Some(tools)
},
}
}
fn convert_response(
&self,
api_response: LocalApiResponse,
model: String,
) -> CompletionResponse {
let choice = api_response.choices.first();
let (content, stop_reason) = match choice {
Some(c) => {
let has_tool_calls = c
.message
.tool_calls
.as_ref()
.map(|tc| !tc.is_empty())
.unwrap_or(false);
let has_content = c
.message
.content
.as_ref()
.map(|t| !t.is_empty())
.unwrap_or(false);
let blocks = if has_tool_calls {
let tool_calls = c.message.tool_calls.as_ref().unwrap();
tool_calls
.iter()
.map(|tc| {
let input: serde_json::Value =
serde_json::from_str(&tc.function.arguments)
.unwrap_or(serde_json::Value::Null);
ContentBlock::ToolUse {
id: tc.id.clone(),
name: tc.function.name.clone(),
input,
}
})
.collect()
} else if has_content {
vec![ContentBlock::Text {
text: c.message.content.clone().unwrap(),
}]
} else {
vec![ContentBlock::Text {
text: String::new(),
}]
};
let stop = match c.finish_reason.as_deref() {
Some("stop") => StopReason::EndTurn,
Some("length") => StopReason::MaxTokens,
Some("tool_calls") => StopReason::ToolUse,
_ => StopReason::EndTurn,
};
(blocks, stop)
}
None => (
vec![ContentBlock::Text {
text: String::new(),
}],
StopReason::EndTurn,
),
};
let (input_tokens, output_tokens) = api_response
.usage
.map(|u| (u.prompt_tokens, u.completion_tokens))
.unwrap_or((0, 0));
CompletionResponse {
content,
model,
input_tokens,
output_tokens,
stop_reason,
}
}
/// Build the `reqwest::RequestBuilder` with an optional Authorization header.
///
/// Ollama does not need one; LM Studio / vLLM may be configured with an
/// optional API key. We send the header only when a key is present.
fn authenticated_post(&self, url: &str) -> reqwest::RequestBuilder {
self.client.post(url).header("Accept", "*/*")
}
}
#[async_trait]
@@ -44,30 +254,394 @@ impl LlmDriver for LocalDriver {
}
fn is_configured(&self) -> bool {
// Local drivers don't require API keys
// Local drivers never require an API key.
true
}
async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse> {
// TODO: Implement actual API call (OpenAI-compatible)
Ok(CompletionResponse {
content: vec![ContentBlock::Text {
text: "Local driver not yet implemented".to_string(),
}],
model: request.model,
input_tokens: 0,
output_tokens: 0,
stop_reason: StopReason::EndTurn,
})
let api_request = self.build_api_request(&request);
let url = format!("{}/chat/completions", self.base_url);
tracing::debug!(target: "local_driver", "Sending request to {}", url);
tracing::trace!(
target: "local_driver",
"Request body: {}",
serde_json::to_string(&api_request).unwrap_or_default()
);
let response = self
.authenticated_post(&url)
.json(&api_request)
.send()
.await
.map_err(|e| {
let hint = connection_error_hint(&e);
ZclawError::LlmError(format!("Failed to connect to local LLM server at {}: {}{}", self.base_url, e, hint))
})?;
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
tracing::warn!(target: "local_driver", "API error {}: {}", status, body);
return Err(ZclawError::LlmError(format!(
"Local LLM API error {}: {}",
status, body
)));
}
let api_response: LocalApiResponse = response
.json()
.await
.map_err(|e| ZclawError::LlmError(format!("Failed to parse response: {}", e)))?;
Ok(self.convert_response(api_response, request.model))
}
fn stream(
&self,
_request: CompletionRequest,
request: CompletionRequest,
) -> Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send + '_>> {
// Placeholder - return error stream
Box::pin(futures::stream::once(async {
Err(ZclawError::LlmError("Local driver streaming not yet implemented".to_string()))
}))
let mut stream_request = self.build_api_request(&request);
stream_request.stream = true;
let url = format!("{}/chat/completions", self.base_url);
tracing::debug!(target: "local_driver", "Starting stream to {}", url);
Box::pin(stream! {
let response = match self
.authenticated_post(&url)
.header("Content-Type", "application/json")
.timeout(std::time::Duration::from_secs(300))
.json(&stream_request)
.send()
.await
{
Ok(r) => {
tracing::debug!(target: "local_driver", "Stream response status: {}", r.status());
r
}
Err(e) => {
let hint = connection_error_hint(&e);
tracing::error!(target: "local_driver", "Stream connection failed: {}{}", e, hint);
yield Err(ZclawError::LlmError(format!(
"Failed to connect to local LLM server at {}: {}{}",
self.base_url, e, hint
)));
return;
}
};
if !response.status().is_success() {
let status = response.status();
let body = response.text().await.unwrap_or_default();
yield Err(ZclawError::LlmError(format!("API error {}: {}", status, body)));
return;
}
let mut byte_stream = response.bytes_stream();
let mut accumulated_tool_calls: std::collections::HashMap<String, (String, String)> =
std::collections::HashMap::new();
let mut current_tool_id: Option<String> = None;
while let Some(chunk_result) = byte_stream.next().await {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
yield Err(ZclawError::LlmError(format!("Stream error: {}", e)));
continue;
}
};
let text = String::from_utf8_lossy(&chunk);
for line in text.lines() {
if let Some(data) = line.strip_prefix("data: ") {
if data == "[DONE]" {
tracing::debug!(
target: "local_driver",
"Stream done, tool_calls accumulated: {}",
accumulated_tool_calls.len()
);
for (id, (name, args)) in &accumulated_tool_calls {
if name.is_empty() {
tracing::warn!(
target: "local_driver",
"Skipping tool call with empty name: id={}",
id
);
continue;
}
let parsed_args: serde_json::Value = if args.is_empty() {
serde_json::json!({})
} else {
serde_json::from_str(args).unwrap_or_else(|e| {
tracing::warn!(
target: "local_driver",
"Failed to parse tool args '{}': {}",
args, e
);
serde_json::json!({})
})
};
yield Ok(StreamChunk::ToolUseEnd {
id: id.clone(),
input: parsed_args,
});
}
yield Ok(StreamChunk::Complete {
input_tokens: 0,
output_tokens: 0,
stop_reason: "end_turn".to_string(),
});
continue;
}
match serde_json::from_str::<LocalStreamResponse>(data) {
Ok(resp) => {
if let Some(choice) = resp.choices.first() {
let delta = &choice.delta;
// Text content
if let Some(content) = &delta.content {
if !content.is_empty() {
yield Ok(StreamChunk::TextDelta {
delta: content.clone(),
});
}
}
// Tool calls
if let Some(tool_calls) = &delta.tool_calls {
for tc in tool_calls {
// Tool call start
if let Some(id) = &tc.id {
let name = tc
.function
.as_ref()
.and_then(|f| f.name.clone())
.unwrap_or_default();
if !name.is_empty() {
current_tool_id = Some(id.clone());
accumulated_tool_calls
.insert(id.clone(), (name.clone(), String::new()));
yield Ok(StreamChunk::ToolUseStart {
id: id.clone(),
name,
});
} else {
current_tool_id = Some(id.clone());
accumulated_tool_calls
.insert(id.clone(), (String::new(), String::new()));
}
}
// Tool call delta
if let Some(function) = &tc.function {
if let Some(args) = &function.arguments {
let tool_id = tc
.id
.as_ref()
.or(current_tool_id.as_ref())
.cloned()
.unwrap_or_default();
yield Ok(StreamChunk::ToolUseDelta {
id: tool_id.clone(),
delta: args.clone(),
});
if let Some(entry) =
accumulated_tool_calls.get_mut(&tool_id)
{
entry.1.push_str(args);
}
}
}
}
}
}
}
Err(e) => {
tracing::warn!(
target: "local_driver",
"Failed to parse SSE: {}, data: {}",
e, data
);
}
}
}
}
}
})
}
}
// ---------------------------------------------------------------------------
// Connection-error diagnostics
// ---------------------------------------------------------------------------
/// Return a human-readable hint when the local server appears to be unreachable.
///
/// `reqwest::Error` does not expose the target host after a failed connect,
/// so the hint stays generic instead of hard-coding "localhost" (the previous
/// placeholder was wrong for any non-default base URL; callers already
/// include the configured base URL in their own error message).
fn connection_error_hint(error: &reqwest::Error) -> String {
    if error.is_connect() {
        "\n\nHint: Is the local LLM server running?\n\
         Make sure the server is started before using this driver."
            .to_string()
    } else if error.is_timeout() {
        "\n\nHint: The request timed out. Local inference can be slow -- \
         try a smaller model or increase the timeout."
            .to_string()
    } else {
        String::new()
    }
}
// ---------------------------------------------------------------------------
// OpenAI-compatible API types (private to this module)
// ---------------------------------------------------------------------------
/// Request body for the OpenAI-compatible chat-completions endpoint of the
/// local LLM server. Optional fields are omitted from the JSON entirely
/// (rather than sent as `null`) via `skip_serializing_if`.
#[derive(Serialize)]
struct LocalApiRequest {
    /// Model identifier understood by the local server.
    model: String,
    /// Conversation history in the OpenAI message wire format.
    messages: Vec<LocalApiMessage>,
    /// Maximum number of tokens to generate; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<u32>,
    /// Sampling temperature; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f32>,
    /// Stop sequences; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    stop: Option<Vec<String>>,
    /// Whether to request an SSE streaming response.
    // A `#[serde(default)]` previously sat here; `default` only affects
    // deserialization and this struct only derives `Serialize`, so the
    // attribute was a no-op and has been removed.
    stream: bool,
    /// Tool definitions advertised to the model; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    tools: Option<Vec<LocalApiTool>>,
}
/// A single chat message in the OpenAI wire format.
///
/// Either `content` or `tool_calls` (or both) may be present; absent
/// options are omitted from the serialized JSON.
#[derive(Serialize)]
struct LocalApiMessage {
    // Presumably "system" / "user" / "assistant" / "tool" per the OpenAI
    // schema -- confirm against the call sites that build these.
    role: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_calls: Option<Vec<LocalApiToolCall>>,
}
/// An assistant-issued tool call echoed back in the message history.
#[derive(Serialize)]
struct LocalApiToolCall {
    // Tool-call identifier correlating the call with its result message.
    id: String,
    // Raw identifier uses `r#type` because `type` is a Rust keyword;
    // in the OpenAI schema this is expected to be "function" -- TODO confirm.
    r#type: String,
    function: LocalFunctionCall,
}
/// The function invoked by a tool call: its name plus the argument
/// payload as a raw JSON string (not parsed here).
#[derive(Serialize)]
struct LocalFunctionCall {
    name: String,
    arguments: String,
}
/// A tool made available to the model in the request's `tools` array.
#[derive(Serialize)]
struct LocalApiTool {
    // `r#type` escapes the `type` keyword; expected to be "function"
    // per the OpenAI schema -- TODO confirm at construction sites.
    r#type: String,
    function: LocalFunctionDef,
}
/// Definition of a callable function advertised to the model.
#[derive(Serialize)]
struct LocalFunctionDef {
    name: String,
    description: String,
    // Presumably a JSON Schema describing the accepted arguments --
    // confirm against the tool registry that supplies this value.
    parameters: serde_json::Value,
}
// --- Response types ---
/// Top-level non-streaming chat-completions response.
///
/// All fields use `#[serde(default)]` so a server that omits them still
/// deserializes cleanly instead of erroring.
#[derive(Deserialize, Default)]
struct LocalApiResponse {
    #[serde(default)]
    choices: Vec<LocalApiChoice>,
    // Token accounting; absent from some servers, hence Option + default.
    #[serde(default)]
    usage: Option<LocalApiUsage>,
}
/// One completion choice in a non-streaming response.
#[derive(Deserialize, Default)]
struct LocalApiChoice {
    #[serde(default)]
    message: LocalApiResponseMessage,
    // e.g. "stop" / "tool_calls" in the OpenAI schema -- value set
    // depends on the local server; treat as opaque.
    #[serde(default)]
    finish_reason: Option<String>,
}
/// The assistant message carried by a non-streaming choice; may contain
/// plain text, tool calls, or both.
#[derive(Deserialize, Default)]
struct LocalApiResponseMessage {
    #[serde(default)]
    content: Option<String>,
    #[serde(default)]
    tool_calls: Option<Vec<LocalApiToolCallResponse>>,
}
/// A tool call returned by the model in a non-streaming response.
#[derive(Deserialize, Default)]
struct LocalApiToolCallResponse {
    // Identifier used to correlate the eventual tool result.
    #[serde(default)]
    id: String,
    #[serde(default)]
    function: LocalFunctionCallResponse,
}
/// Function name and raw JSON argument string of a returned tool call.
#[derive(Deserialize, Default)]
struct LocalFunctionCallResponse {
    #[serde(default)]
    name: String,
    // Arguments arrive as an unparsed JSON string, per the OpenAI format.
    #[serde(default)]
    arguments: String,
}
/// Token usage reported by the server; both counts default to 0 when
/// the server omits the field.
#[derive(Deserialize, Default)]
struct LocalApiUsage {
    #[serde(default)]
    prompt_tokens: u32,
    #[serde(default)]
    completion_tokens: u32,
}
// --- Streaming types ---
/// One SSE event payload from a streaming chat-completions response.
#[derive(Debug, Deserialize)]
struct LocalStreamResponse {
    // Only the first choice is consumed by the stream handler above.
    #[serde(default)]
    choices: Vec<LocalStreamChoice>,
}
/// One choice inside a streaming event; carries an incremental delta.
#[derive(Debug, Deserialize)]
struct LocalStreamChoice {
    #[serde(default)]
    delta: LocalDelta,
    // Deserialized for wire-format completeness but not read; the stream
    // handler detects completion from the SSE terminator instead.
    #[serde(default)]
    #[allow(dead_code)] // Deserialized from SSE, not accessed in code
    finish_reason: Option<String>,
}
/// Incremental message fragment in a streaming choice: appended text
/// and/or partial tool-call data.
#[derive(Debug, Deserialize, Default)]
struct LocalDelta {
    #[serde(default)]
    content: Option<String>,
    #[serde(default)]
    tool_calls: Option<Vec<LocalToolCallDelta>>,
}
/// Partial tool-call data within a streaming delta.
///
/// `id` appears on the first fragment of a call; later fragments may omit
/// it, in which case the stream handler falls back to the last-seen id.
#[derive(Debug, Deserialize)]
struct LocalToolCallDelta {
    #[serde(default)]
    id: Option<String>,
    #[serde(default)]
    function: Option<LocalFunctionDelta>,
}
/// Partial function data within a streaming tool-call fragment:
/// `name` arrives on the first fragment, `arguments` accumulates as a
/// raw JSON string across fragments.
#[derive(Debug, Deserialize)]
struct LocalFunctionDelta {
    #[serde(default)]
    name: Option<String>,
    #[serde(default)]
    arguments: Option<String>,
}

View File

@@ -12,6 +12,7 @@ pub mod loop_runner;
pub mod loop_guard;
pub mod stream;
pub mod growth;
pub mod compaction;
// Re-export main types
pub use driver::{

View File

@@ -11,6 +11,7 @@ use crate::tool::{ToolRegistry, ToolContext, SkillExecutor};
use crate::tool::builtin::PathValidator;
use crate::loop_guard::LoopGuard;
use crate::growth::GrowthIntegration;
use crate::compaction;
use zclaw_memory::MemoryStore;
/// Agent loop runner
@@ -29,6 +30,8 @@ pub struct AgentLoop {
path_validator: Option<PathValidator>,
/// Growth system integration (optional)
growth: Option<GrowthIntegration>,
/// Compaction threshold in tokens (0 = disabled)
compaction_threshold: usize,
}
impl AgentLoop {
@@ -51,6 +54,7 @@ impl AgentLoop {
skill_executor: None,
path_validator: None,
growth: None,
compaction_threshold: 0,
}
}
@@ -101,6 +105,16 @@ impl AgentLoop {
self.growth = Some(growth);
}
/// Set compaction threshold in tokens (0 = disabled)
///
/// When the estimated token count of conversation history exceeds this
/// threshold, older messages are summarized into a single system message
/// and only recent messages are sent to the LLM.
pub fn with_compaction_threshold(mut self, threshold: usize) -> Self {
self.compaction_threshold = threshold;
self
}
/// Get growth integration reference
pub fn growth(&self) -> Option<&GrowthIntegration> {
self.growth.as_ref()
@@ -134,6 +148,11 @@ impl AgentLoop {
// Get all messages for context
let mut messages = self.memory.get_messages(&session_id).await?;
// Apply compaction if threshold is configured
if self.compaction_threshold > 0 {
messages = compaction::maybe_compact(messages, self.compaction_threshold);
}
// Enhance system prompt with growth memories
let enhanced_prompt = if let Some(ref growth) = self.growth {
let base = self.system_prompt.as_deref().unwrap_or("");
@@ -260,7 +279,12 @@ impl AgentLoop {
self.memory.append_message(&session_id, &user_message).await?;
// Get all messages for context
let messages = self.memory.get_messages(&session_id).await?;
let mut messages = self.memory.get_messages(&session_id).await?;
// Apply compaction if threshold is configured
if self.compaction_threshold > 0 {
messages = compaction::maybe_compact(messages, self.compaction_threshold);
}
// Enhance system prompt with growth memories
let enhanced_prompt = if let Some(ref growth) = self.growth {

View File

@@ -2,7 +2,7 @@
**测试日期**: 2026-03-13
**测试环境**: Windows 11 Pro, Chrome DevTools MCP
**测试范围**: 前端 UI 组件、OpenFang 集成、设置页面
**测试范围**: 前端 UI 组件、ZCLAW 集成、设置页面
---
@@ -12,7 +12,7 @@
|---------|------|------|------|
| 前端页面加载 | 5 | 0 | 5 |
| 设置页面功能 | 6 | 0 | 6 |
| OpenFang UI 组件 | 5 | 0 | 5 |
| ZCLAW UI 组件 | 5 | 0 | 5 |
| TypeScript 编译 | 1 | 0 | 1 |
| **总计** | **17** | **0** | **17** |
@@ -51,12 +51,12 @@
#### 2.1 后端设置 UI ✓
- **状态**: 通过
- **验证项**:
- Gateway 类型选择器 (OpenClaw/OpenFang) 正常工作
- 切换到 OpenFang 时:
- Gateway 类型选择器 (OpenClaw/ZCLAW) 正常工作
- 切换到 ZCLAW 时:
- 默认端口显示 4200
- 协议显示 "WebSocket + REST API"
- 配置格式显示 "TOML"
- 显示 OpenFang 特有功能提示
- 显示 ZCLAW 特有功能提示
- 切换到 OpenClaw 时:
- 默认端口显示 18789
- 协议显示 "WebSocket RPC"
@@ -105,7 +105,7 @@
---
### 3. OpenFang UI 组件测试
### 3. ZCLAW UI 组件测试
#### 3.1 Hands 面板 ✓
- **状态**: 通过
@@ -159,9 +159,9 @@
### 新增功能
1. **后端设置 UI** (`General.tsx`)
- 添加 OpenClaw/OpenFang 后端类型选择器
- 添加 OpenClaw/ZCLAW 后端类型选择器
- 显示后端特性信息(端口、协议、配置格式)
- OpenFang 特有功能提示
- ZCLAW 特有功能提示
2. **TypeScript 类型修复**
- `gatewayStore.ts`: 添加 `Hand.currentRunId``cancelWorkflow`
@@ -193,7 +193,7 @@ Node.js: v20.x
- CLI 检测功能
- 服务注册功能
2. **连接真实 OpenFang 后测试**
2. **连接真实 ZCLAW 后测试**
- Hands 触发和审批流程
- Workflow 执行
- 审计日志获取
@@ -208,7 +208,7 @@ Node.js: v20.x
## 结论
本次 E2E 测试覆盖了 ZCLAW Desktop 的主要前端功能,所有测试项目均通过。OpenFang 相关 UI 组件已正确集成并显示,后端类型切换功能正常工作。
本次 E2E 测试覆盖了 ZCLAW Desktop 的主要前端功能,所有测试项目均通过。ZCLAW 相关 UI 组件已正确集成并显示,后端类型切换功能正常工作。
**测试状态**: ✅ 全部通过
@@ -216,12 +216,12 @@ Node.js: v20.x
## 5. WebSocket 流式聊天测试 (2026-03-14)
### 5.1 OpenFang 协议发现 ✅
### 5.1 ZCLAW 协议发现 ✅
**测试方法:** 直接 WebSocket 连接到 `ws://127.0.0.1:50051/api/agents/{agentId}/ws`
**发现:**
- OpenFang 实际使用的消息格式与文档不同
- ZCLAW 实际使用的消息格式与文档不同
- 正确的消息格式: `{ type: 'message', content, session_id }`
- 错误的文档格式: `{ type: 'chat', message: { role, content } }`
@@ -258,7 +258,7 @@ Node.js: v20.x
**修复内容:**
1. `gateway-client.ts`:
- 更新 `chatStream()` 使用正确的消息格式
- 更新 `handleOpenFangStreamEvent()` 处理实际的事件类型
- 更新 `handleZCLAWStreamEvent()` 处理实际的事件类型
- 添加 `setDefaultAgentId()``getDefaultAgentId()` 方法
2. `chatStore.ts`:
@@ -309,7 +309,7 @@ curl -X POST http://127.0.0.1:50051/api/agents/{id}/message \
| 测试项 | 状态 | 详情 |
|--------|------|------|
| OpenFang 健康检查 | ✅ PASS | 版本 0.4.0 |
| ZCLAW 健康检查 | ✅ PASS | 版本 0.4.0 |
| Agent 列表 | ✅ PASS | 10 个 Agent |
| Hands 列表 | ✅ PASS | 8 个 Hands |
| WebSocket 流式聊天 | ✅ PASS | 正确接收 text_delta 事件 |
@@ -342,7 +342,7 @@ ws.send(JSON.stringify({
|------|------|------|
| Tauri Desktop | - | ✅ 运行中 (PID 72760) |
| Vite Dev Server | 1420 | ✅ 运行中 |
| OpenFang Backend | 50051 | ✅ 运行中 (v0.4.0) |
| ZCLAW Backend | 50051 | ✅ 运行中 (v0.4.0) |
### 7.4 前端功能待验证

View File

@@ -4,8 +4,8 @@
### 已完成的工作 (2026-03-14)
1. **OpenFang 连接适配**
- ZCLAW Desktop 已成功连接 OpenFang (端口 50051)
1. **ZCLAW 连接适配**
- ZCLAW Desktop 已成功连接 ZCLAW (端口 50051)
- 对话功能测试通过AI 响应正常
2. **WebSocket 流式聊天** ✅ (新完成)
@@ -27,9 +27,9 @@
| `gatewayStore.ts` | loadClones 自动设置默认 Agent |
| `vite.config.ts` | 启用 WebSocket 代理 |
### OpenFang vs OpenClaw 协议差异
### ZCLAW vs OpenClaw 协议差异
| 方面 | OpenClaw | OpenFang |
| 方面 | OpenClaw | ZCLAW |
|------|----------|----------|
| 端口 | 18789 | **50051** |
| 聊天 API | `/api/chat` | `/api/agents/{id}/message` |
@@ -38,7 +38,7 @@
### 运行环境
- **OpenFang**: `~/.openfang/` (config.toml, .env)
- **ZCLAW**: `~/.zclaw/` (config.toml, .env)
- **OpenClaw**: `~/.openclaw/` (openclaw.json, devices/)
- **ZCLAW 前端**: `http://localhost:1420` (Vite)
- **默认 Agent**: 动态获取第一个可用 Agent
@@ -46,7 +46,7 @@
### localStorage 配置
```javascript
localStorage.setItem('zclaw-backend', 'openfang');
localStorage.setItem('zclaw-backend', 'zclaw');
localStorage.setItem('zclaw_gateway_url', 'ws://127.0.0.1:50051/ws');
```
@@ -62,23 +62,23 @@ localStorage.setItem('zclaw_gateway_url', 'ws://127.0.0.1:50051/ws');
### 优先级 P2 - 优化
4. **后端切换优化** - 代理配置应动态切换 (OpenClaw: 18789, OpenFang: 50051)
4. **后端切换优化** - 代理配置应动态切换 (OpenClaw: 18789, ZCLAW: 50051)
5. **错误处理** - 更友好的错误提示
6. **连接状态显示** - 显示 OpenFang 版本号
6. **连接状态显示** - 显示 ZCLAW 版本号
---
## 快速启动命令
```bash
# 启动 OpenFang
cd "desktop/src-tauri/resources/openfang-runtime" && ./openfang.exe start
# 启动 ZCLAW
cd "desktop/src-tauri/resources/zclaw-runtime" && ./zclaw.exe start
# 启动 Vite 开发服务器
cd desktop && pnpm dev
# 检查 OpenFang 状态
./openfang.exe status
# 检查 ZCLAW 状态
./zclaw.exe status
# 测试 API
curl http://127.0.0.1:50051/api/health
@@ -96,7 +96,7 @@ curl http://127.0.0.1:50051/api/agents
| `desktop/src/store/chatStore.ts` | 聊天状态管理 |
| `desktop/src/components/Settings/General.tsx` | 后端切换设置 |
| `desktop/vite.config.ts` | Vite 代理配置 |
| `docs/openfang-technical-reference.md` | OpenFang 技术文档 |
| `docs/zclaw-technical-reference.md` | ZCLAW 技术文档 |
---
@@ -106,7 +106,7 @@ curl http://127.0.0.1:50051/api/agents
请继续 ZCLAW Desktop 的开发工作。
当前状态:
- OpenFang REST API 聊天已可用 ✅
- ZCLAW REST API 聊天已可用 ✅
- WebSocket 流式聊天已实现 ✅
- 动态 Agent 选择已实现 ✅

View File

@@ -1,4 +1,4 @@
# ZClaw OpenFang 系统功能测试报告
# ZClaw ZCLAW 系统功能测试报告
> 测试日期: 2026-03-13
> 测试环境: Windows 11 Pro, Node.js v20+, pnpm 10+
@@ -38,11 +38,11 @@ Duration 1.29s
| gatewayStore.test.ts | 17 | ✅ |
| general-settings.test.tsx | 1 | ✅ |
| ws-client.test.ts | 12 | ✅ |
| openfang-api.test.ts | 34 | ✅ |
| zclaw-api.test.ts | 34 | ✅ |
### 2.2 集成测试覆盖
OpenFang API 集成测试覆盖以下模块:
ZCLAW API 集成测试覆盖以下模块:
| 模块 | 测试数 | 覆盖功能 |
|------|-------|---------|
@@ -73,27 +73,27 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
| 命令 | 功能 | 状态 |
|------|------|------|
| `openfang_status` | 获取 OpenFang 状态 | ✅ |
| `openfang_start` | 启动 OpenFang | ✅ |
| `openfang_stop` | 停止 OpenFang | ✅ |
| `openfang_restart` | 重启 OpenFang | ✅ |
| `openfang_local_auth` | 获取本地认证 | ✅ |
| `openfang_prepare_for_tauri` | 准备 Tauri 环境 | ✅ |
| `openfang_approve_device_pairing` | 设备配对审批 | ✅ |
| `openfang_doctor` | 诊断检查 | ✅ |
| `openfang_process_list` | 进程列表 | ✅ |
| `openfang_process_logs` | 进程日志 | ✅ |
| `openfang_version` | 版本信息 | ✅ |
| `zclaw_status` | 获取 ZCLAW 状态 | ✅ |
| `zclaw_start` | 启动 ZCLAW | ✅ |
| `zclaw_stop` | 停止 ZCLAW | ✅ |
| `zclaw_restart` | 重启 ZCLAW | ✅ |
| `zclaw_local_auth` | 获取本地认证 | ✅ |
| `zclaw_prepare_for_tauri` | 准备 Tauri 环境 | ✅ |
| `zclaw_approve_device_pairing` | 设备配对审批 | ✅ |
| `zclaw_doctor` | 诊断检查 | ✅ |
| `zclaw_process_list` | 进程列表 | ✅ |
| `zclaw_process_logs` | 进程日志 | ✅ |
| `zclaw_version` | 版本信息 | ✅ |
### 3.3 向后兼容别名
所有 `gateway_*` 命令已正确映射到 `openfang_*` 命令。
所有 `gateway_*` 命令已正确映射到 `zclaw_*` 命令。
---
## 4. 前端组件验证
### 4.1 OpenFang 特性组件
### 4.1 ZCLAW 特性组件
| 组件 | 文件 | 状态 | 功能 |
|------|------|------|------|
@@ -105,7 +105,7 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
### 4.2 RightPanel 集成
所有 OpenFang 组件已正确集成到 `RightPanel.tsx`:
所有 ZCLAW 组件已正确集成到 `RightPanel.tsx`:
- ✅ SecurityStatus 已渲染
- ✅ HandsPanel 已渲染
- ✅ TriggersPanel 已渲染
@@ -115,7 +115,7 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
## 5. 状态管理验证
### 5.1 gatewayStore OpenFang 方法
### 5.1 gatewayStore ZCLAW 方法
| 方法 | 功能 | 状态 |
|------|------|------|
@@ -132,7 +132,7 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
### 5.2 连接后自动加载
`connect()` 成功后自动加载 OpenFang 数据:
`connect()` 成功后自动加载 ZCLAW 数据:
-`loadHands()`
-`loadWorkflows()`
-`loadTriggers()`
@@ -181,7 +181,7 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
| 脚本 | 功能 | 状态 |
|------|------|------|
| `prepare-openfang-runtime.mjs` | 下载 OpenFang 二进制 | ✅ |
| `prepare-zclaw-runtime.mjs` | 下载 ZCLAW 二进制 | ✅ |
| `preseed-tauri-tools.mjs` | 预置 Tauri 工具 | ✅ |
| `tauri-build-bundled.mjs` | 打包构建 | ✅ |
@@ -193,7 +193,7 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
| WebSocket 路径 | `/ws` | ✅ |
| REST API 前缀 | `/api` | ✅ |
| 配置格式 | TOML | ✅ |
| 配置目录 | `~/.openfang/` | ✅ |
| 配置目录 | `~/.zclaw/` | ✅ |
---
@@ -203,8 +203,8 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
| 问题 | 文件 | 修复 |
|------|------|------|
| 集成测试握手超时 | `openfang-api.test.ts` | 改为纯 REST API 测试 |
| 构建脚本引用旧运行时 | `tauri-build-bundled.mjs` | 更新为 `prepare-openfang-runtime.mjs` |
| 集成测试握手超时 | `zclaw-api.test.ts` | 改为纯 REST API 测试 |
| 构建脚本引用旧运行时 | `tauri-build-bundled.mjs` | 更新为 `prepare-zclaw-runtime.mjs` |
| Rust 临时变量生命周期 | `lib.rs` | 使用 owned strings |
### 8.2 无已知问题
@@ -231,13 +231,13 @@ Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s
## 10. 结论
**ZClaw OpenFang 迁移项目 Phase 1-7 功能测试通过。**
**ZClaw ZCLAW 迁移项目 Phase 1-7 功能测试通过。**
- ✅ 前端构建成功
- ✅ Tauri 后端编译成功
- ✅ 75 个单元测试全部通过
- ✅ 所有 OpenFang 特性组件已集成
- ✅ 所有 ZCLAW 特性组件已集成
- ✅ 所有 Tauri 命令已实现
- ✅ 中文模型插件支持 7 个提供商
系统功能完整,可用于下一阶段的真实 OpenFang 集成测试。
系统功能完整,可用于下一阶段的真实 ZCLAW 集成测试。

View File

@@ -9,18 +9,18 @@
"preview": "vite preview",
"prepare:openclaw-runtime": "node scripts/prepare-openclaw-runtime.mjs",
"prepare:openclaw-runtime:dry-run": "node scripts/prepare-openclaw-runtime.mjs --dry-run",
"prepare:openfang-runtime": "node scripts/prepare-openfang-runtime.mjs",
"prepare:openfang-runtime:dry-run": "node scripts/prepare-openfang-runtime.mjs --dry-run",
"prepare:zclaw-runtime": "node scripts/prepare-zclaw-runtime.mjs",
"prepare:zclaw-runtime:dry-run": "node scripts/prepare-zclaw-runtime.mjs --dry-run",
"prepare:tauri-tools": "node scripts/preseed-tauri-tools.mjs",
"prepare:tauri-tools:dry-run": "node scripts/preseed-tauri-tools.mjs --dry-run",
"tauri": "tauri",
"tauri:dev": "tauri dev",
"tauri:dev:web": "tauri dev --features dev-server",
"tauri:build": "tauri build",
"tauri:build:bundled": "pnpm prepare:openfang-runtime && node scripts/tauri-build-bundled.mjs",
"tauri:build:bundled:debug": "pnpm prepare:openfang-runtime && node scripts/tauri-build-bundled.mjs --debug",
"tauri:build:nsis:debug": "pnpm prepare:openfang-runtime && node scripts/tauri-build-bundled.mjs --debug --bundles nsis",
"tauri:build:msi:debug": "pnpm prepare:openfang-runtime && node scripts/tauri-build-bundled.mjs --debug --bundles msi",
"tauri:build:bundled": "pnpm prepare:zclaw-runtime && node scripts/tauri-build-bundled.mjs",
"tauri:build:bundled:debug": "pnpm prepare:zclaw-runtime && node scripts/tauri-build-bundled.mjs --debug",
"tauri:build:nsis:debug": "pnpm prepare:zclaw-runtime && node scripts/tauri-build-bundled.mjs --debug --bundles nsis",
"tauri:build:msi:debug": "pnpm prepare:zclaw-runtime && node scripts/tauri-build-bundled.mjs --debug --bundles msi",
"test": "vitest run",
"test:watch": "vitest",
"test:coverage": "vitest run --coverage",

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env node
/**
* OpenFang Binary Downloader
* Automatically downloads the correct OpenFang binary for the current platform
* ZCLAW Binary Downloader
* Automatically downloads the correct ZCLAW binary for the current platform
* Run during Tauri build process
*/
@@ -12,11 +12,11 @@ import { fileURLToPath } from 'url';
import { platform, arch } from 'os';
const __dirname = dirname(fileURLToPath(import.meta.url));
const RESOURCES_DIR = join(__dirname, '../src-tauri/resources/openfang-runtime');
const RESOURCES_DIR = join(__dirname, '../src-tauri/resources/zclaw-runtime');
// OpenFang release info
const OPENFANG_REPO = 'RightNow-AI/openfang';
const OPENFANG_VERSION = process.env.OPENFANG_VERSION || 'latest';
// ZCLAW release info
const ZCLAW_REPO = 'RightNow-AI/zclaw';
const ZCLAW_VERSION = process.env.ZCLAW_VERSION || 'latest';
interface PlatformConfig {
binaryName: string;
@@ -30,28 +30,28 @@ function getPlatformConfig(): PlatformConfig {
switch (currentPlatform) {
case 'win32':
return {
binaryName: 'openfang.exe',
binaryName: 'zclaw.exe',
downloadName: currentArch === 'x64'
? 'openfang-x86_64-pc-windows-msvc.exe'
: 'openfang-aarch64-pc-windows-msvc.exe',
? 'zclaw-x86_64-pc-windows-msvc.exe'
: 'zclaw-aarch64-pc-windows-msvc.exe',
};
case 'darwin':
return {
binaryName: currentArch === 'arm64'
? 'openfang-aarch64-apple-darwin'
: 'openfang-x86_64-apple-darwin',
? 'zclaw-aarch64-apple-darwin'
: 'zclaw-x86_64-apple-darwin',
downloadName: currentArch === 'arm64'
? 'openfang-aarch64-apple-darwin'
: 'openfang-x86_64-apple-darwin',
? 'zclaw-aarch64-apple-darwin'
: 'zclaw-x86_64-apple-darwin',
};
case 'linux':
return {
binaryName: currentArch === 'arm64'
? 'openfang-aarch64-unknown-linux-gnu'
: 'openfang-x86_64-unknown-linux-gnu',
? 'zclaw-aarch64-unknown-linux-gnu'
: 'zclaw-x86_64-unknown-linux-gnu',
downloadName: currentArch === 'arm64'
? 'openfang-aarch64-unknown-linux-gnu'
: 'openfang-x86_64-unknown-linux-gnu',
? 'zclaw-aarch64-unknown-linux-gnu'
: 'zclaw-x86_64-unknown-linux-gnu',
};
default:
throw new Error(`Unsupported platform: ${currentPlatform}`);
@@ -60,19 +60,19 @@ function getPlatformConfig(): PlatformConfig {
function downloadBinary(): void {
const config = getPlatformConfig();
const baseUrl = `https://github.com/${OPENFANG_REPO}/releases`;
const downloadUrl = OPENFANG_VERSION === 'latest'
const baseUrl = `https://github.com/${ZCLAW_REPO}/releases`;
const downloadUrl = ZCLAW_VERSION === 'latest'
? `${baseUrl}/latest/download/${config.downloadName}`
: `${baseUrl}/download/${OPENFANG_VERSION}/${config.downloadName}`;
: `${baseUrl}/download/${ZCLAW_VERSION}/${config.downloadName}`;
const outputPath = join(RESOURCES_DIR, config.binaryName);
console.log('='.repeat(60));
console.log('OpenFang Binary Downloader');
console.log('ZCLAW Binary Downloader');
console.log('='.repeat(60));
console.log(`Platform: ${platform()} (${arch()})`);
console.log(`Binary: ${config.binaryName}`);
console.log(`Version: ${OPENFANG_VERSION}`);
console.log(`Version: ${ZCLAW_VERSION}`);
console.log(`URL: ${downloadUrl}`);
console.log('='.repeat(60));
@@ -83,7 +83,7 @@ function downloadBinary(): void {
// Check if already downloaded
if (existsSync(outputPath)) {
console.log('Binary already exists, skipping download.');
console.log('Binary already exists, skipping download.');
return;
}
@@ -113,11 +113,11 @@ function downloadBinary(): void {
execSync(`chmod +x "${outputPath}"`);
}
console.log('Download complete!');
console.log('Download complete!');
} catch (error) {
console.error('Download failed:', error);
console.error('Download failed:', error);
console.log('\nPlease download manually from:');
console.log(` ${baseUrl}/${OPENFANG_VERSION === 'latest' ? 'latest' : 'tag/' + OPENFANG_VERSION}`);
console.log(` ${baseUrl}/${ZCLAW_VERSION === 'latest' ? 'latest' : 'tag/' + ZCLAW_VERSION}`);
process.exit(1);
}
}
@@ -127,12 +127,12 @@ function updateManifest(): void {
const manifest = {
source: {
binPath: platform() === 'win32' ? 'openfang.exe' : `openfang-${arch()}-${platform()}`,
binPath: platform() === 'win32' ? 'zclaw.exe' : `zclaw-${arch()}-${platform()}`,
},
stagedAt: new Date().toISOString(),
version: OPENFANG_VERSION === 'latest' ? new Date().toISOString().split('T')[0].replace(/-/g, '.') : OPENFANG_VERSION,
runtimeType: 'openfang',
description: 'OpenFang Agent OS - Single binary runtime (~32MB)',
version: ZCLAW_VERSION === 'latest' ? new Date().toISOString().split('T')[0].replace(/-/g, '.') : ZCLAW_VERSION,
runtimeType: 'zclaw',
description: 'ZCLAW Agent OS - Single binary runtime (~32MB)',
endpoints: {
websocket: 'ws://127.0.0.1:4200/ws',
rest: 'http://127.0.0.1:4200/api',
@@ -140,11 +140,11 @@ function updateManifest(): void {
};
writeFileSync(manifestPath, JSON.stringify(manifest, null, 2));
console.log('Manifest updated');
console.log('Manifest updated');
}
// Run
downloadBinary();
updateManifest();
console.log('\n✓ OpenFang runtime ready for build!');
console.log('\nZCLAW runtime ready for build!');

View File

@@ -1,167 +0,0 @@
import { execFileSync } from 'node:child_process';
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const desktopRoot = path.resolve(__dirname, '..');
const outputDir = path.join(desktopRoot, 'src-tauri', 'resources', 'openclaw-runtime');
const dryRun = process.argv.includes('--dry-run');
function log(message) {
console.log(`[prepare-openclaw-runtime] ${message}`);
}
function readFirstExistingPath(commandNames) {
for (const commandName of commandNames) {
try {
const stdout = execFileSync('where.exe', [commandName], {
encoding: 'utf8',
stdio: ['ignore', 'pipe', 'ignore'],
});
const firstMatch = stdout
.split(/\r?\n/)
.map((line) => line.trim())
.find(Boolean);
if (firstMatch) {
return firstMatch;
}
} catch {
continue;
}
}
return null;
}
function ensureFileExists(filePath, label) {
if (!filePath || !fs.existsSync(filePath) || !fs.statSync(filePath).isFile()) {
throw new Error(`${label} 不存在:${filePath || '(empty)'}`);
}
}
function ensureDirExists(dirPath, label) {
if (!dirPath || !fs.existsSync(dirPath) || !fs.statSync(dirPath).isDirectory()) {
throw new Error(`${label} 不存在:${dirPath || '(empty)'}`);
}
}
function resolveOpenClawBin() {
const override = process.env.OPENCLAW_BIN;
if (override) {
return path.resolve(override);
}
const resolved = readFirstExistingPath(['openclaw.cmd', 'openclaw']);
if (!resolved) {
throw new Error('未找到 openclaw 入口。请先安装 OpenClaw或设置 OPENCLAW_BIN。');
}
return resolved;
}
function resolvePackageDir(openclawBinPath) {
const override = process.env.OPENCLAW_PACKAGE_DIR;
if (override) {
return path.resolve(override);
}
return path.join(path.dirname(openclawBinPath), 'node_modules', 'openclaw');
}
function resolveNodeExe(openclawBinPath) {
const override = process.env.OPENCLAW_NODE_EXE;
if (override) {
return path.resolve(override);
}
const bundledNode = path.join(path.dirname(openclawBinPath), 'node.exe');
if (fs.existsSync(bundledNode)) {
return bundledNode;
}
const resolved = readFirstExistingPath(['node.exe', 'node']);
if (!resolved) {
throw new Error('未找到 node.exe。请先安装 Node.js或设置 OPENCLAW_NODE_EXE。');
}
return resolved;
}
function cleanOutputDirectory(dirPath) {
if (!fs.existsSync(dirPath)) {
return;
}
for (const entry of fs.readdirSync(dirPath)) {
fs.rmSync(path.join(dirPath, entry), { recursive: true, force: true });
}
}
function writeCmdLauncher(dirPath) {
const launcher = [
'@ECHO off',
'SETLOCAL',
'SET "_prog=%~dp0\\node.exe"',
'"%_prog%" "%~dp0\\node_modules\\openclaw\\openclaw.mjs" %*',
'',
].join('\r\n');
fs.writeFileSync(path.join(dirPath, 'openclaw.cmd'), launcher, 'utf8');
}
function stageRuntime() {
const openclawBinPath = resolveOpenClawBin();
const packageDir = resolvePackageDir(openclawBinPath);
const nodeExePath = resolveNodeExe(openclawBinPath);
const packageJsonPath = path.join(packageDir, 'package.json');
const entryPath = path.join(packageDir, 'openclaw.mjs');
ensureFileExists(openclawBinPath, 'OpenClaw 入口');
ensureDirExists(packageDir, 'OpenClaw 包目录');
ensureFileExists(packageJsonPath, 'OpenClaw package.json');
ensureFileExists(entryPath, 'OpenClaw 入口脚本');
ensureFileExists(nodeExePath, 'Node.js 可执行文件');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const destinationPackageDir = path.join(outputDir, 'node_modules', 'openclaw');
const manifest = {
source: {
openclawBinPath,
packageDir,
nodeExePath,
},
stagedAt: new Date().toISOString(),
version: packageJson.version ?? null,
};
log(`OpenClaw version: ${packageJson.version || 'unknown'}`);
log(`Source bin: ${openclawBinPath}`);
log(`Source package: ${packageDir}`);
log(`Source node.exe: ${nodeExePath}`);
log(`Target dir: ${outputDir}`);
if (dryRun) {
log('Dry run 完成,未写入任何文件。');
return;
}
fs.mkdirSync(outputDir, { recursive: true });
cleanOutputDirectory(outputDir);
fs.mkdirSync(path.join(outputDir, 'node_modules'), { recursive: true });
fs.copyFileSync(nodeExePath, path.join(outputDir, 'node.exe'));
fs.cpSync(packageDir, destinationPackageDir, { recursive: true, force: true });
writeCmdLauncher(outputDir);
fs.writeFileSync(path.join(outputDir, 'runtime-manifest.json'), JSON.stringify(manifest, null, 2), 'utf8');
log('OpenClaw runtime 已写入 src-tauri/resources/openclaw-runtime');
}
try {
stageRuntime();
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
console.error(`[prepare-openclaw-runtime] ${message}`);
process.exit(1);
}

View File

@@ -1,14 +1,14 @@
#!/usr/bin/env node
/**
* OpenFang Runtime Preparation Script
* ZCLAW Runtime Preparation Script
*
* Prepares the OpenFang binary for bundling with Tauri.
* Prepares the ZCLAW binary for bundling with Tauri.
* Supports cross-platform: Windows, Linux, macOS
*
* Usage:
* node scripts/prepare-openfang-runtime.mjs
* node scripts/prepare-openfang-runtime.mjs --dry-run
* OPENFANG_VERSION=v1.2.3 node scripts/prepare-openfang-runtime.mjs
* node scripts/prepare-zclaw-runtime.mjs
* node scripts/prepare-zclaw-runtime.mjs --dry-run
* ZCLAW_VERSION=v1.2.3 node scripts/prepare-zclaw-runtime.mjs
*/
import { execSync, execFileSync } from 'node:child_process';
@@ -20,64 +20,64 @@ import { arch as osArch, platform as osPlatform, homedir } from 'node:os';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const desktopRoot = path.resolve(__dirname, '..');
const outputDir = path.join(desktopRoot, 'src-tauri', 'resources', 'openfang-runtime');
const outputDir = path.join(desktopRoot, 'src-tauri', 'resources', 'zclaw-runtime');
const dryRun = process.argv.includes('--dry-run');
const openfangVersion = process.env.OPENFANG_VERSION || 'latest';
const zclawVersion = process.env.ZCLAW_VERSION || 'latest';
const PLATFORM = osPlatform();
const ARCH = osArch();
function log(message) {
console.log(`[prepare-openfang-runtime] ${message}`);
console.log(`[prepare-zclaw-runtime] ${message}`);
}
function warn(message) {
console.warn(`[prepare-openfang-runtime] WARN: ${message}`);
console.warn(`[prepare-zclaw-runtime] WARN: ${message}`);
}
function error(message) {
console.error(`[prepare-openfang-runtime] ERROR: ${message}`);
console.error(`[prepare-zclaw-runtime] ERROR: ${message}`);
}
/**
* Get platform-specific binary configuration
* OpenFang releases: .zip for Windows, .tar.gz for Unix
* ZCLAW releases: .zip for Windows, .tar.gz for Unix
*/
function getPlatformConfig() {
const configs = {
win32: {
x64: {
binaryName: 'openfang.exe',
downloadName: 'openfang-x86_64-pc-windows-msvc.zip',
binaryName: 'zclaw.exe',
downloadName: 'zclaw-x86_64-pc-windows-msvc.zip',
archiveFormat: 'zip',
},
arm64: {
binaryName: 'openfang.exe',
downloadName: 'openfang-aarch64-pc-windows-msvc.zip',
binaryName: 'zclaw.exe',
downloadName: 'zclaw-aarch64-pc-windows-msvc.zip',
archiveFormat: 'zip',
},
},
darwin: {
x64: {
binaryName: 'openfang-x86_64-apple-darwin',
downloadName: 'openfang-x86_64-apple-darwin.tar.gz',
binaryName: 'zclaw-x86_64-apple-darwin',
downloadName: 'zclaw-x86_64-apple-darwin.tar.gz',
archiveFormat: 'tar.gz',
},
arm64: {
binaryName: 'openfang-aarch64-apple-darwin',
downloadName: 'openfang-aarch64-apple-darwin.tar.gz',
binaryName: 'zclaw-aarch64-apple-darwin',
downloadName: 'zclaw-aarch64-apple-darwin.tar.gz',
archiveFormat: 'tar.gz',
},
},
linux: {
x64: {
binaryName: 'openfang-x86_64-unknown-linux-gnu',
downloadName: 'openfang-x86_64-unknown-linux-gnu.tar.gz',
binaryName: 'zclaw-x86_64-unknown-linux-gnu',
downloadName: 'zclaw-x86_64-unknown-linux-gnu.tar.gz',
archiveFormat: 'tar.gz',
},
arm64: {
binaryName: 'openfang-aarch64-unknown-linux-gnu',
downloadName: 'openfang-aarch64-unknown-linux-gnu.tar.gz',
binaryName: 'zclaw-aarch64-unknown-linux-gnu',
downloadName: 'zclaw-aarch64-unknown-linux-gnu.tar.gz',
archiveFormat: 'tar.gz',
},
},
@@ -97,26 +97,26 @@ function getPlatformConfig() {
}
/**
* Find OpenFang binary in system PATH
* Find ZCLAW binary in system PATH
*/
function findSystemBinary() {
const override = process.env.OPENFANG_BIN;
const override = process.env.ZCLAW_BIN;
if (override) {
if (fs.existsSync(override)) {
return override;
}
throw new Error(`OPENFANG_BIN specified but file not found: ${override}`);
throw new Error(`ZCLAW_BIN specified but file not found: ${override}`);
}
try {
let result;
if (PLATFORM === 'win32') {
result = execFileSync('where.exe', ['openfang'], {
result = execFileSync('where.exe', ['zclaw'], {
encoding: 'utf8',
stdio: ['ignore', 'pipe', 'ignore'],
});
} else {
result = execFileSync('which', ['openfang'], {
result = execFileSync('which', ['zclaw'], {
encoding: 'utf8',
stdio: ['ignore', 'pipe', 'ignore'],
});
@@ -134,7 +134,7 @@ function findSystemBinary() {
}
/**
* Check if OpenFang is installed via install script
* Check if ZCLAW is installed via install script
*/
function findInstalledBinary() {
const config = getPlatformConfig();
@@ -142,12 +142,12 @@ function findInstalledBinary() {
const possiblePaths = [
// Default install location
path.join(home, '.openfang', 'bin', config.binaryName),
path.join(home, '.zclaw', 'bin', config.binaryName),
path.join(home, '.local', 'bin', config.binaryName),
// macOS
path.join(home, '.openfang', 'bin', 'openfang'),
'/usr/local/bin/openfang',
'/usr/bin/openfang',
path.join(home, '.zclaw', 'bin', 'zclaw'),
'/usr/local/bin/zclaw',
'/usr/bin/zclaw',
];
for (const p of possiblePaths) {
@@ -160,21 +160,21 @@ function findInstalledBinary() {
}
/**
* Download OpenFang binary from GitHub Releases
* Download ZCLAW binary from GitHub Releases
* Handles .zip for Windows, .tar.gz for Unix
*/
function downloadBinary(config) {
const baseUrl = 'https://github.com/RightNow-AI/openfang/releases';
const downloadUrl = openfangVersion === 'latest'
const baseUrl = 'https://github.com/RightNow-AI/zclaw/releases';
const downloadUrl = zclawVersion === 'latest'
? `${baseUrl}/latest/download/${config.downloadName}`
: `${baseUrl}/download/${openfangVersion}/${config.downloadName}`;
: `${baseUrl}/download/${zclawVersion}/${config.downloadName}`;
const archivePath = path.join(outputDir, config.downloadName);
const binaryOutputPath = path.join(outputDir, config.binaryName);
log(`Downloading OpenFang binary...`);
log(`Downloading ZCLAW binary...`);
log(` Platform: ${PLATFORM} (${ARCH})`);
log(` Version: ${openfangVersion}`);
log(` Version: ${zclawVersion}`);
log(` Archive: ${config.downloadName}`);
log(` URL: ${downloadUrl}`);
@@ -211,7 +211,7 @@ function downloadBinary(config) {
// Find and rename the extracted binary
// The archive contains a single binary file
const extractedFiles = fs.readdirSync(outputDir).filter(f =>
f.startsWith('openfang') && !f.endsWith('.zip') && !f.endsWith('.tar.gz') && !f.endsWith('.sha256')
f.startsWith('zclaw') && !f.endsWith('.zip') && !f.endsWith('.tar.gz') && !f.endsWith('.sha256')
);
if (extractedFiles.length === 0) {
@@ -285,16 +285,16 @@ function writeManifest(config) {
const manifest = {
source: {
binPath: config.binaryName,
binPathLinux: 'openfang-x86_64-unknown-linux-gnu',
binPathMac: 'openfang-x86_64-apple-darwin',
binPathMacArm: 'openfang-aarch64-apple-darwin',
binPathLinux: 'zclaw-x86_64-unknown-linux-gnu',
binPathMac: 'zclaw-x86_64-apple-darwin',
binPathMacArm: 'zclaw-aarch64-apple-darwin',
},
stagedAt: new Date().toISOString(),
version: openfangVersion === 'latest'
version: zclawVersion === 'latest'
? new Date().toISOString().split('T')[0].replace(/-/g, '.')
: openfangVersion,
runtimeType: 'openfang',
description: 'OpenFang Agent OS - Single binary runtime (~32MB)',
: zclawVersion,
runtimeType: 'zclaw',
description: 'ZCLAW Agent OS - Single binary runtime (~32MB)',
endpoints: {
websocket: 'ws://127.0.0.1:4200/ws',
rest: 'http://127.0.0.1:4200/api',
@@ -322,21 +322,21 @@ function writeLauncherScripts(config) {
// Windows launcher
const cmdLauncher = [
'@echo off',
'REM OpenFang Agent OS - Bundled Binary Launcher',
'REM ZCLAW Agent OS - Bundled Binary Launcher',
`"%~dp0${config.binaryName}" %*`,
'',
].join('\r\n');
fs.writeFileSync(path.join(outputDir, 'openfang.cmd'), cmdLauncher, 'utf8');
fs.writeFileSync(path.join(outputDir, 'zclaw.cmd'), cmdLauncher, 'utf8');
// Unix launcher
const shLauncher = [
'#!/bin/bash',
'# OpenFang Agent OS - Bundled Binary Launcher',
'# ZCLAW Agent OS - Bundled Binary Launcher',
`SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"`,
`exec "$SCRIPT_DIR/${config.binaryName}" "$@"`,
'',
].join('\n');
const shPath = path.join(outputDir, 'openfang.sh');
const shPath = path.join(outputDir, 'zclaw.sh');
fs.writeFileSync(shPath, shLauncher, 'utf8');
fs.chmodSync(shPath, 0o755);
@@ -370,7 +370,7 @@ function cleanOldRuntime() {
*/
function main() {
log('='.repeat(60));
log('OpenFang Runtime Preparation');
log('ZCLAW Runtime Preparation');
log('='.repeat(60));
const config = getPlatformConfig();
@@ -385,23 +385,23 @@ function main() {
let binaryPath = findSystemBinary();
if (binaryPath) {
log(`Found OpenFang in PATH: ${binaryPath}`);
log(`Found ZCLAW in PATH: ${binaryPath}`);
copyBinary(binaryPath, config);
} else {
binaryPath = findInstalledBinary();
if (binaryPath) {
log(`Found installed OpenFang: ${binaryPath}`);
log(`Found installed ZCLAW: ${binaryPath}`);
copyBinary(binaryPath, config);
} else {
log('OpenFang not found locally, downloading...');
log('ZCLAW not found locally, downloading...');
const downloaded = downloadBinary(config);
if (!downloaded && !dryRun) {
error('Failed to obtain OpenFang binary!');
error('Failed to obtain ZCLAW binary!');
error('');
error('Please either:');
error(' 1. Install OpenFang: curl -fsSL https://openfang.sh/install | sh');
error(' 2. Set OPENFANG_BIN environment variable to binary path');
error(' 3. Manually download from: https://github.com/RightNow-AI/openfang/releases');
error(' 1. Install ZCLAW: curl -fsSL https://zclaw.sh/install | sh');
error(' 2. Set ZCLAW_BIN environment variable to binary path');
error(' 3. Manually download from: https://github.com/RightNow-AI/zclaw/releases');
process.exit(1);
}
}
@@ -415,7 +415,7 @@ function main() {
if (dryRun) {
log('DRY RUN complete. No files were written.');
} else {
log('OpenFang runtime ready for build!');
log('ZCLAW runtime ready for build!');
}
log('='.repeat(60));
}

View File

@@ -35,6 +35,6 @@ if (!process.env.TAURI_BUNDLER_TOOLS_GITHUB_MIRROR_TEMPLATE && process.env.ZCLAW
env.TAURI_BUNDLER_TOOLS_GITHUB_MIRROR_TEMPLATE = process.env.ZCLAW_TAURI_TOOLS_GITHUB_MIRROR_TEMPLATE;
}
run('node', ['scripts/prepare-openfang-runtime.mjs']);
run('node', ['scripts/prepare-zclaw-runtime.mjs']);
run('node', ['scripts/preseed-tauri-tools.mjs']);
run('pnpm', ['exec', 'tauri', 'build', ...forwardArgs], env);

View File

@@ -1,15 +1,15 @@
#!/usr/bin/env node
/**
* OpenFang Backend API Connection Test Script
* ZCLAW Backend API Connection Test Script
*
* Tests all API endpoints used by the ZCLAW desktop client against
* the OpenFang Kernel backend.
* the ZCLAW Kernel backend.
*
* Usage:
* node desktop/scripts/test-api-connection.mjs [options]
*
* Options:
* --url=URL Base URL for OpenFang API (default: http://127.0.0.1:50051)
* --url=URL Base URL for ZCLAW API (default: http://127.0.0.1:50051)
* --verbose Show detailed output
* --json Output results as JSON
* --timeout=MS Request timeout in milliseconds (default: 5000)
@@ -41,12 +41,12 @@ for (const arg of args) {
config.timeout = parseInt(arg.slice(10), 10);
} else if (arg === '--help' || arg === '-h') {
console.log(`
OpenFang API Connection Tester
ZCLAW API Connection Tester
Usage: node test-api-connection.mjs [options]
Options:
--url=URL Base URL for OpenFang API (default: ${DEFAULT_BASE_URL})
--url=URL Base URL for ZCLAW API (default: ${DEFAULT_BASE_URL})
--verbose Show detailed output including response bodies
--json Output results as JSON for programmatic processing
--timeout=MS Request timeout in milliseconds (default: ${DEFAULT_TIMEOUT})
@@ -324,7 +324,7 @@ function printSummary() {
* Run all API tests
*/
async function runAllTests() {
console.log(`\n=== OpenFang API Connection Test ===`);
console.log(`\n=== ZCLAW API Connection Test ===`);
console.log(`Base URL: ${config.baseUrl}`);
console.log(`Timeout: ${config.timeout}ms`);
console.log(`\n`);

View File

@@ -13,7 +13,7 @@ websocket_port = 4200
websocket_path = "/ws"
[agent.defaults]
workspace = "~/.openfang/workspace"
workspace = "~/.zclaw/workspace"
default_model = "gpt-4"
[llm]

View File

@@ -70,6 +70,7 @@ rand = { workspace = true }
# SQLite (keep for backward compatibility during migration)
sqlx = { workspace = true }
libsqlite3-sys = { workspace = true }
# Development server (optional, only for debug builds)
axum = { version = "0.7", optional = true }

View File

@@ -0,0 +1,32 @@
//! Embedding Adapter - Bridges Tauri LLM EmbeddingClient to Growth System trait
//!
//! Implements zclaw_growth::retrieval::semantic::EmbeddingClient
//! by wrapping the concrete llm::EmbeddingClient.
use std::sync::Arc;
use zclaw_growth::retrieval::semantic::EmbeddingClient;
/// Adapter wrapping Tauri's llm::EmbeddingClient to implement the growth trait
pub struct TauriEmbeddingAdapter {
    /// The concrete LLM embedding client; wrapped in `Arc` so ownership can be
    /// shared across async tasks without copying the client itself.
    inner: Arc<crate::llm::EmbeddingClient>,
}
impl TauriEmbeddingAdapter {
    /// Wrap a concrete `llm::EmbeddingClient` so it can be used wherever the
    /// growth system's `EmbeddingClient` trait object is expected.
    pub fn new(client: crate::llm::EmbeddingClient) -> Self {
        let inner = Arc::new(client);
        Self { inner }
    }
}
#[async_trait::async_trait]
impl EmbeddingClient for TauriEmbeddingAdapter {
    /// Delegate to the wrapped client and surface only the embedding vector.
    async fn embed(&self, text: &str) -> Result<Vec<f32>, String> {
        Ok(self.inner.embed(text).await?.embedding)
    }

    /// Availability mirrors whether the underlying client is configured.
    fn is_available(&self) -> bool {
        self.inner.is_configured()
    }
}

View File

@@ -9,8 +9,6 @@
//!
//! NOTE: Some methods are reserved for future proactive features.
#![allow(dead_code)] // Methods reserved for future proactive features
use chrono::{Local, Timelike};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -94,6 +92,7 @@ pub enum HeartbeatStatus {
}
/// Type alias for heartbeat check function
#[allow(dead_code)] // Reserved for future proactive check registration
pub type HeartbeatCheckFn = Box<dyn Fn(String) -> std::pin::Pin<Box<dyn std::future::Future<Output = Option<HeartbeatAlert>> + Send>> + Send + Sync>;
// === Default Config ===
@@ -187,6 +186,7 @@ impl HeartbeatEngine {
}
/// Check if the engine is running
#[allow(dead_code)] // Reserved for UI status display
pub async fn is_running(&self) -> bool {
*self.running.lock().await
}
@@ -197,6 +197,7 @@ impl HeartbeatEngine {
}
/// Subscribe to alerts
#[allow(dead_code)] // Reserved for future UI notification integration
pub fn subscribe(&self) -> broadcast::Receiver<HeartbeatAlert> {
self.alert_sender.subscribe()
}
@@ -355,7 +356,9 @@ static LAST_INTERACTION: OnceLock<RwLock<StdHashMap<String, String>>> = OnceLock
pub struct MemoryStatsCache {
pub task_count: usize,
pub total_entries: usize,
#[allow(dead_code)] // Reserved for UI display
pub storage_size_bytes: usize,
#[allow(dead_code)] // Reserved for UI display
pub last_updated: Option<String>,
}

View File

@@ -1,397 +0,0 @@
//! Adaptive Intelligence Mesh - Coordinates Memory, Pipeline, and Heartbeat
//!
//! This module provides proactive workflow recommendations based on user behavior patterns.
//! It integrates with:
//! - PatternDetector for behavior pattern detection
//! - WorkflowRecommender for generating recommendations
//! - HeartbeatEngine for periodic checks
//! - PersistentMemoryStore for historical data
//! - PipelineExecutor for workflow execution
//!
//! NOTE: Some methods are reserved for future integration with the UI.
#![allow(dead_code)] // Methods reserved for future UI integration
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{broadcast, Mutex};
use super::pattern_detector::{BehaviorPattern, PatternContext, PatternDetector};
use super::recommender::WorkflowRecommender;
// === Types ===
/// Workflow recommendation generated by the mesh
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkflowRecommendation {
    /// Unique recommendation identifier
    /// (format is not enforced here; presumably assigned by the recommender — TODO confirm)
    pub id: String,
    /// Pipeline ID to recommend
    pub pipeline_id: String,
    /// Confidence score (0.0-1.0)
    pub confidence: f32,
    /// Human-readable reason for recommendation
    pub reason: String,
    /// Suggested input values
    pub suggested_inputs: HashMap<String, serde_json::Value>,
    /// Pattern IDs that matched
    pub patterns_matched: Vec<String>,
    /// When this recommendation was generated
    pub timestamp: DateTime<Utc>,
}
/// Mesh coordinator configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MeshConfig {
    /// Enable mesh recommendations
    pub enabled: bool,
    /// Minimum confidence threshold for recommendations
    pub min_confidence: f32,
    /// Maximum recommendations to generate per analysis
    pub max_recommendations: usize,
    /// Hours to look back for pattern analysis
    /// NOTE(review): not consumed by `MeshCoordinator::analyze` in this module — confirm intended use.
    pub analysis_window_hours: u64,
}
impl Default for MeshConfig {
    // Defaults: enabled, 60% confidence floor, at most 5 recommendations,
    // 24-hour analysis window.
    fn default() -> Self {
        Self {
            enabled: true,
            min_confidence: 0.6,
            max_recommendations: 5,
            analysis_window_hours: 24,
        }
    }
}
/// Analysis result from mesh coordinator
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MeshAnalysisResult {
    /// Generated recommendations
    pub recommendations: Vec<WorkflowRecommendation>,
    /// Patterns detected
    pub patterns_detected: usize,
    /// Analysis timestamp
    pub timestamp: DateTime<Utc>,
}
// === Mesh Coordinator ===
/// Main mesh coordinator that integrates pattern detection and recommendations
///
/// All mutable state lives behind `Arc<Mutex<…>>` so one coordinator instance
/// can be driven concurrently from async Tauri commands.
pub struct MeshCoordinator {
    /// Agent ID
    #[allow(dead_code)] // Reserved for multi-agent scenarios
    agent_id: String,
    /// Configuration
    config: Arc<Mutex<MeshConfig>>,
    /// Pattern detector
    pattern_detector: Arc<Mutex<PatternDetector>>,
    /// Workflow recommender
    recommender: Arc<Mutex<WorkflowRecommender>>,
    /// Recommendation sender
    /// NOTE(review): nothing in this module sends on this channel; `analyze`
    /// returns recommendations directly instead of broadcasting them.
    #[allow(dead_code)] // Reserved for real-time recommendation streaming
    recommendation_sender: broadcast::Sender<WorkflowRecommendation>,
    /// Last analysis timestamp
    last_analysis: Arc<Mutex<Option<DateTime<Utc>>>>,
}
impl MeshCoordinator {
    /// Create a new mesh coordinator
    ///
    /// The broadcast channel holds up to 100 buffered recommendations; lagging
    /// subscribers lose the oldest ones (standard `tokio::sync::broadcast` semantics).
    pub fn new(agent_id: String, config: Option<MeshConfig>) -> Self {
        let (sender, _) = broadcast::channel(100);
        let config = config.unwrap_or_default();
        Self {
            agent_id,
            config: Arc::new(Mutex::new(config)),
            pattern_detector: Arc::new(Mutex::new(PatternDetector::new(None))),
            recommender: Arc::new(Mutex::new(WorkflowRecommender::new(None))),
            recommendation_sender: sender,
            last_analysis: Arc::new(Mutex::new(None)),
        }
    }
    /// Analyze current context and generate recommendations
    ///
    /// Works on a snapshot of the config taken up front; a concurrent
    /// `update_config` does not affect an analysis already in flight.
    /// NOTE(review): `config.analysis_window_hours` is never read here, and the
    /// result is returned rather than broadcast on `recommendation_sender` —
    /// confirm both are intended.
    pub async fn analyze(&self) -> Result<MeshAnalysisResult, String> {
        let config = self.config.lock().await.clone();
        if !config.enabled {
            // Disabled mesh: report an empty, successful analysis.
            return Ok(MeshAnalysisResult {
                recommendations: vec![],
                patterns_detected: 0,
                timestamp: Utc::now(),
            });
        }
        // Get patterns from detector (clone to avoid borrow issues).
        // Cloning also releases the detector lock before the recommender lock is
        // taken, so the two locks are never held simultaneously.
        let patterns: Vec<BehaviorPattern> = {
            let detector = self.pattern_detector.lock().await;
            let patterns_ref = detector.get_patterns();
            patterns_ref.into_iter().cloned().collect()
        };
        let patterns_detected = patterns.len();
        // Generate recommendations from patterns
        let recommender = self.recommender.lock().await;
        let pattern_refs: Vec<&BehaviorPattern> = patterns.iter().collect();
        let mut recommendations = recommender.recommend(&pattern_refs);
        // Filter by confidence
        recommendations.retain(|r| r.confidence >= config.min_confidence);
        // Limit count
        recommendations.truncate(config.max_recommendations);
        // Update timestamps
        for rec in &mut recommendations {
            rec.timestamp = Utc::now();
        }
        // Update last analysis time
        *self.last_analysis.lock().await = Some(Utc::now());
        // NOTE(review): this clone appears redundant — `recommendations` is not
        // used after the struct is built.
        Ok(MeshAnalysisResult {
            recommendations: recommendations.clone(),
            patterns_detected,
            timestamp: Utc::now(),
        })
    }
    /// Record user activity for pattern detection
    ///
    /// Dispatches to the detector method matching the activity variant.
    /// `context` is ignored for `SkillUsed` (the detector API takes none there).
    pub async fn record_activity(
        &self,
        activity_type: ActivityType,
        context: PatternContext,
    ) -> Result<(), String> {
        let mut detector = self.pattern_detector.lock().await;
        match activity_type {
            ActivityType::SkillUsed { skill_ids } => {
                detector.record_skill_usage(skill_ids);
            }
            ActivityType::PipelineExecuted {
                task_type,
                pipeline_id,
            } => {
                detector.record_pipeline_execution(&task_type, &pipeline_id, context);
            }
            ActivityType::InputReceived { keywords, intent } => {
                detector.record_input_pattern(keywords, &intent, context);
            }
        }
        Ok(())
    }
    /// Subscribe to recommendations
    ///
    /// NOTE(review): no code in this module currently sends on the channel, so
    /// subscribers will not receive anything until streaming is wired up.
    pub fn subscribe(&self) -> broadcast::Receiver<WorkflowRecommendation> {
        self.recommendation_sender.subscribe()
    }
    /// Get current patterns (cloned snapshot; detector lock is released on return)
    pub async fn get_patterns(&self) -> Vec<BehaviorPattern> {
        let detector = self.pattern_detector.lock().await;
        detector.get_patterns().into_iter().cloned().collect()
    }
    /// Decay old patterns (call periodically)
    pub async fn decay_patterns(&self) {
        let mut detector = self.pattern_detector.lock().await;
        detector.decay_patterns();
    }
    /// Update configuration (replaces the whole config atomically)
    pub async fn update_config(&self, config: MeshConfig) {
        *self.config.lock().await = config;
    }
    /// Get configuration (cloned snapshot)
    pub async fn get_config(&self) -> MeshConfig {
        self.config.lock().await.clone()
    }
    /// Record a user correction (for pattern refinement)
    pub async fn record_correction(&self, correction_type: &str) {
        let mut detector = self.pattern_detector.lock().await;
        // Record as input pattern with negative signal; the keyword is tagged
        // "correction:<type>" so it is distinguishable from organic input.
        detector.record_input_pattern(
            vec![format!("correction:{}", correction_type)],
            "user_preference",
            PatternContext::default(),
        );
    }
    /// Get recommendation count
    pub async fn recommendation_count(&self) -> usize {
        let recommender = self.recommender.lock().await;
        recommender.recommendation_count()
    }
    /// Accept a recommendation (returns the accepted recommendation)
    pub async fn accept_recommendation(&self, recommendation_id: &str) -> Option<WorkflowRecommendation> {
        let mut recommender = self.recommender.lock().await;
        recommender.accept_recommendation(recommendation_id)
    }
    /// Dismiss a recommendation (returns true if found and dismissed)
    pub async fn dismiss_recommendation(&self, recommendation_id: &str) -> bool {
        let mut recommender = self.recommender.lock().await;
        recommender.dismiss_recommendation(recommendation_id)
    }
}
/// Types of user activities that can be recorded
///
/// Internally tagged for serde: the JSON carries a `"type"` discriminator in
/// snake_case with the variant fields inlined, e.g.
/// `{"type": "skill_used", "skill_ids": [...]}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ActivityType {
    /// Skills were used together
    SkillUsed { skill_ids: Vec<String> },
    /// A pipeline was executed
    PipelineExecuted { task_type: String, pipeline_id: String },
    /// User input was received
    InputReceived { keywords: Vec<String>, intent: String },
}
// === Tauri Commands ===
/// Mesh coordinator state for Tauri
pub type MeshCoordinatorState = Arc<Mutex<HashMap<String, MeshCoordinator>>>;
/// Resolve the coordinator registered for `agent_id`.
///
/// Shared by every command below so the lookup logic and the
/// "not initialized" error message are defined in exactly one place.
fn coordinator_for<'a>(
    coordinators: &'a HashMap<String, MeshCoordinator>,
    agent_id: &str,
) -> Result<&'a MeshCoordinator, String> {
    coordinators
        .get(agent_id)
        .ok_or_else(|| format!("Mesh coordinator not initialized for agent: {}", agent_id))
}
/// Initialize mesh coordinator for an agent
///
/// Replaces any coordinator previously registered under the same `agent_id`.
#[tauri::command]
pub async fn mesh_init(
    agent_id: String,
    config: Option<MeshConfig>,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<(), String> {
    let coordinator = MeshCoordinator::new(agent_id.clone(), config);
    let mut coordinators = state.lock().await;
    coordinators.insert(agent_id, coordinator);
    Ok(())
}
/// Analyze and get recommendations
#[tauri::command]
pub async fn mesh_analyze(
    agent_id: String,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<MeshAnalysisResult, String> {
    let coordinators = state.lock().await;
    coordinator_for(&coordinators, &agent_id)?.analyze().await
}
/// Record user activity
#[tauri::command]
pub async fn mesh_record_activity(
    agent_id: String,
    activity_type: ActivityType,
    context: PatternContext,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<(), String> {
    let coordinators = state.lock().await;
    coordinator_for(&coordinators, &agent_id)?
        .record_activity(activity_type, context)
        .await
}
/// Get current patterns
#[tauri::command]
pub async fn mesh_get_patterns(
    agent_id: String,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<Vec<BehaviorPattern>, String> {
    let coordinators = state.lock().await;
    Ok(coordinator_for(&coordinators, &agent_id)?.get_patterns().await)
}
/// Update mesh configuration
#[tauri::command]
pub async fn mesh_update_config(
    agent_id: String,
    config: MeshConfig,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<(), String> {
    let coordinators = state.lock().await;
    coordinator_for(&coordinators, &agent_id)?.update_config(config).await;
    Ok(())
}
/// Decay old patterns
#[tauri::command]
pub async fn mesh_decay_patterns(
    agent_id: String,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<(), String> {
    let coordinators = state.lock().await;
    coordinator_for(&coordinators, &agent_id)?.decay_patterns().await;
    Ok(())
}
/// Accept a recommendation (removes it and returns the accepted recommendation)
#[tauri::command]
pub async fn mesh_accept_recommendation(
    agent_id: String,
    recommendation_id: String,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<Option<WorkflowRecommendation>, String> {
    let coordinators = state.lock().await;
    Ok(coordinator_for(&coordinators, &agent_id)?
        .accept_recommendation(&recommendation_id)
        .await)
}
/// Dismiss a recommendation (removes it without acting on it)
#[tauri::command]
pub async fn mesh_dismiss_recommendation(
    agent_id: String,
    recommendation_id: String,
    state: tauri::State<'_, MeshCoordinatorState>,
) -> Result<bool, String> {
    let coordinators = state.lock().await;
    Ok(coordinator_for(&coordinators, &agent_id)?
        .dismiss_recommendation(&recommendation_id)
        .await)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config must be enabled with the documented 0.6 confidence floor.
    #[test]
    fn test_mesh_config_default() {
        let cfg = MeshConfig::default();
        assert!(cfg.enabled);
        assert_eq!(cfg.min_confidence, 0.6);
    }

    /// A freshly built coordinator inherits the default (enabled) config.
    #[tokio::test]
    async fn test_mesh_coordinator_creation() {
        let coord = MeshCoordinator::new("test_agent".to_string(), None);
        assert!(coord.get_config().await.enabled);
    }

    /// Analysis on an empty coordinator succeeds (no patterns, no error).
    #[tokio::test]
    async fn test_mesh_analysis() {
        let coord = MeshCoordinator::new("test_agent".to_string(), None);
        assert!(coord.analyze().await.is_ok());
    }
}

View File

@@ -1,421 +0,0 @@
//! Pattern Detector - Behavior pattern detection for Adaptive Intelligence Mesh
//!
//! Detects patterns from user activities including:
//! - Skill combinations (frequently used together)
//! - Temporal triggers (time-based patterns)
//! - Task-pipeline mappings (task types mapped to pipelines)
//! - Input patterns (keyword/intent patterns)
//!
//! NOTE: Analysis and export methods are reserved for future dashboard integration.
#![allow(dead_code)] // Analysis and export methods reserved for future dashboard features
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
// === Pattern Types ===
/// Unique identifier for a pattern
///
/// Keys are derived strings such as "task_pipeline:<task>:<pipeline>",
/// "input_pattern:<keywords>:<intent>" or "skill_combo:<a>|<b>".
pub type PatternId = String;
/// Behavior pattern detected from user activities
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehaviorPattern {
    /// Unique pattern identifier
    pub id: PatternId,
    /// Type of pattern detected
    pub pattern_type: PatternType,
    /// How many times this pattern has occurred
    pub frequency: usize,
    /// When this pattern was last detected
    pub last_occurrence: DateTime<Utc>,
    /// When this pattern was first detected
    pub first_occurrence: DateTime<Utc>,
    /// Confidence score (0.0-1.0)
    pub confidence: f32,
    /// Context when pattern was detected
    pub context: PatternContext,
}
/// Types of detectable patterns
///
/// Internally tagged for serde (`"type"` discriminator, snake_case variant names).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum PatternType {
    /// Skills frequently used together
    SkillCombination {
        skill_ids: Vec<String>,
    },
    /// Time-based trigger pattern
    TemporalTrigger {
        hand_id: String,
        time_pattern: String, // Cron-like pattern or time range
    },
    /// Task type mapped to a pipeline
    TaskPipelineMapping {
        task_type: String,
        pipeline_id: String,
    },
    /// Input keyword/intent pattern
    InputPattern {
        keywords: Vec<String>,
        intent: String,
    },
}
/// Context information when pattern was detected
///
/// All fields are optional; `Default` yields an entirely-unknown context.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PatternContext {
    /// Skills involved in the session
    pub skill_ids: Option<Vec<String>>,
    /// Topics discussed recently
    pub recent_topics: Option<Vec<String>>,
    /// Detected intent
    pub intent: Option<String>,
    /// Time of day when detected (hour 0-23)
    pub time_of_day: Option<u8>,
    /// Day of week (0=Monday, 6=Sunday)
    pub day_of_week: Option<u8>,
}
/// Pattern detection configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PatternDetectorConfig {
    /// Minimum occurrences before pattern is recognized
    pub min_frequency: usize,
    /// Minimum confidence threshold
    pub min_confidence: f32,
    /// Days after which pattern confidence decays
    pub decay_days: u32,
    /// Maximum patterns to keep
    pub max_patterns: usize,
}
impl Default for PatternDetectorConfig {
    // Defaults: recognize after 3 occurrences, 0.5 confidence floor,
    // 30-day decay horizon, keep at most 100 patterns.
    fn default() -> Self {
        Self {
            min_frequency: 3,
            min_confidence: 0.5,
            decay_days: 30,
            max_patterns: 100,
        }
    }
}
// === Pattern Detector ===
/// Pattern detector that identifies behavior patterns from activities
pub struct PatternDetector {
    /// Detected patterns, keyed by derived pattern id
    patterns: HashMap<PatternId, BehaviorPattern>,
    /// Configuration
    config: PatternDetectorConfig,
    /// Skill combination history for pattern detection
    /// (each entry: the skills used together, and when)
    skill_combination_history: Vec<(Vec<String>, DateTime<Utc>)>,
}
impl PatternDetector {
    /// Create a new pattern detector
    ///
    /// `None` uses `PatternDetectorConfig::default()`.
    pub fn new(config: Option<PatternDetectorConfig>) -> Self {
        Self {
            patterns: HashMap::new(),
            config: config.unwrap_or_default(),
            skill_combination_history: Vec::new(),
        }
    }
    /// Record skill usage for combination detection
    ///
    /// Appends to the history, trims it, then re-runs combination detection.
    pub fn record_skill_usage(&mut self, skill_ids: Vec<String>) {
        let now = Utc::now();
        self.skill_combination_history.push((skill_ids, now));
        // Keep only recent history (last 1000 entries)
        // Dropping the oldest 500 at once amortizes the O(n) drain.
        if self.skill_combination_history.len() > 1000 {
            self.skill_combination_history.drain(0..500);
        }
        // Detect patterns
        self.detect_skill_combinations();
    }
    /// Record a pipeline execution for task mapping detection
    pub fn record_pipeline_execution(
        &mut self,
        task_type: &str,
        pipeline_id: &str,
        context: PatternContext,
    ) {
        // Key is derived from both halves so each (task, pipeline) pair is tracked separately.
        let pattern_key = format!("task_pipeline:{}:{}", task_type, pipeline_id);
        self.update_or_create_pattern(
            &pattern_key,
            PatternType::TaskPipelineMapping {
                task_type: task_type.to_string(),
                pipeline_id: pipeline_id.to_string(),
            },
            context,
        );
    }
    /// Record an input pattern
    ///
    /// NOTE(review): the key joins keywords in caller order; the same keyword
    /// set in a different order produces a distinct pattern — confirm intended.
    pub fn record_input_pattern(
        &mut self,
        keywords: Vec<String>,
        intent: &str,
        context: PatternContext,
    ) {
        let pattern_key = format!("input_pattern:{}:{}", keywords.join(","), intent);
        self.update_or_create_pattern(
            &pattern_key,
            PatternType::InputPattern {
                keywords,
                intent: intent.to_string(),
            },
            context,
        );
    }
    /// Update existing pattern or create new one
    ///
    /// Existing patterns bump frequency and refresh last_occurrence/context;
    /// new patterns start at confidence 0.1.
    fn update_or_create_pattern(
        &mut self,
        key: &str,
        pattern_type: PatternType,
        context: PatternContext,
    ) {
        let now = Utc::now();
        let decay_days = self.config.decay_days;
        if let Some(pattern) = self.patterns.get_mut(key) {
            // Update existing pattern
            pattern.frequency += 1;
            pattern.last_occurrence = now;
            pattern.context = context;
            // Recalculate confidence inline to avoid borrow issues
            // NOTE(review): last_occurrence was set to `now` just above, so
            // days_since_last is always 0 and decay_factor is always 1.0 here —
            // confidence reduces to frequency_score. If decay relative to the
            // *previous* occurrence was intended, capture it before the update.
            let days_since_last = (now - pattern.last_occurrence).num_days() as f32;
            let frequency_score = (pattern.frequency as f32 / 10.0).min(1.0);
            let decay_factor = if days_since_last > decay_days as f32 {
                0.5
            } else {
                1.0 - (days_since_last / decay_days as f32) * 0.3
            };
            pattern.confidence = (frequency_score * decay_factor).min(1.0);
        } else {
            // Create new pattern
            let pattern = BehaviorPattern {
                id: key.to_string(),
                pattern_type,
                frequency: 1,
                first_occurrence: now,
                last_occurrence: now,
                confidence: 0.1, // Low initial confidence
                context,
            };
            self.patterns.insert(key.to_string(), pattern);
            // Enforce max patterns limit
            self.enforce_max_patterns();
        }
    }
    /// Detect skill combination patterns from history
    ///
    /// Re-derives "skill_combo:*" patterns from the full retained history on
    /// every call, overwriting previous entries for the same combination.
    fn detect_skill_combinations(&mut self) {
        // Group skill combinations
        let mut combination_counts: HashMap<String, (Vec<String>, usize, DateTime<Utc>)> =
            HashMap::new();
        for (skills, time) in &self.skill_combination_history {
            // Single-skill entries cannot form a combination.
            if skills.len() < 2 {
                continue;
            }
            // Sort skills for consistent grouping
            let mut sorted_skills = skills.clone();
            sorted_skills.sort();
            let key = sorted_skills.join("|");
            let entry = combination_counts.entry(key).or_insert((
                sorted_skills,
                0,
                *time,
            ));
            entry.1 += 1;
            entry.2 = *time; // Update last occurrence
        }
        // Create patterns for combinations meeting threshold
        for (key, (skills, count, last_time)) in combination_counts {
            if count >= self.config.min_frequency {
                let pattern = BehaviorPattern {
                    id: format!("skill_combo:{}", key),
                    pattern_type: PatternType::SkillCombination { skill_ids: skills },
                    frequency: count,
                    // NOTE(review): first_occurrence is set to the LAST seen
                    // time, not the first — looks like a bug; confirm.
                    first_occurrence: last_time,
                    last_occurrence: last_time,
                    confidence: self.calculate_confidence_from_frequency(count),
                    context: PatternContext::default(),
                };
                self.patterns.insert(pattern.id.clone(), pattern);
            }
        }
        self.enforce_max_patterns();
    }
    /// Calculate confidence based on frequency and recency
    ///
    /// frequency_score saturates at 10 occurrences; recency decays linearly up
    /// to -30% inside the decay window, then drops to a flat 0.5 factor beyond it.
    fn calculate_confidence(&self, pattern: &BehaviorPattern) -> f32 {
        let now = Utc::now();
        let days_since_last = (now - pattern.last_occurrence).num_days() as f32;
        // Base confidence from frequency (capped at 1.0)
        let frequency_score = (pattern.frequency as f32 / 10.0).min(1.0);
        // Decay factor based on time since last occurrence
        let decay_factor = if days_since_last > self.config.decay_days as f32 {
            0.5 // Significant decay for old patterns
        } else {
            1.0 - (days_since_last / self.config.decay_days as f32) * 0.3
        };
        (frequency_score * decay_factor).min(1.0)
    }
    /// Calculate confidence from frequency alone
    ///
    /// Reaches 1.0 once frequency hits min_frequency; `.max(1)` guards a
    /// division by zero if min_frequency is configured as 0.
    fn calculate_confidence_from_frequency(&self, frequency: usize) -> f32 {
        (frequency as f32 / self.config.min_frequency.max(1) as f32).min(1.0)
    }
    /// Enforce maximum patterns limit by removing lowest confidence patterns
    ///
    /// NOTE(review): `partial_cmp(...).unwrap()` panics if any confidence is
    /// NaN; confidences produced here are finite, but imported patterns are not validated.
    fn enforce_max_patterns(&mut self) {
        if self.patterns.len() <= self.config.max_patterns {
            return;
        }
        // Sort patterns by confidence and remove lowest
        let mut patterns_vec: Vec<_> = self.patterns.drain().collect();
        patterns_vec.sort_by(|a, b| b.1.confidence.partial_cmp(&a.1.confidence).unwrap());
        // Keep top patterns
        self.patterns = patterns_vec
            .into_iter()
            .take(self.config.max_patterns)
            .collect();
    }
    /// Get all patterns above confidence threshold
    pub fn get_patterns(&self) -> Vec<&BehaviorPattern> {
        self.patterns
            .values()
            .filter(|p| p.confidence >= self.config.min_confidence)
            .collect()
    }
    /// Get patterns of a specific type
    ///
    /// Matches on the enum VARIANT only (via discriminant); the fields of
    /// `pattern_type` are ignored.
    pub fn get_patterns_by_type(&self, pattern_type: &PatternType) -> Vec<&BehaviorPattern> {
        self.patterns
            .values()
            .filter(|p| std::mem::discriminant(&p.pattern_type) == std::mem::discriminant(pattern_type))
            .filter(|p| p.confidence >= self.config.min_confidence)
            .collect()
    }
    /// Get patterns sorted by confidence (descending)
    pub fn get_patterns_sorted(&self) -> Vec<&BehaviorPattern> {
        let mut patterns: Vec<_> = self.get_patterns();
        patterns.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap());
        patterns
    }
    /// Decay old patterns (should be called periodically)
    ///
    /// Only patterns past the decay window lose confidence; anything that falls
    /// below half the min_confidence floor is dropped entirely.
    pub fn decay_patterns(&mut self) {
        let now = Utc::now();
        for pattern in self.patterns.values_mut() {
            let days_since_last = (now - pattern.last_occurrence).num_days() as f32;
            if days_since_last > self.config.decay_days as f32 {
                // Reduce confidence for old patterns
                let decay_amount = 0.1 * (days_since_last / self.config.decay_days as f32);
                pattern.confidence = (pattern.confidence - decay_amount).max(0.0);
            }
        }
        // Remove patterns below threshold
        self.patterns
            .retain(|_, p| p.confidence >= self.config.min_confidence * 0.5);
    }
    /// Clear all patterns and the skill-usage history
    pub fn clear(&mut self) {
        self.patterns.clear();
        self.skill_combination_history.clear();
    }
    /// Get pattern count (includes patterns below the confidence threshold)
    pub fn pattern_count(&self) -> usize {
        self.patterns.len()
    }
    /// Export patterns for persistence
    pub fn export_patterns(&self) -> Vec<BehaviorPattern> {
        self.patterns.values().cloned().collect()
    }
    /// Import patterns from persistence
    ///
    /// Existing patterns with the same id are overwritten; no validation or
    /// max-pattern enforcement is applied here.
    pub fn import_patterns(&mut self, patterns: Vec<BehaviorPattern>) {
        for pattern in patterns {
            self.patterns.insert(pattern.id.clone(), pattern);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A fresh detector starts with no patterns.
    #[test]
    fn test_pattern_creation() {
        let detector = PatternDetector::new(None);
        assert_eq!(detector.pattern_count(), 0);
    }
    /// Two identical skill pairs should cross the min_frequency=2 threshold.
    #[test]
    fn test_skill_combination_detection() {
        let mut detector = PatternDetector::new(Some(PatternDetectorConfig {
            min_frequency: 2,
            ..Default::default()
        }));
        // Record skill usage multiple times
        detector.record_skill_usage(vec!["skill_a".to_string(), "skill_b".to_string()]);
        detector.record_skill_usage(vec!["skill_a".to_string(), "skill_b".to_string()]);
        // Should detect pattern after 2 occurrences
        let patterns = detector.get_patterns();
        assert!(!patterns.is_empty());
    }
    /// Confidence for a just-seen, frequency-5 pattern stays within (0, 1].
    #[test]
    fn test_confidence_calculation() {
        let detector = PatternDetector::new(None);
        let pattern = BehaviorPattern {
            id: "test".to_string(),
            pattern_type: PatternType::TaskPipelineMapping {
                task_type: "test".to_string(),
                pipeline_id: "pipeline".to_string(),
            },
            frequency: 5,
            first_occurrence: Utc::now(),
            last_occurrence: Utc::now(),
            confidence: 0.5,
            context: PatternContext::default(),
        };
        let confidence = detector.calculate_confidence(&pattern);
        assert!(confidence > 0.0 && confidence <= 1.0);
    }
}

View File

@@ -1,819 +0,0 @@
//! Persona Evolver - Memory-powered persona evolution system
//!
//! Automatically evolves agent persona based on:
//! - User interaction patterns (preferences, communication style)
//! - Reflection insights (positive/negative patterns)
//! - Memory accumulation (facts, lessons, context)
//!
//! Key features:
//! - Automatic user_profile enrichment from preferences
//! - Instruction refinement proposals based on patterns
//! - Soul evolution suggestions (requires explicit user approval)
//!
//! Phase 4 of Intelligence Layer - P1 Innovation Task.
//!
//! NOTE: Tauri commands defined here are not yet registered with the app.
#![allow(dead_code)] // Tauri commands not yet registered with application
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use super::reflection::{ReflectionResult, Sentiment, MemoryEntryForAnalysis};
use super::identity::{IdentityFiles, IdentityFile, ProposalStatus};
// === Types ===
/// Persona evolution configuration
///
/// Every field carries a `#[serde(default = ...)]` helper so partially
/// specified configs deserialize cleanly; keep those helpers in sync with
/// the `Default` impl below.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaEvolverConfig {
    /// Enable automatic user_profile updates
    #[serde(default = "default_auto_profile_update")]
    pub auto_profile_update: bool,
    /// Minimum preferences before suggesting profile update
    #[serde(default = "default_min_preferences")]
    pub min_preferences_for_update: usize,
    /// Minimum conversations before evolution
    #[serde(default = "default_min_conversations")]
    pub min_conversations_for_evolution: usize,
    /// Enable instruction refinement proposals
    #[serde(default = "default_enable_instruction_refinement")]
    pub enable_instruction_refinement: bool,
    /// Enable soul evolution (requires explicit approval)
    #[serde(default = "default_enable_soul_evolution")]
    pub enable_soul_evolution: bool,
    /// Maximum proposals per evolution cycle
    #[serde(default = "default_max_proposals")]
    pub max_proposals_per_cycle: usize,
}
// Serde default helpers referenced by `#[serde(default = "...")]` above.
fn default_auto_profile_update() -> bool {
    true
}
fn default_min_preferences() -> usize {
    3
}
fn default_min_conversations() -> usize {
    5
}
fn default_enable_instruction_refinement() -> bool {
    true
}
fn default_enable_soul_evolution() -> bool {
    true
}
fn default_max_proposals() -> usize {
    3
}
impl Default for PersonaEvolverConfig {
fn default() -> Self {
Self {
auto_profile_update: true,
min_preferences_for_update: 3,
min_conversations_for_evolution: 5,
enable_instruction_refinement: true,
enable_soul_evolution: true,
max_proposals_per_cycle: 3,
}
}
}
/// Persona evolution result for one cycle of `PersonaEvolver::evolve`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvolutionResult {
    /// Agent ID
    pub agent_id: String,
    /// RFC 3339 timestamp of the cycle
    pub timestamp: String,
    /// Profile updates applied automatically (no approval required)
    pub profile_updates: Vec<ProfileUpdate>,
    /// Proposals generated (require explicit user approval)
    pub proposals: Vec<EvolutionProposal>,
    /// Informational insights (observations only, not applied anywhere)
    pub insights: Vec<EvolutionInsight>,
    /// True when at least one update or proposal was produced
    pub evolved: bool,
}
/// Profile update (auto-applied, no user approval required)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfileUpdate {
    /// Identity section that was rewritten (e.g. "user_profile")
    pub section: String,
    /// Content before the update
    pub previous: String,
    /// Content after the update
    pub updated: String,
    /// Human-readable origin, e.g. how many preference memories fed it
    pub source: String,
}
/// Evolution proposal (requires explicit user approval before being applied)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvolutionProposal {
    /// Unique proposal id (e.g. "evo_inst_<timestamp>")
    pub id: String,
    /// Agent this proposal targets
    pub agent_id: String,
    /// Which identity file would be changed
    pub target_file: IdentityFile,
    /// Kind of change being proposed
    pub change_type: EvolutionChangeType,
    /// Human-readable rationale
    pub reason: String,
    /// Current file content (for diffing / preview in the UI)
    pub current_content: String,
    /// Proposed replacement content
    pub proposed_content: String,
    /// Heuristic confidence in [0, 1]
    pub confidence: f32,
    /// Supporting evidence snippets taken from patterns/memories
    pub evidence: Vec<String>,
    /// Approval workflow status
    pub status: ProposalStatus,
    /// RFC 3339 creation timestamp
    pub created_at: String,
}
/// Type of evolution change carried by an `EvolutionProposal`.
/// Serialized in snake_case (e.g. "instruction_addition").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum EvolutionChangeType {
    /// Add new instruction section
    InstructionAddition,
    /// Refine existing instruction
    InstructionRefinement,
    /// Add personality trait (soul file)
    TraitAddition,
    /// Communication style adjustment
    StyleAdjustment,
    /// Knowledge domain expansion
    DomainExpansion,
}
/// Evolution insight: an observation plus a follow-up recommendation.
/// Insights are informational only and are never auto-applied.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvolutionInsight {
    /// High-level category (see `InsightCategory`)
    pub category: InsightCategory,
    /// What was observed in the memories / reflection patterns
    pub observation: String,
    /// Suggested follow-up action
    pub recommendation: String,
    /// Heuristic confidence in [0, 1]
    pub confidence: f32,
}
/// High-level category for an `EvolutionInsight`.
/// Serialized in lowercase (e.g. "communicationstyle").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum InsightCategory {
    /// How the user prefers to communicate
    CommunicationStyle,
    /// Accumulated technical knowledge / expertise
    TechnicalExpertise,
    /// Task and workflow efficiency observations
    TaskEfficiency,
    /// Explicit user preferences
    UserPreference,
    /// Areas where lessons accumulate or knowledge is thin
    KnowledgeGap,
}
/// Persona evolution runtime state (mutated by `PersonaEvolver::evolve`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaEvolverState {
    /// RFC 3339 timestamp of the last cycle that produced changes, if any
    pub last_evolution: Option<String>,
    /// Number of cycles that produced at least one update or proposal
    pub total_evolutions: usize,
    /// Proposals generated but not yet approved/rejected
    pub pending_proposals: usize,
    /// Heuristic 0.0–1.0 score of how enriched the user profile is
    pub profile_enrichment_score: f32,
}
impl Default for PersonaEvolverState {
fn default() -> Self {
Self {
last_evolution: None,
total_evolutions: 0,
pending_proposals: 0,
profile_enrichment_score: 0.0,
}
}
}
// === Persona Evolver ===
/// Memory-driven persona evolution engine.
///
/// Holds the active configuration, mutable runtime state, and a bounded
/// in-memory history of past `EvolutionResult`s (trimmed inside `evolve`).
pub struct PersonaEvolver {
    config: PersonaEvolverConfig,
    state: PersonaEvolverState,
    evolution_history: Vec<EvolutionResult>,
}
impl PersonaEvolver {
    /// Create an evolver with the given configuration (or defaults).
    pub fn new(config: Option<PersonaEvolverConfig>) -> Self {
        Self {
            config: config.unwrap_or_default(),
            state: PersonaEvolverState::default(),
            evolution_history: Vec::new(),
        }
    }
    /// Run evolution cycle for an agent.
    ///
    /// Produces auto-applied profile updates, approval-gated proposals and
    /// informational insights, records the result in a bounded history, and
    /// returns it.
    pub fn evolve(
        &mut self,
        agent_id: &str,
        memories: &[MemoryEntryForAnalysis],
        reflection_result: &ReflectionResult,
        current_identity: &IdentityFiles,
    ) -> EvolutionResult {
        let mut profile_updates = Vec::new();
        let mut proposals = Vec::new();
        // 1. Extract user preferences and auto-update profile
        if self.config.auto_profile_update {
            profile_updates = self.extract_profile_updates(memories, current_identity);
        }
        // 2. Generate instruction refinement proposals
        if self.config.enable_instruction_refinement {
            let instruction_proposals = self.generate_instruction_proposals(
                agent_id,
                reflection_result,
                current_identity,
            );
            proposals.extend(instruction_proposals);
        }
        // 3. Generate soul evolution proposals (rare, high bar)
        if self.config.enable_soul_evolution {
            let soul_proposals = self.generate_soul_proposals(
                agent_id,
                reflection_result,
                current_identity,
            );
            proposals.extend(soul_proposals);
        }
        // 4. Generate insights (bound directly; replaces the former dead
        //    `let mut insights = Vec::new()` + #[allow(unused_assignments)])
        let insights = self.generate_insights(memories, reflection_result);
        // 5. Limit proposals
        proposals.truncate(self.config.max_proposals_per_cycle);
        // 6. Update state only when something actually changed
        let evolved = !profile_updates.is_empty() || !proposals.is_empty();
        if evolved {
            self.state.last_evolution = Some(Utc::now().to_rfc3339());
            self.state.total_evolutions += 1;
            self.state.pending_proposals += proposals.len();
            self.state.profile_enrichment_score = self.calculate_profile_score(memories);
        }
        let result = EvolutionResult {
            agent_id: agent_id.to_string(),
            timestamp: Utc::now().to_rfc3339(),
            profile_updates,
            proposals,
            insights,
            evolved,
        };
        // Store in history; once it exceeds 20 entries, drop the oldest 10.
        self.evolution_history.push(result.clone());
        if self.evolution_history.len() > 20 {
            self.evolution_history = self.evolution_history.split_off(10);
        }
        result
    }
    /// Extract profile updates from memory.
    ///
    /// Only rewrites the user profile when it still looks like the default
    /// placeholder (or is very short, < 100 bytes) and at least
    /// `min_preferences_for_update` preference memories exist.
    fn extract_profile_updates(
        &self,
        memories: &[MemoryEntryForAnalysis],
        current_identity: &IdentityFiles,
    ) -> Vec<ProfileUpdate> {
        let mut updates = Vec::new();
        // Extract preferences
        let preferences: Vec<_> = memories
            .iter()
            .filter(|m| m.memory_type == "preference")
            .collect();
        if preferences.len() >= self.config.min_preferences_for_update {
            // Check if user_profile needs updating
            let current_profile = &current_identity.user_profile;
            let default_profile = "尚未收集到用户偏好信息";
            if current_profile.contains(default_profile) || current_profile.len() < 100 {
                // Build new profile from preferences
                let mut sections = Vec::new();
                // Group preferences by category
                let mut categories: HashMap<String, Vec<String>> = HashMap::new();
                for pref in &preferences {
                    // Simple categorization based on keywords
                    let category = self.categorize_preference(&pref.content);
                    categories
                        .entry(category)
                        .or_default()
                        .push(pref.content.clone());
                }
                // Build one markdown section per category
                for (category, items) in categories {
                    if !items.is_empty() {
                        sections.push(format!("### {}\n{}", category, items.iter()
                            .map(|i| format!("- {}", i))
                            .collect::<Vec<_>>()
                            .join("\n")));
                    }
                }
                if !sections.is_empty() {
                    let new_profile = format!("# 用户画像\n\n{}\n\n_自动生成于 {}_",
                        sections.join("\n\n"),
                        Utc::now().format("%Y-%m-%d")
                    );
                    updates.push(ProfileUpdate {
                        section: "user_profile".to_string(),
                        previous: current_profile.clone(),
                        updated: new_profile,
                        source: format!("{} 个偏好记忆", preferences.len()),
                    });
                }
            }
        }
        updates
    }
    /// Categorize a preference by keyword matching (first match wins).
    fn categorize_preference(&self, content: &str) -> String {
        let content_lower = content.to_lowercase();
        if content_lower.contains("语言") || content_lower.contains("沟通") || content_lower.contains("回复") {
            "沟通偏好".to_string()
        } else if content_lower.contains("技术") || content_lower.contains("框架") || content_lower.contains("工具") {
            "技术栈".to_string()
        } else if content_lower.contains("格式") || content_lower.contains("项目") == false && false {
            unreachable!()
        } else if content_lower.contains("项目") || content_lower.contains("工作") || content_lower.contains("任务") {
            "工作习惯".to_string()
        } else if content_lower.contains("格式") || content_lower.contains("风格") {
            // NOTE(review): the original tested "风格" twice here; the redundant
            // duplicate was removed. A third keyword (e.g. "样式") may have been
            // intended — confirm with the author.
            "输出风格".to_string()
        } else {
            "其他偏好".to_string()
        }
    }
    /// Generate instruction refinement proposals from negative reflection
    /// patterns and high-priority improvement suggestions.
    fn generate_instruction_proposals(
        &self,
        agent_id: &str,
        reflection_result: &ReflectionResult,
        current_identity: &IdentityFiles,
    ) -> Vec<EvolutionProposal> {
        let mut proposals = Vec::new();
        // Only propose if there are negative patterns
        let negative_patterns: Vec<_> = reflection_result.patterns
            .iter()
            .filter(|p| matches!(p.sentiment, Sentiment::Negative))
            .collect();
        if negative_patterns.is_empty() {
            return proposals;
        }
        // Check if instructions already contain these warnings
        let current_instructions = &current_identity.instructions;
        // Build proposed additions
        let mut additions = Vec::new();
        let mut evidence = Vec::new();
        for pattern in &negative_patterns {
            // Skip patterns whose observation text already appears verbatim
            let key_phrase = &pattern.observation;
            if !current_instructions.contains(key_phrase) {
                additions.push(format!("- **注意事项**: {}", pattern.observation));
                evidence.extend(pattern.evidence.clone());
            }
        }
        if !additions.is_empty() {
            let proposed = format!(
                "{}\n\n## 🔄 自我改进建议\n\n{}\n\n_基于交互模式分析自动生成_",
                current_instructions.trim_end(),
                additions.join("\n")
            );
            proposals.push(EvolutionProposal {
                id: format!("evo_inst_{}", Utc::now().timestamp()),
                agent_id: agent_id.to_string(),
                target_file: IdentityFile::Instructions,
                change_type: EvolutionChangeType::InstructionAddition,
                reason: format!(
                    "基于 {} 个负面模式观察,建议在指令中增加自我改进提醒",
                    negative_patterns.len()
                ),
                current_content: current_instructions.clone(),
                proposed_content: proposed,
                // 0.7 base plus 0.05 per pattern, bonus capped at +0.2
                confidence: 0.7 + (negative_patterns.len() as f32 * 0.05).min(0.2),
                evidence,
                status: ProposalStatus::Pending,
                created_at: Utc::now().to_rfc3339(),
            });
        }
        // Check for improvement suggestions that could become instructions
        for improvement in &reflection_result.improvements {
            if current_instructions.contains(&improvement.suggestion) {
                continue;
            }
            // High priority improvements become instruction proposals
            if matches!(improvement.priority, super::reflection::Priority::High) {
                proposals.push(EvolutionProposal {
                    id: format!("evo_inst_{}_{}", Utc::now().timestamp(), rand_suffix()),
                    agent_id: agent_id.to_string(),
                    target_file: IdentityFile::Instructions,
                    change_type: EvolutionChangeType::InstructionRefinement,
                    reason: format!("高优先级改进建议: {}", improvement.area),
                    current_content: current_instructions.clone(),
                    proposed_content: format!(
                        "{}\n\n### {}\n\n{}",
                        current_instructions.trim_end(),
                        improvement.area,
                        improvement.suggestion
                    ),
                    confidence: 0.75,
                    evidence: vec![improvement.suggestion.clone()],
                    status: ProposalStatus::Pending,
                    created_at: Utc::now().to_rfc3339(),
                });
            }
        }
        proposals
    }
    /// Generate soul evolution proposals (high bar: at least 3 positive
    /// patterns with average frequency >= 5).
    fn generate_soul_proposals(
        &self,
        agent_id: &str,
        reflection_result: &ReflectionResult,
        current_identity: &IdentityFiles,
    ) -> Vec<EvolutionProposal> {
        let mut proposals = Vec::new();
        // Soul evolution requires strong positive patterns
        let positive_patterns: Vec<_> = reflection_result.patterns
            .iter()
            .filter(|p| matches!(p.sentiment, Sentiment::Positive))
            .collect();
        // Need at least 3 strong positive patterns
        if positive_patterns.len() < 3 {
            return proposals;
        }
        // Average frequency gate (integer division is intentional here)
        let avg_frequency: usize = positive_patterns.iter()
            .map(|p| p.frequency)
            .sum::<usize>() / positive_patterns.len();
        if avg_frequency < 5 {
            return proposals;
        }
        // Build soul enhancement proposal
        let current_soul = &current_identity.soul;
        let mut traits = Vec::new();
        let mut evidence = Vec::new();
        for pattern in &positive_patterns {
            // Extract a growth trait from the observation keywords
            if pattern.observation.contains("偏好") {
                traits.push("深入理解用户偏好");
            } else if pattern.observation.contains("经验") {
                traits.push("持续积累经验教训");
            } else if pattern.observation.contains("知识") {
                traits.push("构建核心知识体系");
            }
            evidence.extend(pattern.evidence.clone());
        }
        if !traits.is_empty() {
            let traits_section = traits.iter()
                .map(|t| format!("- {}", t))
                .collect::<Vec<_>>()
                .join("\n");
            let proposed = format!(
                "{}\n\n## 🌱 成长特质\n\n{}\n\n_通过交互学习持续演化_",
                current_soul.trim_end(),
                traits_section
            );
            proposals.push(EvolutionProposal {
                id: format!("evo_soul_{}", Utc::now().timestamp()),
                agent_id: agent_id.to_string(),
                target_file: IdentityFile::Soul,
                change_type: EvolutionChangeType::TraitAddition,
                reason: format!(
                    "基于 {} 个强正面模式,建议增加成长特质",
                    positive_patterns.len()
                ),
                current_content: current_soul.clone(),
                proposed_content: proposed,
                confidence: 0.85,
                evidence,
                status: ProposalStatus::Pending,
                created_at: Utc::now().to_rfc3339(),
            });
        }
        proposals
    }
    /// Generate evolution insights from memories and reflection patterns.
    /// Purely observational — nothing here mutates state.
    fn generate_insights(
        &self,
        memories: &[MemoryEntryForAnalysis],
        reflection_result: &ReflectionResult,
    ) -> Vec<EvolutionInsight> {
        let mut insights = Vec::new();
        // Communication style insight
        let comm_prefs: Vec<_> = memories
            .iter()
            .filter(|m| m.memory_type == "preference" &&
                (m.content.contains("回复") || m.content.contains("语言") || m.content.contains("简洁")))
            .collect();
        if !comm_prefs.is_empty() {
            insights.push(EvolutionInsight {
                category: InsightCategory::CommunicationStyle,
                observation: format!("用户有 {} 个沟通风格偏好", comm_prefs.len()),
                recommendation: "在回复中应用这些偏好,提高用户满意度".to_string(),
                confidence: 0.8,
            });
        }
        // Technical expertise insight
        let tech_memories: Vec<_> = memories
            .iter()
            .filter(|m| m.tags.iter().any(|t| t.contains("技术") || t.contains("代码")))
            .collect();
        if tech_memories.len() >= 5 {
            insights.push(EvolutionInsight {
                category: InsightCategory::TechnicalExpertise,
                observation: format!("积累了 {} 个技术相关记忆", tech_memories.len()),
                recommendation: "考虑构建技术知识图谱,提高检索效率".to_string(),
                confidence: 0.7,
            });
        }
        // Task efficiency insight from negative patterns
        let has_task_issues = reflection_result.patterns
            .iter()
            .any(|p| p.observation.contains("任务") && matches!(p.sentiment, Sentiment::Negative));
        if has_task_issues {
            insights.push(EvolutionInsight {
                category: InsightCategory::TaskEfficiency,
                observation: "存在任务管理相关问题".to_string(),
                recommendation: "建议增加任务跟踪和提醒机制".to_string(),
                confidence: 0.75,
            });
        }
        // Knowledge gap insight
        let lesson_count = memories.iter()
            .filter(|m| m.memory_type == "lesson")
            .count();
        if lesson_count > 10 {
            insights.push(EvolutionInsight {
                category: InsightCategory::KnowledgeGap,
                observation: format!("已记录 {} 条经验教训", lesson_count),
                recommendation: "定期回顾教训,避免重复错误".to_string(),
                confidence: 0.8,
            });
        }
        insights
    }
    /// Calculate profile enrichment score in [0, 1]:
    /// up to 0.5 from preference count, 0.3 from fact count, 0.2 for having both.
    fn calculate_profile_score(&self, memories: &[MemoryEntryForAnalysis]) -> f32 {
        let pref_count = memories.iter().filter(|m| m.memory_type == "preference").count();
        let fact_count = memories.iter().filter(|m| m.memory_type == "fact").count();
        // Score based on diversity and quantity
        let pref_score = (pref_count as f32 / 10.0).min(1.0) * 0.5;
        let fact_score = (fact_count as f32 / 20.0).min(1.0) * 0.3;
        let diversity = if pref_count > 0 && fact_count > 0 { 0.2 } else { 0.0 };
        pref_score + fact_score + diversity
    }
    /// Get evolution history, most recent first, capped at `limit`.
    pub fn get_history(&self, limit: usize) -> Vec<&EvolutionResult> {
        self.evolution_history.iter().rev().take(limit).collect()
    }
    /// Get current state
    pub fn get_state(&self) -> &PersonaEvolverState {
        &self.state
    }
    /// Get configuration
    pub fn get_config(&self) -> &PersonaEvolverConfig {
        &self.config
    }
    /// Update configuration
    pub fn update_config(&mut self, config: PersonaEvolverConfig) {
        self.config = config;
    }
    /// Mark one proposal as handled (approved or rejected); saturates at 0.
    pub fn proposal_handled(&mut self) {
        if self.state.pending_proposals > 0 {
            self.state.pending_proposals -= 1;
        }
    }
}
/// Generate a short unique suffix for proposal IDs.
///
/// NOTE: despite the name, this is not random — it is a process-wide atomic
/// counter rendered as 4 hex digits, wrapping at 0x10000. Uniqueness is
/// therefore per-process and bounded, which is sufficient for ID suffixing.
fn rand_suffix() -> String {
    use std::sync::atomic::{AtomicU64, Ordering};
    static COUNTER: AtomicU64 = AtomicU64::new(0);
    let count = COUNTER.fetch_add(1, Ordering::Relaxed);
    format!("{:04x}", count % 0x10000)
}
// === Tauri Commands ===
/// Shared evolver handle managed by Tauri state (async-mutex guarded so
/// commands can hold it across `.await` points).
pub type PersonaEvolverStateHandle = Arc<Mutex<PersonaEvolver>>;
/// Initialize persona evolver, optionally overriding its configuration.
/// Always returns `Ok(true)`.
#[tauri::command]
pub async fn persona_evolver_init(
    config: Option<PersonaEvolverConfig>,
    state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<bool, String> {
    let mut evolver = state.lock().await;
    match config {
        Some(cfg) => evolver.update_config(cfg),
        None => {}
    }
    Ok(true)
}
/// Run evolution cycle
///
/// Orchestrates a full cycle: reflection → identity snapshot → evolution →
/// auto-apply profile updates. Each mutex is dropped before the next is
/// taken (except the evolver lock, held through step 4) so that two state
/// handles are never held simultaneously in a conflicting order.
#[tauri::command]
pub async fn persona_evolve(
    agent_id: String,
    memories: Vec<MemoryEntryForAnalysis>,
    reflection_state: tauri::State<'_, super::reflection::ReflectionEngineState>,
    identity_state: tauri::State<'_, super::identity::IdentityManagerState>,
    evolver_state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<EvolutionResult, String> {
    // 1. Run reflection first
    let mut reflection = reflection_state.lock().await;
    let reflection_result = reflection.reflect(&agent_id, &memories);
    drop(reflection);
    // 2. Get current identity (lock released before evolution)
    let mut identity = identity_state.lock().await;
    let current_identity = identity.get_identity(&agent_id);
    drop(identity);
    // 3. Run evolution
    let mut evolver = evolver_state.lock().await;
    let result = evolver.evolve(&agent_id, &memories, &reflection_result, &current_identity);
    // 4. Apply auto profile updates (re-acquires the identity lock)
    if !result.profile_updates.is_empty() {
        let mut identity = identity_state.lock().await;
        for update in &result.profile_updates {
            identity.update_user_profile(&agent_id, &update.updated);
        }
    }
    Ok(result)
}
/// Get evolution history, most recent first, capped at `limit` (default 10).
#[tauri::command]
pub async fn persona_evolution_history(
    limit: Option<usize>,
    state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<Vec<EvolutionResult>, String> {
    let evolver = state.lock().await;
    let history: Vec<EvolutionResult> = evolver
        .get_history(limit.unwrap_or(10))
        .into_iter()
        .cloned()
        .collect();
    Ok(history)
}
/// Get a snapshot of the evolver's current runtime state.
#[tauri::command]
pub async fn persona_evolver_state(
    state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<PersonaEvolverState, String> {
    let guard = state.lock().await;
    let snapshot = guard.get_state().clone();
    Ok(snapshot)
}
/// Get a copy of the evolver's active configuration.
#[tauri::command]
pub async fn persona_evolver_config(
    state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<PersonaEvolverConfig, String> {
    let guard = state.lock().await;
    let cfg = guard.get_config().clone();
    Ok(cfg)
}
/// Replace the evolver's configuration wholesale.
#[tauri::command]
pub async fn persona_evolver_update_config(
    config: PersonaEvolverConfig,
    state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<(), String> {
    state.lock().await.update_config(config);
    Ok(())
}
/// Apply evolution proposal (approve)
///
/// Writes the proposed content to the targeted identity file, decrements
/// the evolver's pending-proposal counter, and returns the refreshed
/// identity for the agent.
#[tauri::command]
pub async fn persona_apply_proposal(
    proposal: EvolutionProposal,
    identity_state: tauri::State<'_, super::identity::IdentityManagerState>,
    evolver_state: tauri::State<'_, PersonaEvolverStateHandle>,
) -> Result<IdentityFiles, String> {
    // Apply the proposal through the identity manager. The `?` operator
    // propagates the error string directly — this replaces the previous
    // dead-code path that mapped an impossible Ok into a dummy IdentityFiles.
    let mut identity = identity_state.lock().await;
    let file_key = match proposal.target_file {
        IdentityFile::Soul => "soul",
        IdentityFile::Instructions => "instructions",
    };
    identity.update_file(&proposal.agent_id, file_key, &proposal.proposed_content)?;
    // Update evolver state (one proposal handled)
    let mut evolver = evolver_state.lock().await;
    evolver.proposal_handled();
    // Return updated identity
    Ok(identity.get_identity(&proposal.agent_id))
}
#[cfg(test)]
mod tests {
    use super::*;
    // An empty reflection with no memories must be a no-op: nothing evolves.
    #[test]
    fn test_evolve_empty() {
        let mut evolver = PersonaEvolver::new(None);
        let memories = vec![];
        let reflection = ReflectionResult {
            patterns: vec![],
            improvements: vec![],
            identity_proposals: vec![],
            new_memories: 0,
            timestamp: Utc::now().to_rfc3339(),
        };
        let identity = IdentityFiles {
            soul: "Test soul".to_string(),
            instructions: "Test instructions".to_string(),
            user_profile: "Test profile".to_string(),
            heartbeat: None,
        };
        let result = evolver.evolve("test-agent", &memories, &reflection, &identity);
        assert!(!result.evolved);
    }
    // Three "preference" memories (the default minimum) against a placeholder
    // profile should yield an auto profile update containing "用户画像".
    #[test]
    fn test_profile_update() {
        let mut evolver = PersonaEvolver::new(None);
        let memories = vec![
            MemoryEntryForAnalysis {
                memory_type: "preference".to_string(),
                content: "喜欢简洁的回复".to_string(),
                importance: 7,
                access_count: 3,
                tags: vec!["沟通".to_string()],
            },
            MemoryEntryForAnalysis {
                memory_type: "preference".to_string(),
                content: "使用中文".to_string(),
                importance: 8,
                access_count: 5,
                tags: vec!["语言".to_string()],
            },
            MemoryEntryForAnalysis {
                memory_type: "preference".to_string(),
                content: "代码使用 TypeScript".to_string(),
                importance: 7,
                access_count: 2,
                tags: vec!["技术".to_string()],
            },
        ];
        let identity = IdentityFiles {
            soul: "Test".to_string(),
            instructions: "Test".to_string(),
            user_profile: "尚未收集到用户偏好信息".to_string(),
            heartbeat: None,
        };
        let updates = evolver.extract_profile_updates(&memories, &identity);
        assert!(!updates.is_empty());
        assert!(updates[0].updated.contains("用户画像"));
    }
}

View File

@@ -1,519 +0,0 @@
//! Workflow Recommender - Generates workflow recommendations from detected patterns
//!
//! This module analyzes behavior patterns and generates actionable workflow recommendations.
//! It maps detected patterns to pipelines and provides confidence scoring.
//!
//! NOTE: Some methods are reserved for future integration with the UI.
#![allow(dead_code)] // Methods reserved for future UI integration
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid;
use super::mesh::WorkflowRecommendation;
use super::pattern_detector::{BehaviorPattern, PatternType};
// === Types ===
/// Recommendation rule that maps detected pattern types to a pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecommendationRule {
    /// Rule identifier
    pub id: String,
    /// Pattern type names this rule matches (the strings produced by
    /// `WorkflowRecommender::get_pattern_type_name`)
    pub pattern_types: Vec<String>,
    /// Pipeline to recommend
    pub pipeline_id: String,
    /// Base confidence for this rule (blended 60/40 with pattern confidence)
    pub base_confidence: f32,
    /// Human-readable description
    pub description: String,
    /// Input mappings (pattern context field -> pipeline input)
    pub input_mappings: HashMap<String, String>,
    /// Priority (higher = more important; used as primary sort key)
    pub priority: u8,
}
/// Recommender configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecommenderConfig {
    /// Minimum confidence threshold; lower recommendations are dropped (default 0.5)
    pub min_confidence: f32,
    /// Maximum recommendations returned per call (default 10)
    pub max_recommendations: usize,
    /// Enable rule-based recommendations (default true)
    pub enable_rules: bool,
    /// Enable direct pattern-based recommendations (default true)
    pub enable_patterns: bool,
}
impl Default for RecommenderConfig {
fn default() -> Self {
Self {
min_confidence: 0.5,
max_recommendations: 10,
enable_rules: true,
enable_patterns: true,
}
}
}
// === Workflow Recommender ===
/// Workflow recommendation engine.
///
/// Combines static rules with detected behavior patterns to propose
/// pipelines, and keeps a cache of recommendations for accept/dismiss.
pub struct WorkflowRecommender {
    /// Configuration
    config: RecommenderConfig,
    /// Recommendation rules
    rules: Vec<RecommendationRule>,
    /// Pipeline registry (pipeline_id -> metadata)
    #[allow(dead_code)] // Reserved for future pipeline-based recommendations
    pipeline_registry: HashMap<String, PipelineMetadata>,
    /// Generated recommendations cache.
    /// NOTE(review): `recommend()` does not populate this cache — it is only
    /// filled via `load_from_file` — so accept/dismiss only operate on loaded
    /// recommendations; confirm whether that is intended.
    recommendations_cache: Vec<WorkflowRecommendation>,
}
/// Metadata about a registered pipeline
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineMetadata {
    /// Unique pipeline identifier
    pub id: String,
    /// Display name
    pub name: String,
    /// Optional human-readable description
    pub description: Option<String>,
    /// Free-form tags for discovery/matching
    pub tags: Vec<String>,
    /// Optional JSON schema describing expected pipeline inputs
    pub input_schema: Option<serde_json::Value>,
}
impl WorkflowRecommender {
/// Create a new workflow recommender
pub fn new(config: Option<RecommenderConfig>) -> Self {
let mut recommender = Self {
config: config.unwrap_or_default(),
rules: Vec::new(),
pipeline_registry: HashMap::new(),
recommendations_cache: Vec::new(),
};
// Initialize with built-in rules
recommender.initialize_default_rules();
recommender
}
/// Initialize default recommendation rules
fn initialize_default_rules(&mut self) {
// Rule: Research + Analysis -> Report Generation
self.rules.push(RecommendationRule {
id: "rule_research_report".to_string(),
pattern_types: vec!["SkillCombination".to_string()],
pipeline_id: "research-report-generator".to_string(),
base_confidence: 0.7,
description: "Generate comprehensive research report".to_string(),
input_mappings: HashMap::new(),
priority: 8,
});
// Rule: Code + Test -> Quality Check Pipeline
self.rules.push(RecommendationRule {
id: "rule_code_quality".to_string(),
pattern_types: vec!["SkillCombination".to_string()],
pipeline_id: "code-quality-check".to_string(),
base_confidence: 0.75,
description: "Run code quality and test pipeline".to_string(),
input_mappings: HashMap::new(),
priority: 7,
});
// Rule: Daily morning -> Daily briefing
self.rules.push(RecommendationRule {
id: "rule_morning_briefing".to_string(),
pattern_types: vec!["TemporalTrigger".to_string()],
pipeline_id: "daily-briefing".to_string(),
base_confidence: 0.6,
description: "Generate daily briefing".to_string(),
input_mappings: HashMap::new(),
priority: 5,
});
// Rule: Task + Deadline -> Priority sort
self.rules.push(RecommendationRule {
id: "rule_task_priority".to_string(),
pattern_types: vec!["InputPattern".to_string()],
pipeline_id: "task-priority-sorter".to_string(),
base_confidence: 0.65,
description: "Sort and prioritize tasks".to_string(),
input_mappings: HashMap::new(),
priority: 6,
});
}
/// Generate recommendations from detected patterns
pub fn recommend(&self, patterns: &[&BehaviorPattern]) -> Vec<WorkflowRecommendation> {
let mut recommendations = Vec::new();
if patterns.is_empty() {
return recommendations;
}
// Rule-based recommendations
if self.config.enable_rules {
for rule in &self.rules {
if let Some(rec) = self.apply_rule(rule, patterns) {
if rec.confidence >= self.config.min_confidence {
recommendations.push(rec);
}
}
}
}
// Pattern-based recommendations (direct mapping)
if self.config.enable_patterns {
for pattern in patterns {
if let Some(rec) = self.pattern_to_recommendation(pattern) {
if rec.confidence >= self.config.min_confidence {
recommendations.push(rec);
}
}
}
}
// Sort by confidence (descending) and priority
recommendations.sort_by(|a, b| {
let priority_diff = self.get_priority_for_recommendation(b)
.cmp(&self.get_priority_for_recommendation(a));
if priority_diff != std::cmp::Ordering::Equal {
return priority_diff;
}
b.confidence.partial_cmp(&a.confidence).unwrap()
});
// Limit recommendations
recommendations.truncate(self.config.max_recommendations);
recommendations
}
/// Apply a recommendation rule to patterns
fn apply_rule(
&self,
rule: &RecommendationRule,
patterns: &[&BehaviorPattern],
) -> Option<WorkflowRecommendation> {
let mut matched_patterns: Vec<String> = Vec::new();
let mut total_confidence = 0.0;
let mut match_count = 0;
for pattern in patterns {
let pattern_type_name = self.get_pattern_type_name(&pattern.pattern_type);
if rule.pattern_types.contains(&pattern_type_name) {
matched_patterns.push(pattern.id.clone());
total_confidence += pattern.confidence;
match_count += 1;
}
}
if matched_patterns.is_empty() {
return None;
}
// Calculate combined confidence
let avg_pattern_confidence = total_confidence / match_count as f32;
let final_confidence = (rule.base_confidence * 0.6 + avg_pattern_confidence * 0.4).min(1.0);
// Build suggested inputs from pattern context
let suggested_inputs = self.build_suggested_inputs(&matched_patterns, patterns, rule);
Some(WorkflowRecommendation {
id: format!("rec_{}", Uuid::new_v4()),
pipeline_id: rule.pipeline_id.clone(),
confidence: final_confidence,
reason: rule.description.clone(),
suggested_inputs,
patterns_matched: matched_patterns,
timestamp: Utc::now(),
})
}
/// Convert a single pattern to a recommendation
fn pattern_to_recommendation(&self, pattern: &BehaviorPattern) -> Option<WorkflowRecommendation> {
let (pipeline_id, reason) = match &pattern.pattern_type {
PatternType::TaskPipelineMapping { task_type, pipeline_id } => {
(pipeline_id.clone(), format!("Detected task type: {}", task_type))
}
PatternType::SkillCombination { skill_ids } => {
// Find a pipeline that uses these skills
let pipeline_id = self.find_pipeline_for_skills(skill_ids)?;
(pipeline_id, format!("Skills often used together: {}", skill_ids.join(", ")))
}
PatternType::InputPattern { keywords, intent } => {
// Find a pipeline for this intent
let pipeline_id = self.find_pipeline_for_intent(intent)?;
(pipeline_id, format!("Intent detected: {} ({})", intent, keywords.join(", ")))
}
PatternType::TemporalTrigger { hand_id, time_pattern } => {
(format!("scheduled_{}", hand_id), format!("Scheduled at: {}", time_pattern))
}
};
Some(WorkflowRecommendation {
id: format!("rec_{}", Uuid::new_v4()),
pipeline_id,
confidence: pattern.confidence,
reason,
suggested_inputs: HashMap::new(),
patterns_matched: vec![pattern.id.clone()],
timestamp: Utc::now(),
})
}
/// Get string name for pattern type
fn get_pattern_type_name(&self, pattern_type: &PatternType) -> String {
match pattern_type {
PatternType::SkillCombination { .. } => "SkillCombination".to_string(),
PatternType::TemporalTrigger { .. } => "TemporalTrigger".to_string(),
PatternType::TaskPipelineMapping { .. } => "TaskPipelineMapping".to_string(),
PatternType::InputPattern { .. } => "InputPattern".to_string(),
}
}
/// Get priority for a recommendation
fn get_priority_for_recommendation(&self, rec: &WorkflowRecommendation) -> u8 {
self.rules
.iter()
.find(|r| r.pipeline_id == rec.pipeline_id)
.map(|r| r.priority)
.unwrap_or(5)
}
/// Build suggested inputs from patterns and rule
fn build_suggested_inputs(
&self,
matched_pattern_ids: &[String],
patterns: &[&BehaviorPattern],
rule: &RecommendationRule,
) -> HashMap<String, serde_json::Value> {
let mut inputs = HashMap::new();
for pattern_id in matched_pattern_ids {
if let Some(pattern) = patterns.iter().find(|p| p.id == *pattern_id) {
// Add context-based inputs
if let Some(ref topics) = pattern.context.recent_topics {
if !topics.is_empty() {
inputs.insert(
"topics".to_string(),
serde_json::Value::Array(
topics.iter().map(|t| serde_json::Value::String(t.clone())).collect()
),
);
}
}
if let Some(ref intent) = pattern.context.intent {
inputs.insert("intent".to_string(), serde_json::Value::String(intent.clone()));
}
// Add pattern-specific inputs
match &pattern.pattern_type {
PatternType::InputPattern { keywords, .. } => {
inputs.insert(
"keywords".to_string(),
serde_json::Value::Array(
keywords.iter().map(|k| serde_json::Value::String(k.clone())).collect()
),
);
}
PatternType::SkillCombination { skill_ids } => {
inputs.insert(
"skills".to_string(),
serde_json::Value::Array(
skill_ids.iter().map(|s| serde_json::Value::String(s.clone())).collect()
),
);
}
_ => {}
}
}
}
// Apply rule mappings
for (source, target) in &rule.input_mappings {
if let Some(value) = inputs.get(source) {
inputs.insert(target.clone(), value.clone());
}
}
inputs
}
/// Find a pipeline that uses the given skills
fn find_pipeline_for_skills(&self, skill_ids: &[String]) -> Option<String> {
// In production, this would query the pipeline registry
// For now, return a default
if skill_ids.len() >= 2 {
Some("skill-orchestration-pipeline".to_string())
} else {
None
}
}
/// Find a pipeline for an intent
fn find_pipeline_for_intent(&self, intent: &str) -> Option<String> {
// Map common intents to pipelines
match intent {
"research" => Some("research-pipeline".to_string()),
"analysis" => Some("analysis-pipeline".to_string()),
"report" => Some("report-generation".to_string()),
"code" => Some("code-generation".to_string()),
"task" | "tasks" => Some("task-management".to_string()),
_ => None,
}
}
/// Register a pipeline
pub fn register_pipeline(&mut self, metadata: PipelineMetadata) {
self.pipeline_registry.insert(metadata.id.clone(), metadata);
}
/// Unregister a pipeline
pub fn unregister_pipeline(&mut self, pipeline_id: &str) {
self.pipeline_registry.remove(pipeline_id);
}
/// Add a custom recommendation rule
pub fn add_rule(&mut self, rule: RecommendationRule) {
self.rules.push(rule);
// Sort by priority
self.rules.sort_by(|a, b| b.priority.cmp(&a.priority));
}
/// Remove a rule
pub fn remove_rule(&mut self, rule_id: &str) {
self.rules.retain(|r| r.id != rule_id);
}
/// Get all rules
pub fn get_rules(&self) -> &[RecommendationRule] {
&self.rules
}
/// Update configuration
pub fn update_config(&mut self, config: RecommenderConfig) {
self.config = config;
}
/// Get configuration
pub fn get_config(&self) -> &RecommenderConfig {
&self.config
}
    /// Get recommendation count
    ///
    /// Number of recommendations currently held in the in-memory cache.
    pub fn recommendation_count(&self) -> usize {
        self.recommendations_cache.len()
    }
    /// Clear recommendation cache
    ///
    /// Drops all cached recommendations; rules and registered pipelines are
    /// left untouched.
    pub fn clear_cache(&mut self) {
        self.recommendations_cache.clear();
    }
/// Accept a recommendation (remove from cache and return it)
/// Returns the accepted recommendation if found
pub fn accept_recommendation(&mut self, recommendation_id: &str) -> Option<WorkflowRecommendation> {
if let Some(pos) = self.recommendations_cache.iter().position(|r| r.id == recommendation_id) {
Some(self.recommendations_cache.remove(pos))
} else {
None
}
}
/// Dismiss a recommendation (remove from cache without acting on it)
/// Returns true if the recommendation was found and dismissed
pub fn dismiss_recommendation(&mut self, recommendation_id: &str) -> bool {
if let Some(pos) = self.recommendations_cache.iter().position(|r| r.id == recommendation_id) {
self.recommendations_cache.remove(pos);
true
} else {
false
}
}
/// Get a recommendation by ID
pub fn get_recommendation(&self, recommendation_id: &str) -> Option<&WorkflowRecommendation> {
self.recommendations_cache.iter().find(|r| r.id == recommendation_id)
}
/// Load recommendations from file
pub fn load_from_file(&mut self, path: &str) -> Result<(), String> {
let content = std::fs::read_to_string(path)
.map_err(|e| format!("Failed to read file: {}", e))?;
let recommendations: Vec<WorkflowRecommendation> = serde_json::from_str(&content)
.map_err(|e| format!("Failed to parse recommendations: {}", e))?;
self.recommendations_cache = recommendations;
Ok(())
}
/// Save recommendations to file
pub fn save_to_file(&self, path: &str) -> Result<(), String> {
let content = serde_json::to_string_pretty(&self.recommendations_cache)
.map_err(|e| format!("Failed to serialize recommendations: {}", e))?;
std::fs::write(path, content)
.map_err(|e| format!("Failed to write file: {}", e))?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Construction must seed the recommender with its built-in default rules.
    #[test]
    fn test_recommender_creation() {
        let recommender = WorkflowRecommender::new(None);
        assert!(!recommender.get_rules().is_empty());
    }
    // With no detected patterns there is nothing to recommend.
    #[test]
    fn test_recommend_from_empty_patterns() {
        let recommender = WorkflowRecommender::new(None);
        let recommendations = recommender.recommend(&[]);
        assert!(recommendations.is_empty());
    }
    // add_rule must keep the custom rule (and its priority) in the rule set.
    #[test]
    fn test_rule_priority() {
        let mut recommender = WorkflowRecommender::new(None);
        recommender.add_rule(RecommendationRule {
            id: "high_priority".to_string(),
            pattern_types: vec!["SkillCombination".to_string()],
            pipeline_id: "important-pipeline".to_string(),
            base_confidence: 0.9,
            description: "High priority rule".to_string(),
            input_mappings: HashMap::new(),
            priority: 10,
        });
        let rules = recommender.get_rules();
        assert!(rules.iter().any(|r| r.priority == 10));
    }
    // register_pipeline must index the metadata by its id.
    #[test]
    fn test_register_pipeline() {
        let mut recommender = WorkflowRecommender::new(None);
        recommender.register_pipeline(PipelineMetadata {
            id: "test-pipeline".to_string(),
            name: "Test Pipeline".to_string(),
            description: Some("A test pipeline".to_string()),
            tags: vec!["test".to_string()],
            input_schema: None,
        });
        assert!(recommender.pipeline_registry.contains_key("test-pipeline"));
    }
}

View File

@@ -1,845 +0,0 @@
//! Trigger Evaluator - Evaluates context-aware triggers for Hands
//!
//! This module extends the basic trigger system with semantic matching:
//! Supports MemoryQuery, ContextCondition, and IdentityState triggers.
//!
//! NOTE: This module is not yet integrated into the main application.
//! Components are still being developed and will be connected in a future release.
#![allow(dead_code)] // Module not yet integrated - components under development
use std::sync::Arc;
use std::pin::Pin;
use tokio::sync::Mutex;
use chrono::{DateTime, Utc, Timelike, Datelike};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use zclaw_memory::MemoryStore;
// === ReDoS Protection Constants ===
/// Maximum allowed length for regex patterns in bytes (prevents memory exhaustion)
const MAX_REGEX_PATTERN_LENGTH: usize = 500;
/// Maximum allowed nesting depth for regex quantifiers/groups (limits backtracking blowup)
const MAX_REGEX_NESTING_DEPTH: usize = 10;
/// Error type for regex validation failures
///
/// Returned by `validate_regex_pattern` / `compile_safe_regex`; each variant
/// carries enough detail to explain the rejection.
#[derive(Debug, Clone, PartialEq)]
pub enum RegexValidationError {
    /// Pattern exceeds maximum length (`length` is in bytes)
    TooLong { length: usize, max: usize },
    /// Pattern has excessive group/character-class nesting depth
    TooDeeplyNested { depth: usize, max: usize },
    /// Pattern contains dangerous ReDoS-prone constructs (reason in payload)
    DangerousPattern(String),
    /// Invalid regex syntax (message from the regex crate)
    InvalidSyntax(String),
}
impl std::fmt::Display for RegexValidationError {
    /// Render a human-readable description of the validation failure.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            RegexValidationError::TooLong { length, max } => {
                format!("Regex pattern too long: {} bytes (max: {})", length, max)
            }
            RegexValidationError::TooDeeplyNested { depth, max } => {
                format!("Regex pattern too deeply nested: {} levels (max: {})", depth, max)
            }
            RegexValidationError::DangerousPattern(reason) => {
                format!("Dangerous regex pattern detected: {}", reason)
            }
            RegexValidationError::InvalidSyntax(err) => {
                format!("Invalid regex syntax: {}", err)
            }
        };
        f.write_str(&message)
    }
}
// Marker impl: Debug + Display above satisfy the std::error::Error contract.
impl std::error::Error for RegexValidationError {}
/// Validate a regex pattern for ReDoS safety
///
/// Three independent checks are applied, cheapest first:
/// 1. byte length (prevents memory exhaustion),
/// 2. group/class nesting depth (prevents exponential backtracking),
/// 3. a heuristic scan for nested quantifiers such as `(a+)+`.
fn validate_regex_pattern(pattern: &str) -> Result<(), RegexValidationError> {
    // Length check.
    let length = pattern.len();
    if length > MAX_REGEX_PATTERN_LENGTH {
        return Err(RegexValidationError::TooLong {
            length,
            max: MAX_REGEX_PATTERN_LENGTH,
        });
    }
    // Nesting check: unescaped parentheses / character classes.
    let depth = calculate_nesting_depth(pattern);
    if depth > MAX_REGEX_NESTING_DEPTH {
        return Err(RegexValidationError::TooDeeplyNested {
            depth,
            max: MAX_REGEX_NESTING_DEPTH,
        });
    }
    // ReDoS heuristic: nested quantifiers like (a+)+ or alternation overlap.
    if contains_dangerous_redos_pattern(pattern) {
        return Err(RegexValidationError::DangerousPattern(
            "Pattern contains nested quantifiers on overlapping character classes".to_string(),
        ));
    }
    Ok(())
}
/// Calculate the maximum nesting depth of groups in a regex pattern
///
/// Counts `(` groups and `[` character classes toward depth, skipping
/// escaped characters. Contents of a character class are treated as opaque
/// (the scan jumps straight to the closing `]`).
///
/// NOTE(review): an unterminated `[` leaves `current_depth` elevated for the
/// rest of the scan, and unmatched `)` are ignored — acceptable here since
/// invalid syntax is rejected later at compile time.
fn calculate_nesting_depth(pattern: &str) -> usize {
    let chars: Vec<char> = pattern.chars().collect();
    let mut max_depth = 0;
    let mut current_depth = 0;
    let mut i = 0;
    while i < chars.len() {
        let c = chars[i];
        // Check for escape sequence
        if c == '\\' && i + 1 < chars.len() {
            // Skip the escaped character
            i += 2;
            continue;
        }
        // Handle character classes [...]
        if c == '[' {
            current_depth += 1;
            max_depth = max_depth.max(current_depth);
            // Find matching ] — escapes inside the class are skipped too
            i += 1;
            while i < chars.len() {
                if chars[i] == '\\' && i + 1 < chars.len() {
                    i += 2;
                    continue;
                }
                if chars[i] == ']' {
                    current_depth -= 1;
                    break;
                }
                i += 1;
            }
        }
        // Handle groups (...)
        else if c == '(' {
            // Skip non-capturing groups and lookaheads for simplicity
            // (?:...), (?=...), (?!...), (?<=...), (?<!...), (?P<name>...)
            current_depth += 1;
            max_depth = max_depth.max(current_depth);
        } else if c == ')' {
            if current_depth > 0 {
                current_depth -= 1;
            }
        }
        i += 1;
    }
    max_depth
}
/// Check for dangerous ReDoS patterns
///
/// Detects patterns like:
/// - (a+)+ - nested quantifiers
/// - (a*)+ - nested quantifiers
/// - (a+)* - nested quantifiers
/// - (.*)* - nested quantifiers on wildcard
///
/// Heuristic scan, not a full parser: it looks for a quantifier applied to a
/// group `(...)` whose body itself ends in a quantifier.
///
/// NOTE(review): the backward scan checks `chars[k] == '\\'` at the
/// quantifier position itself, so escaped quantifiers like `(a\+)+` can be
/// conservatively flagged as dangerous — a false positive, not a soundness
/// issue for a safety filter.
fn contains_dangerous_redos_pattern(pattern: &str) -> bool {
    let chars: Vec<char> = pattern.chars().collect();
    let mut i = 0;
    while i < chars.len() {
        // Look for a group close `)` immediately followed by a quantifier.
        // The guard is `i >= 2` (not `i > 0`): the original computed
        // `i - 2` with i == 1 for inputs such as ")+", underflowing usize
        // and panicking before the syntax check could reject the pattern.
        if i >= 2 {
            let prev = chars[i - 1];
            // Check if current char is a quantifier
            if matches!(chars[i], '+' | '*' | '?') {
                // Look back to see what's being quantified
                if prev == ')' {
                    // Find the matching opening paren
                    let mut depth = 1;
                    let mut j = i - 2;
                    while j > 0 && depth > 0 {
                        if chars[j] == ')' {
                            depth += 1;
                        } else if chars[j] == '(' {
                            depth -= 1;
                        } else if chars[j] == '\\' && j > 0 {
                            j -= 1; // Skip escaped char
                        }
                        j -= 1;
                    }
                    // Scan the group body (indices j+1..i-1) for a quantifier
                    // of its own — that indicates nested quantification.
                    // Inner groups are skipped wholesale so only the top
                    // level of this group is inspected.
                    let mut k = i - 2;
                    while k > j + 1 {
                        if chars[k] == '\\' && k > 0 {
                            k -= 1;
                        } else if matches!(chars[k], '+' | '*' | '?') {
                            // Found nested quantifier (e.g. the inner + of (a+)+)
                            return true;
                        } else if chars[k] == ')' {
                            // Skip nested groups
                            let mut nested_depth = 1;
                            k -= 1;
                            while k > j + 1 && nested_depth > 0 {
                                if chars[k] == ')' {
                                    nested_depth += 1;
                                } else if chars[k] == '(' {
                                    nested_depth -= 1;
                                } else if chars[k] == '\\' && k > 0 {
                                    k -= 1;
                                }
                                k -= 1;
                            }
                        }
                        k -= 1;
                    }
                }
            }
        }
        i += 1;
    }
    false
}
/// Safely compile a regex pattern with ReDoS protection
///
/// Runs `validate_regex_pattern` first; only patterns that pass the safety
/// checks are handed to the regex crate for compilation.
pub fn compile_safe_regex(pattern: &str) -> Result<regex::Regex, RegexValidationError> {
    validate_regex_pattern(pattern)?;
    match regex::Regex::new(pattern) {
        Ok(re) => Ok(re),
        Err(e) => Err(RegexValidationError::InvalidSyntax(e.to_string())),
    }
}
// === Extended Trigger Types ===
/// Memory query trigger configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryQueryConfig {
    /// Memory type to filter (e.g., "task", "preference"); `None` = any type
    pub memory_type: Option<String>,
    /// Content pattern to match (regex or substring)
    pub content_pattern: String,
    /// Minimum count of matching memories needed for the trigger to fire
    pub min_count: usize,
    /// Minimum importance threshold; `None` = no importance filter
    pub min_importance: Option<i32>,
    /// Time window for memories (hours); `None` = unbounded
    pub time_window_hours: Option<u64>,
}
/// Context condition configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextConditionConfig {
    /// Conditions to check; each clause is evaluated independently
    pub conditions: Vec<ContextConditionClause>,
    /// How to combine clause results (All, Any, None)
    pub combination: ConditionCombination,
}
/// Single context condition clause
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextConditionClause {
    /// Field to check
    pub field: ContextField,
    /// Comparison operator
    pub operator: ComparisonOperator,
    /// Value to compare against (JSON so strings and numbers both work)
    pub value: JsonValue,
}
/// Context fields that can be checked
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ContextField {
    /// Current hour of day (0-23, UTC)
    TimeOfDay,
    /// Day of week (0=Monday, 6=Sunday)
    DayOfWeek,
    /// Currently active project (if any)
    ActiveProject,
    /// Topics discussed recently
    RecentTopic,
    /// Number of pending tasks
    PendingTasks,
    /// Count of memories in storage
    MemoryCount,
    /// Hours since last interaction
    LastInteractionHours,
    /// Current conversation intent
    ConversationIntent,
}
/// Comparison operators for context conditions
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ComparisonOperator {
    Equals,
    NotEquals,
    Contains,      // substring match on string values
    GreaterThan,
    LessThan,
    Exists,
    NotExists,
    Matches, // regex match (pattern validated via compile_safe_regex)
}
/// How to combine multiple conditions
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ConditionCombination {
    /// All conditions must be true
    All,
    /// Any one condition being true is enough
    Any,
    /// None of the conditions should be true
    None,
}
/// Identity state trigger configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IdentityStateConfig {
    /// Identity file to check
    pub file: IdentityFile,
    /// Content pattern to match (regex); `None` = no content requirement
    pub content_pattern: Option<String>,
    /// Trigger on any change to the file (change tracking not yet implemented)
    pub any_change: bool,
}
/// Identity files that can be monitored
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum IdentityFile {
    Soul,         // maps to the identity's soul content
    Instructions, // maps to the identity's instructions
    User,         // maps to the identity's user profile
}
/// Composite trigger configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompositeTriggerConfig {
    /// Sub-triggers to combine (may themselves be composites)
    pub triggers: Vec<ExtendedTriggerType>,
    /// How to combine the sub-trigger results
    pub combination: ConditionCombination,
}
/// Extended trigger type that includes semantic triggers
///
/// Serialized with an internal `type` tag in snake_case, e.g.
/// `{"type": "time_of_day", "hour": 9}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ExtendedTriggerType {
    /// Standard interval trigger
    Interval {
        /// Interval in seconds
        seconds: u64,
    },
    /// Time-of-day trigger (evaluated against UTC)
    TimeOfDay {
        /// Hour (0-23)
        hour: u8,
        /// Optional minute (0-59); `None` matches the whole hour
        minute: Option<u8>,
    },
    /// Memory query trigger
    MemoryQuery(MemoryQueryConfig),
    /// Context condition trigger
    ContextCondition(ContextConditionConfig),
    /// Identity state trigger
    IdentityState(IdentityStateConfig),
    /// Composite trigger combining several sub-triggers
    Composite(CompositeTriggerConfig),
}
// === Trigger Evaluator ===
/// Evaluator for context-aware triggers
///
/// NOTE(review): `memory_store` and `heartbeat_engine` are held but not yet
/// used by the current evaluation paths (memory queries are stubbed) — they
/// are retained for the full implementations.
pub struct TriggerEvaluator {
    /// Memory store for memory queries
    memory_store: Arc<MemoryStore>,
    /// Identity manager for identity triggers
    identity_manager: Arc<Mutex<super::identity::AgentIdentityManager>>,
    /// Heartbeat engine for context
    heartbeat_engine: Arc<Mutex<super::heartbeat::HeartbeatEngine>>,
    /// Cached context data shared across evaluations
    context_cache: Arc<Mutex<TriggerContextCache>>,
}
/// Cached context for trigger evaluation
///
/// Starts empty (all `None`/empty via `Default`); fields are populated by
/// whatever component maintains the cache.
#[derive(Debug, Clone, Default)]
pub struct TriggerContextCache {
    /// Last known active project
    pub active_project: Option<String>,
    /// Recent topics discussed (most recent first, per RecentTopic evaluation)
    pub recent_topics: Vec<String>,
    /// Last conversation intent
    pub conversation_intent: Option<String>,
    /// Last update time (used for LastInteractionHours)
    pub last_updated: Option<DateTime<Utc>>,
}
impl TriggerEvaluator {
    /// Create a new trigger evaluator
    ///
    /// All collaborators are shared handles; the evaluator contributes only
    /// its own (initially empty) context cache.
    pub fn new(
        memory_store: Arc<MemoryStore>,
        identity_manager: Arc<Mutex<super::identity::AgentIdentityManager>>,
        heartbeat_engine: Arc<Mutex<super::heartbeat::HeartbeatEngine>>,
    ) -> Self {
        Self {
            memory_store,
            identity_manager,
            heartbeat_engine,
            context_cache: Arc::new(Mutex::new(TriggerContextCache::default())),
        }
    }
    /// Evaluate a trigger
    ///
    /// Returns `Ok(true)` when the trigger condition currently holds.
    /// `Interval` always evaluates to true here — interval pacing is assumed
    /// to be handled by the caller's scheduler. `TimeOfDay` compares against
    /// the current UTC hour (and minute, when given).
    pub async fn evaluate(
        &self,
        trigger: &ExtendedTriggerType,
        agent_id: &str,
    ) -> Result<bool, String> {
        match trigger {
            ExtendedTriggerType::Interval { .. } => Ok(true),
            ExtendedTriggerType::TimeOfDay { hour, minute } => {
                let now = Utc::now();
                let current_hour = now.hour() as u8;
                let current_minute = now.minute() as u8;
                if current_hour != *hour {
                    return Ok(false);
                }
                if let Some(min) = minute {
                    if current_minute != *min {
                        return Ok(false);
                    }
                }
                Ok(true)
            }
            ExtendedTriggerType::MemoryQuery(config) => {
                self.evaluate_memory_query(config, agent_id).await
            }
            ExtendedTriggerType::ContextCondition(config) => {
                self.evaluate_context_condition(config, agent_id).await
            }
            ExtendedTriggerType::IdentityState(config) => {
                self.evaluate_identity_state(config, agent_id).await
            }
            ExtendedTriggerType::Composite(config) => {
                self.evaluate_composite(config, agent_id, None).await
            }
        }
    }
    /// Evaluate memory query trigger
    ///
    /// Currently a stub: always returns `Ok(false)` (never fires) and logs a
    /// warning, because `MemoryStore` does not yet expose the needed search.
    async fn evaluate_memory_query(
        &self,
        config: &MemoryQueryConfig,
        _agent_id: &str,
    ) -> Result<bool, String> {
        // TODO: Implement proper memory search when MemoryStore supports it
        // For now, use KV store to check if we have enough keys matching pattern
        // This is a simplified implementation
        // Memory search is not fully implemented in current MemoryStore
        // Return false to indicate no matches until proper search is available
        tracing::warn!(
            pattern = %config.content_pattern,
            min_count = config.min_count,
            "Memory query trigger evaluation not fully implemented"
        );
        Ok(false)
    }
    /// Evaluate context condition trigger
    ///
    /// Evaluates every clause against the cached context, then folds the
    /// boolean results with the configured combination mode.
    async fn evaluate_context_condition(
        &self,
        config: &ContextConditionConfig,
        agent_id: &str,
    ) -> Result<bool, String> {
        let context = self.get_cached_context(agent_id).await;
        let mut results = Vec::new();
        for condition in &config.conditions {
            let result = self.evaluate_condition_clause(condition, &context);
            results.push(result);
        }
        // Combine results based on combination mode
        let final_result = match config.combination {
            ConditionCombination::All => results.iter().all(|r| *r),
            ConditionCombination::Any => results.iter().any(|r| *r),
            ConditionCombination::None => results.iter().all(|r| !*r),
        };
        Ok(final_result)
    }
    /// Evaluate a single condition clause against the cached context.
    ///
    /// Time fields use UTC now. Optional context fields satisfy only
    /// `NotExists` when absent. `PendingTasks` and `MemoryCount` are not yet
    /// implemented and always evaluate to false.
    fn evaluate_condition_clause(
        &self,
        clause: &ContextConditionClause,
        context: &TriggerContextCache,
    ) -> bool {
        match clause.field {
            ContextField::TimeOfDay => {
                let now = Utc::now();
                let current_hour = now.hour() as i32;
                self.compare_values(current_hour, &clause.operator, &clause.value)
            }
            ContextField::DayOfWeek => {
                let now = Utc::now();
                let current_day = now.weekday().num_days_from_monday() as i32;
                self.compare_values(current_day, &clause.operator, &clause.value)
            }
            ContextField::ActiveProject => {
                if let Some(project) = &context.active_project {
                    self.compare_values(project.clone(), &clause.operator, &clause.value)
                } else {
                    matches!(clause.operator, ComparisonOperator::NotExists)
                }
            }
            ContextField::RecentTopic => {
                // Only the most recent topic (first element) is considered.
                if let Some(topic) = context.recent_topics.first() {
                    self.compare_values(topic.clone(), &clause.operator, &clause.value)
                } else {
                    matches!(clause.operator, ComparisonOperator::NotExists)
                }
            }
            ContextField::PendingTasks => {
                // Would need to query memory store
                false // Not implemented yet
            }
            ContextField::MemoryCount => {
                // Would need to query memory store
                false // Not implemented yet
            }
            ContextField::LastInteractionHours => {
                if let Some(last_updated) = context.last_updated {
                    let hours = (Utc::now() - last_updated).num_hours();
                    self.compare_values(hours as i32, &clause.operator, &clause.value)
                } else {
                    false
                }
            }
            ContextField::ConversationIntent => {
                if let Some(intent) = &context.conversation_intent {
                    self.compare_values(intent.clone(), &clause.operator, &clause.value)
                } else {
                    matches!(clause.operator, ComparisonOperator::NotExists)
                }
            }
        }
    }
    /// Compare an actual value against an expected JSON value using the
    /// given operator.
    ///
    /// Type mismatches (e.g. Contains on a number) evaluate to false rather
    /// than erroring. `Exists`/`NotExists` test JSON-null of the actual
    /// value. `Matches` compiles the expected string through
    /// `compile_safe_regex`; an invalid/unsafe pattern logs and counts as
    /// no match.
    fn compare_values<T>(&self, actual: T, operator: &ComparisonOperator, expected: &JsonValue) -> bool
    where
        T: Into<JsonValue>,
    {
        let actual_value = actual.into();
        match operator {
            ComparisonOperator::Equals => &actual_value == expected,
            ComparisonOperator::NotEquals => &actual_value != expected,
            ComparisonOperator::Contains => {
                if let (Some(actual_str), Some(expected_str)) =
                    (actual_value.as_str(), expected.as_str())
                {
                    actual_str.contains(expected_str)
                } else {
                    false
                }
            }
            ComparisonOperator::GreaterThan => {
                // Try integer comparison first, fall back to float.
                if let (Some(actual_num), Some(expected_num)) =
                    (actual_value.as_i64(), expected.as_i64())
                {
                    actual_num > expected_num
                } else if let (Some(actual_num), Some(expected_num)) =
                    (actual_value.as_f64(), expected.as_f64())
                {
                    actual_num > expected_num
                } else {
                    false
                }
            }
            ComparisonOperator::LessThan => {
                if let (Some(actual_num), Some(expected_num)) =
                    (actual_value.as_i64(), expected.as_i64())
                {
                    actual_num < expected_num
                } else if let (Some(actual_num), Some(expected_num)) =
                    (actual_value.as_f64(), expected.as_f64())
                {
                    actual_num < expected_num
                } else {
                    false
                }
            }
            ComparisonOperator::Exists => !actual_value.is_null(),
            ComparisonOperator::NotExists => actual_value.is_null(),
            ComparisonOperator::Matches => {
                if let (Some(actual_str), Some(expected_str)) =
                    (actual_value.as_str(), expected.as_str())
                {
                    compile_safe_regex(expected_str)
                        .map(|re| re.is_match(actual_str))
                        .unwrap_or_else(|e| {
                            tracing::warn!(
                                pattern = %expected_str,
                                error = %e,
                                "Regex pattern validation failed, treating as no match"
                            );
                            false
                        })
                } else {
                    false
                }
            }
        }
    }
    /// Evaluate identity state trigger
    ///
    /// Fails with an error string if the configured pattern is invalid.
    /// NOTE(review): `any_change` is not honoured yet — without change
    /// tracking this returns true whenever the (optional) pattern matches.
    async fn evaluate_identity_state(
        &self,
        config: &IdentityStateConfig,
        agent_id: &str,
    ) -> Result<bool, String> {
        let mut manager = self.identity_manager.lock().await;
        let identity = manager.get_identity(agent_id);
        // Get the target file content
        let content = match config.file {
            IdentityFile::Soul => identity.soul,
            IdentityFile::Instructions => identity.instructions,
            IdentityFile::User => identity.user_profile,
        };
        // Check content pattern if specified
        if let Some(pattern) = &config.content_pattern {
            let re = compile_safe_regex(pattern)
                .map_err(|e| format!("Invalid regex pattern: {}", e))?;
            if !re.is_match(&content) {
                return Ok(false);
            }
        }
        // If any_change is true, we would need to track changes
        // For now, just return true
        Ok(true)
    }
    /// Get cached context for an agent
    ///
    /// NOTE(review): the cache is currently global — `_agent_id` is ignored,
    /// so all agents share one TriggerContextCache.
    async fn get_cached_context(&self, _agent_id: &str) -> TriggerContextCache {
        self.context_cache.lock().await.clone()
    }
    /// Evaluate composite trigger
    ///
    /// Boxed future because this recurses through `evaluate` (composites may
    /// nest). NOTE(review): `_depth` suggests a planned recursion limit that
    /// is not enforced yet — deeply nested composites recurse unbounded.
    fn evaluate_composite<'a>(
        &'a self,
        config: &'a CompositeTriggerConfig,
        agent_id: &'a str,
        _depth: Option<usize>,
    ) -> Pin<Box<dyn std::future::Future<Output = Result<bool, String>> + 'a>> {
        Box::pin(async move {
            let mut results = Vec::new();
            for trigger in &config.triggers {
                let result = self.evaluate(trigger, agent_id).await?;
                results.push(result);
            }
            // Combine results based on combination mode
            let final_result = match config.combination {
                ConditionCombination::All => results.iter().all(|r| *r),
                ConditionCombination::Any => results.iter().any(|r| *r),
                ConditionCombination::None => results.iter().all(|r| !*r),
            };
            Ok(final_result)
        })
    }
}
// === Unit Tests ===
#[cfg(test)]
mod tests {
    use super::*;
    // Safety validation behaviour of compile_safe_regex / validate_regex_pattern.
    mod regex_validation {
        use super::*;
        #[test]
        fn test_valid_simple_pattern() {
            let pattern = r"hello";
            assert!(compile_safe_regex(pattern).is_ok());
        }
        #[test]
        fn test_valid_pattern_with_quantifiers() {
            let pattern = r"\d+";
            assert!(compile_safe_regex(pattern).is_ok());
        }
        #[test]
        fn test_valid_pattern_with_groups() {
            let pattern = r"(foo|bar)\d{2,4}";
            assert!(compile_safe_regex(pattern).is_ok());
        }
        #[test]
        fn test_valid_character_class() {
            let pattern = r"[a-zA-Z0-9_]+";
            assert!(compile_safe_regex(pattern).is_ok());
        }
        #[test]
        fn test_pattern_too_long() {
            let pattern = "a".repeat(501);
            let result = compile_safe_regex(&pattern);
            assert!(matches!(result, Err(RegexValidationError::TooLong { .. })));
        }
        // Boundary: exactly MAX_REGEX_PATTERN_LENGTH bytes is still accepted.
        #[test]
        fn test_pattern_at_max_length() {
            let pattern = "a".repeat(500);
            let result = compile_safe_regex(&pattern);
            assert!(result.is_ok());
        }
        #[test]
        fn test_nested_quantifier_detection_simple() {
            // Classic ReDoS pattern: (a+)+
            // Our implementation detects this as dangerous
            let pattern = r"(a+)+";
            let result = validate_regex_pattern(pattern);
            assert!(
                matches!(result, Err(RegexValidationError::DangerousPattern(_))),
                "Expected nested quantifier pattern to be detected as dangerous"
            );
        }
        #[test]
        fn test_deeply_nested_groups() {
            // Create a pattern with too many nested groups
            let pattern = "(".repeat(15) + &"a".repeat(10) + &")".repeat(15);
            let result = compile_safe_regex(&pattern);
            assert!(matches!(result, Err(RegexValidationError::TooDeeplyNested { .. })));
        }
        #[test]
        fn test_reasonably_nested_groups() {
            // Pattern with acceptable nesting
            let pattern = "(((foo|bar)))";
            let result = compile_safe_regex(pattern);
            assert!(result.is_ok());
        }
        #[test]
        fn test_invalid_regex_syntax() {
            let pattern = r"[unclosed";
            let result = compile_safe_regex(pattern);
            assert!(matches!(result, Err(RegexValidationError::InvalidSyntax(_))));
        }
        #[test]
        fn test_escaped_characters_in_pattern() {
            let pattern = r"\[hello\]";
            let result = compile_safe_regex(pattern);
            assert!(result.is_ok());
        }
        #[test]
        fn test_complex_valid_pattern() {
            // Email-like pattern (simplified)
            let pattern = r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}";
            let result = compile_safe_regex(pattern);
            assert!(result.is_ok());
        }
    }
    // Group/character-class depth computation.
    mod nesting_depth_calculation {
        use super::*;
        #[test]
        fn test_no_nesting() {
            assert_eq!(calculate_nesting_depth("abc"), 0);
        }
        #[test]
        fn test_single_group() {
            assert_eq!(calculate_nesting_depth("(abc)"), 1);
        }
        #[test]
        fn test_nested_groups() {
            assert_eq!(calculate_nesting_depth("((abc))"), 2);
        }
        #[test]
        fn test_character_class() {
            assert_eq!(calculate_nesting_depth("[abc]"), 1);
        }
        #[test]
        fn test_mixed_nesting() {
            assert_eq!(calculate_nesting_depth("([a-z]+)"), 2);
        }
        #[test]
        fn test_escaped_parens() {
            // Escaped parens shouldn't count toward nesting
            assert_eq!(calculate_nesting_depth(r"\(abc\)"), 0);
        }
        #[test]
        fn test_multiple_groups_same_level() {
            assert_eq!(calculate_nesting_depth("(abc)(def)"), 1);
        }
    }
    // Heuristic nested-quantifier detector (negative cases only here).
    mod dangerous_pattern_detection {
        use super::*;
        #[test]
        fn test_simple_quantifier_not_dangerous() {
            assert!(!contains_dangerous_redos_pattern(r"a+"));
        }
        #[test]
        fn test_simple_group_not_dangerous() {
            assert!(!contains_dangerous_redos_pattern(r"(abc)"));
        }
        #[test]
        fn test_quantified_group_not_dangerous() {
            assert!(!contains_dangerous_redos_pattern(r"(abc)+"));
        }
        #[test]
        fn test_alternation_not_dangerous() {
            assert!(!contains_dangerous_redos_pattern(r"(a|b)+"));
        }
    }
}

View File

@@ -0,0 +1,153 @@
//! Intelligence Hooks - Pre/Post conversation integration
//!
//! Bridges the intelligence layer modules (identity, memory, heartbeat, reflection)
//! into the kernel's chat flow at the Tauri command boundary.
//!
//! Architecture: kernel_commands.rs → intelligence_hooks → intelligence modules → Viking/Kernel
use tracing::debug;
use crate::intelligence::identity::IdentityManagerState;
use crate::intelligence::heartbeat::HeartbeatEngineState;
use crate::intelligence::reflection::ReflectionEngineState;
/// Run pre-conversation intelligence hooks
///
/// 1. Build memory context from VikingStorage (FTS5 + TF-IDF + Embedding)
/// 2. Build identity-enhanced system prompt (SOUL.md + instructions)
///
/// Returns the enhanced system prompt that should be passed to the kernel.
pub async fn pre_conversation_hook(
agent_id: &str,
user_message: &str,
identity_state: &IdentityManagerState,
) -> Result<String, String> {
// Step 1: Build memory context from Viking storage
let memory_context = build_memory_context(agent_id, user_message).await
.unwrap_or_default();
// Step 2: Build identity-enhanced system prompt
let enhanced_prompt = build_identity_prompt(agent_id, &memory_context, identity_state)
.await
.unwrap_or_default();
Ok(enhanced_prompt)
}
/// Run post-conversation intelligence hooks
///
/// 1. Record interaction for heartbeat engine
/// 2. Record conversation for reflection engine, trigger reflection if needed
///
/// NOTE(review): the reflection engine's mutex is held for the entire
/// `reflect()` call below — if reflection is slow, other conversations
/// block on this lock.
pub async fn post_conversation_hook(
    agent_id: &str,
    _heartbeat_state: &HeartbeatEngineState,
    reflection_state: &ReflectionEngineState,
) {
    // Step 1: Record interaction for heartbeat
    crate::intelligence::heartbeat::record_interaction(agent_id);
    debug!("[intelligence_hooks] Recorded interaction for agent: {}", agent_id);
    // Step 2: Record conversation for reflection
    // tokio::sync::Mutex::lock() returns the MutexGuard directly (tokio
    // mutexes have no poisoning, unlike std)
    let mut engine = reflection_state.lock().await;
    engine.record_conversation();
    debug!(
        "[intelligence_hooks] Conversation count updated for agent: {}",
        agent_id
    );
    if engine.should_reflect() {
        debug!(
            "[intelligence_hooks] Reflection threshold reached for agent: {}",
            agent_id
        );
        // Reflection runs synchronously here; its result is only logged.
        let reflection_result = engine.reflect(agent_id, &[]);
        debug!(
            "[intelligence_hooks] Reflection completed: {} patterns, {} suggestions",
            reflection_result.patterns.len(),
            reflection_result.improvements.len()
        );
    }
}
/// Build memory context by searching VikingStorage for relevant memories
///
/// Returns an empty string when nothing matches. The rendered block is
/// capped by a rough token budget so it cannot dominate the prompt.
async fn build_memory_context(
    agent_id: &str,
    user_message: &str,
) -> Result<String, String> {
    // Try Viking storage (has FTS5 + TF-IDF + Embedding)
    let storage = crate::viking_commands::get_storage().await?;
    // FindOptions from zclaw_growth
    let options = zclaw_growth::FindOptions {
        scope: Some(format!("agent://{}", agent_id)),
        limit: Some(8),
        min_similarity: Some(0.2),
    };
    // find is on the VikingStorage trait — call via trait to dispatch correctly
    let results: Vec<zclaw_growth::MemoryEntry> =
        zclaw_growth::VikingStorage::find(storage.as_ref(), user_message, options)
            .await
            .map_err(|e| format!("Memory search failed: {}", e))?;
    if results.is_empty() {
        return Ok(String::new());
    }
    // Format memories into context string
    let mut context = String::from("## 相关记忆\n\n");
    let mut token_estimate: usize = 0;
    let max_tokens: usize = 500;
    for entry in &results {
        // Prefer overview (L1 summary) over full content
        // overview is Option<String> — use as_deref to get Option<&str>
        let overview_str = entry.overview.as_deref().unwrap_or("");
        let text = if !overview_str.is_empty() {
            overview_str
        } else {
            &entry.content
        };
        // Truncate long entries at a CHARACTER boundary. The previous
        // `&text[..100]` sliced by bytes and panicked whenever byte 100
        // fell inside a multi-byte (e.g. CJK) character — likely here,
        // since memory content is frequently Chinese.
        let truncated = match text.char_indices().nth(100) {
            Some((byte_idx, _)) => format!("{}...", &text[..byte_idx]),
            None => text.to_string(),
        };
        // Rough token estimate: 1 per ASCII char, 2 per non-ASCII (CJK) char
        let tokens: usize = truncated.chars()
            .map(|c: char| if c.is_ascii() { 1 } else { 2 })
            .sum();
        if token_estimate + tokens > max_tokens {
            break;
        }
        context.push_str(&format!("- [{}] {}\n", entry.memory_type, truncated));
        token_estimate += tokens;
    }
    Ok(context)
}
/// Build identity-enhanced system prompt
///
/// Locks the shared identity manager and delegates prompt assembly to it,
/// attaching the memory context only when it is non-empty.
async fn build_identity_prompt(
    agent_id: &str,
    memory_context: &str,
    identity_state: &IdentityManagerState,
) -> Result<String, String> {
    // IdentityManagerState is Arc<tokio::sync::Mutex<AgentIdentityManager>>;
    // tokio's lock() hands back the guard directly (no poisoning Result).
    let mut manager = identity_state.lock().await;
    let context_arg = if memory_context.is_empty() {
        None
    } else {
        Some(memory_context)
    };
    Ok(manager.build_system_prompt(agent_id, context_arg))
}

View File

@@ -1,7 +1,7 @@
//! ZCLAW Kernel commands for Tauri
//!
//! These commands provide direct access to the internal ZCLAW Kernel,
//! eliminating the need for external OpenFang process.
//! eliminating the need for external ZCLAW process.
use std::path::PathBuf;
use std::sync::Arc;
@@ -416,6 +416,9 @@ pub struct StreamChatRequest {
pub async fn agent_chat_stream(
app: AppHandle,
state: State<'_, KernelState>,
identity_state: State<'_, crate::intelligence::IdentityManagerState>,
heartbeat_state: State<'_, crate::intelligence::HeartbeatEngineState>,
reflection_state: State<'_, crate::intelligence::ReflectionEngineState>,
request: StreamChatRequest,
) -> Result<(), String> {
// Validate inputs
@@ -428,7 +431,15 @@ pub async fn agent_chat_stream(
.map_err(|_| "Invalid agent ID format".to_string())?;
let session_id = request.session_id.clone();
let message = request.message;
let agent_id_str = request.agent_id.clone();
let message = request.message.clone();
// PRE-CONVERSATION: Build intelligence-enhanced system prompt
let enhanced_prompt = crate::intelligence_hooks::pre_conversation_hook(
&request.agent_id,
&request.message,
&identity_state,
).await.unwrap_or_default();
// Get the streaming receiver while holding the lock, then release it
let mut rx = {
@@ -437,12 +448,18 @@ pub async fn agent_chat_stream(
.ok_or_else(|| "Kernel not initialized. Call kernel_init first.".to_string())?;
// Start the stream - this spawns a background task
kernel.send_message_stream(&id, message)
// Use intelligence-enhanced system prompt if available
let prompt_arg = if enhanced_prompt.is_empty() { None } else { Some(enhanced_prompt) };
kernel.send_message_stream_with_prompt(&id, message, prompt_arg)
.await
.map_err(|e| format!("Failed to start streaming: {}", e))?
};
// Lock is released here
// Clone Arc references before spawning (State<'_, T> borrows can't enter the spawn)
let hb_state = heartbeat_state.inner().clone();
let rf_state = reflection_state.inner().clone();
// Spawn a task to process stream events
tokio::spawn(async move {
use zclaw_runtime::LoopEvent;
@@ -472,6 +489,12 @@ pub async fn agent_chat_stream(
LoopEvent::Complete(result) => {
println!("[agent_chat_stream] Complete: input_tokens={}, output_tokens={}",
result.input_tokens, result.output_tokens);
// POST-CONVERSATION: record interaction + trigger reflection
crate::intelligence_hooks::post_conversation_hook(
&agent_id_str, &hb_state, &rf_state,
).await;
StreamChatEvent::Complete {
input_tokens: result.input_tokens,
output_tokens: result.output_tokens,
@@ -1078,3 +1101,155 @@ pub async fn approval_respond(
kernel.respond_to_approval(&id, approved, reason).await
.map_err(|e| format!("Failed to respond to approval: {}", e))
}
/// Approve a hand execution (alias for approval_respond with approved=true)
///
/// `run_id` doubles as the kernel approval id. `_hand_name` is accepted for
/// API symmetry with other hand commands but is not needed for the lookup.
#[tauri::command]
pub async fn hand_approve(
    state: State<'_, KernelState>,
    _hand_name: String,
    run_id: String,
    approved: bool,
    reason: Option<String>,
) -> Result<serde_json::Value, String> {
    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized".to_string())?;
    // run_id maps to approval id
    kernel
        .respond_to_approval(&run_id, approved, reason)
        .await
        .map_err(|e| format!("Failed to approve hand: {}", e))?;
    let status = if approved { "approved" } else { "rejected" };
    Ok(serde_json::json!({ "status": status }))
}
/// Cancel a pending hand execution identified by `run_id`.
#[tauri::command]
pub async fn hand_cancel(
    state: State<'_, KernelState>,
    _hand_name: String,
    run_id: String,
) -> Result<serde_json::Value, String> {
    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized".to_string())?;

    kernel
        .cancel_approval(&run_id)
        .await
        .map_err(|e| format!("Failed to cancel hand: {}", e))?;

    Ok(serde_json::json!({ "status": "cancelled" }))
}
// ============================================================
// Scheduled Task Commands
// ============================================================
/// Request to create a scheduled task (maps to kernel trigger)
///
/// Deserialized from the frontend with camelCase keys (e.g. `scheduleType`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateScheduledTaskRequest {
    // Display name of the task.
    pub name: String,
    // Schedule expression; interpreted per `schedule_type` (see scheduled_task_create,
    // which forwards it as a cron string for every accepted type).
    pub schedule: String,
    // One of "cron", "schedule", "interval" or "once"; other values are rejected.
    pub schedule_type: String,
    // Optional execution target; its `id` becomes the trigger's hand_id.
    pub target: Option<ScheduledTaskTarget>,
    // Optional description (not consumed by scheduled_task_create in this file).
    pub description: Option<String>,
    // Whether the task starts enabled; defaults to true when omitted.
    pub enabled: Option<bool>,
}
/// Target for a scheduled task
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ScheduledTaskTarget {
    // Kind of target; serialized as "type" on the wire. Not inspected by
    // scheduled_task_create, which only forwards `id`.
    #[serde(rename = "type")]
    pub target_type: String,
    // Identifier of the target (used as TriggerConfig::hand_id).
    pub id: String,
}
/// Response for scheduled task creation
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ScheduledTaskResponse {
    // Trigger id as stored by the kernel (e.g. "sched_<millis>" for new tasks).
    pub id: String,
    // Display name of the task.
    pub name: String,
    // Schedule expression echoed back to the frontend.
    pub schedule: String,
    // "active" or "paused" (see scheduled_task_create / scheduled_task_list).
    pub status: String,
}
/// Create a scheduled task (backed by kernel TriggerManager)
///
/// Tasks are stored in the kernel's trigger system. Automatic execution
/// requires a scheduler loop (not yet implemented in embedded kernel mode).
///
/// Returns a response echoing the stored trigger's id/name; new tasks are
/// reported as "active".
#[tauri::command]
pub async fn scheduled_task_create(
    state: State<'_, KernelState>,
    request: CreateScheduledTaskRequest,
) -> Result<ScheduledTaskResponse, String> {
    let kernel_lock = state.lock().await;
    let kernel = kernel_lock.as_ref()
        .ok_or_else(|| "Kernel not initialized".to_string())?;

    // Build TriggerConfig from request. Every accepted schedule type maps onto
    // the kernel's Schedule trigger; "interval" and "once" are carried as a
    // simplified cron expression rather than distinct trigger kinds, so the
    // previously triplicated match arms are collapsed into one.
    let trigger_type = match request.schedule_type.as_str() {
        "cron" | "schedule" | "interval" | "once" => zclaw_hands::TriggerType::Schedule {
            cron: request.schedule.clone(),
        },
        _ => return Err(format!("Unsupported schedule type: {}", request.schedule_type)),
    };

    // Absent target means an empty hand id.
    let target_id = request.target.as_ref().map(|t| t.id.clone()).unwrap_or_default();
    // Millisecond timestamp keeps ids unique enough for manual task creation.
    let task_id = format!("sched_{}", chrono::Utc::now().timestamp_millis());

    let config = zclaw_hands::TriggerConfig {
        id: task_id.clone(),
        name: request.name.clone(),
        hand_id: target_id,
        trigger_type,
        enabled: request.enabled.unwrap_or(true),
        max_executions_per_hour: 60, // conservative default rate limit
    };

    let entry = kernel.create_trigger(config).await
        .map_err(|e| format!("Failed to create scheduled task: {}", e))?;

    Ok(ScheduledTaskResponse {
        id: entry.config.id,
        name: entry.config.name,
        schedule: request.schedule,
        status: "active".to_string(),
    })
}
/// List all scheduled tasks (kernel triggers of Schedule type)
///
/// Non-Schedule triggers are skipped; status reflects the trigger's
/// enabled flag ("active" vs "paused").
#[tauri::command]
pub async fn scheduled_task_list(
    state: State<'_, KernelState>,
) -> Result<Vec<ScheduledTaskResponse>, String> {
    let guard = state.lock().await;
    let kernel = guard
        .as_ref()
        .ok_or_else(|| "Kernel not initialized".to_string())?;

    let triggers = kernel.list_triggers().await;

    // Single pass: keep Schedule triggers only and shape them for the frontend.
    let tasks: Vec<ScheduledTaskResponse> = triggers
        .into_iter()
        .filter_map(|t| match t.config.trigger_type {
            zclaw_hands::TriggerType::Schedule { cron } => Some(ScheduledTaskResponse {
                id: t.config.id,
                name: t.config.name,
                schedule: cron,
                status: if t.config.enabled { "active" } else { "paused" }.to_string(),
            }),
            _ => None,
        })
        .collect();

    Ok(tasks)
}

View File

@@ -15,5 +15,6 @@ pub mod crypto;
// Re-export main types for convenience
pub use persistent::{
PersistentMemory, PersistentMemoryStore, MemorySearchQuery, MemoryStats,
generate_memory_id,
generate_memory_id, configure_embedding_client, is_embedding_configured,
EmbedFn,
};

View File

@@ -11,12 +11,69 @@
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::sync::{Mutex, OnceCell};
use uuid::Uuid;
use tauri::Manager;
use sqlx::{SqliteConnection, Connection, Row, sqlite::SqliteRow};
use chrono::Utc;
/// Embedding function type: text -> vector of f32
///
/// A shared async closure so any embedding backend can be plugged in; the
/// returned future is pinned and Send so it can be awaited from spawned tasks.
pub type EmbedFn = Arc<dyn Fn(&str) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<Vec<f32>, String>> + Send>> + Send + Sync>;

/// Global embedding function for PersistentMemoryStore
///
/// Tokio OnceCell: set at most once per process (see configure_embedding_client).
static EMBEDDING_FN: OnceCell<EmbedFn> = OnceCell::const_new();
/// Configure the global embedding function for memory search
///
/// The backing `OnceCell` can only be set once per process; a repeated call
/// is a no-op. Previously that failure was discarded silently while still
/// logging "configured" — now a warning makes the dropped reconfiguration
/// visible in the logs.
pub fn configure_embedding_client(fn_impl: EmbedFn) {
    if EMBEDDING_FN.set(fn_impl).is_err() {
        tracing::warn!("[PersistentMemoryStore] Embedding client already configured; ignoring reconfiguration");
    } else {
        tracing::info!("[PersistentMemoryStore] Embedding client configured");
    }
}
/// Check if embedding is available
///
/// True once configure_embedding_client has successfully installed a client.
pub fn is_embedding_configured() -> bool {
    EMBEDDING_FN.get().is_some()
}
/// Generate embedding for text using the configured client
///
/// Errors when no client has been configured; callers either check
/// is_embedding_configured first or treat the error as "skip embedding".
async fn embed_text(text: &str) -> Result<Vec<f32>, String> {
    let client = EMBEDDING_FN.get()
        .ok_or_else(|| "Embedding client not configured".to_string())?;
    client(text).await
}
/// Decode a BLOB of little-endian f32 values (4 bytes each) into a vector.
/// Trailing bytes that do not form a complete 4-byte group are discarded.
fn deserialize_embedding(blob: &[u8]) -> Vec<f32> {
    blob.chunks_exact(4)
        .map(|bytes| {
            let mut buf = [0u8; 4];
            buf.copy_from_slice(bytes);
            f32::from_le_bytes(buf)
        })
        .collect()
}
/// Encode an f32 vector as a BLOB of little-endian 4-byte groups
/// (inverse of deserialize_embedding).
fn serialize_embedding(vec: &[f32]) -> Vec<u8> {
    vec.iter().flat_map(|val| val.to_le_bytes()).collect()
}
/// Cosine similarity of two equal-length vectors, clamped to [0, 1].
///
/// Mismatched lengths, empty input, or a zero-magnitude vector yield 0.0.
/// Negative correlations are deliberately clamped to 0 so scores can be
/// treated as a 0..1 relevance signal.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    if a.len() != b.len() || a.is_empty() {
        return 0.0;
    }
    // Single fused pass: dot product plus both squared norms.
    let (dot, norm_a, norm_b) = a.iter().zip(b.iter()).fold(
        (0.0f32, 0.0f32, 0.0f32),
        |(dot, na, nb), (&x, &y)| (dot + x * y, na + x * x, nb + y * y),
    );
    let denom = (norm_a * norm_b).sqrt();
    if denom == 0.0 {
        0.0
    } else {
        (dot / denom).clamp(0.0, 1.0)
    }
}
/// Memory entry stored in SQLite
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersistentMemory {
@@ -32,6 +89,7 @@ pub struct PersistentMemory {
pub last_accessed_at: String,
pub access_count: i32,
pub embedding: Option<Vec<u8>>, // Vector embedding for semantic search
pub overview: Option<String>, // L1 summary (1-2 sentences, ~200 tokens)
}
// Manual implementation of FromRow since sqlx::FromRow derive has issues with Option<Vec<u8>>
@@ -50,12 +108,13 @@ impl<'r> sqlx::FromRow<'r, SqliteRow> for PersistentMemory {
last_accessed_at: row.try_get("last_accessed_at")?,
access_count: row.try_get("access_count")?,
embedding: row.try_get("embedding")?,
overview: row.try_get("overview").ok(),
})
}
}
/// Memory search options
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
pub struct MemorySearchQuery {
pub agent_id: Option<String>,
pub memory_type: Option<String>,
@@ -149,11 +208,34 @@ impl PersistentMemoryStore {
.await
.map_err(|e| format!("Failed to create schema: {}", e))?;
// Migration: add overview column (L1 summary)
let _ = sqlx::query("ALTER TABLE memories ADD COLUMN overview TEXT")
.execute(&mut *conn)
.await;
Ok(())
}
/// Store a new memory
pub async fn store(&self, memory: &PersistentMemory) -> Result<(), String> {
// Generate embedding if client is configured and memory doesn't have one
let embedding = if memory.embedding.is_some() {
memory.embedding.clone()
} else if is_embedding_configured() {
match embed_text(&memory.content).await {
Ok(vec) => {
tracing::debug!("[PersistentMemoryStore] Generated embedding for {} ({} dims)", memory.id, vec.len());
Some(serialize_embedding(&vec))
}
Err(e) => {
tracing::debug!("[PersistentMemoryStore] Embedding generation failed: {}", e);
None
}
}
} else {
None
};
let mut conn = self.conn.lock().await;
sqlx::query(
@@ -161,8 +243,8 @@ impl PersistentMemoryStore {
INSERT INTO memories (
id, agent_id, memory_type, content, importance, source,
tags, conversation_id, created_at, last_accessed_at,
access_count, embedding
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
access_count, embedding, overview
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
"#,
)
.bind(&memory.id)
@@ -176,7 +258,8 @@ impl PersistentMemoryStore {
.bind(&memory.created_at)
.bind(&memory.last_accessed_at)
.bind(memory.access_count)
.bind(&memory.embedding)
.bind(&embedding)
.bind(&memory.overview)
.execute(&mut *conn)
.await
.map_err(|e| format!("Failed to store memory: {}", e))?;
@@ -212,7 +295,7 @@ impl PersistentMemoryStore {
Ok(result)
}
/// Search memories with simple query
/// Search memories with semantic ranking when embeddings are available
pub async fn search(&self, query: MemorySearchQuery) -> Result<Vec<PersistentMemory>, String> {
let mut conn = self.conn.lock().await;
@@ -239,11 +322,14 @@ impl PersistentMemoryStore {
params.push(format!("%{}%", query_text));
}
sql.push_str(" ORDER BY created_at DESC");
// When using embedding ranking, fetch more candidates
let effective_limit = if query.query.is_some() && is_embedding_configured() {
query.limit.unwrap_or(50).max(20) // Fetch more for re-ranking
} else {
query.limit.unwrap_or(50)
};
if let Some(limit) = query.limit {
sql.push_str(&format!(" LIMIT {}", limit));
}
sql.push_str(&format!(" LIMIT {}", effective_limit));
if let Some(offset) = query.offset {
sql.push_str(&format!(" OFFSET {}", offset));
@@ -255,11 +341,41 @@ impl PersistentMemoryStore {
query_builder = query_builder.bind(param);
}
let results = query_builder
let mut results = query_builder
.fetch_all(&mut *conn)
.await
.map_err(|e| format!("Failed to search memories: {}", e))?;
// Apply semantic ranking if query and embedding are available
if let Some(query_text) = &query.query {
if is_embedding_configured() {
if let Ok(query_embedding) = embed_text(query_text).await {
// Score each result by cosine similarity
let mut scored: Vec<(f32, PersistentMemory)> = results
.into_iter()
.map(|mem| {
let score = mem.embedding.as_ref()
.map(|blob| {
let vec = deserialize_embedding(blob);
cosine_similarity(&query_embedding, &vec)
})
.unwrap_or(0.0);
(score, mem)
})
.collect();
// Sort by score descending
scored.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
// Apply the original limit
results = scored.into_iter()
.take(query.limit.unwrap_or(20))
.map(|(_, mem)| mem)
.collect();
}
}
}
Ok(results)
}

View File

@@ -3,7 +3,7 @@
//! Phase 1 of Intelligence Layer Migration:
//! Provides frontend API for memory storage and retrieval
use crate::memory::{PersistentMemory, PersistentMemoryStore, MemorySearchQuery, MemoryStats, generate_memory_id};
use crate::memory::{PersistentMemory, PersistentMemoryStore, MemorySearchQuery, MemoryStats, generate_memory_id, configure_embedding_client, is_embedding_configured, EmbedFn};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tauri::{AppHandle, State};
@@ -52,6 +52,9 @@ pub async fn memory_init(
}
/// Store a new memory
///
/// Writes to both PersistentMemoryStore (backward compat) and SqliteStorage (FTS5+Embedding).
/// SqliteStorage write failure is logged but does not block the operation.
#[tauri::command]
pub async fn memory_store(
entry: MemoryEntryInput,
@@ -64,28 +67,61 @@ pub async fn memory_store(
.ok_or_else(|| "Memory store not initialized. Call memory_init first.".to_string())?;
let now = Utc::now().to_rfc3339();
let id = generate_memory_id();
let memory = PersistentMemory {
id: generate_memory_id(),
agent_id: entry.agent_id,
memory_type: entry.memory_type,
content: entry.content,
id: id.clone(),
agent_id: entry.agent_id.clone(),
memory_type: entry.memory_type.clone(),
content: entry.content.clone(),
importance: entry.importance.unwrap_or(5),
source: entry.source.unwrap_or_else(|| "auto".to_string()),
tags: serde_json::to_string(&entry.tags.unwrap_or_default())
tags: serde_json::to_string(&entry.tags.clone().unwrap_or_default())
.unwrap_or_else(|_| "[]".to_string()),
conversation_id: entry.conversation_id,
conversation_id: entry.conversation_id.clone(),
created_at: now.clone(),
last_accessed_at: now,
access_count: 0,
embedding: None,
overview: None,
};
let id = memory.id.clone();
// Write to PersistentMemoryStore (primary)
store.store(&memory).await?;
// Also write to SqliteStorage via VikingStorage for FTS5 + Embedding search
if let Ok(storage) = crate::viking_commands::get_storage().await {
let memory_type = parse_memory_type(&entry.memory_type);
let keywords = entry.tags.unwrap_or_default();
let viking_entry = zclaw_growth::MemoryEntry::new(
&entry.agent_id,
memory_type,
&entry.memory_type,
entry.content,
)
.with_importance(entry.importance.unwrap_or(5) as u8)
.with_keywords(keywords);
match zclaw_growth::VikingStorage::store(storage.as_ref(), &viking_entry).await {
Ok(()) => tracing::debug!("[memory_store] Also stored in SqliteStorage"),
Err(e) => tracing::warn!("[memory_store] SqliteStorage write failed (non-blocking): {}", e),
}
}
Ok(id)
}
/// Parse a string memory_type into zclaw_growth::MemoryType
///
/// Matching is case-insensitive; unknown labels fall back to Knowledge so a
/// write is never rejected because of an unrecognized type string.
fn parse_memory_type(type_str: &str) -> zclaw_growth::MemoryType {
    match type_str.to_lowercase().as_str() {
        "preference" => zclaw_growth::MemoryType::Preference,
        // Several label families are all folded into general knowledge.
        "knowledge" | "fact" | "task" | "todo" | "lesson" | "event" => zclaw_growth::MemoryType::Knowledge,
        "skill" | "experience" => zclaw_growth::MemoryType::Experience,
        "session" | "conversation" => zclaw_growth::MemoryType::Session,
        _ => zclaw_growth::MemoryType::Knowledge,
    }
}
/// Get a memory by ID
#[tauri::command]
pub async fn memory_get(
@@ -213,3 +249,223 @@ pub async fn memory_db_path(
Ok(store.path().to_string_lossy().to_string())
}
/// Configure embedding for PersistentMemoryStore (chat memory search)
///
/// Called alongside viking_configure_embedding to enable vector search in the
/// chat flow. Builds an EmbeddingClient from the caller-supplied provider
/// settings and installs it as the global EmbedFn closure.
#[tauri::command]
pub async fn memory_configure_embedding(
    provider: String,
    api_key: String,
    model: Option<String>,
    endpoint: Option<String>,
) -> Result<bool, String> {
    // Build the embedding client from the provider settings.
    let config = crate::llm::EmbeddingConfig {
        provider,
        api_key,
        endpoint,
        model,
    };
    let client = std::sync::Arc::new(crate::llm::EmbeddingClient::new(config));

    // Adapt the client to the EmbedFn closure shape the memory store expects:
    // each call clones the Arc and owns its input text for the async move.
    let embed_fn: EmbedFn = Arc::new(move |text: &str| {
        let client = client.clone();
        let text = text.to_string();
        Box::pin(async move {
            let response = client.embed(&text).await?;
            Ok(response.embedding)
        })
    });

    configure_embedding_client(embed_fn);
    tracing::info!("[MemoryCommands] Embedding configured for PersistentMemoryStore");
    Ok(true)
}
/// Check if embedding is configured for PersistentMemoryStore
///
/// Thin Tauri wrapper over memory::is_embedding_configured for the frontend.
#[tauri::command]
pub fn memory_is_embedding_configured() -> bool {
    is_embedding_configured()
}
/// Build layered memory context for chat prompt injection
///
/// Uses SqliteStorage (FTS5 + TF-IDF + Embedding) for high-quality semantic search,
/// with fallback to PersistentMemoryStore if Viking storage is unavailable.
///
/// Performs L0→L1→L2 progressive loading:
/// - L0: Search all matching memories (vector similarity when available)
/// - L1: Use overview/summary when available, fall back to truncated content
/// - L2: Full content only for top-ranked items
///
/// Returns the rendered prompt section plus token accounting; an empty
/// `system_prompt_addition` means no memory fit the query or token budget.
#[tauri::command]
pub async fn memory_build_context(
    agent_id: String,
    query: String,
    max_tokens: Option<usize>,
    state: State<'_, MemoryStoreState>,
) -> Result<BuildContextResult, String> {
    // Token budget for the injected context; defaults to 500.
    let budget = max_tokens.unwrap_or(500);

    // Try SqliteStorage (Viking) first — has FTS5 + TF-IDF + Embedding
    let entries = match crate::viking_commands::get_storage().await {
        Ok(storage) => {
            let options = zclaw_growth::FindOptions {
                // Restrict results to this agent's memory namespace.
                scope: Some(format!("agent://{}", agent_id)),
                // Heuristic: ~25 tokens per memory line, but fetch at least 8 candidates.
                limit: Some((budget / 25).max(8)),
                min_similarity: Some(0.2),
            };
            match zclaw_growth::VikingStorage::find(storage.as_ref(), &query, options).await {
                Ok(entries) => entries,
                Err(e) => {
                    // Search failure is non-fatal: fall through to the legacy store.
                    tracing::warn!("[memory_build_context] Viking search failed, falling back: {}", e);
                    Vec::new()
                }
            }
        }
        Err(_) => {
            tracing::debug!("[memory_build_context] Viking storage unavailable, falling back to PersistentMemoryStore");
            Vec::new()
        }
    };

    // If Viking found results, use them (they have overview/embedding ranking)
    if !entries.is_empty() {
        let mut used_tokens = 0;
        let mut items: Vec<String> = Vec::new();
        let mut memories_used = 0;

        for entry in &entries {
            // Stop once the budget is exhausted.
            if used_tokens >= budget {
                break;
            }
            // Prefer overview (L1 summary) over full content
            let overview_str = entry.overview.as_deref().unwrap_or("");
            let display_content = if !overview_str.is_empty() {
                overview_str.to_string()
            } else {
                truncate_for_l1(&entry.content)
            };
            let item_tokens = estimate_tokens_text(&display_content);
            // An over-budget item is skipped, not a hard stop: a later,
            // shorter item may still fit.
            if used_tokens + item_tokens > budget {
                continue;
            }
            items.push(format!("- [{}] {}", entry.memory_type, display_content));
            used_tokens += item_tokens;
            memories_used += 1;
        }

        let system_prompt_addition = if items.is_empty() {
            String::new()
        } else {
            format!("## 相关记忆\n{}", items.join("\n"))
        };

        return Ok(BuildContextResult {
            system_prompt_addition,
            total_tokens: used_tokens,
            memories_used,
        });
    }

    // Fallback: PersistentMemoryStore (LIKE-based search)
    let state_guard = state.lock().await;
    let store = state_guard
        .as_ref()
        .ok_or_else(|| "Memory store not initialized".to_string())?;

    // Same ~25-tokens-per-memory heuristic; fetch at least 20 for re-ranking.
    let limit = budget / 25;
    let search_query = MemorySearchQuery {
        agent_id: Some(agent_id.clone()),
        query: Some(query.clone()),
        limit: Some(limit.max(20)),
        min_importance: Some(3),
        ..Default::default()
    };

    let memories = store.search(search_query).await?;

    if memories.is_empty() {
        return Ok(BuildContextResult {
            system_prompt_addition: String::new(),
            total_tokens: 0,
            memories_used: 0,
        });
    }

    // Build layered context with token budget
    let mut used_tokens = 0;
    let mut items: Vec<String> = Vec::new();
    let mut memories_used = 0;

    for memory in &memories {
        if used_tokens >= budget {
            break;
        }
        // Prefer a non-empty overview (L1); otherwise truncate full content.
        let display_content = if let Some(ref overview) = memory.overview {
            if !overview.is_empty() {
                overview.clone()
            } else {
                truncate_for_l1(&memory.content)
            }
        } else {
            truncate_for_l1(&memory.content)
        };
        let item_tokens = estimate_tokens_text(&display_content);
        if used_tokens + item_tokens > budget {
            continue;
        }
        items.push(format!("- [{}] {}", memory.memory_type, display_content));
        used_tokens += item_tokens;
        memories_used += 1;
    }

    let system_prompt_addition = if items.is_empty() {
        String::new()
    } else {
        format!("## 相关记忆\n{}", items.join("\n"))
    };

    Ok(BuildContextResult {
        system_prompt_addition,
        total_tokens: used_tokens,
        memories_used,
    })
}
/// Result of building layered memory context
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BuildContextResult {
    // Rendered markdown section ("## 相关记忆" + bullet list); empty when no memories matched.
    pub system_prompt_addition: String,
    // Estimated token cost of the section (see estimate_tokens_text).
    pub total_tokens: usize,
    // Number of memory items included in the section.
    pub memories_used: usize,
}
/// Truncate content for L1 overview display (~50 tokens).
///
/// Counts Unicode chars, not bytes; content at or under 100 chars is returned
/// unchanged, longer content is cut at 100 chars with a "..." suffix.
fn truncate_for_l1(content: &str) -> String {
    const CHAR_LIMIT: usize = 100; // ~50 tokens for mixed CJK/ASCII

    // Single pass: take the head, then probe whether anything was left over.
    let mut chars = content.chars();
    let head: String = chars.by_ref().take(CHAR_LIMIT).collect();
    if chars.next().is_none() {
        content.to_string()
    } else {
        format!("{}...", head)
    }
}
/// Estimate token count for text.
///
/// CJK chars (U+4E00..=U+9FFF) are weighted 1.5 tokens each, everything else
/// 0.4; the sum is rounded up. This is a heuristic, not a real tokenizer.
fn estimate_tokens_text(text: &str) -> usize {
    // One pass, counting CJK and non-CJK characters together.
    let (cjk, other) = text.chars().fold((0usize, 0usize), |(cjk, other), c| {
        if ('\u{4E00}'..='\u{9FFF}').contains(&c) {
            (cjk + 1, other)
        } else {
            (cjk, other + 1)
        }
    });
    (cjk as f32 * 1.5 + other as f32 * 0.4).ceil() as usize
}

View File

@@ -0,0 +1,133 @@
//! Summarizer Adapter - Bridges zclaw_growth::SummaryLlmDriver with Tauri LLM Client
//!
//! Implements the SummaryLlmDriver trait using the local LlmClient,
//! enabling L0/L1 summary generation via the user's configured LLM.
use zclaw_growth::{MemoryEntry, SummaryLlmDriver, summarizer::{overview_prompt, abstract_prompt}};
/// Tauri-side implementation of SummaryLlmDriver using llm::LlmClient
pub struct TauriSummaryDriver {
    // Base URL of an OpenAI-compatible API ("/chat/completions" is appended by call_llm).
    endpoint: String,
    // Bearer token sent in the Authorization header.
    api_key: String,
    // Model name; call_llm defaults to "glm-4-flash" when None.
    model: Option<String>,
}
impl TauriSummaryDriver {
    /// Create a new Tauri summary driver.
    pub fn new(endpoint: String, api_key: String, model: Option<String>) -> Self {
        Self { endpoint, api_key, model }
    }

    /// True when both an endpoint and an API key are present.
    pub fn is_configured(&self) -> bool {
        !(self.endpoint.is_empty() || self.api_key.is_empty())
    }

    /// Send a single-user-message chat completion and return the reply text.
    ///
    /// Talks to an OpenAI-compatible `/chat/completions` endpoint; low
    /// temperature and a 200-token cap keep summaries short and stable.
    async fn call_llm(&self, prompt: String) -> Result<String, String> {
        let model = self.model.clone().unwrap_or_else(|| "glm-4-flash".to_string());
        let payload = serde_json::json!({
            "model": model,
            "messages": [
                { "role": "user", "content": prompt }
            ],
            "temperature": 0.3,
            "max_tokens": 200,
        });

        let http = reqwest::Client::new();
        let response = http
            .post(format!("{}/chat/completions", self.endpoint))
            .header("Authorization", format!("Bearer {}", self.api_key))
            .header("Content-Type", "application/json")
            .json(&payload)
            .send()
            .await
            .map_err(|e| format!("Summary LLM request failed: {}", e))?;

        let status = response.status();
        if !status.is_success() {
            let body = response.text().await.unwrap_or_default();
            return Err(format!("Summary LLM error {}: {}", status, body));
        }

        let json: serde_json::Value = response
            .json()
            .await
            .map_err(|e| format!("Failed to parse summary response: {}", e))?;

        // Extract choices[0].message.content via a JSON Pointer lookup.
        json.pointer("/choices/0/message/content")
            .and_then(|c| c.as_str())
            .map(str::to_string)
            .ok_or_else(|| "Invalid summary LLM response format".to_string())
    }
}
#[async_trait::async_trait]
impl SummaryLlmDriver for TauriSummaryDriver {
    /// Generate the overview summary for a memory entry.
    async fn generate_overview(&self, entry: &MemoryEntry) -> Result<String, String> {
        self.call_llm(overview_prompt(entry)).await
    }

    /// Generate the abstract summary for a memory entry.
    async fn generate_abstract(&self, entry: &MemoryEntry) -> Result<String, String> {
        self.call_llm(abstract_prompt(entry)).await
    }
}
/// Global summary driver instance (lazy-initialized)
///
/// Tokio OnceCell: installed at most once per process by configure_summary_driver.
static SUMMARY_DRIVER: tokio::sync::OnceCell<std::sync::Arc<TauriSummaryDriver>> =
    tokio::sync::OnceCell::const_new();
/// Configure the global summary driver
///
/// The backing `OnceCell` can only be set once per process; a repeated call
/// is a no-op. Previously the failed set was discarded while still logging
/// "configured" — now a warning makes the dropped reconfiguration visible.
pub fn configure_summary_driver(driver: TauriSummaryDriver) {
    if SUMMARY_DRIVER.set(std::sync::Arc::new(driver)).is_err() {
        tracing::warn!("[SummarizerAdapter] Summary driver already configured; ignoring reconfiguration");
    } else {
        tracing::info!("[SummarizerAdapter] Summary driver configured");
    }
}
/// Check if summary driver is available
///
/// True only when a driver has been installed AND it carries a non-empty
/// endpoint and API key.
pub fn is_summary_driver_configured() -> bool {
    match SUMMARY_DRIVER.get() {
        Some(driver) => driver.is_configured(),
        None => false,
    }
}
/// Get the global summary driver
///
/// Returns None until configure_summary_driver has been called.
pub fn get_summary_driver() -> Option<std::sync::Arc<TauriSummaryDriver>> {
    SUMMARY_DRIVER.get().cloned()
}
#[cfg(test)]
mod tests {
    use super::*;
    use zclaw_growth::MemoryType;

    // NOTE(review): relies on no other test in this binary having set the
    // global SUMMARY_DRIVER before this runs — confirm isolation if tests
    // that call configure_summary_driver are added.
    #[test]
    fn test_summary_driver_not_configured_by_default() {
        assert!(!is_summary_driver_configured());
    }

    // A driver with both endpoint and key reports configured; empty strings do not.
    #[test]
    fn test_summary_driver_configure_and_check() {
        let driver = TauriSummaryDriver::new(
            "https://example.com/v1".to_string(),
            "test-key".to_string(),
            None,
        );
        assert!(driver.is_configured());

        let empty_driver = TauriSummaryDriver::new(String::new(), String::new(), None);
        assert!(!empty_driver.is_configured());
    }
}

View File

@@ -67,6 +67,13 @@ pub struct VikingAddResult {
pub status: String,
}
/// Result payload for viking_configure_embedding, serialized camelCase for the frontend.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EmbeddingConfigResult {
    // Provider name echoed back from the request.
    pub provider: String,
    // Always true on success (failures are reported via Result::Err instead).
    pub configured: bool,
}
// === Global Storage Instance ===
/// Global storage instance
@@ -100,12 +107,20 @@ pub async fn init_storage() -> Result<(), String> {
Ok(())
}
/// Get the storage instance (public for use by other modules)
/// Get the storage instance, initializing on first access if needed
pub async fn get_storage() -> Result<Arc<SqliteStorage>, String> {
if let Some(storage) = STORAGE.get() {
return Ok(storage.clone());
}
// Attempt lazy initialization
tracing::info!("[VikingCommands] Storage not yet initialized, attempting lazy init...");
init_storage().await?;
STORAGE
.get()
.cloned()
.ok_or_else(|| "Storage not initialized. Call init_storage() first.".to_string())
.ok_or_else(|| "Storage initialization failed. Check logs for details.".to_string())
}
/// Get storage directory for status
@@ -217,12 +232,24 @@ pub async fn viking_find(
Ok(entries
.into_iter()
.enumerate()
.map(|(i, entry)| VikingFindResult {
uri: entry.uri,
score: 1.0 - (i as f64 * 0.1), // Simple scoring based on rank
content: entry.content,
level: "L1".to_string(),
overview: None,
.map(|(i, entry)| {
// Use overview (L1) when available, full content otherwise (L2)
let (content, level, overview) = if let Some(ref ov) = entry.overview {
if !ov.is_empty() {
(ov.clone(), "L1".to_string(), None)
} else {
(entry.content.clone(), "L2".to_string(), None)
}
} else {
(entry.content.clone(), "L2".to_string(), None)
};
VikingFindResult {
uri: entry.uri,
score: 1.0 - (i as f64 * 0.1), // Simple scoring based on rank
content,
level,
overview,
}
})
.collect())
}
@@ -309,7 +336,7 @@ pub async fn viking_ls(path: String) -> Result<Vec<VikingResource>, String> {
/// Read memory content
#[tauri::command]
pub async fn viking_read(uri: String, _level: Option<String>) -> Result<String, String> {
pub async fn viking_read(uri: String, level: Option<String>) -> Result<String, String> {
let storage = get_storage().await?;
let entry = storage
@@ -318,7 +345,34 @@ pub async fn viking_read(uri: String, _level: Option<String>) -> Result<String,
.map_err(|e| format!("Failed to read memory: {}", e))?;
match entry {
Some(e) => Ok(e.content),
Some(e) => {
// Support level-based content retrieval
let content = match level.as_deref() {
Some("L0") | Some("l0") => {
// L0: abstract_summary (keywords)
e.abstract_summary
.filter(|s| !s.is_empty())
.unwrap_or_else(|| {
// Fallback: first 50 chars of overview
e.overview
.as_ref()
.map(|ov| ov.chars().take(50).collect())
.unwrap_or_else(|| e.content.chars().take(50).collect())
})
}
Some("L1") | Some("l1") => {
// L1: overview (1-2 sentence summary)
e.overview
.filter(|s| !s.is_empty())
.unwrap_or_else(|| truncate_text(&e.content, 200))
}
_ => {
// L2 or default: full content
e.content
}
};
Ok(content)
}
None => Err(format!("Memory not found: {}", uri)),
}
}
@@ -442,6 +496,16 @@ pub async fn viking_inject_prompt(
// === Helper Functions ===
/// Truncate text to approximately max_chars characters (Unicode chars, not
/// bytes); longer text is cut and suffixed with "...".
fn truncate_text(text: &str, max_chars: usize) -> String {
    let mut iter = text.chars();
    let head: String = iter.by_ref().take(max_chars).collect();
    match iter.next() {
        None => text.to_string(),
        Some(_) => format!("{}...", head),
    }
}
/// Parse URI to extract components
fn parse_uri(uri: &str) -> Result<(String, MemoryType, String), String> {
// Expected format: agent://{agent_id}/{type}/{category}
@@ -462,6 +526,136 @@ fn parse_uri(uri: &str) -> Result<(String, MemoryType, String), String> {
Ok((agent_id, memory_type, category))
}
/// Configure embedding for semantic memory search
/// Configures both SqliteStorage (VikingPanel) and PersistentMemoryStore (chat flow)
///
/// The same provider credentials are used to build two independent clients so
/// each storage system owns its own embedding pipeline.
#[tauri::command]
pub async fn viking_configure_embedding(
    provider: String,
    api_key: String,
    model: Option<String>,
    endpoint: Option<String>,
) -> Result<EmbeddingConfigResult, String> {
    let storage = get_storage().await?;

    // 1. Configure SqliteStorage (VikingPanel / VikingCommands)
    let config_viking = crate::llm::EmbeddingConfig {
        provider: provider.clone(),
        api_key: api_key.clone(),
        endpoint: endpoint.clone(),
        model: model.clone(),
    };
    let client_viking = crate::llm::EmbeddingClient::new(config_viking);
    let adapter = crate::embedding_adapter::TauriEmbeddingAdapter::new(client_viking);
    storage
        .configure_embedding(std::sync::Arc::new(adapter))
        .await
        .map_err(|e| format!("Failed to configure embedding: {}", e))?;

    // 2. Configure PersistentMemoryStore (chat flow)
    let config_memory = crate::llm::EmbeddingConfig {
        provider: provider.clone(),
        api_key,
        endpoint,
        model,
    };
    let client_memory = std::sync::Arc::new(crate::llm::EmbeddingClient::new(config_memory));
    // Wrap the client in the EmbedFn closure shape the memory store expects.
    let embed_fn: crate::memory::EmbedFn = {
        let client_arc = client_memory.clone();
        std::sync::Arc::new(move |text: &str| {
            let client = client_arc.clone();
            let text = text.to_string();
            Box::pin(async move {
                let response = client.embed(&text).await?;
                Ok(response.embedding)
            })
        })
    };
    // NOTE(review): the chat-flow side uses a OnceCell, so a second call here
    // leaves the previously installed EmbedFn in place — confirm intended.
    crate::memory::configure_embedding_client(embed_fn);

    tracing::info!("[VikingCommands] Embedding configured with provider: {} (both storage systems)", provider);

    Ok(EmbeddingConfigResult {
        provider,
        configured: true,
    })
}
/// Configure summary driver for L0/L1 auto-generation.
///
/// Builds a TauriSummaryDriver from the given endpoint/key/model and installs
/// it as the process-wide driver used by viking_store_with_summaries.
#[tauri::command]
pub async fn viking_configure_summary_driver(
    endpoint: String,
    api_key: String,
    model: Option<String>,
) -> Result<bool, String> {
    crate::summarizer_adapter::configure_summary_driver(
        crate::summarizer_adapter::TauriSummaryDriver::new(endpoint, api_key, model),
    );
    tracing::info!("[VikingCommands] Summary driver configured");
    Ok(true)
}
/// Store a memory and optionally generate L0/L1 summaries in the background
///
/// The entry is written immediately with its full (L2) content so the caller
/// never waits on the LLM; summaries are filled in by a detached tokio task
/// when a summary driver is configured. Summary failures are only logged.
#[tauri::command]
pub async fn viking_store_with_summaries(
    uri: String,
    content: String,
) -> Result<VikingAddResult, String> {
    let storage = get_storage().await?;
    let (agent_id, memory_type, category) = parse_uri(&uri)?;
    let entry = MemoryEntry::new(&agent_id, memory_type, &category, content);

    // Store the entry immediately (L2 full content)
    storage
        .store(&entry)
        .await
        .map_err(|e| format!("Failed to store memory: {}", e))?;

    // Background: generate L0/L1 summaries if driver is configured
    if crate::summarizer_adapter::is_summary_driver_configured() {
        // Clone what outlives the move; `entry` itself is moved into the task.
        let entry_uri = entry.uri.clone();
        let storage_clone = storage.clone();
        tokio::spawn(async move {
            if let Some(driver) = crate::summarizer_adapter::get_summary_driver() {
                let (overview, abstract_summary) =
                    zclaw_growth::summarizer::generate_summaries(driver.as_ref(), &entry).await;
                if overview.is_some() || abstract_summary.is_some() {
                    // Update the entry with summaries
                    // NOTE(review): assumes store() upserts by uri so this
                    // second write replaces the L2-only record — confirm.
                    let updated = MemoryEntry {
                        overview,
                        abstract_summary,
                        ..entry
                    };
                    if let Err(e) = storage_clone.store(&updated).await {
                        tracing::debug!(
                            "[VikingCommands] Failed to update summaries for {}: {}",
                            entry_uri,
                            e
                        );
                    } else {
                        tracing::debug!(
                            "[VikingCommands] Updated L0/L1 summaries for {}",
                            entry_uri
                        );
                    }
                }
            }
        });
    }

    Ok(VikingAddResult {
        uri,
        status: "added".to_string(),
    })
}
// === Tests ===
#[cfg(test)]

View File

@@ -21,13 +21,15 @@ import { Loader2 } from 'lucide-react';
import { isTauriRuntime, getLocalGatewayStatus, startLocalGateway } from './lib/tauri-gateway';
import { useOnboarding } from './lib/use-onboarding';
import { intelligenceClient } from './lib/intelligence-client';
import { loadEmbeddingConfig } from './lib/embedding-client';
import { invoke } from '@tauri-apps/api/core';
import { useProposalNotifications, ProposalNotificationHandler } from './lib/useProposalNotifications';
import { useToast } from './components/ui/Toast';
import type { Clone } from './store/agentStore';
type View = 'main' | 'settings';
// Bootstrap component that ensures OpenFang is running before rendering main UI
// Bootstrap component that ensures ZCLAW is running before rendering main UI
function BootstrapScreen({ status }: { status: string }) {
return (
<div className="h-screen flex items-center justify-center bg-gray-50">
@@ -125,7 +127,7 @@ function App() {
// Don't clear pendingApprovalRun - keep it for reference
}, []);
// Bootstrap: Start OpenFang Gateway before rendering main UI
// Bootstrap: Start ZCLAW Gateway before rendering main UI
useEffect(() => {
let mounted = true;
@@ -140,7 +142,7 @@ function App() {
const isRunning = status.portStatus === 'busy' || status.listenerPids.length > 0;
if (!isRunning && status.cliAvailable) {
setBootstrapStatus('Starting OpenFang Gateway...');
setBootstrapStatus('Starting ZCLAW Gateway...');
console.log('[App] Local gateway not running, auto-starting...');
await startLocalGateway();
@@ -230,7 +232,43 @@ function App() {
// Non-critical, continue without heartbeat
}
// Step 5: Bootstrap complete
// Step 5: Restore embedding config to Rust backend
try {
const embConfig = loadEmbeddingConfig();
if (embConfig.enabled && embConfig.provider !== 'local' && embConfig.apiKey) {
setBootstrapStatus('Restoring embedding configuration...');
await invoke('viking_configure_embedding', {
provider: embConfig.provider,
apiKey: embConfig.apiKey,
model: embConfig.model || undefined,
endpoint: embConfig.endpoint || undefined,
});
console.log('[App] Embedding configuration restored to backend');
}
} catch (embErr) {
console.warn('[App] Failed to restore embedding config:', embErr);
// Non-critical, semantic search will fall back to TF-IDF
}
// Step 5b: Configure summary driver using active LLM (for L0/L1 generation)
try {
const { getDefaultModelConfig } = await import('./store/connectionStore');
const modelConfig = getDefaultModelConfig();
if (modelConfig && modelConfig.apiKey && modelConfig.baseUrl) {
setBootstrapStatus('Configuring summary driver...');
await invoke('viking_configure_summary_driver', {
endpoint: modelConfig.baseUrl,
apiKey: modelConfig.apiKey,
model: modelConfig.model || undefined,
});
console.log('[App] Summary driver configured with active LLM');
}
} catch (sumErr) {
console.warn('[App] Failed to configure summary driver:', sumErr);
// Non-critical, summaries won't be auto-generated
}
// Step 6: Bootstrap complete
setBootstrapping(false);
} catch (err) {
console.error('[App] Bootstrap failed:', err);

View File

@@ -1,10 +1,10 @@
/**
* ApprovalsPanel - OpenFang Execution Approvals UI
* ApprovalsPanel - ZCLAW Execution Approvals UI
*
* Displays pending, approved, and rejected approval requests
* for Hand executions that require human approval.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';

View File

@@ -1,5 +1,5 @@
/**
* AuditLogsPanel - OpenFang Audit Logs UI with Merkle Hash Chain Verification
* AuditLogsPanel - ZCLAW Audit Logs UI with Merkle Hash Chain Verification
*
* Phase 3.4 Enhancement: Full-featured audit log viewer with:
* - Complete log entry display
@@ -51,7 +51,7 @@ export interface AuditLogFilter {
}
interface EnhancedAuditLogEntry extends AuditLogEntry {
// Extended fields from OpenFang
// Extended fields from ZCLAW
targetResource?: string;
operationDetails?: Record<string, unknown>;
ipAddress?: string;
@@ -633,7 +633,7 @@ export function AuditLogsPanel() {
setVerificationResult(null);
try {
// Call OpenFang API to verify the chain
// Call ZCLAW API to verify the chain
const result = await client.verifyAuditLogChain(log.id);
const verification: MerkleVerificationResult = {

View File

@@ -42,7 +42,7 @@ export function CloneManager() {
role: '默认助手',
nickname: a.name,
scenarios: [] as string[],
workspaceDir: '~/.openfang/zclaw-workspace',
workspaceDir: '~/.zclaw/zclaw-workspace',
userName: quickConfig.userName || '未设置',
userRole: '',
restrictFiles: true,

View File

@@ -3,7 +3,7 @@
*
* Displays the current Gateway connection status with visual indicators.
* Supports automatic reconnect and manual reconnect button.
* Includes health status indicator for OpenFang backend.
* Includes health status indicator for ZCLAW backend.
*/
import { useState, useEffect } from 'react';
@@ -230,7 +230,7 @@ export function ConnectionIndicator({ className = '' }: { className?: string })
}
/**
* HealthStatusIndicator - Displays OpenFang backend health status
* HealthStatusIndicator - Displays ZCLAW backend health status
*/
export function HealthStatusIndicator({
className = '',

View File

@@ -3,7 +3,7 @@
*
* Supports trigger types:
* - webhook: External HTTP request trigger
* - event: OpenFang internal event trigger
* - event: ZCLAW internal event trigger
* - message: Chat message pattern trigger
*/
@@ -119,7 +119,7 @@ const triggerTypeOptions: Array<{
{
value: 'event',
label: 'Event',
description: 'OpenFang internal event trigger',
description: 'ZCLAW internal event trigger',
icon: Bell,
},
{

View File

@@ -64,7 +64,7 @@ export function HandList({ selectedHandId, onSelectHand }: HandListProps) {
<div className="p-4 text-center">
<Zap className="w-8 h-8 mx-auto text-gray-300 mb-2" />
<p className="text-xs text-gray-400 mb-1"> Hands</p>
<p className="text-xs text-gray-300"> OpenFang </p>
<p className="text-xs text-gray-300"> ZCLAW </p>
</div>
);
}

View File

@@ -1,10 +1,10 @@
/**
* HandsPanel - OpenFang Hands Management UI
* HandsPanel - ZCLAW Hands Management UI
*
* Displays available OpenFang Hands (autonomous capability packages)
* Displays available ZCLAW Hands (autonomous capability packages)
* with detailed status, requirements, and activation controls.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';
@@ -528,7 +528,7 @@ export function HandsPanel() {
</div>
<p className="text-sm text-gray-500 dark:text-gray-400 mb-3"> Hands</p>
<p className="text-xs text-gray-400 dark:text-gray-500">
OpenFang
ZCLAW
</p>
</div>
);

View File

@@ -441,7 +441,7 @@ export function RightPanel() {
))}
</div>
</div>
<AgentRow label="Workspace" value={selectedClone?.workspaceDir || workspaceInfo?.path || '~/.openfang/zclaw-workspace'} />
<AgentRow label="Workspace" value={selectedClone?.workspaceDir || workspaceInfo?.path || '~/.zclaw/zclaw-workspace'} />
<AgentRow label="Resolved" value={selectedClone?.workspaceResolvedPath || workspaceInfo?.resolvedPath || '-'} />
<AgentRow label="File Restriction" value={selectedClone?.restrictFiles ? 'Enabled' : 'Disabled'} />
<AgentRow label="Opt-in" value={selectedClone?.privacyOptIn ? 'Joined' : 'Not joined'} />
@@ -739,7 +739,7 @@ function createAgentDraft(
nickname: clone.nickname || '',
model: clone.model || currentModel,
scenarios: clone.scenarios?.join(', ') || '',
workspaceDir: clone.workspaceDir || '~/.openfang/zclaw-workspace',
workspaceDir: clone.workspaceDir || '~/.zclaw/zclaw-workspace',
userName: clone.userName || '',
userRole: clone.userRole || '',
restrictFiles: clone.restrictFiles ?? true,

View File

@@ -1,9 +1,9 @@
/**
* SchedulerPanel - OpenFang Scheduler UI
* SchedulerPanel - ZCLAW Scheduler UI
*
* Displays scheduled jobs, event triggers, workflows, and run history.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';

View File

@@ -30,7 +30,7 @@ import type { SecurityLayer, SecurityStatus } from '../store/securityStore';
import { useSecurityStore } from '../store/securityStore';
import { useConnectionStore } from '../store/connectionStore';
// OpenFang 16-layer security architecture definitions
// ZCLAW 16-layer security architecture definitions
export const SECURITY_LAYERS: Array<{
id: string;
name: string;
@@ -482,7 +482,7 @@ export function calculateSecurityScore(layers: SecurityLayer[]): number {
return Math.round((activeCount / SECURITY_LAYERS.length) * 100);
}
// ZCLAW 默认安全状态(独立于 OpenFang
// ZCLAW 默认安全状态(本地检测
export function getDefaultSecurityStatus(): SecurityStatus {
// ZCLAW 默认启用的安全层
const defaultEnabledLayers = [
@@ -687,7 +687,7 @@ export function SecurityStatusPanel({ className = '' }: SecurityStatusPanelProps
</span>
</div>
<p className="text-xs text-gray-500 mt-1">
{!connected && 'ZCLAW 默认安全配置。连接 OpenFang 后可获取完整安全状态。'}
{!connected && 'ZCLAW 默认安全配置。连接后可获取实时安全状态。'}
</p>
</div>

View File

@@ -1,9 +1,8 @@
import { useEffect } from 'react';
import { Shield, ShieldCheck, ShieldAlert, ShieldX, RefreshCw, Loader2, AlertCircle } from 'lucide-react';
import { useConnectionStore } from '../store/connectionStore';
import { useSecurityStore } from '../store/securityStore';
// OpenFang 16-layer security architecture names (Chinese)
// ZCLAW 16-layer security architecture names (Chinese)
const SECURITY_LAYER_NAMES: Record<string, string> = {
// Layer 1: Network
'network.firewall': '网络防火墙',
@@ -76,30 +75,14 @@ function getSecurityLabel(level: 'critical' | 'high' | 'medium' | 'low') {
}
export function SecurityStatus() {
const connectionState = useConnectionStore((s) => s.connectionState);
const securityStatus = useSecurityStore((s) => s.securityStatus);
const securityStatusLoading = useSecurityStore((s) => s.securityStatusLoading);
const securityStatusError = useSecurityStore((s) => s.securityStatusError);
const loadSecurityStatus = useSecurityStore((s) => s.loadSecurityStatus);
const connected = connectionState === 'connected';
useEffect(() => {
if (connected) {
loadSecurityStatus();
}
}, [connected]);
if (!connected) {
return (
<div className="rounded-xl border border-gray-200 bg-white p-4 shadow-sm">
<div className="flex items-center gap-2 mb-3">
<Shield className="w-4 h-4 text-gray-400" />
<span className="text-sm font-semibold text-gray-900"></span>
</div>
<p className="text-xs text-gray-400"></p>
</div>
);
}
loadSecurityStatus();
}, [loadSecurityStatus]);
// Loading state
if (securityStatusLoading && !securityStatus) {
@@ -131,9 +114,9 @@ export function SecurityStatus() {
<RefreshCw className="w-3.5 h-3.5" />
</button>
</div>
<p className="text-xs text-gray-500 mb-2">API </p>
<p className="text-xs text-gray-500 mb-2"></p>
<p className="text-xs text-gray-400">
OpenFang API ({'/api/security/status'})
</p>
</div>
);

View File

@@ -34,10 +34,10 @@ export function About() {
</div>
<div className="mt-12 text-center text-xs text-gray-400">
2026 ZCLAW | Powered by OpenFang
2026 ZCLAW
</div>
<div className="text-center text-xs text-gray-400 space-y-1">
<p> OpenFang Rust Agent OS </p>
<p> Rust Agent OS </p>
<div className="flex justify-center gap-4 mt-3">
<a href="#" className="text-orange-500 hover:text-orange-600"></a>
<a href="#" className="text-orange-500 hover:text-orange-600"></a>

View File

@@ -382,7 +382,7 @@ export function IMChannels() {
<div className="text-xs text-blue-700 dark:text-blue-300">
<p className="font-medium mb-1"></p>
<p> Gateway </p>
<p className="mt-1">: <code className="bg-blue-100 dark:bg-blue-800 px-1 rounded">~/.openfang/openfang.toml</code></p>
<p className="mt-1">: <code className="bg-blue-100 dark:bg-blue-800 px-1 rounded">~/.zclaw/zclaw.toml</code></p>
</div>
</div>
</div>

View File

@@ -266,13 +266,30 @@ export function ModelsAPI() {
};
// 保存 Embedding 配置
const handleSaveEmbeddingConfig = () => {
const handleSaveEmbeddingConfig = async () => {
const configToSave = {
...embeddingConfig,
enabled: embeddingConfig.provider !== 'local' && embeddingConfig.apiKey.trim() !== '',
};
setEmbeddingConfig(configToSave);
saveEmbeddingConfig(configToSave);
// Push config to Rust backend for semantic memory search
if (configToSave.enabled) {
try {
await invoke('viking_configure_embedding', {
provider: configToSave.provider,
apiKey: configToSave.apiKey,
model: configToSave.model || undefined,
endpoint: configToSave.endpoint || undefined,
});
setEmbeddingTestResult({ success: true, message: 'Embedding 配置已应用到语义记忆搜索' });
} catch (error) {
setEmbeddingTestResult({ success: false, message: `配置保存成功但应用失败: ${error}` });
}
} else {
setEmbeddingTestResult(null);
}
};
// 测试 Embedding API

View File

@@ -24,7 +24,7 @@ export function Privacy() {
<h3 className="font-medium mb-2 text-gray-900"></h3>
<div className="text-xs text-gray-500 mb-3"> Agent </div>
<div className="p-3 bg-gray-50 border border-gray-200 rounded-lg text-xs text-gray-600 font-mono">
{workspaceInfo?.resolvedPath || workspaceInfo?.path || quickConfig.workspaceDir || '~/.openfang/zclaw-workspace'}
{workspaceInfo?.resolvedPath || workspaceInfo?.path || quickConfig.workspaceDir || '~/.zclaw/zclaw-workspace'}
</div>
</div>

View File

@@ -1,19 +1,15 @@
import { useEffect, useState } from 'react';
import { useAgentStore } from '../../store/agentStore';
import { useConnectionStore } from '../../store/connectionStore';
import { BarChart3, TrendingUp, Clock, Zap } from 'lucide-react';
export function UsageStats() {
const usageStats = useAgentStore((s) => s.usageStats);
const loadUsageStats = useAgentStore((s) => s.loadUsageStats);
const connectionState = useConnectionStore((s) => s.connectionState);
const [timeRange, setTimeRange] = useState<'7d' | '30d' | 'all'>('7d');
useEffect(() => {
if (connectionState === 'connected') {
loadUsageStats();
}
}, [connectionState]);
loadUsageStats();
}, [loadUsageStats]);
const stats = usageStats || { totalSessions: 0, totalMessages: 0, totalTokens: 0, byModel: {} };
const models = Object.entries(stats.byModel || {});
@@ -56,7 +52,7 @@ export function UsageStats() {
</button>
</div>
</div>
<div className="text-xs text-gray-500 mb-4"> Token </div>
<div className="text-xs text-gray-500 mb-4">使</div>
{/* 主要统计卡片 */}
<div className="grid grid-cols-4 gap-4 mb-8">
@@ -89,6 +85,9 @@ export function UsageStats() {
{/* 总 Token 使用量概览 */}
<div className="bg-white rounded-xl border border-gray-200 p-5 shadow-sm mb-6">
<h3 className="text-sm font-semibold mb-4 text-gray-900">Token 使</h3>
{stats.totalTokens === 0 ? (
<p className="text-xs text-gray-400">Token </p>
) : (
<div className="flex items-center gap-4">
<div className="flex-1">
<div className="flex justify-between text-xs text-gray-500 mb-1">
@@ -111,6 +110,7 @@ export function UsageStats() {
<div className="text-xs text-gray-500"></div>
</div>
</div>
)}
</div>
{/* 按模型分组 */}

View File

@@ -7,18 +7,18 @@ export function Workspace() {
const workspaceInfo = useConfigStore((s) => s.workspaceInfo);
const loadWorkspaceInfo = useConfigStore((s) => s.loadWorkspaceInfo);
const saveQuickConfig = useConfigStore((s) => s.saveQuickConfig);
const [projectDir, setProjectDir] = useState('~/.openfang/zclaw-workspace');
const [projectDir, setProjectDir] = useState('~/.zclaw/zclaw-workspace');
useEffect(() => {
loadWorkspaceInfo().catch(silentErrorHandler('Workspace'));
}, []);
useEffect(() => {
setProjectDir(quickConfig.workspaceDir || workspaceInfo?.path || '~/.openfang/zclaw-workspace');
setProjectDir(quickConfig.workspaceDir || workspaceInfo?.path || '~/.zclaw/zclaw-workspace');
}, [quickConfig.workspaceDir, workspaceInfo?.path]);
const handleWorkspaceBlur = async () => {
const nextValue = projectDir.trim() || '~/.openfang/zclaw-workspace';
const nextValue = projectDir.trim() || '~/.zclaw/zclaw-workspace';
setProjectDir(nextValue);
await saveQuickConfig({ workspaceDir: nextValue });
await loadWorkspaceInfo();

View File

@@ -375,8 +375,10 @@ export function SkillMarket({
/>
</div>
{/* Suggestions - placeholder for future AI-powered recommendations */}
{/* AI 智能推荐功能开发中 */}
<div className="text-xs text-gray-400 dark:text-gray-500 text-center py-1">
AI
</div>
</div>
{/* Category Filter */}

View File

@@ -1,7 +1,7 @@
/**
* TriggersPanel - OpenFang Triggers Management UI
* TriggersPanel - ZCLAW Triggers Management UI
*
* Displays available OpenFang Triggers and allows creating and toggling them.
* Displays available ZCLAW Triggers and allows creating and toggling them.
*/
import { useState, useEffect, useCallback } from 'react';

View File

@@ -1,8 +1,8 @@
/**
* VikingPanel - OpenViking Semantic Memory UI
* VikingPanel - ZCLAW Semantic Memory UI
*
* Provides interface for semantic search and knowledge base management.
* OpenViking is an optional sidecar for semantic memory operations.
* Uses native Rust SqliteStorage with TF-IDF semantic search.
*/
import { useState, useEffect } from 'react';
import {
@@ -11,16 +11,13 @@ import {
AlertCircle,
CheckCircle,
FileText,
Server,
Play,
Square,
Database,
} from 'lucide-react';
import {
getVikingStatus,
findVikingResources,
getVikingServerStatus,
startVikingServer,
stopVikingServer,
listVikingResources,
readVikingResource,
} from '../lib/viking-client';
import type { VikingStatus, VikingFindResult } from '../lib/viking-client';
@@ -30,17 +27,28 @@ export function VikingPanel() {
const [searchQuery, setSearchQuery] = useState('');
const [searchResults, setSearchResults] = useState<VikingFindResult[]>([]);
const [isSearching, setIsSearching] = useState(false);
const [serverRunning, setServerRunning] = useState(false);
const [message, setMessage] = useState<{ type: 'success' | 'error'; text: string } | null>(null);
const [memoryCount, setMemoryCount] = useState<number | null>(null);
const [expandedUri, setExpandedUri] = useState<string | null>(null);
const [expandedContent, setExpandedContent] = useState<string | null>(null);
const [isLoadingL2, setIsLoadingL2] = useState(false);
const loadStatus = async () => {
setIsLoading(true);
setMessage(null);
try {
const vikingStatus = await getVikingStatus();
setStatus(vikingStatus);
const serverStatus = await getVikingServerStatus();
setServerRunning(serverStatus.running);
if (vikingStatus.available) {
// Load memory count
try {
const resources = await listVikingResources('/');
setMemoryCount(resources.length);
} catch {
setMemoryCount(null);
}
}
} catch (error) {
console.error('Failed to load Viking status:', error);
setStatus({ available: false, error: String(error) });
@@ -74,22 +82,22 @@ export function VikingPanel() {
}
};
const handleServerToggle = async () => {
const handleExpandL2 = async (uri: string) => {
if (expandedUri === uri) {
setExpandedUri(null);
setExpandedContent(null);
return;
}
setExpandedUri(uri);
setIsLoadingL2(true);
try {
if (serverRunning) {
await stopVikingServer();
setServerRunning(false);
setMessage({ type: 'success', text: '服务器已停止' });
} else {
await startVikingServer();
setServerRunning(true);
setMessage({ type: 'success', text: '服务器已启动' });
}
} catch (error) {
setMessage({
type: 'error',
text: `操作失败: ${error instanceof Error ? error.message : '未知错误'}`,
});
const fullContent = await readVikingResource(uri, 'L2');
setExpandedContent(fullContent);
} catch {
setExpandedContent(null);
} finally {
setIsLoadingL2(false);
}
};
@@ -100,7 +108,7 @@ export function VikingPanel() {
<div>
<h1 className="text-xl font-bold text-gray-900 dark:text-white"></h1>
<p className="text-xs text-gray-500 dark:text-gray-400 mt-1">
OpenViking
ZCLAW
</p>
</div>
<div className="flex gap-2 items-center">
@@ -125,10 +133,9 @@ export function VikingPanel() {
<div className="flex items-start gap-2">
<AlertCircle className="w-4 h-4 text-amber-500 mt-0.5" />
<div className="text-xs text-amber-700 dark:text-amber-300">
<p className="font-medium">OpenViking CLI </p>
<p className="font-medium"></p>
<p className="mt-1">
OpenViking CLI {' '}
<code className="bg-amber-100 dark:bg-amber-800 px-1 rounded">ZCLAW_VIKING_BIN</code>
SQLite
</p>
{status?.error && (
<p className="mt-1 text-amber-600 dark:text-amber-400 font-mono text-xs">
@@ -158,47 +165,37 @@ export function VikingPanel() {
</div>
)}
{/* Server Control */}
{/* Storage Info */}
{status?.available && (
<div className="bg-white dark:bg-gray-800 rounded-xl border border-gray-200 dark:border-gray-700 p-4 mb-6 shadow-sm">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<div
className={`w-10 h-10 rounded-xl flex items-center justify-center ${
serverRunning
? 'bg-gradient-to-br from-green-500 to-emerald-500 text-white'
: 'bg-gray-200 dark:bg-gray-700 text-gray-400'
}`}
>
<Server className="w-4 h-4" />
<div className="flex items-center gap-3 mb-3">
<div className="w-10 h-10 rounded-xl bg-gradient-to-br from-blue-500 to-indigo-500 flex items-center justify-center">
<Database className="w-4 h-4 text-white" />
</div>
<div>
<div className="text-sm font-medium text-gray-900 dark:text-white">
</div>
<div>
<div className="text-sm font-medium text-gray-900 dark:text-white">
Viking Server
</div>
<div className="text-xs text-gray-500 dark:text-gray-400">
{serverRunning ? '运行中' : '已停止'}
</div>
<div className="text-xs text-gray-500 dark:text-gray-400">
{status.version || 'Native'} · {status.dataDir || '默认路径'}
</div>
</div>
<button
onClick={handleServerToggle}
className={`px-4 py-2 rounded-lg flex items-center gap-2 text-sm transition-colors ${
serverRunning
? 'bg-red-100 text-red-600 hover:bg-red-200 dark:bg-red-900/30 dark:text-red-400'
: 'bg-green-100 text-green-600 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400'
}`}
>
{serverRunning ? (
<>
<Square className="w-4 h-4" />
</>
) : (
<>
<Play className="w-4 h-4" />
</>
)}
</button>
</div>
<div className="flex gap-4 text-xs">
<div className="flex items-center gap-1.5 text-gray-600 dark:text-gray-300">
<CheckCircle className="w-3.5 h-3.5 text-green-500" />
<span>SQLite + FTS5</span>
</div>
<div className="flex items-center gap-1.5 text-gray-600 dark:text-gray-300">
<CheckCircle className="w-3.5 h-3.5 text-green-500" />
<span>TF-IDF </span>
</div>
{memoryCount !== null && (
<div className="flex items-center gap-1.5 text-gray-600 dark:text-gray-300">
<CheckCircle className="w-3.5 h-3.5 text-green-500" />
<span>{memoryCount} </span>
</div>
)}
</div>
</div>
)}
@@ -251,21 +248,43 @@ export function VikingPanel() {
<span className="text-sm font-medium text-gray-900 dark:text-white truncate">
{result.uri}
</span>
<span className="text-xs text-gray-400 bg-gray-100 dark:bg-gray-700 px-2 py-0.5 rounded">
<span className={`text-xs px-2 py-0.5 rounded ${
result.level === 'L1'
? 'text-green-600 bg-green-100 dark:bg-green-900/30 dark:text-green-400'
: 'text-gray-400 bg-gray-100 dark:bg-gray-700'
}`}>
{result.level}
</span>
<span className="text-xs text-blue-600 dark:text-blue-400">
{Math.round(result.score * 100)}%
</span>
</div>
{result.overview && (
<p className="text-xs text-gray-500 dark:text-gray-400 mt-1 line-clamp-2">
{result.overview}
</p>
)}
<p className="text-xs text-gray-600 dark:text-gray-300 mt-2 line-clamp-3 font-mono">
<p className="text-xs text-gray-600 dark:text-gray-300 mt-2 line-clamp-3">
{result.content}
</p>
{result.level === 'L1' && (
<button
onClick={() => handleExpandL2(result.uri)}
className="mt-1.5 text-xs text-blue-500 hover:text-blue-600 dark:text-blue-400 dark:hover:text-blue-300 transition-colors"
>
{expandedUri === result.uri ? '收起全文' : '展开全文'}
</button>
)}
{expandedUri === result.uri && (
<div className="mt-2 p-3 bg-gray-50 dark:bg-gray-900/50 rounded-lg border border-gray-200 dark:border-gray-700">
{isLoadingL2 ? (
<div className="flex items-center gap-2 text-xs text-gray-400">
<RefreshCw className="w-3 h-3 animate-spin" /> ...
</div>
) : expandedContent ? (
<p className="text-xs text-gray-600 dark:text-gray-300 whitespace-pre-wrap font-mono">
{expandedContent}
</p>
) : (
<p className="text-xs text-gray-400"></p>
)}
</div>
)}
</div>
</div>
</div>
@@ -275,11 +294,11 @@ export function VikingPanel() {
{/* Info Section */}
<div className="mt-6 p-4 bg-gray-50 dark:bg-gray-800/50 rounded-lg border border-gray-200 dark:border-gray-700">
<h3 className="text-sm font-medium text-gray-900 dark:text-white mb-2"> OpenViking</h3>
<h3 className="text-sm font-medium text-gray-900 dark:text-white mb-2"></h3>
<ul className="text-xs text-gray-500 dark:text-gray-400 space-y-1">
<li> </li>
<li> </li>
<li> </li>
<li> SQLite + TF-IDF </li>
<li> </li>
<li> </li>
<li> AI </li>
</ul>
</div>

View File

@@ -1,10 +1,10 @@
/**
* WorkflowEditor - OpenFang Workflow Editor Component
* WorkflowEditor - ZCLAW Workflow Editor Component
*
* Allows creating and editing multi-step workflows that chain
* multiple Hands together for complex task automation.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';

View File

@@ -1,10 +1,10 @@
/**
* WorkflowHistory - OpenFang Workflow Execution History Component
* WorkflowHistory - ZCLAW Workflow Execution History Component
*
* Displays the execution history of a specific workflow,
* showing run details, status, and results.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';

View File

@@ -1,15 +1,16 @@
/**
* WorkflowList - OpenFang Workflow Management UI
* WorkflowList - ZCLAW Workflow Management UI
*
* Displays available OpenFang Workflows and allows executing them.
* Displays available ZCLAW Workflows and allows executing them.
*
* Design based on OpenFang Dashboard v0.4.0
* Design based on ZCLAW Dashboard v0.4.0
*/
import { useState, useEffect, useCallback } from 'react';
import { useWorkflowStore, type Workflow } from '../store/workflowStore';
import { WorkflowEditor } from './WorkflowEditor';
import { WorkflowHistory } from './WorkflowHistory';
import { WorkflowBuilder } from './WorkflowBuilder';
import {
Play,
Edit,
@@ -467,18 +468,8 @@ export function WorkflowList() {
</div>
)
) : (
// Visual Builder View (placeholder)
<div className="p-8 text-center bg-white dark:bg-gray-800 rounded-lg border border-gray-200 dark:border-gray-700">
<div className="w-12 h-12 bg-gray-100 dark:bg-gray-700 rounded-full flex items-center justify-center mx-auto mb-3">
<GitBranch className="w-6 h-6 text-gray-400" />
</div>
<p className="text-sm text-gray-500 dark:text-gray-400 mb-2">
</p>
<p className="text-xs text-gray-400 dark:text-gray-500">
</p>
</div>
// Visual Builder View
<WorkflowBuilder />
)}
{/* Execute Modal */}

View File

@@ -1,7 +1,7 @@
/**
* useAutomationEvents - WebSocket Event Hook for Automation System
*
* Subscribes to hand and workflow events from OpenFang WebSocket
* Subscribes to hand and workflow events from ZCLAW WebSocket
* and updates the corresponding stores.
*
* @module hooks/useAutomationEvents

View File

@@ -1,7 +1,7 @@
/**
* API Fallbacks for ZCLAW Gateway
*
* Provides sensible default data when OpenFang API endpoints return 404.
* Provides sensible default data when ZCLAW API endpoints return 404.
* This allows the UI to function gracefully even when backend features
* are not yet implemented.
*/
@@ -178,7 +178,7 @@ export function getUsageStatsFallback(sessions: SessionForStats[] = []): UsageSt
/**
* Convert skills to plugin status when /api/plugins/status returns 404.
* OpenFang uses Skills instead of traditional plugins.
* ZCLAW uses Skills instead of traditional plugins.
*/
export function getPluginStatusFallback(skills: SkillForPlugins[] = []): PluginStatusFallback[] {
if (skills.length === 0) {
@@ -215,7 +215,7 @@ export function getScheduledTasksFallback(triggers: TriggerForTasks[] = []): Sch
/**
* Default security status when /api/security/status returns 404.
* OpenFang has 16 security layers - show them with conservative defaults.
* ZCLAW has 16 security layers - show them with conservative defaults.
*/
export function getSecurityStatusFallback(): SecurityStatusFallback {
const layers: SecurityLayerFallback[] = [

View File

@@ -1,7 +1,7 @@
/**
* OpenFang Configuration Parser
* ZCLAW Configuration Parser
*
* Provides configuration parsing, validation, and serialization for OpenFang TOML files.
* Provides configuration parsing, validation, and serialization for ZCLAW TOML files.
*
* @module lib/config-parser
*/
@@ -9,7 +9,7 @@
import { tomlUtils, TomlParseError } from './toml-utils';
import { DEFAULT_MODEL_ID, DEFAULT_PROVIDER } from '../constants/models';
import type {
OpenFangConfig,
ZclawConfig,
ConfigValidationResult,
ConfigValidationError,
ConfigValidationWarning,
@@ -64,7 +64,7 @@ const REQUIRED_FIELDS: Array<{ path: string; description: string }> = [
/**
* Default configuration values
*/
const DEFAULT_CONFIG: Partial<OpenFangConfig> = {
const DEFAULT_CONFIG: Partial<ZclawConfig> = {
server: {
host: '127.0.0.1',
port: 4200,
@@ -74,7 +74,7 @@ const DEFAULT_CONFIG: Partial<OpenFangConfig> = {
},
agent: {
defaults: {
workspace: '~/.openfang/workspace',
workspace: '~/.zclaw/workspace',
default_model: DEFAULT_MODEL_ID,
},
},
@@ -89,7 +89,7 @@ const DEFAULT_CONFIG: Partial<OpenFangConfig> = {
*/
export const configParser = {
/**
* Parse TOML content into an OpenFang configuration object
* Parse TOML content into a ZCLAW configuration object
*
* @param content - The TOML content to parse
* @param envVars - Optional environment variables for resolution
@@ -101,13 +101,13 @@ export const configParser = {
* const config = configParser.parseConfig(tomlContent, { OPENAI_API_KEY: 'sk-...' });
* ```
*/
parseConfig: (content: string, envVars?: Record<string, string | undefined>): OpenFangConfig => {
parseConfig: (content: string, envVars?: Record<string, string | undefined>): ZclawConfig => {
try {
// First resolve environment variables
const resolved = tomlUtils.resolveEnvVars(content, envVars);
// Parse TOML
const parsed = tomlUtils.parse<OpenFangConfig>(resolved);
const parsed = tomlUtils.parse<ZclawConfig>(resolved);
return parsed;
} catch (error) {
if (error instanceof TomlParseError) {
@@ -121,7 +121,7 @@ export const configParser = {
},
/**
* Validate an OpenFang configuration object
* Validate a ZCLAW configuration object
*
* @param config - The configuration object to validate
* @returns Validation result with errors and warnings
@@ -238,7 +238,7 @@ export const configParser = {
parseAndValidate: (
content: string,
envVars?: Record<string, string | undefined>
): OpenFangConfig => {
): ZclawConfig => {
const config = configParser.parseConfig(content, envVars);
const result = configParser.validateConfig(config);
if (!result.valid) {
@@ -261,7 +261,7 @@ export const configParser = {
* const toml = configParser.stringifyConfig(config);
* ```
*/
stringifyConfig: (config: OpenFangConfig): string => {
stringifyConfig: (config: ZclawConfig): string => {
return tomlUtils.stringify(config as unknown as Record<string, unknown>);
},
@@ -276,8 +276,8 @@ export const configParser = {
* const fullConfig = configParser.mergeWithDefaults(partialConfig);
* ```
*/
mergeWithDefaults: (config: Partial<OpenFangConfig>): OpenFangConfig => {
return deepMerge(DEFAULT_CONFIG, config) as unknown as OpenFangConfig;
mergeWithDefaults: (config: Partial<ZclawConfig>): ZclawConfig => {
return deepMerge(DEFAULT_CONFIG, config) as unknown as ZclawConfig;
},
/**
@@ -307,19 +307,19 @@ export const configParser = {
/**
* Get default configuration
*
* @returns Default OpenFang configuration
* @returns Default ZCLAW configuration
*/
getDefaults: (): OpenFangConfig => {
return JSON.parse(JSON.stringify(DEFAULT_CONFIG)) as OpenFangConfig;
getDefaults: (): ZclawConfig => {
return JSON.parse(JSON.stringify(DEFAULT_CONFIG)) as ZclawConfig;
},
/**
* Check if a configuration object is valid
*
* @param config - The configuration to check
* @returns Type guard for OpenFangConfig
* @returns Type guard for ZclawConfig
*/
isOpenFangConfig: (config: unknown): config is OpenFangConfig => {
isZclawConfig: (config: unknown): config is ZclawConfig => {
const result = configParser.validateConfig(config);
return result.valid;
},

View File

@@ -7,13 +7,13 @@
* - Agents (Clones)
* - Stats & Workspace
* - Config (Quick Config, Channels, Skills, Scheduler, Models)
* - Hands (OpenFang)
* - Workflows (OpenFang)
* - Sessions (OpenFang)
* - Triggers (OpenFang)
* - Audit (OpenFang)
* - Security (OpenFang)
* - Approvals (OpenFang)
* - Hands (ZCLAW)
* - Workflows (ZCLAW)
* - Sessions (ZCLAW)
* - Triggers (ZCLAW)
* - Audit (ZCLAW)
* - Security (ZCLAW)
* - Approvals (ZCLAW)
*
* These methods are installed onto GatewayClient.prototype via installApiMethods().
* The GatewayClient core class exposes restGet/restPost/restPut/restDelete/restPatch
@@ -179,7 +179,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
const storedAutoStart = localStorage.getItem('zclaw-autoStart');
const storedShowToolCalls = localStorage.getItem('zclaw-showToolCalls');
// Map OpenFang config to frontend expected format
// Map ZCLAW config to frontend expected format
return {
quickConfig: {
agentName: 'ZCLAW',
@@ -220,15 +220,15 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
localStorage.setItem('zclaw-showToolCalls', String(config.showToolCalls));
}
// Map frontend config back to OpenFang format
const openfangConfig = {
// Map frontend config back to ZCLAW format
const zclawConfig = {
data_dir: config.workspaceDir,
default_model: config.defaultModel ? {
model: config.defaultModel,
provider: config.defaultProvider || 'bailian',
} : undefined,
};
return this.restPut('/api/config', openfangConfig);
return this.restPut('/api/config', zclawConfig);
};
// ─── Skills ───
@@ -333,7 +333,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restPatch(`/api/scheduler/tasks/${id}`, { enabled });
};
// ─── OpenFang Hands API ───
// ─── ZCLAW Hands API ───
proto.listHands = async function (this: GatewayClient): Promise<{
hands: {
@@ -407,7 +407,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restGet(`/api/hands/${name}/runs?${params}`);
};
// ─── OpenFang Workflows API ───
// ─── ZCLAW Workflows API ───
proto.listWorkflows = async function (this: GatewayClient): Promise<{ workflows: { id: string; name: string; steps: number }[] }> {
return this.restGet('/api/workflows');
@@ -476,7 +476,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restDelete(`/api/workflows/${id}`);
};
// ─── OpenFang Session API ───
// ─── ZCLAW Session API ───
proto.listSessions = async function (this: GatewayClient, opts?: { limit?: number; offset?: number }): Promise<{
sessions: Array<{
@@ -539,7 +539,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restGet(`/api/sessions/${sessionId}/messages?${params}`);
};
// ─── OpenFang Triggers API ───
// ─── ZCLAW Triggers API ───
proto.listTriggers = async function (this: GatewayClient): Promise<{ triggers: { id: string; type: string; enabled: boolean }[] }> {
return this.restGet('/api/triggers');
@@ -580,7 +580,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restDelete(`/api/triggers/${id}`);
};
// ─── OpenFang Audit API ───
// ─── ZCLAW Audit API ───
proto.getAuditLogs = async function (this: GatewayClient, opts?: { limit?: number; offset?: number }): Promise<{ logs: unknown[] }> {
const params = new URLSearchParams();
@@ -598,7 +598,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
return this.restGet(`/api/audit/verify/${logId}`);
};
// ─── OpenFang Security API ───
// ─── ZCLAW Security API ───
proto.getSecurityStatus = async function (this: GatewayClient): Promise<{ layers: { name: string; enabled: boolean }[] }> {
try {
@@ -626,7 +626,7 @@ export function installApiMethods(ClientClass: { prototype: GatewayClient }): vo
}
};
// ─── OpenFang Approvals API ───
// ─── ZCLAW Approvals API ───
proto.listApprovals = async function (this: GatewayClient, status?: string): Promise<{
approvals: {

View File

@@ -1,7 +1,7 @@
/**
/**
* ZCLAW Gateway Client (Browser/Tauri side)
*
* Core WebSocket client for OpenFang Kernel protocol.
* Core WebSocket client for ZCLAW Kernel protocol.
* Handles connection management, WebSocket framing, heartbeat,
* event dispatch, and chat/stream operations.
*
@@ -22,7 +22,7 @@ export type {
GatewayPong,
GatewayFrame,
AgentStreamDelta,
OpenFangStreamEvent,
ZclawStreamEvent,
ConnectionState,
EventCallback,
} from './gateway-types';
@@ -51,7 +51,7 @@ import type {
GatewayFrame,
GatewayResponse,
GatewayEvent,
OpenFangStreamEvent,
ZclawStreamEvent,
ConnectionState,
EventCallback,
AgentStreamDelta,
@@ -158,7 +158,7 @@ function createIdempotencyKey(): string {
export class GatewayClient {
private ws: WebSocket | null = null;
private openfangWs: WebSocket | null = null; // OpenFang stream WebSocket
private zclawWs: WebSocket | null = null; // ZCLAW stream WebSocket
private state: ConnectionState = 'disconnected';
private requestId = 0;
private pendingRequests = new Map<string, {
@@ -243,20 +243,20 @@ export class GatewayClient {
// === Connection ===
/** Connect using REST API only (for OpenFang mode) */
/** Connect using REST API only (for ZCLAW mode) */
async connectRest(): Promise<void> {
if (this.state === 'connected') {
return;
}
this.setState('connecting');
try {
// Check if OpenFang API is healthy
// Check if ZCLAW API is healthy
const health = await this.restGet<{ status: string; version?: string }>('/api/health');
if (health.status === 'ok') {
this.reconnectAttempts = 0;
this.setState('connected');
this.startHeartbeat(); // Start heartbeat after successful connection
this.log('info', `Connected to OpenFang via REST API${health.version ? ` (v${health.version})` : ''}`);
this.log('info', `Connected to ZCLAW via REST API${health.version ? ` (v${health.version})` : ''}`);
this.emitEvent('connected', { version: health.version });
} else {
throw new Error('Health check failed');
@@ -264,7 +264,7 @@ export class GatewayClient {
} catch (err: unknown) {
this.setState('disconnected');
const errorMessage = err instanceof Error ? err.message : String(err);
throw new Error(`Failed to connect to OpenFang: ${errorMessage}`);
throw new Error(`Failed to connect to ZCLAW: ${errorMessage}`);
}
}
@@ -273,7 +273,7 @@ export class GatewayClient {
return Promise.resolve();
}
// Check if URL is for OpenFang (port 4200 or 50051) - use REST mode
// Check if URL is for ZCLAW (port 4200 or 50051) - use REST mode
if (this.url.includes(':4200') || this.url.includes(':50051')) {
return this.connectRest();
}
@@ -389,10 +389,10 @@ export class GatewayClient {
// === High-level API ===
// Default agent ID for OpenFang (will be set dynamically from /api/agents)
// Default agent ID for ZCLAW (will be set dynamically from /api/agents)
private defaultAgentId: string = '';
/** Try to fetch default agent ID from OpenFang /api/agents endpoint */
/** Try to fetch default agent ID from ZCLAW /api/agents endpoint */
async fetchDefaultAgentId(): Promise<string | null> {
try {
// Use /api/agents endpoint which returns array of agents
@@ -422,7 +422,7 @@ export class GatewayClient {
return this.defaultAgentId;
}
/** Send message to agent (OpenFang chat API) */
/** Send message to agent (ZCLAW chat API) */
async chat(message: string, opts?: {
sessionKey?: string;
agentId?: string;
@@ -432,24 +432,24 @@ export class GatewayClient {
temperature?: number;
maxTokens?: number;
}): Promise<{ runId: string; sessionId?: string; response?: string }> {
// OpenFang uses /api/agents/{agentId}/message endpoint
// ZCLAW uses /api/agents/{agentId}/message endpoint
let agentId = opts?.agentId || this.defaultAgentId;
// If no agent ID, try to fetch from OpenFang status
// If no agent ID, try to fetch from ZCLAW status
if (!agentId) {
await this.fetchDefaultAgentId();
agentId = this.defaultAgentId;
}
if (!agentId) {
throw new Error('No agent available. Please ensure OpenFang has at least one agent.');
throw new Error('No agent available. Please ensure ZCLAW has at least one agent.');
}
const result = await this.restPost<{ response?: string; input_tokens?: number; output_tokens?: number }>(`/api/agents/${agentId}/message`, {
message,
session_id: opts?.sessionKey,
});
// OpenFang returns { response, input_tokens, output_tokens }
// ZCLAW returns { response, input_tokens, output_tokens }
return {
runId: createIdempotencyKey(),
sessionId: opts?.sessionKey,
@@ -457,7 +457,7 @@ export class GatewayClient {
};
}
/** Send message with streaming response (OpenFang WebSocket) */
/** Send message with streaming response (ZCLAW WebSocket) */
async chatStream(
message: string,
callbacks: {
@@ -472,20 +472,20 @@ export class GatewayClient {
agentId?: string;
}
): Promise<{ runId: string }> {
let agentId = opts?.agentId || this.defaultAgentId;
const agentId = opts?.agentId || this.defaultAgentId;
const runId = createIdempotencyKey();
const sessionId = opts?.sessionKey || `session_${Date.now()}`;
// If no agent ID, try to fetch from OpenFang status (async, but we'll handle it in connectOpenFangStream)
// If no agent ID, try to fetch from ZCLAW status (async, but we'll handle it in connectZclawStream)
if (!agentId) {
// Try to get default agent asynchronously
this.fetchDefaultAgentId().then(() => {
const resolvedAgentId = this.defaultAgentId;
if (resolvedAgentId) {
this.streamCallbacks.set(runId, callbacks);
this.connectOpenFangStream(resolvedAgentId, runId, sessionId, message);
this.connectZclawStream(resolvedAgentId, runId, sessionId, message);
} else {
callbacks.onError('No agent available. Please ensure OpenFang has at least one agent.');
callbacks.onError('No agent available. Please ensure ZCLAW has at least one agent.');
callbacks.onComplete();
}
}).catch((err) => {
@@ -498,22 +498,22 @@ export class GatewayClient {
// Store callbacks for this run
this.streamCallbacks.set(runId, callbacks);
// Connect to OpenFang WebSocket if not connected
this.connectOpenFangStream(agentId, runId, sessionId, message);
// Connect to ZCLAW WebSocket if not connected
this.connectZclawStream(agentId, runId, sessionId, message);
return { runId };
}
/** Connect to OpenFang streaming WebSocket */
private connectOpenFangStream(
/** Connect to ZCLAW streaming WebSocket */
private connectZclawStream(
agentId: string,
runId: string,
sessionId: string,
message: string
): void {
// Close existing connection if any
if (this.openfangWs && this.openfangWs.readyState !== WebSocket.CLOSED) {
this.openfangWs.close();
if (this.zclawWs && this.zclawWs.readyState !== WebSocket.CLOSED) {
this.zclawWs.close();
}
// Build WebSocket URL
@@ -528,34 +528,34 @@ export class GatewayClient {
wsUrl = httpUrl.replace(/^http/, 'ws') + `/api/agents/${agentId}/ws`;
}
this.log('info', `Connecting to OpenFang stream: ${wsUrl}`);
this.log('info', `Connecting to ZCLAW stream: ${wsUrl}`);
try {
this.openfangWs = new WebSocket(wsUrl);
this.zclawWs = new WebSocket(wsUrl);
this.openfangWs.onopen = () => {
this.log('info', 'OpenFang WebSocket connected');
// Send chat message using OpenFang actual protocol
this.zclawWs.onopen = () => {
this.log('info', 'ZCLAW WebSocket connected');
// Send chat message using ZCLAW actual protocol
const chatRequest = {
type: 'message',
content: message,
session_id: sessionId,
};
this.openfangWs?.send(JSON.stringify(chatRequest));
this.zclawWs?.send(JSON.stringify(chatRequest));
};
this.openfangWs.onmessage = (event) => {
this.zclawWs.onmessage = (event) => {
try {
const data = JSON.parse(event.data);
this.handleOpenFangStreamEvent(runId, data, sessionId);
this.handleZclawStreamEvent(runId, data, sessionId);
} catch (err: unknown) {
const errorMessage = err instanceof Error ? err.message : String(err);
this.log('error', `Failed to parse stream event: ${errorMessage}`);
}
};
this.openfangWs.onerror = (_event) => {
this.log('error', 'OpenFang WebSocket error');
this.zclawWs.onerror = (_event) => {
this.log('error', 'ZCLAW WebSocket error');
const callbacks = this.streamCallbacks.get(runId);
if (callbacks) {
callbacks.onError('WebSocket connection failed');
@@ -563,14 +563,14 @@ export class GatewayClient {
}
};
this.openfangWs.onclose = (event) => {
this.log('info', `OpenFang WebSocket closed: ${event.code} ${event.reason}`);
this.zclawWs.onclose = (event) => {
this.log('info', `ZCLAW WebSocket closed: ${event.code} ${event.reason}`);
const callbacks = this.streamCallbacks.get(runId);
if (callbacks && event.code !== 1000) {
callbacks.onError(`Connection closed: ${event.reason || 'unknown'}`);
}
this.streamCallbacks.delete(runId);
this.openfangWs = null;
this.zclawWs = null;
};
} catch (err: unknown) {
const errorMessage = err instanceof Error ? err.message : String(err);
@@ -583,13 +583,13 @@ export class GatewayClient {
}
}
/** Handle OpenFang stream events */
private handleOpenFangStreamEvent(runId: string, data: OpenFangStreamEvent, sessionId: string): void {
/** Handle ZCLAW stream events */
private handleZclawStreamEvent(runId: string, data: ZclawStreamEvent, sessionId: string): void {
const callbacks = this.streamCallbacks.get(runId);
if (!callbacks) return;
switch (data.type) {
// OpenFang actual event types
// ZCLAW actual event types
case 'text_delta':
// Stream delta content
if (data.content) {
@@ -602,8 +602,8 @@ export class GatewayClient {
if (data.phase === 'done') {
callbacks.onComplete();
this.streamCallbacks.delete(runId);
if (this.openfangWs) {
this.openfangWs.close(1000, 'Stream complete');
if (this.zclawWs) {
this.zclawWs.close(1000, 'Stream complete');
}
}
break;
@@ -617,8 +617,8 @@ export class GatewayClient {
// Mark complete if phase done wasn't sent
callbacks.onComplete();
this.streamCallbacks.delete(runId);
if (this.openfangWs) {
this.openfangWs.close(1000, 'Stream complete');
if (this.zclawWs) {
this.zclawWs.close(1000, 'Stream complete');
}
break;
@@ -649,14 +649,14 @@ export class GatewayClient {
case 'error':
callbacks.onError(data.message || data.code || data.content || 'Unknown error');
this.streamCallbacks.delete(runId);
if (this.openfangWs) {
this.openfangWs.close(1011, 'Error');
if (this.zclawWs) {
this.zclawWs.close(1011, 'Error');
}
break;
case 'connected':
// Connection established
this.log('info', `OpenFang agent connected: ${data.agent_id}`);
this.log('info', `ZCLAW agent connected: ${data.agent_id}`);
break;
case 'agents_updated':
@@ -687,12 +687,12 @@ export class GatewayClient {
callbacks.onError('Stream cancelled');
this.streamCallbacks.delete(runId);
}
if (this.openfangWs && this.openfangWs.readyState === WebSocket.OPEN) {
this.openfangWs.close(1000, 'User cancelled');
if (this.zclawWs && this.zclawWs.readyState === WebSocket.OPEN) {
this.zclawWs.close(1000, 'User cancelled');
}
}
// === REST API Helpers (OpenFang) ===
// === REST API Helpers (ZCLAW) ===
public getRestBaseUrl(): string {
// In browser dev mode, use Vite proxy (empty string = relative path)

View File

@@ -1,5 +1,5 @@
/**
* OpenFang Gateway Configuration Types
* ZCLAW Gateway Configuration Types
*
* Types for gateway configuration and model choices.
*/

View File

@@ -42,7 +42,7 @@ export function isLocalhost(url: string): boolean {
// === URL Constants ===
// OpenFang endpoints (port 50051 - actual running port)
// ZCLAW endpoints (port 50051 - actual running port)
// Note: REST API uses relative path to leverage Vite proxy for CORS bypass
export const DEFAULT_GATEWAY_URL = `${DEFAULT_WS_PROTOCOL}127.0.0.1:50051/ws`;
export const REST_API_URL = ''; // Empty = use relative path (Vite proxy)

View File

@@ -66,8 +66,8 @@ export interface AgentStreamDelta {
workflowResult?: unknown;
}
/** OpenFang WebSocket stream event types */
export interface OpenFangStreamEvent {
/** ZCLAW WebSocket stream event types */
export interface ZclawStreamEvent {
type: 'text_delta' | 'phase' | 'response' | 'typing' | 'tool_call' | 'tool_result' | 'hand' | 'workflow' | 'error' | 'connected' | 'agents_updated';
content?: string;
phase?: 'streaming' | 'done';

View File

@@ -2,7 +2,7 @@
* Health Check Library
*
* Provides Tauri health check command wrappers and utilities
* for monitoring the health status of the OpenFang backend.
* for monitoring the health status of the ZCLAW backend.
*/
import { invoke } from '@tauri-apps/api/core';
@@ -19,7 +19,7 @@ export interface HealthCheckResult {
details?: Record<string, unknown>;
}
export interface OpenFangHealthResponse {
export interface ZclawHealthResponse {
healthy: boolean;
message?: string;
details?: Record<string, unknown>;
@@ -43,7 +43,7 @@ export async function performHealthCheck(): Promise<HealthCheckResult> {
}
try {
const response = await invoke<OpenFangHealthResponse>('openfang_health_check');
const response = await invoke<ZclawHealthResponse>('zclaw_health_check');
return {
status: response.healthy ? 'healthy' : 'unhealthy',

View File

@@ -239,6 +239,14 @@ export const memory = {
async dbPath(): Promise<string> {
return invoke('memory_db_path');
},
async buildContext(
agentId: string,
query: string,
maxTokens: number | null,
): Promise<{ systemPromptAddition: string; totalTokens: number; memoriesUsed: number }> {
return invoke('memory_build_context', { agentId, query, maxTokens });
},
};
// === Heartbeat API ===

View File

@@ -771,7 +771,7 @@ function saveSnapshotsToStorage(snapshots: IdentitySnapshot[]): void {
}
const fallbackIdentities = loadIdentitiesFromStorage();
let fallbackProposals = loadProposalsFromStorage();
const fallbackProposals = loadProposalsFromStorage();
let fallbackSnapshots = loadSnapshotsFromStorage();
const fallbackIdentity = {
@@ -1073,6 +1073,27 @@ export const intelligenceClient = {
}
return fallbackMemory.dbPath();
},
buildContext: async (
agentId: string,
query: string,
maxTokens?: number,
): Promise<{ systemPromptAddition: string; totalTokens: number; memoriesUsed: number }> => {
if (isTauriEnv()) {
return intelligence.memory.buildContext(agentId, query, maxTokens ?? null);
}
// Fallback: use basic search
const memories = await fallbackMemory.search({
agentId,
query,
limit: 8,
minImportance: 3,
});
const addition = memories.length > 0
? `## 相关记忆\n${memories.map(m => `- [${m.type}] ${m.content}`).join('\n')}`
: '';
return { systemPromptAddition: addition, totalTokens: 0, memoriesUsed: memories.length };
},
},
heartbeat: {

View File

@@ -2,7 +2,7 @@
* ZCLAW Kernel Client (Tauri Internal)
*
* Client for communicating with the internal ZCLAW Kernel via Tauri commands.
* This replaces the external OpenFang Gateway WebSocket connection.
* This replaces the external ZCLAW Gateway WebSocket connection.
*
* Phase 5 of Intelligence Layer Migration.
*/
@@ -648,24 +648,14 @@ export class KernelClient {
* Approve a hand execution
*/
async approveHand(name: string, runId: string, approved: boolean, reason?: string): Promise<{ status: string }> {
try {
return await invoke('hand_approve', { handName: name, runId, approved, reason });
} catch {
this.log('warn', `hand_approve not yet implemented, returning fallback`);
return { status: approved ? 'approved' : 'rejected' };
}
return await invoke('hand_approve', { handName: name, runId, approved, reason });
}
/**
* Cancel a hand execution
*/
async cancelHand(name: string, runId: string): Promise<{ status: string }> {
try {
return await invoke('hand_cancel', { handName: name, runId });
} catch {
this.log('warn', `hand_cancel not yet implemented, returning fallback`);
return { status: 'cancelled' };
}
return await invoke('hand_cancel', { handName: name, runId });
}
/**

View File

@@ -9,7 +9,7 @@
* Supports multiple backends:
* - OpenAI (GPT-4, GPT-3.5)
* - Volcengine (Doubao)
* - OpenFang Gateway (passthrough)
* - ZCLAW Gateway (passthrough)
*
* Part of ZCLAW L4 Self-Evolution capability.
*/
@@ -284,7 +284,7 @@ class VolcengineLLMAdapter implements LLMServiceAdapter {
}
}
// === Gateway Adapter (pass through to OpenFang or internal Kernel) ===
// === Gateway Adapter (pass through to ZCLAW or internal Kernel) ===
class GatewayLLMAdapter implements LLMServiceAdapter {
private config: LLMConfig;
@@ -346,7 +346,7 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
}
}
// External Gateway mode: Use OpenFang's chat endpoint
// External Gateway mode: Use ZCLAW's chat endpoint
const agentId = localStorage.getItem('zclaw-default-agent-id') || 'default';
const response = await fetch(`/api/agents/${agentId}/message`, {
@@ -403,7 +403,7 @@ class GatewayLLMAdapter implements LLMServiceAdapter {
}
isAvailable(): boolean {
// Gateway is available if we're in browser (can connect to OpenFang)
// Gateway is available if we're in browser (can connect to ZCLAW)
return typeof window !== 'undefined';
}
@@ -460,7 +460,7 @@ export function loadConfig(): LLMConfig {
// Ignore parse errors
}
// Default to gateway (OpenFang passthrough) for L4 self-evolution
// Default to gateway (ZCLAW passthrough) for L4 self-evolution
return DEFAULT_CONFIGS.gateway;
}

View File

@@ -239,12 +239,7 @@ export function generateWelcomeMessage(config: {
const { userName, agentName, emoji, personality, scenarios } = config;
// Build greeting
let greeting = '';
if (userName) {
greeting = `你好,${userName}`;
} else {
greeting = '你好!';
}
const greeting = userName ? `你好,${userName}` : '你好!';
// Build introduction
let intro = `我是${emoji ? ' ' + emoji : ''} ${agentName}`;

Some files were not shown because too many files have changed in this diff Show More