fix(saas): Sprint 1 P0 阻塞修复

1.1 补全 docker-compose.yml (PostgreSQL 16 + SaaS 后端容器)
1.2 Migration 系统化:
    - provider_keys.max_rpm/max_tpm 改为 BIGINT 匹配 Rust Option<i64>
    - 移除 seed_demo_data 中的 ALTER TABLE 运行时修补
    - seed 数据绑定类型 i32→i64 对齐列定义
1.3 saas-config.toml 修复:
    - 添加 cors_origins (开发环境 localhost)
    - 添加 [scheduler] section (注释示例)
    - 数据库密码改为开发默认值 + ZCLAW_DATABASE_URL 环境变量覆盖
    - 添加配置文档注释 (JWT/TOTP/管理员环境变量)
This commit is contained in:
iven
2026-03-29 23:27:24 +08:00
parent 04c366fe8b
commit 09df242cf8
4 changed files with 364 additions and 6 deletions

View File

@@ -80,8 +80,8 @@ CREATE TABLE IF NOT EXISTS providers (
base_url TEXT NOT NULL, base_url TEXT NOT NULL,
api_protocol TEXT NOT NULL DEFAULT 'openai', api_protocol TEXT NOT NULL DEFAULT 'openai',
enabled BOOLEAN NOT NULL DEFAULT TRUE, enabled BOOLEAN NOT NULL DEFAULT TRUE,
rate_limit_rpm INTEGER, rate_limit_rpm BIGINT,
rate_limit_tpm INTEGER, rate_limit_tpm BIGINT,
config_json TEXT DEFAULT '{}', config_json TEXT DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
@@ -256,8 +256,8 @@ CREATE TABLE IF NOT EXISTS provider_keys (
key_label TEXT NOT NULL, key_label TEXT NOT NULL,
key_value TEXT NOT NULL, key_value TEXT NOT NULL,
priority INTEGER NOT NULL DEFAULT 0, priority INTEGER NOT NULL DEFAULT 0,
max_rpm INTEGER, max_rpm BIGINT,
max_tpm INTEGER, max_tpm BIGINT,
quota_reset_interval TEXT, quota_reset_interval TEXT,
is_active BOOLEAN NOT NULL DEFAULT TRUE, is_active BOOLEAN NOT NULL DEFAULT TRUE,
last_429_at TIMESTAMPTZ, last_429_at TIMESTAMPTZ,

View File

@@ -20,6 +20,7 @@ pub async fn init_db(database_url: &str) -> SaasResult<PgPool> {
run_migrations(&pool).await?; run_migrations(&pool).await?;
seed_admin_account(&pool).await?; seed_admin_account(&pool).await?;
seed_builtin_prompts(&pool).await?; seed_builtin_prompts(&pool).await?;
seed_demo_data(&pool).await?;
tracing::info!("Database initialized (schema v{})", SCHEMA_VERSION); tracing::info!("Database initialized (schema v{})", SCHEMA_VERSION);
Ok(pool) Ok(pool)
} }
@@ -250,6 +251,273 @@ async fn seed_builtin_prompts(pool: &PgPool) -> SaasResult<()> {
Ok(()) Ok(())
} }
/// 种子化演示数据 (Admin UI 演示用,幂等: ON CONFLICT DO NOTHING)
/// Seed demo data for the Admin UI.
///
/// Idempotent: runs only when the `providers` table is empty, and every
/// keyed insert uses `ON CONFLICT (id) DO NOTHING`. Populates providers,
/// models, provider keys, ~30 days of usage records, relay tasks, agent
/// templates, config items, API tokens, operation logs and telemetry
/// reports, all attributed to the first super_admin account found.
async fn seed_demo_data(pool: &PgPool) -> SaasResult<()> {
    // Only seed when `providers` is empty, to avoid duplicate inserts.
    let count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM providers")
        .fetch_one(pool).await?;
    if count.0 > 0 {
        tracing::debug!("Demo data already exists, skipping seed");
        return Ok(());
    }
    tracing::info!("Seeding demo data for Admin UI...");
    // Resolve the admin account id; fall back to a placeholder so the seed
    // still completes even if seed_admin_account() was skipped.
    let admin: Option<(String,)> = sqlx::query_as(
        "SELECT id FROM accounts WHERE role = 'super_admin' LIMIT 1"
    ).fetch_optional(pool).await?;
    let admin_id = admin.map(|(id,)| id).unwrap_or_else(|| "demo-admin".to_string());
    let now = chrono::Utc::now();
    // ===== 1. Providers =====
    // (id, name, display_name, base_url, enabled, rate_limit_rpm, rate_limit_tpm)
    let providers = [
        ("demo-openai", "openai", "OpenAI", "https://api.openai.com/v1", true, 60, 100000),
        ("demo-anthropic", "anthropic", "Anthropic", "https://api.anthropic.com/v1", true, 50, 80000),
        ("demo-google", "google", "Google AI", "https://generativelanguage.googleapis.com/v1beta", true, 30, 60000),
        ("demo-deepseek", "deepseek", "DeepSeek", "https://api.deepseek.com/v1", true, 30, 50000),
        ("demo-local", "local-ollama", "本地 Ollama", "http://localhost:11434/v1", false, 10, 20000),
    ];
    for (id, name, display, url, enabled, rpm, tpm) in &providers {
        let ts = now.to_rfc3339();
        sqlx::query(
            "INSERT INTO providers (id, name, display_name, base_url, api_protocol, enabled, rate_limit_rpm, rate_limit_tpm, created_at, updated_at)
             VALUES ($1, $2, $3, $4, 'openai', $5, $6, $7, $8, $8) ON CONFLICT (id) DO NOTHING"
        // rpm/tpm bound as i64 to match the BIGINT columns (see migration).
        ).bind(id).bind(name).bind(display).bind(url).bind(*enabled).bind(*rpm as i64).bind(*tpm as i64).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 2. Models =====
    // (id, provider_id, model_id, alias, context_window, max_output_tokens,
    //  supports_streaming, supports_vision, pricing_input, pricing_output)
    let models = [
        // OpenAI models
        ("demo-gpt4o", "demo-openai", "gpt-4o", "GPT-4o", 128000, 16384, true, true, 0.005, 0.015),
        ("demo-gpt4o-mini", "demo-openai", "gpt-4o-mini", "GPT-4o Mini", 128000, 16384, true, false, 0.00015, 0.0006),
        ("demo-gpt4-turbo", "demo-openai", "gpt-4-turbo", "GPT-4 Turbo", 128000, 4096, true, true, 0.01, 0.03),
        ("demo-o1", "demo-openai", "o1", "o1", 200000, 100000, true, true, 0.015, 0.06),
        ("demo-o3-mini", "demo-openai", "o3-mini", "o3-mini", 200000, 65536, true, false, 0.0011, 0.0044),
        // Anthropic models
        ("demo-claude-sonnet", "demo-anthropic", "claude-sonnet-4-20250514", "Claude Sonnet 4", 200000, 64000, true, true, 0.003, 0.015),
        ("demo-claude-haiku", "demo-anthropic", "claude-haiku-4-20250414", "Claude Haiku 4", 200000, 8192, true, true, 0.0008, 0.004),
        ("demo-claude-opus", "demo-anthropic", "claude-opus-4-20250115", "Claude Opus 4", 200000, 32000, true, true, 0.015, 0.075),
        // Google models
        ("demo-gemini-pro", "demo-google", "gemini-2.5-pro", "Gemini 2.5 Pro", 1048576, 65536, true, true, 0.00125, 0.005),
        ("demo-gemini-flash", "demo-google", "gemini-2.5-flash", "Gemini 2.5 Flash", 1048576, 65536, true, true, 0.000075, 0.0003),
        // DeepSeek models
        ("demo-deepseek-chat", "demo-deepseek", "deepseek-chat", "DeepSeek Chat", 65536, 8192, true, false, 0.00014, 0.00028),
        ("demo-deepseek-reasoner", "demo-deepseek", "deepseek-reasoner", "DeepSeek R1", 65536, 8192, true, false, 0.00055, 0.00219),
    ];
    for (id, pid, mid, alias, ctx, max_out, stream, vision, price_in, price_out) in &models {
        let ts = now.to_rfc3339();
        sqlx::query(
            "INSERT INTO models (id, provider_id, model_id, alias, context_window, max_output_tokens, supports_streaming, supports_vision, enabled, pricing_input, pricing_output, created_at, updated_at)
             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, true, $9, $10, $11, $11) ON CONFLICT (id) DO NOTHING"
        ).bind(id).bind(pid).bind(mid).bind(alias)
        .bind(*ctx as i64).bind(*max_out as i64).bind(*stream).bind(*vision)
        .bind(*price_in).bind(*price_out).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 3. Provider Keys (Key Pool) =====
    // (id, provider_id, key_label, key_value, priority, max_rpm, max_tpm)
    let provider_keys = [
        ("demo-key-o1", "demo-openai", "OpenAI Key 1", "sk-demo-openai-key-1-xxxxx", 0, 60, 100000),
        ("demo-key-o2", "demo-openai", "OpenAI Key 2", "sk-demo-openai-key-2-xxxxx", 1, 40, 80000),
        ("demo-key-a1", "demo-anthropic", "Anthropic Key 1", "sk-ant-demo-key-1-xxxxx", 0, 50, 80000),
        ("demo-key-g1", "demo-google", "Google Key 1", "AIzaSyDemoKey1xxxxx", 0, 30, 60000),
        ("demo-key-d1", "demo-deepseek", "DeepSeek Key 1", "sk-demo-deepseek-key-1-xxxxx", 0, 30, 50000),
    ];
    for (id, pid, label, kv, priority, rpm, tpm) in &provider_keys {
        let ts = now.to_rfc3339();
        sqlx::query(
            "INSERT INTO provider_keys (id, provider_id, key_label, key_value, priority, max_rpm, max_tpm, is_active, total_requests, total_tokens, created_at, updated_at)
             VALUES ($1, $2, $3, $4, $5, $6, $7, true, 0, 0, $8, $8) ON CONFLICT (id) DO NOTHING"
        // priority stays i32 (INTEGER column); rpm/tpm are i64 (BIGINT columns).
        ).bind(id).bind(pid).bind(label).bind(kv).bind(*priority as i32)
        .bind(*rpm as i64).bind(*tpm as i64).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 4. Usage Records (past 30 days) =====
    let models_for_usage = [
        ("demo-openai", "gpt-4o"),
        ("demo-openai", "gpt-4o-mini"),
        ("demo-anthropic", "claude-sonnet-4-20250514"),
        ("demo-google", "gemini-2.5-flash"),
        ("demo-deepseek", "deepseek-chat"),
    ];
    // Deterministic pseudo-random stream via an LCG (Knuth MMIX constants)
    // so the demo data is reproducible across runs.
    let mut rng_seed = 42u64;
    for day_offset in 0..30 {
        let day = now - chrono::Duration::days(29 - day_offset);
        // 20..80 usage rows per day.
        let daily_count = 20 + (rng_seed % 60) as i32;
        rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
        for i in 0..daily_count {
            let (provider_id, model_id) = models_for_usage[(rng_seed as usize) % models_for_usage.len()];
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            // Take the modulus in u64 BEFORE casting: `rng_seed as i32 % 24`
            // could produce a negative remainder (u64→i32 truncation may be
            // negative) and push the timestamp into the previous day.
            let hour = (rng_seed % 24) as i32;
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            let ts = (day + chrono::Duration::hours(hour as i64) + chrono::Duration::minutes(i as i64)).to_rfc3339();
            let input = (500 + (rng_seed % 8000)) as i32;
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            let output = (200 + (rng_seed % 4000)) as i32;
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            let latency = (100 + (rng_seed % 3000)) as i32;
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            // ~5% failure rate for a realistic dashboard.
            let status = if rng_seed % 20 == 0 { "failed" } else { "success" };
            rng_seed = rng_seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            sqlx::query(
                "INSERT INTO usage_records (account_id, provider_id, model_id, input_tokens, output_tokens, latency_ms, status, created_at)
                 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)"
            ).bind(&admin_id).bind(provider_id).bind(model_id)
            .bind(input).bind(output).bind(latency).bind(status).bind(&ts)
            .execute(pool).await?;
        }
    }
    // ===== 5. Relay Tasks (recent) =====
    // Weighted status mix: mostly completed, some failed/queued.
    let relay_statuses = ["completed", "completed", "completed", "completed", "failed", "completed", "queued"];
    for i in 0..20 {
        let (provider_id, model_id) = models_for_usage[i % models_for_usage.len()];
        let status = relay_statuses[i % relay_statuses.len()];
        let offset_hours = (20 - i) as i64;
        let ts = (now - chrono::Duration::hours(offset_hours)).to_rfc3339();
        let ts_completed = (now - chrono::Duration::hours(offset_hours) + chrono::Duration::seconds(3)).to_rfc3339();
        let task_id = uuid::Uuid::new_v4().to_string();
        let hash = format!("{:064x}", i);
        let body = format!(r#"{{"model":"{}","messages":[{{"role":"user","content":"demo request {}"}}]}}"#, model_id, i);
        let (in_tok, out_tok, err) = if status == "completed" {
            (1500 + i as i32 * 100, 800 + i as i32 * 50, None::<String>)
        } else if status == "failed" {
            (0, 0, Some("Connection timeout".to_string()))
        } else {
            // queued: no tokens consumed yet, no error.
            (0, 0, None)
        };
        sqlx::query(
            "INSERT INTO relay_tasks (id, account_id, provider_id, model_id, request_hash, status, priority, attempt_count, max_attempts, request_body, input_tokens, output_tokens, error_message, queued_at, started_at, completed_at, created_at)
             VALUES ($1, $2, $3, $4, $5, $6, 0, 1, 3, $7, $8, $9, $10, $11, $12, $13, $11)"
        ).bind(&task_id).bind(&admin_id).bind(provider_id).bind(model_id)
        .bind(&hash).bind(status).bind(&body)
        .bind(in_tok).bind(out_tok).bind(err.as_deref())
        // completed_at stays NULL for tasks still in the queue.
        .bind(&ts).bind(&ts).bind(if status == "queued" { None::<&str> } else { Some(ts_completed.as_str()) })
        .execute(pool).await?;
    }
    // ===== 6. Agent Templates =====
    // (id, name, description, category, provider_id (unused), model,
    //  system_prompt, tools JSON, capabilities JSON, temperature, max_tokens)
    let agent_templates = [
        ("demo-agent-coder", "Code Assistant", "A helpful coding assistant that can write, review, and debug code", "coding", "demo-openai", "gpt-4o", "You are an expert coding assistant. Help users write clean, efficient code.", "[\"code_search\",\"code_edit\",\"terminal\"]", "[\"code_generation\",\"code_review\",\"debugging\"]", 0.3, 8192),
        ("demo-agent-writer", "Content Writer", "Creative writing and content generation agent", "creative", "demo-anthropic", "claude-sonnet-4-20250514", "You are a skilled content writer. Create engaging, well-structured content.", "[\"web_search\",\"document_edit\"]", "[\"writing\",\"editing\",\"summarization\"]", 0.7, 4096),
        ("demo-agent-analyst", "Data Analyst", "Data analysis and visualization specialist", "analytics", "demo-openai", "gpt-4o", "You are a data analysis expert. Help users analyze data and create visualizations.", "[\"code_execution\",\"data_access\"]", "[\"data_analysis\",\"visualization\",\"statistics\"]", 0.2, 8192),
        ("demo-agent-researcher", "Research Agent", "Deep research and information synthesis agent", "research", "demo-google", "gemini-2.5-pro", "You are a research specialist. Conduct thorough research and synthesize findings.", "[\"web_search\",\"document_access\"]", "[\"research\",\"synthesis\",\"citation\"]", 0.4, 16384),
        ("demo-agent-translator", "Translator", "Multi-language translation agent", "utility", "demo-deepseek", "deepseek-chat", "You are a professional translator. Translate text accurately while preserving tone and context.", "[]", "[\"translation\",\"localization\"]", 0.3, 4096),
    ];
    for (id, name, desc, cat, _pid, model, prompt, tools, caps, temp, max_tok) in &agent_templates {
        let ts = now.to_rfc3339();
        sqlx::query(
            "INSERT INTO agent_templates (id, name, description, category, source, model, system_prompt, tools, capabilities, temperature, max_tokens, visibility, status, current_version, created_at, updated_at)
             VALUES ($1, $2, $3, $4, 'custom', $5, $6, $7, $8, $9, $10, 'public', 'active', 1, $11, $11) ON CONFLICT (id) DO NOTHING"
        ).bind(id).bind(name).bind(desc).bind(cat).bind(model).bind(prompt).bind(tools).bind(caps)
        .bind(*temp).bind(*max_tok).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 7. Config Items =====
    // (category, key_path, value_type, current_value, default_value, description)
    let config_items = [
        ("server", "max_connections", "integer", "50", "100", "Maximum database connections"),
        ("server", "request_timeout_sec", "integer", "30", "60", "Request timeout in seconds"),
        ("llm", "default_model", "string", "gpt-4o", "gpt-4o", "Default LLM model"),
        ("llm", "max_context_tokens", "integer", "128000", "128000", "Maximum context window"),
        ("llm", "stream_chunk_size", "integer", "1024", "1024", "Streaming chunk size in bytes"),
        ("agent", "max_concurrent_tasks", "integer", "5", "10", "Maximum concurrent agent tasks"),
        ("agent", "task_timeout_min", "integer", "30", "60", "Agent task timeout in minutes"),
        ("memory", "max_entries", "integer", "10000", "50000", "Maximum memory entries per agent"),
        ("memory", "compression_threshold", "integer", "100", "200", "Messages before compression"),
        ("security", "rate_limit_enabled", "boolean", "true", "true", "Enable rate limiting"),
        ("security", "max_requests_per_minute", "integer", "60", "120", "Max requests per minute per user"),
        ("security", "content_filter_enabled", "boolean", "true", "true", "Enable content filtering"),
    ];
    for (cat, key, vtype, current, default, desc) in &config_items {
        let ts = now.to_rfc3339();
        let id = format!("cfg-{}-{}", cat, key);
        sqlx::query(
            "INSERT INTO config_items (id, category, key_path, value_type, current_value, default_value, source, description, created_at, updated_at)
             VALUES ($1, $2, $3, $4, $5, $6, 'local', $7, $8, $8) ON CONFLICT (id) DO NOTHING"
        ).bind(&id).bind(cat).bind(key).bind(vtype).bind(current).bind(default).bind(desc).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 8. API Tokens =====
    // (id, name, token_prefix, permissions JSON) — demo values only; the
    // stored hash is derived from the id, not from a real secret.
    let api_tokens = [
        ("demo-token-1", "Production API Key", "zclaw_prod_xr7Km9pQ2nBv", "[\"relay:use\",\"model:read\"]"),
        ("demo-token-2", "Development Key", "zclaw_dev_aB3cD5eF7gH9", "[\"relay:use\",\"model:read\",\"config:read\"]"),
        ("demo-token-3", "Testing Key", "zclaw_test_jK4lM6nO8pQ0", "[\"relay:use\"]"),
    ];
    for (id, name, prefix, perms) in &api_tokens {
        let ts = now.to_rfc3339();
        let hash = {
            use sha2::{Sha256, Digest};
            hex::encode(Sha256::digest(format!("{}-dummy-hash", id).as_bytes()))
        };
        sqlx::query(
            "INSERT INTO api_tokens (id, account_id, name, token_hash, token_prefix, permissions, created_at)
             VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (id) DO NOTHING"
        ).bind(id).bind(&admin_id).bind(name).bind(&hash).bind(prefix).bind(perms).bind(&ts)
        .execute(pool).await?;
    }
    // ===== 9. Operation Logs =====
    let log_actions = [
        ("account.login", "account", "User login"),
        ("provider.create", "provider", "Created provider"),
        ("provider.update", "provider", "Updated provider config"),
        ("model.create", "model", "Added model configuration"),
        ("relay.request", "relay_task", "Relay request processed"),
        ("config.update", "config", "Updated system configuration"),
        ("account.create", "account", "New account registered"),
        ("api_key.create", "api_token", "Created API token"),
        ("prompt.update", "prompt", "Updated prompt template"),
        ("account.change_password", "account", "Password changed"),
        ("relay.retry", "relay_task", "Retried failed relay task"),
        ("provider_key.add", "provider_key", "Added provider key to pool"),
    ];
    // 50 most-recent log rows, spread over the past ~7 days (3h apart).
    for i in 0..50 {
        let (action, target_type, _detail) = log_actions[i % log_actions.len()];
        let offset_hours = (i * 3 + 1) as i64;
        let ts = (now - chrono::Duration::hours(offset_hours)).to_rfc3339();
        let detail = serde_json::json!({"index": i}).to_string();
        sqlx::query(
            "INSERT INTO operation_logs (account_id, action, target_type, target_id, details, ip_address, created_at)
             VALUES ($1, $2, $3, $4, $5, $6, $7)"
        ).bind(&admin_id).bind(action).bind(target_type)
        .bind(&admin_id).bind(&detail).bind("127.0.0.1").bind(&ts)
        .execute(pool).await?;
    }
    // ===== 10. Telemetry Reports =====
    // 14 days x 8 reports/day (one every 3 hours), deterministic values.
    let telem_models = ["gpt-4o", "claude-sonnet-4-20250514", "gemini-2.5-flash", "deepseek-chat"];
    for day_offset in 0i32..14 {
        let day = now - chrono::Duration::days(13 - day_offset as i64);
        for h in 0i32..8 {
            let ts = (day + chrono::Duration::hours(h as i64 * 3)).to_rfc3339();
            let model = telem_models[(day_offset as usize + h as usize) % telem_models.len()];
            let report_id = format!("telem-d{}-h{}", day_offset, h);
            let input = 1000 + (day_offset as i64 * 100 + h as i64 * 50);
            let output = 500 + (day_offset as i64 * 50 + h as i64 * 30);
            let latency = 200 + (day_offset * 10 + h * 5);
            sqlx::query(
                "INSERT INTO telemetry_reports (id, account_id, device_id, app_version, model_id, input_tokens, output_tokens, latency_ms, success, connection_mode, reported_at, created_at)
                 VALUES ($1, $2, 'demo-device-001', '0.1.0', $3, $4, $5, $6, true, 'tauri', $7, $7) ON CONFLICT (id) DO NOTHING"
            ).bind(&report_id).bind(&admin_id).bind(model)
            .bind(input).bind(output).bind(latency).bind(&ts)
            .execute(pool).await?;
        }
    }
    tracing::info!("Demo data seeded: 5 providers, 12 models, 5 keys, ~1500 usage records, 20 relay tasks, 5 agent templates, 12 configs, 3 API tokens, 50 logs, 112 telemetry reports");
    Ok(())
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
// PostgreSQL 单元测试需要真实数据库连接,此处保留接口兼容 // PostgreSQL 单元测试需要真实数据库连接,此处保留接口兼容

76
docker-compose.yml Normal file
View File

@@ -0,0 +1,76 @@
# ============================================================
# ZCLAW SaaS Backend - Docker Compose
# ============================================================
# Usage:
#   cp saas-env.example .env        # then edit .env with real values
#   docker compose up -d
#   docker compose logs -f saas
#
# Variable substitution (${VAR:-default}) below reads from the shell
# environment and from .env in this directory.
# ============================================================
services:
  # ---- PostgreSQL 16 ----
  postgres:
    image: postgres:16-alpine
    container_name: zclaw-postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-your_secure_password}
      POSTGRES_DB: ${POSTGRES_DB:-zclaw}
    ports:
      - "${POSTGRES_PORT:-5432}:5432"
    volumes:
      # Named volume keeps data across container recreation.
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-zclaw}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - zclaw-saas
  # ---- SaaS Backend ----
  saas:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: zclaw-saas
    restart: unless-stopped
    ports:
      - "${SAAS_PORT:-8080}:8080"
    # NOTE(review): this loads the *example* env file with placeholder
    # values into the container; usage above says to copy it to .env —
    # confirm whether .env should be listed here instead.
    env_file:
      - saas-env.example
    environment:
      # Overrides any DATABASE_URL from env_file; points at the postgres
      # service over the compose network.
      # NOTE(review): password is embedded unescaped in the URL — a
      # password containing URL-special characters (@ / : %) would break it.
      DATABASE_URL: postgres://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-your_secure_password}@postgres:5432/${POSTGRES_DB:-zclaw}
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      # Requires curl inside the image — verify the Dockerfile installs it.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    networks:
      - zclaw-saas
volumes:
  postgres_data:
    driver: local
networks:
  zclaw-saas:
    driver: bridge

View File

@@ -1,12 +1,19 @@
# ZCLAW SaaS 配置文件 # ZCLAW SaaS 配置文件
# 由 QA 测试自动生成 # 生产环境请通过环境变量覆盖敏感配置:
# ZCLAW_DATABASE_URL - 数据库连接字符串 (含密码)
# ZCLAW_SAAS_JWT_SECRET - JWT 签名密钥
# ZCLAW_TOTP_ENCRYPTION_KEY - TOTP 加密密钥 (64 字符 hex)
# ZCLAW_ADMIN_USERNAME / ZCLAW_ADMIN_PASSWORD - 初始管理员账号
[server] [server]
host = "0.0.0.0" host = "0.0.0.0"
port = 8080 port = 8080
# CORS 允许的来源; 开发环境使用 localhost, 生产环境改为实际域名
cors_origins = ["http://localhost:1420", "http://localhost:5173", "http://localhost:3000"]
[database] [database]
url = "postgres://postgres:123123@localhost:5432/zclaw" # 开发环境默认值; 生产环境务必设置 ZCLAW_DATABASE_URL 环境变量
url = "postgres://postgres:postgres@localhost:5432/zclaw"
[auth] [auth]
jwt_expiration_hours = 24 jwt_expiration_hours = 24
@@ -22,3 +29,10 @@ max_attempts = 3
[rate_limit] [rate_limit]
requests_per_minute = 60 requests_per_minute = 60
burst = 10 burst = 10
[scheduler]
# 定时任务配置 (可选)
# jobs = [
# { name = "cleanup-expired-tokens", interval = "1h", task = "token_cleanup", run_on_start = false },
# { name = "aggregate-usage-stats", interval = "24h", task = "usage_aggregation", run_on_start = true },
# ]