初始化提交
Some checks failed
CI / Check / macos-latest (push) Has been cancelled
CI / Check / ubuntu-latest (push) Has been cancelled
CI / Check / windows-latest (push) Has been cancelled
CI / Test / macos-latest (push) Has been cancelled
CI / Test / ubuntu-latest (push) Has been cancelled
CI / Test / windows-latest (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Format (push) Has been cancelled
CI / Security Audit (push) Has been cancelled
CI / Secrets Scan (push) Has been cancelled
CI / Install Script Smoke Test (push) Has been cancelled

This commit is contained in:
iven
2026-03-01 16:24:24 +08:00
commit 92e5def702
492 changed files with 211343 additions and 0 deletions

View File

@@ -0,0 +1,854 @@
//! Real HTTP integration tests for the OpenFang API.
//!
//! These tests boot a real kernel, start a real axum HTTP server on a random
//! port, and hit actual endpoints with reqwest. No mocking.
//!
//! Tests that require an LLM API call are gated behind GROQ_API_KEY.
//!
//! Run: cargo test -p openfang-api --test api_integration_test -- --nocapture
use axum::Router;
use openfang_api::middleware;
use openfang_api::routes::{self, AppState};
use openfang_api::ws;
use openfang_kernel::OpenFangKernel;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
use std::time::Instant;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
// ---------------------------------------------------------------------------
// Test infrastructure
// ---------------------------------------------------------------------------
/// Handle to a booted test server.
///
/// Holds everything needed to talk to (and tear down) one isolated server
/// instance; dropping it shuts the kernel down (see the `Drop` impl) and
/// removes the temp dir.
struct TestServer {
    // Root URL of the running server, e.g. "http://127.0.0.1:PORT".
    base_url: String,
    // Shared application state; used by Drop to reach the kernel.
    state: Arc<AppState>,
    // Temp dir backing the kernel's home/data dirs; kept alive (and thereby
    // on disk) for the lifetime of the server, deleted on drop.
    _tmp: tempfile::TempDir,
}
impl Drop for TestServer {
    /// Shut the kernel down when the test server handle goes out of scope,
    /// so each test cleans up its own kernel even on panic/early return.
    fn drop(&mut self) {
        self.state.kernel.shutdown();
    }
}
/// Start a test server using ollama as default provider (no API key needed).
/// This lets the kernel boot without any real LLM credentials.
/// Tests that need actual LLM calls should use `start_test_server_with_llm()`.
async fn start_test_server() -> TestServer {
    // "OLLAMA_API_KEY" is just a placeholder env-var name; it does not need
    // to be set for the kernel to boot with the ollama provider.
    start_test_server_with_provider("ollama", "test-model", "OLLAMA_API_KEY").await
}
/// Start a test server with Groq as the LLM provider (requires GROQ_API_KEY).
/// Callers are expected to skip their test when GROQ_API_KEY is unset.
async fn start_test_server_with_llm() -> TestServer {
    start_test_server_with_provider("groq", "llama-3.3-70b-versatile", "GROQ_API_KEY").await
}
async fn start_test_server_with_provider(
provider: &str,
model: &str,
api_key_env: &str,
) -> TestServer {
let tmp = tempfile::tempdir().expect("Failed to create temp dir");
let config = KernelConfig {
home_dir: tmp.path().to_path_buf(),
data_dir: tmp.path().join("data"),
default_model: DefaultModelConfig {
provider: provider.to_string(),
model: model.to_string(),
api_key_env: api_key_env.to_string(),
base_url: None,
},
..KernelConfig::default()
};
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let kernel = Arc::new(kernel);
kernel.set_self_handle();
let state = Arc::new(AppState {
kernel,
started_at: Instant::now(),
peer_registry: None,
bridge_manager: tokio::sync::Mutex::new(None),
channels_config: tokio::sync::RwLock::new(Default::default()),
shutdown_notify: Arc::new(tokio::sync::Notify::new()),
});
let app = Router::new()
.route("/api/health", axum::routing::get(routes::health))
.route("/api/status", axum::routing::get(routes::status))
.route(
"/api/agents",
axum::routing::get(routes::list_agents).post(routes::spawn_agent),
)
.route(
"/api/agents/{id}/message",
axum::routing::post(routes::send_message),
)
.route(
"/api/agents/{id}/session",
axum::routing::get(routes::get_agent_session),
)
.route("/api/agents/{id}/ws", axum::routing::get(ws::agent_ws))
.route(
"/api/agents/{id}",
axum::routing::delete(routes::kill_agent),
)
.route(
"/api/triggers",
axum::routing::get(routes::list_triggers).post(routes::create_trigger),
)
.route(
"/api/triggers/{id}",
axum::routing::delete(routes::delete_trigger),
)
.route(
"/api/workflows",
axum::routing::get(routes::list_workflows).post(routes::create_workflow),
)
.route(
"/api/workflows/{id}/run",
axum::routing::post(routes::run_workflow),
)
.route(
"/api/workflows/{id}/runs",
axum::routing::get(routes::list_workflow_runs),
)
.route("/api/shutdown", axum::routing::post(routes::shutdown))
.layer(axum::middleware::from_fn(middleware::request_logging))
.layer(TraceLayer::new_for_http())
.layer(CorsLayer::permissive())
.with_state(state.clone());
let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
.await
.expect("Failed to bind test server");
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
axum::serve(listener, app).await.unwrap();
});
TestServer {
base_url: format!("http://{}", addr),
state,
_tmp: tmp,
}
}
/// Manifest that uses ollama (no API key required, won't make real LLM calls).
/// TOML agent manifest spawned via POST /api/agents in most tests below;
/// the string content must stay valid TOML as parsed by the kernel.
const TEST_MANIFEST: &str = r#"
name = "test-agent"
version = "0.1.0"
description = "Integration test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test-model"
system_prompt = "You are a test agent. Reply concisely."
[capabilities]
tools = ["file_read"]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
/// Manifest that uses Groq for real LLM tests.
/// Identical to `TEST_MANIFEST` except for the [model] provider/model, so
/// that sending a message performs an actual Groq completion.
const LLM_MANIFEST: &str = r#"
name = "test-agent"
version = "0.1.0"
description = "Integration test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are a test agent. Reply concisely."
[capabilities]
tools = ["file_read"]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
/// The public health endpoint must answer 200 with a minimal, redacted body
/// (status + version only) and carry the request-id middleware header.
#[tokio::test]
async fn test_health_endpoint() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    let url = format!("{}/api/health", srv.base_url);
    let response = http.get(url).send().await.unwrap();
    assert_eq!(response.status(), 200);
    // Middleware injects x-request-id on every response.
    assert!(response.headers().contains_key("x-request-id"));
    let payload: serde_json::Value = response.json().await.unwrap();
    // Minimal public info only (redacted for security).
    assert_eq!(payload["status"], "ok");
    assert!(payload["version"].is_string());
    // Detailed operational fields must not leak through the public endpoint.
    assert!(payload["database"].is_null());
    assert!(payload["agent_count"].is_null());
}
/// A freshly booted server reports "running" status, zero agents, an uptime
/// counter, and the configured default provider.
#[tokio::test]
async fn test_status_endpoint() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    let url = format!("{}/api/status", srv.base_url);
    let response = http.get(url).send().await.unwrap();
    assert_eq!(response.status(), 200);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert_eq!(payload["status"], "running");
    assert_eq!(payload["agent_count"], 0);
    assert!(payload["uptime_seconds"].is_number());
    // Matches the provider passed by start_test_server().
    assert_eq!(payload["default_provider"], "ollama");
    assert_eq!(payload["agents"].as_array().unwrap().len(), 0);
}
/// Full agent lifecycle over HTTP: spawn (201) → appears in list →
/// kill (200) → list is empty again. Steps are order-dependent.
#[tokio::test]
async fn test_spawn_list_kill_agent() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // --- Spawn: POST the TOML manifest; expect 201 Created with id+name. ---
    let resp = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": TEST_MANIFEST}))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 201);
    let body: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(body["name"], "test-agent");
    let agent_id = body["agent_id"].as_str().unwrap().to_string();
    assert!(!agent_id.is_empty());
    // --- List (1 agent): the new agent shows up with matching id/provider. ---
    let resp = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let agents: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(agents.len(), 1);
    assert_eq!(agents[0]["name"], "test-agent");
    assert_eq!(agents[0]["id"], agent_id);
    assert_eq!(agents[0]["model_provider"], "ollama");
    // --- Kill: DELETE by id; expect a "killed" confirmation. ---
    let resp = client
        .delete(format!("{}/api/agents/{}", server.base_url, agent_id))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(body["status"], "killed");
    // --- List (empty): the killed agent is gone. ---
    let resp = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let agents: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(agents.len(), 0);
}
/// A newly spawned agent's session endpoint reports zero messages before any
/// message has been sent to it.
#[tokio::test]
async fn test_agent_session_empty() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Spawn agent (status not asserted here; the spawn path is covered by
    // test_spawn_list_kill_agent).
    let resp = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": TEST_MANIFEST}))
        .send()
        .await
        .unwrap();
    let body: serde_json::Value = resp.json().await.unwrap();
    let agent_id = body["agent_id"].as_str().unwrap();
    // Session should be empty — no messages sent yet
    let resp = client
        .get(format!(
            "{}/api/agents/{}/session",
            server.base_url, agent_id
        ))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(body["message_count"], 0);
    assert_eq!(body["messages"].as_array().unwrap().len(), 0);
}
/// End-to-end message round-trip through a real LLM (Groq): HTTP → kernel →
/// provider → response with token accounting, then session reflects history.
/// Skipped (passes trivially) when GROQ_API_KEY is not set.
#[tokio::test]
async fn test_send_message_with_llm() {
    // Gate on the API key so CI without credentials doesn't fail.
    if std::env::var("GROQ_API_KEY").is_err() {
        eprintln!("GROQ_API_KEY not set, skipping LLM integration test");
        return;
    }
    let server = start_test_server_with_llm().await;
    let client = reqwest::Client::new();
    // Spawn an agent whose manifest points at the Groq provider.
    let resp = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": LLM_MANIFEST}))
        .send()
        .await
        .unwrap();
    let body: serde_json::Value = resp.json().await.unwrap();
    let agent_id = body["agent_id"].as_str().unwrap().to_string();
    // Send message through the real HTTP endpoint → kernel → Groq LLM
    let resp = client
        .post(format!(
            "{}/api/agents/{}/message",
            server.base_url, agent_id
        ))
        .json(&serde_json::json!({"message": "Say hello in exactly 3 words."}))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = resp.json().await.unwrap();
    let response_text = body["response"].as_str().unwrap();
    assert!(
        !response_text.is_empty(),
        "LLM response should not be empty"
    );
    // A real completion consumes and produces tokens; both counters > 0.
    assert!(body["input_tokens"].as_u64().unwrap() > 0);
    assert!(body["output_tokens"].as_u64().unwrap() > 0);
    // Session should now have messages
    let resp = client
        .get(format!(
            "{}/api/agents/{}/session",
            server.base_url, agent_id
        ))
        .send()
        .await
        .unwrap();
    let session: serde_json::Value = resp.json().await.unwrap();
    assert!(session["message_count"].as_u64().unwrap() > 0);
}
/// Create a single-step workflow referencing a spawned agent, then verify it
/// appears in the workflow list with the expected name and step count.
/// (Running the workflow is covered elsewhere; this is create+list only.)
#[tokio::test]
async fn test_workflow_crud() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Spawn agent for workflow — workflows reference agents by name.
    let resp = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": TEST_MANIFEST}))
        .send()
        .await
        .unwrap();
    let body: serde_json::Value = resp.json().await.unwrap();
    let agent_name = body["name"].as_str().unwrap().to_string();
    // Create workflow with one sequential step targeting that agent.
    let resp = client
        .post(format!("{}/api/workflows", server.base_url))
        .json(&serde_json::json!({
            "name": "test-workflow",
            "description": "Integration test workflow",
            "steps": [
                {
                    "name": "step1",
                    "agent_name": agent_name,
                    "prompt": "Echo: {{input}}",
                    "mode": "sequential",
                    "timeout_secs": 30
                }
            ]
        }))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 201);
    let body: serde_json::Value = resp.json().await.unwrap();
    let workflow_id = body["workflow_id"].as_str().unwrap().to_string();
    assert!(!workflow_id.is_empty());
    // List workflows — the new workflow is present with 1 step.
    let resp = client
        .get(format!("{}/api/workflows", server.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let workflows: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(workflows.len(), 1);
    assert_eq!(workflows[0]["name"], "test-workflow");
    assert_eq!(workflows[0]["steps"], 1);
}
/// Trigger CRUD over HTTP: create a lifecycle trigger for a spawned agent,
/// list it (both unfiltered and filtered by agent_id), delete it, and verify
/// the list is empty afterwards.
#[tokio::test]
async fn test_trigger_crud() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Spawn agent for trigger — triggers reference agents by id.
    let resp = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": TEST_MANIFEST}))
        .send()
        .await
        .unwrap();
    let body: serde_json::Value = resp.json().await.unwrap();
    let agent_id = body["agent_id"].as_str().unwrap().to_string();
    // Create trigger (Lifecycle pattern — simplest variant)
    let resp = client
        .post(format!("{}/api/triggers", server.base_url))
        .json(&serde_json::json!({
            "agent_id": agent_id,
            "pattern": "lifecycle",
            "prompt_template": "Handle: {{event}}",
            "max_fires": 5
        }))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 201);
    let body: serde_json::Value = resp.json().await.unwrap();
    let trigger_id = body["trigger_id"].as_str().unwrap().to_string();
    assert_eq!(body["agent_id"], agent_id);
    // List triggers (unfiltered) — new trigger visible, enabled by default,
    // with the max_fires we set.
    let resp = client
        .get(format!("{}/api/triggers", server.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let triggers: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(triggers.len(), 1);
    assert_eq!(triggers[0]["agent_id"], agent_id);
    assert_eq!(triggers[0]["enabled"], true);
    assert_eq!(triggers[0]["max_fires"], 5);
    // List triggers (filtered by agent_id) — query-param filter matches too.
    let resp = client
        .get(format!(
            "{}/api/triggers?agent_id={}",
            server.base_url, agent_id
        ))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let triggers: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(triggers.len(), 1);
    // Delete trigger
    let resp = client
        .delete(format!("{}/api/triggers/{}", server.base_url, trigger_id))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    // List triggers (should be empty)
    let resp = client
        .get(format!("{}/api/triggers", server.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let triggers: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(triggers.len(), 0);
}
/// Every agent-scoped endpoint (message, kill, session) must reject a
/// malformed (non-UUID) agent id with 400 Bad Request.
#[tokio::test]
async fn test_invalid_agent_id_returns_400() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    // Message endpoint: POST to a bogus id; error body mentions "Invalid".
    let response = http
        .post(format!("{}/api/agents/not-a-uuid/message", srv.base_url))
        .json(&serde_json::json!({"message": "hello"}))
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 400);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert!(payload["error"].as_str().unwrap().contains("Invalid"));
    // Kill endpoint: DELETE with the same bogus id.
    let response = http
        .delete(format!("{}/api/agents/not-a-uuid", srv.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 400);
    // Session endpoint: GET with the same bogus id.
    let response = http
        .get(format!("{}/api/agents/not-a-uuid/session", srv.base_url))
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 400);
}
/// Deleting a syntactically valid but unknown agent UUID yields 404.
#[tokio::test]
async fn test_kill_nonexistent_agent_returns_404() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    // A random v4 UUID cannot match any spawned agent on a fresh server.
    let missing_id = uuid::Uuid::new_v4();
    let url = format!("{}/api/agents/{}", srv.base_url, missing_id);
    let response = http.delete(url).send().await.unwrap();
    assert_eq!(response.status(), 404);
}
/// Spawning with a manifest that isn't valid TOML must fail with 400 and an
/// error body that names the manifest as the problem.
#[tokio::test]
async fn test_spawn_invalid_manifest_returns_400() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    let response = http
        .post(format!("{}/api/agents", srv.base_url))
        .json(&serde_json::json!({"manifest_toml": "this is {{ not valid toml"}))
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 400);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert!(payload["error"].as_str().unwrap().contains("Invalid manifest"));
}
/// The x-request-id header injected by the logging middleware must parse as
/// a valid UUID.
#[tokio::test]
async fn test_request_id_header_is_uuid() {
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    let response = http
        .get(format!("{}/api/health", srv.base_url))
        .send()
        .await
        .unwrap();
    let header_value = response
        .headers()
        .get("x-request-id")
        .expect("x-request-id header should be present");
    let id_str = header_value.to_str().unwrap();
    let parsed = uuid::Uuid::parse_str(id_str);
    assert!(
        parsed.is_ok(),
        "x-request-id should be a valid UUID, got: {}",
        id_str
    );
}
/// Multi-agent lifecycle: spawn three agents from generated manifests, check
/// list and status agree on the count, kill one, then kill the rest, and
/// verify counts at each stage (3 → 2 → 0).
#[tokio::test]
async fn test_multiple_agents_lifecycle() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Spawn 3 agents, each with a unique name baked into the manifest.
    let mut ids = Vec::new();
    for i in 0..3 {
        let manifest = format!(
            r#"
name = "agent-{i}"
version = "0.1.0"
description = "Multi-agent test {i}"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test-model"
system_prompt = "Agent {i}."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#
        );
        let resp = client
            .post(format!("{}/api/agents", server.base_url))
            .json(&serde_json::json!({"manifest_toml": manifest}))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), 201);
        let body: serde_json::Value = resp.json().await.unwrap();
        ids.push(body["agent_id"].as_str().unwrap().to_string());
    }
    // List should show 3
    let resp = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap();
    let agents: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(agents.len(), 3);
    // Status should agree — agent_count mirrors the list length.
    let resp = client
        .get(format!("{}/api/status", server.base_url))
        .send()
        .await
        .unwrap();
    let status: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(status["agent_count"], 3);
    // Kill one (the middle agent).
    let resp = client
        .delete(format!("{}/api/agents/{}", server.base_url, ids[1]))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    // List should show 2
    let resp = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap();
    let agents: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(agents.len(), 2);
    // Kill the rest (first and last).
    for id in [&ids[0], &ids[2]] {
        client
            .delete(format!("{}/api/agents/{}", server.base_url, id))
            .send()
            .await
            .unwrap();
    }
    // List should be empty
    let resp = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap();
    let agents: Vec<serde_json::Value> = resp.json().await.unwrap();
    assert_eq!(agents.len(), 0);
}
// ---------------------------------------------------------------------------
// Auth integration tests
// ---------------------------------------------------------------------------
/// Start a test server with Bearer-token authentication enabled.
///
/// Same route set as `start_test_server_with_provider`, plus an auth layer
/// (fed the configured api_key via `from_fn_with_state`) placed BEFORE the
/// request-logging layer in the chain. NOTE(review): layer order here is
/// load-bearing — axum runs layers bottom-up on the request path — so this
/// block is kept byte-identical rather than restructured.
async fn start_test_server_with_auth(api_key: &str) -> TestServer {
    let tmp = tempfile::tempdir().expect("Failed to create temp dir");
    let config = KernelConfig {
        home_dir: tmp.path().to_path_buf(),
        data_dir: tmp.path().join("data"),
        // Non-empty api_key switches the auth middleware on.
        api_key: api_key.to_string(),
        default_model: DefaultModelConfig {
            provider: "ollama".to_string(),
            model: "test-model".to_string(),
            api_key_env: "OLLAMA_API_KEY".to_string(),
            base_url: None,
        },
        ..KernelConfig::default()
    };
    let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
    let kernel = Arc::new(kernel);
    kernel.set_self_handle();
    let state = Arc::new(AppState {
        kernel,
        started_at: Instant::now(),
        peer_registry: None,
        bridge_manager: tokio::sync::Mutex::new(None),
        channels_config: tokio::sync::RwLock::new(Default::default()),
        shutdown_notify: Arc::new(tokio::sync::Notify::new()),
    });
    // The auth middleware receives the key as its own state, read back from
    // the kernel config (round-trips what was written above).
    let api_key_state = state.kernel.config.api_key.clone();
    let app = Router::new()
        .route("/api/health", axum::routing::get(routes::health))
        .route("/api/status", axum::routing::get(routes::status))
        .route(
            "/api/agents",
            axum::routing::get(routes::list_agents).post(routes::spawn_agent),
        )
        .route(
            "/api/agents/{id}/message",
            axum::routing::post(routes::send_message),
        )
        .route(
            "/api/agents/{id}/session",
            axum::routing::get(routes::get_agent_session),
        )
        .route("/api/agents/{id}/ws", axum::routing::get(ws::agent_ws))
        .route(
            "/api/agents/{id}",
            axum::routing::delete(routes::kill_agent),
        )
        .route(
            "/api/triggers",
            axum::routing::get(routes::list_triggers).post(routes::create_trigger),
        )
        .route(
            "/api/triggers/{id}",
            axum::routing::delete(routes::delete_trigger),
        )
        .route(
            "/api/workflows",
            axum::routing::get(routes::list_workflows).post(routes::create_workflow),
        )
        .route(
            "/api/workflows/{id}/run",
            axum::routing::post(routes::run_workflow),
        )
        .route(
            "/api/workflows/{id}/runs",
            axum::routing::get(routes::list_workflow_runs),
        )
        .route("/api/shutdown", axum::routing::post(routes::shutdown))
        // Auth first in the chain (closest to the routes).
        .layer(axum::middleware::from_fn_with_state(
            api_key_state,
            middleware::auth,
        ))
        .layer(axum::middleware::from_fn(middleware::request_logging))
        .layer(TraceLayer::new_for_http())
        .layer(CorsLayer::permissive())
        .with_state(state.clone());
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
        .await
        .expect("Failed to bind test server");
    let addr = listener.local_addr().unwrap();
    tokio::spawn(async move {
        axum::serve(listener, app).await.unwrap();
    });
    TestServer {
        base_url: format!("http://{}", addr),
        state,
        _tmp: tmp,
    }
}
/// Even with auth enabled, /api/health stays publicly reachable with no
/// Authorization header.
#[tokio::test]
async fn test_auth_health_is_public() {
    let srv = start_test_server_with_auth("secret-key-123").await;
    let http = reqwest::Client::new();
    let url = format!("{}/api/health", srv.base_url);
    // No auth header attached on purpose.
    let response = http.get(url).send().await.unwrap();
    assert_eq!(response.status(), 200);
}
/// A protected endpoint hit without any Authorization header returns 401
/// with an error mentioning the missing credential.
/// (/api/status is public for the dashboard, so /api/commands is used.)
#[tokio::test]
async fn test_auth_rejects_no_token() {
    let srv = start_test_server_with_auth("secret-key-123").await;
    let http = reqwest::Client::new();
    let url = format!("{}/api/commands", srv.base_url);
    let response = http.get(url).send().await.unwrap();
    assert_eq!(response.status(), 401);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert!(payload["error"].as_str().unwrap().contains("Missing"));
}
/// A protected endpoint hit with an incorrect bearer token returns 401 with
/// an error mentioning the invalid credential.
/// (/api/status is public for the dashboard, so /api/commands is used.)
#[tokio::test]
async fn test_auth_rejects_wrong_token() {
    let srv = start_test_server_with_auth("secret-key-123").await;
    let http = reqwest::Client::new();
    let response = http
        .get(format!("{}/api/commands", srv.base_url))
        .header("authorization", "Bearer wrong-key")
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 401);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert!(payload["error"].as_str().unwrap().contains("Invalid"));
}
/// Supplying the correct bearer token unlocks a protected endpoint (200 with
/// the normal status payload).
#[tokio::test]
async fn test_auth_accepts_correct_token() {
    let srv = start_test_server_with_auth("secret-key-123").await;
    let http = reqwest::Client::new();
    let response = http
        .get(format!("{}/api/status", srv.base_url))
        .header("authorization", "Bearer secret-key-123")
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), 200);
    let payload: serde_json::Value = response.json().await.unwrap();
    assert_eq!(payload["status"], "running");
}
/// When no API key is configured (the default server), auth is disabled and
/// endpoints are reachable without any Authorization header.
#[tokio::test]
async fn test_auth_disabled_when_no_key() {
    // start_test_server() configures no api_key → auth middleware off.
    let srv = start_test_server().await;
    let http = reqwest::Client::new();
    let url = format!("{}/api/status", srv.base_url);
    let response = http.get(url).send().await.unwrap();
    assert_eq!(response.status(), 200);
}

View File

@@ -0,0 +1,270 @@
//! Daemon lifecycle integration tests.
//!
//! Tests the real daemon startup, PID file management, health serving,
//! and graceful shutdown sequence.
use axum::Router;
use openfang_api::middleware;
use openfang_api::routes::{self, AppState};
use openfang_api::server::{read_daemon_info, DaemonInfo};
use openfang_kernel::OpenFangKernel;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
use std::time::Instant;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
/// DaemonInfo must survive a JSON serialize → deserialize round-trip with
/// every field intact.
#[test]
fn test_daemon_info_serde_roundtrip() {
    let original = DaemonInfo {
        pid: 12345,
        listen_addr: "127.0.0.1:4200".to_string(),
        started_at: "2024-01-01T00:00:00Z".to_string(),
        version: "0.1.0".to_string(),
        platform: "linux".to_string(),
    };
    // Pretty-printing matches how the daemon writes the file on disk.
    let encoded = serde_json::to_string_pretty(&original).unwrap();
    let decoded: DaemonInfo = serde_json::from_str(&encoded).unwrap();
    assert_eq!(decoded.pid, 12345);
    assert_eq!(decoded.listen_addr, "127.0.0.1:4200");
    assert_eq!(decoded.version, "0.1.0");
    assert_eq!(decoded.platform, "linux");
}
/// read_daemon_info must load a daemon.json written to the given directory
/// and return its fields unchanged.
#[test]
fn test_read_daemon_info_from_file() {
    let dir = tempfile::tempdir().unwrap();
    // Write a daemon.json using this process's own PID.
    let written = DaemonInfo {
        pid: std::process::id(),
        listen_addr: "127.0.0.1:9999".to_string(),
        started_at: chrono::Utc::now().to_rfc3339(),
        version: "0.1.0".to_string(),
        platform: "test".to_string(),
    };
    let encoded = serde_json::to_string_pretty(&written).unwrap();
    std::fs::write(dir.path().join("daemon.json"), encoded).unwrap();
    // Read it back and compare the key fields.
    let loaded = read_daemon_info(dir.path()).expect("daemon.json should be readable");
    assert_eq!(loaded.pid, std::process::id());
    assert_eq!(loaded.listen_addr, "127.0.0.1:9999");
}
/// read_daemon_info on a directory with no daemon.json yields None.
#[test]
fn test_read_daemon_info_missing_file() {
    let dir = tempfile::tempdir().unwrap();
    assert!(read_daemon_info(dir.path()).is_none());
}
/// read_daemon_info on an unparseable daemon.json yields None instead of
/// panicking.
#[test]
fn test_read_daemon_info_corrupt_json() {
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().join("daemon.json");
    std::fs::write(path, "not json at all").unwrap();
    assert!(read_daemon_info(dir.path()).is_none());
}
/// Test the full daemon lifecycle:
/// 1. Boot kernel + start server on random port
/// 2. Write daemon info file
/// 3. Verify health endpoint
/// 4. Verify daemon info file contents match
/// 5. Shut down and verify cleanup
#[tokio::test]
async fn test_full_daemon_lifecycle() {
    let tmp = tempfile::tempdir().unwrap();
    let daemon_info_path = tmp.path().join("daemon.json");
    // Kernel config isolated to the temp dir; ollama provider needs no key.
    let config = KernelConfig {
        home_dir: tmp.path().to_path_buf(),
        data_dir: tmp.path().join("data"),
        default_model: DefaultModelConfig {
            provider: "ollama".to_string(),
            model: "test".to_string(),
            api_key_env: "OLLAMA_API_KEY".to_string(),
            base_url: None,
        },
        ..KernelConfig::default()
    };
    let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
    let kernel = Arc::new(kernel);
    kernel.set_self_handle();
    let state = Arc::new(AppState {
        kernel: kernel.clone(),
        started_at: Instant::now(),
        peer_registry: None,
        bridge_manager: tokio::sync::Mutex::new(None),
        channels_config: tokio::sync::RwLock::new(Default::default()),
        shutdown_notify: Arc::new(tokio::sync::Notify::new()),
    });
    // Minimal router: only the endpoints this lifecycle exercises.
    let app = Router::new()
        .route("/api/health", axum::routing::get(routes::health))
        .route("/api/status", axum::routing::get(routes::status))
        .route("/api/shutdown", axum::routing::post(routes::shutdown))
        .layer(axum::middleware::from_fn(middleware::request_logging))
        .layer(TraceLayer::new_for_http())
        .layer(CorsLayer::permissive())
        .with_state(state.clone());
    // Bind to random port
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    // Spawn server
    tokio::spawn(async move {
        axum::serve(listener, app).await.unwrap();
    });
    // Write daemon info file (like run_daemon does)
    let daemon_info = DaemonInfo {
        pid: std::process::id(),
        listen_addr: addr.to_string(),
        started_at: chrono::Utc::now().to_rfc3339(),
        version: env!("CARGO_PKG_VERSION").to_string(),
        platform: std::env::consts::OS.to_string(),
    };
    let json = serde_json::to_string_pretty(&daemon_info).unwrap();
    std::fs::write(&daemon_info_path, &json).unwrap();
    // --- Verify daemon info file: readable and round-trips pid/addr. ---
    assert!(daemon_info_path.exists());
    let loaded = read_daemon_info(tmp.path()).unwrap();
    assert_eq!(loaded.pid, std::process::id());
    assert_eq!(loaded.listen_addr, addr.to_string());
    // --- Verify health endpoint ---
    let client = reqwest::Client::new();
    let resp = client
        .get(format!("http://{}/api/health", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(body["status"], "ok");
    // --- Verify status endpoint ---
    let resp = client
        .get(format!("http://{}/api/status", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(body["status"], "running");
    // --- Shutdown: the endpoint itself must answer 200. ---
    let resp = client
        .post(format!("http://{}/api/shutdown", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 200);
    // Clean up daemon info file (like run_daemon does)
    let _ = std::fs::remove_file(&daemon_info_path);
    assert!(!daemon_info_path.exists());
    kernel.shutdown();
}
/// A daemon.json pointing at a PID that is (almost certainly) not running is
/// still returned by read_daemon_info — liveness checking is the caller's
/// job (run_daemon), not the reader's.
#[test]
fn test_stale_daemon_info_detection() {
    let dir = tempfile::tempdir().unwrap();
    // Implausibly large PID: effectively guaranteed not to be a live process.
    let stale = DaemonInfo {
        pid: 99999999, // unlikely to be running
        listen_addr: "127.0.0.1:9999".to_string(),
        started_at: "2024-01-01T00:00:00Z".to_string(),
        version: "0.1.0".to_string(),
        platform: "test".to_string(),
    };
    let encoded = serde_json::to_string_pretty(&stale).unwrap();
    std::fs::write(dir.path().join("daemon.json"), encoded).unwrap();
    // The reader only parses the file; it does not probe the PID.
    let loaded = read_daemon_info(dir.path()).expect("stale file should still parse");
    assert_eq!(loaded.pid, 99999999);
}
/// Test that the server starts and immediately responds to requests.
/// Boots a kernel, serves only /api/health, and asserts the very first
/// request completes in under one second.
#[tokio::test]
async fn test_server_immediate_responsiveness() {
    let tmp = tempfile::tempdir().unwrap();
    let config = KernelConfig {
        home_dir: tmp.path().to_path_buf(),
        data_dir: tmp.path().join("data"),
        default_model: DefaultModelConfig {
            provider: "ollama".to_string(),
            model: "test".to_string(),
            api_key_env: "OLLAMA_API_KEY".to_string(),
            base_url: None,
        },
        ..KernelConfig::default()
    };
    let kernel = OpenFangKernel::boot_with_config(config).unwrap();
    let kernel = Arc::new(kernel);
    // NOTE(review): unlike other setups in this file, set_self_handle() is
    // not called here — presumably not needed for a bare health route;
    // confirm against kernel docs if this test grows.
    let state = Arc::new(AppState {
        kernel: kernel.clone(),
        started_at: Instant::now(),
        peer_registry: None,
        bridge_manager: tokio::sync::Mutex::new(None),
        channels_config: tokio::sync::RwLock::new(Default::default()),
        shutdown_notify: Arc::new(tokio::sync::Notify::new()),
    });
    // Single-route router, no middleware: minimal serving path.
    let app = Router::new()
        .route("/api/health", axum::routing::get(routes::health))
        .with_state(state);
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    tokio::spawn(async move {
        axum::serve(listener, app).await.unwrap();
    });
    // Hit health endpoint immediately — should respond fast
    let client = reqwest::Client::new();
    let start = Instant::now();
    let resp = client
        .get(format!("http://{}/api/health", addr))
        .send()
        .await
        .unwrap();
    let latency = start.elapsed();
    assert_eq!(resp.status(), 200);
    assert!(
        latency.as_millis() < 1000,
        "Health endpoint should respond in <1s, took {}ms",
        latency.as_millis()
    );
    kernel.shutdown();
}

View File

@@ -0,0 +1,584 @@
//! Load & performance tests for the OpenFang API.
//!
//! Measures throughput under concurrent access: agent spawning, API endpoint
//! latency, session management, and memory usage.
//!
//! Run: cargo test -p openfang-api --test load_test -- --nocapture
use axum::Router;
use openfang_api::middleware;
use openfang_api::routes::{self, AppState};
use openfang_kernel::OpenFangKernel;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
// ---------------------------------------------------------------------------
// Test infrastructure (mirrors api_integration_test.rs)
// ---------------------------------------------------------------------------
/// Handle to a booted load-test server (mirrors api_integration_test.rs).
/// Dropping it shuts the kernel down and deletes the temp dir.
struct TestServer {
    // Root URL of the running server, e.g. "http://127.0.0.1:PORT".
    base_url: String,
    // Shared application state; used by Drop to reach the kernel.
    state: Arc<AppState>,
    // Temp dir backing the kernel's home/data dirs; removed on drop.
    _tmp: tempfile::TempDir,
}
impl Drop for TestServer {
    /// Shut the kernel down when the handle goes out of scope so each load
    /// test tears down its own kernel even on panic.
    fn drop(&mut self) {
        self.state.kernel.shutdown();
    }
}
async fn start_test_server() -> TestServer {
let tmp = tempfile::tempdir().expect("Failed to create temp dir");
let config = KernelConfig {
home_dir: tmp.path().to_path_buf(),
data_dir: tmp.path().join("data"),
default_model: DefaultModelConfig {
provider: "ollama".to_string(),
model: "test-model".to_string(),
api_key_env: "OLLAMA_API_KEY".to_string(),
base_url: None,
},
..KernelConfig::default()
};
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let kernel = Arc::new(kernel);
kernel.set_self_handle();
let state = Arc::new(AppState {
kernel,
started_at: Instant::now(),
peer_registry: None,
bridge_manager: tokio::sync::Mutex::new(None),
channels_config: tokio::sync::RwLock::new(Default::default()),
shutdown_notify: Arc::new(tokio::sync::Notify::new()),
});
let app = Router::new()
.route("/api/health", axum::routing::get(routes::health))
.route("/api/status", axum::routing::get(routes::status))
.route("/api/version", axum::routing::get(routes::version))
.route(
"/api/metrics",
axum::routing::get(routes::prometheus_metrics),
)
.route(
"/api/agents",
axum::routing::get(routes::list_agents).post(routes::spawn_agent),
)
.route(
"/api/agents/{id}",
axum::routing::get(routes::get_agent).delete(routes::kill_agent),
)
.route(
"/api/agents/{id}/session",
axum::routing::get(routes::get_agent_session),
)
.route(
"/api/agents/{id}/session/reset",
axum::routing::post(routes::reset_session),
)
.route(
"/api/agents/{id}/sessions",
axum::routing::get(routes::list_agent_sessions).post(routes::create_agent_session),
)
.route("/api/tools", axum::routing::get(routes::list_tools))
.route("/api/models", axum::routing::get(routes::list_models))
.route("/api/providers", axum::routing::get(routes::list_providers))
.route("/api/usage", axum::routing::get(routes::usage_stats))
.route(
"/api/workflows",
axum::routing::get(routes::list_workflows).post(routes::create_workflow),
)
.route(
"/api/workflows/{id}/run",
axum::routing::post(routes::run_workflow),
)
.route("/api/config", axum::routing::get(routes::get_config))
.layer(axum::middleware::from_fn(middleware::request_logging))
.layer(TraceLayer::new_for_http())
.layer(CorsLayer::permissive())
.with_state(state.clone());
let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
.await
.expect("Failed to bind test server");
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
axum::serve(listener, app).await.unwrap();
});
TestServer {
base_url: format!("http://{}", addr),
state,
_tmp: tmp,
}
}
/// Minimal agent manifest shared by the load tests.
///
/// Each test replaces the "load-test-agent" name with a unique per-test
/// variant before POSTing, and the provider/model pair matches the kernel's
/// ollama default so no real LLM credentials are required.
const TEST_MANIFEST: &str = r#"
name = "load-test-agent"
version = "0.1.0"
description = "Load test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test-model"
system_prompt = "You are a test agent."
[capabilities]
tools = ["file_read"]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
// ---------------------------------------------------------------------------
// Load tests
// ---------------------------------------------------------------------------
/// Test: Concurrent agent spawns — verify kernel handles parallel agent creation.
#[tokio::test]
async fn load_concurrent_agent_spawns() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    let n = 20; // 20 concurrent spawns
    let start = Instant::now();
    // Fire every spawn request at once, each with a unique agent name.
    let handles: Vec<_> = (0..n)
        .map(|i| {
            let c = client.clone();
            let url = format!("{}/api/agents", server.base_url);
            let manifest = TEST_MANIFEST.replace("load-test-agent", &format!("load-agent-{i}"));
            tokio::spawn(async move {
                let res = c
                    .post(&url)
                    .json(&serde_json::json!({"manifest_toml": manifest}))
                    .send()
                    .await
                    .expect("request failed");
                (res.status().as_u16(), i)
            })
        })
        .collect();
    let mut success = 0;
    for handle in handles {
        let (status, _i) = handle.await.unwrap();
        if matches!(status, 200 | 201) {
            success += 1;
        }
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Concurrent spawns: {success}/{n} succeeded in {:.0}ms ({:.0} spawns/sec)",
        elapsed.as_millis(),
        n as f64 / elapsed.as_secs_f64()
    );
    assert!(success >= n - 2, "Most agents should spawn successfully");
    // Cross-check against the list endpoint.
    let agents: serde_json::Value = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap()
        .json()
        .await
        .unwrap();
    let count = agents.as_array().map_or(0, |a| a.len());
    eprintln!(" [LOAD] Total agents after spawn: {count}");
    assert!(count >= success);
}
/// Test: API endpoint latency — measure p50/p95/p99 for health, status, list agents.
#[tokio::test]
async fn load_endpoint_latency() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Seed a few agents so the list endpoint has data to serialize.
    for i in 0..5 {
        let manifest = TEST_MANIFEST.replace("load-test-agent", &format!("latency-agent-{i}"));
        client
            .post(format!("{}/api/agents", server.base_url))
            .json(&serde_json::json!({"manifest_toml": manifest}))
            .send()
            .await
            .unwrap();
    }
    let endpoints = [
        ("GET", "/api/health"),
        ("GET", "/api/status"),
        ("GET", "/api/agents"),
        ("GET", "/api/tools"),
        ("GET", "/api/models"),
        ("GET", "/api/metrics"),
        ("GET", "/api/config"),
        ("GET", "/api/usage"),
    ];
    for (method, path) in &endpoints {
        let n = 100;
        let mut latencies = Vec::with_capacity(n);
        for _ in 0..n {
            let start = Instant::now();
            let url = format!("{}{}", server.base_url, path);
            let res = if *method == "GET" {
                client.get(&url).send().await
            } else {
                client.post(&url).send().await
            };
            let elapsed = start.elapsed();
            assert!(res.is_ok(), "{method} {path} failed");
            latencies.push(elapsed);
        }
        latencies.sort_unstable();
        let (p50, p95, p99) = (
            latencies[n / 2],
            latencies[(n as f64 * 0.95) as usize],
            latencies[(n as f64 * 0.99) as usize],
        );
        eprintln!(
            " [LOAD] {method} {path:30} p50={:>5.1}ms p95={:>5.1}ms p99={:>5.1}ms",
            p50.as_secs_f64() * 1000.0,
            p95.as_secs_f64() * 1000.0,
            p99.as_secs_f64() * 1000.0,
        );
        // Read endpoints get generous headroom: p99 must stay under 500ms.
        assert!(
            p99 < Duration::from_millis(500),
            "{method} {path} p99 too high: {p99:?}"
        );
    }
}
/// Test: Concurrent reads — many clients hitting the same endpoints simultaneously.
#[tokio::test]
async fn load_concurrent_reads() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Seed a handful of agents so reads have something to return.
    for i in 0..3 {
        let manifest = TEST_MANIFEST.replace("load-test-agent", &format!("concurrent-agent-{i}"));
        client
            .post(format!("{}/api/agents", server.base_url))
            .json(&serde_json::json!({"manifest_toml": manifest}))
            .send()
            .await
            .unwrap();
    }
    let n = 50;
    let start = Instant::now();
    let handles: Vec<_> = (0..n)
        .map(|i| {
            let c = client.clone();
            let base = server.base_url.clone();
            tokio::spawn(async move {
                // Cycle each task through a different read endpoint.
                let path = ["/api/health", "/api/agents", "/api/status", "/api/metrics"][i % 4];
                c.get(format!("{base}{path}"))
                    .send()
                    .await
                    .expect("request failed")
                    .status()
                    .as_u16()
            })
        })
        .collect();
    let mut success = 0;
    for h in handles {
        if h.await.unwrap() == 200 {
            success += 1;
        }
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Concurrent reads: {success}/{n} succeeded in {:.0}ms ({:.0} req/sec)",
        elapsed.as_millis(),
        n as f64 / elapsed.as_secs_f64()
    );
    assert_eq!(success, n, "All concurrent reads should succeed");
}
/// Test: Session management under load — create, list, and switch sessions.
#[tokio::test]
async fn load_session_management() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Spawn one agent to own all the sessions.
    let spawned: serde_json::Value = client
        .post(format!("{}/api/agents", server.base_url))
        .json(&serde_json::json!({"manifest_toml": TEST_MANIFEST}))
        .send()
        .await
        .unwrap()
        .json()
        .await
        .unwrap();
    let agent_id = spawned["agent_id"].as_str().unwrap().to_string();
    // Create a batch of labelled sessions, collecting whatever ids come back.
    let n = 10;
    let start = Instant::now();
    let mut session_ids = Vec::new();
    for i in 0..n {
        let created: serde_json::Value = client
            .post(format!(
                "{}/api/agents/{}/sessions",
                server.base_url, agent_id
            ))
            .json(&serde_json::json!({"label": format!("session-{i}")}))
            .send()
            .await
            .unwrap()
            .json()
            .await
            .unwrap();
        if let Some(id) = created.get("session_id").and_then(|v| v.as_str()) {
            session_ids.push(id.to_string());
        }
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Created {n} sessions in {:.0}ms",
        elapsed.as_millis()
    );
    // List sessions and count them.
    let start = Instant::now();
    let sessions_resp: serde_json::Value = client
        .get(format!(
            "{}/api/agents/{}/sessions",
            server.base_url, agent_id
        ))
        .send()
        .await
        .unwrap()
        .json()
        .await
        .unwrap();
    // Response shape is {"sessions": [...]}; tolerate a bare array as fallback.
    let session_count = match sessions_resp.get("sessions").and_then(|v| v.as_array()) {
        Some(list) => list.len(),
        None => sessions_resp.as_array().map_or(0, |a| a.len()),
    };
    eprintln!(
        " [LOAD] Listed {session_count} sessions in {:.1}ms",
        start.elapsed().as_secs_f64() * 1000.0
    );
    // NOTE(review): create_session may silently no-op for some requests if the
    // agent was spawned without a session, so accept either signal of success.
    eprintln!(" [LOAD] Session IDs collected: {}", session_ids.len());
    assert!(
        !session_ids.is_empty() || session_count > 0,
        "Should have created some sessions"
    );
    // Rapidly switch through every session we created.
    let start = Instant::now();
    for sid in &session_ids {
        client
            .post(format!(
                "{}/api/agents/{}/sessions/{}/switch",
                server.base_url, agent_id, sid
            ))
            .send()
            .await
            .unwrap();
    }
    eprintln!(
        " [LOAD] Switched through {} sessions in {:.0}ms",
        session_ids.len(),
        start.elapsed().as_millis()
    );
}
/// Test: Workflow creation and listing under load.
#[tokio::test]
async fn load_workflow_operations() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    let n = 15;
    let start = Instant::now();
    // Create all workflows concurrently, one single-step workflow per task.
    let handles: Vec<_> = (0..n)
        .map(|i| {
            let c = client.clone();
            let url = format!("{}/api/workflows", server.base_url);
            tokio::spawn(async move {
                let body = serde_json::json!({
                    "name": format!("wf-{i}"),
                    "description": format!("Load test workflow {i}"),
                    "steps": [{
                        "name": "step1",
                        "agent_name": "test-agent",
                        "mode": "sequential",
                        "prompt": "{{input}}"
                    }]
                });
                c.post(&url)
                    .json(&body)
                    .send()
                    .await
                    .expect("request failed")
                    .status()
                    .as_u16()
            })
        })
        .collect();
    let mut created = 0;
    for h in handles {
        if matches!(h.await.unwrap(), 200 | 201) {
            created += 1;
        }
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Created {created}/{n} workflows in {:.0}ms",
        elapsed.as_millis()
    );
    // List everything back and confirm nothing was lost.
    let start = Instant::now();
    let workflows: serde_json::Value = client
        .get(format!("{}/api/workflows", server.base_url))
        .send()
        .await
        .unwrap()
        .json()
        .await
        .unwrap();
    let wf_count = workflows.as_array().map_or(0, |a| a.len());
    eprintln!(
        " [LOAD] Listed {wf_count} workflows in {:.1}ms",
        start.elapsed().as_secs_f64() * 1000.0
    );
    assert!(wf_count >= created);
}
/// Test: Agent spawn + kill cycle — stress the registry.
#[tokio::test]
async fn load_spawn_kill_cycle() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    let cycles = 10;
    let start = Instant::now();
    // Spawn phase: collect every agent id the API hands back.
    let mut ids = Vec::with_capacity(cycles);
    for i in 0..cycles {
        let manifest = TEST_MANIFEST.replace("load-test-agent", &format!("cycle-agent-{i}"));
        let res: serde_json::Value = client
            .post(format!("{}/api/agents", server.base_url))
            .json(&serde_json::json!({"manifest_toml": manifest}))
            .send()
            .await
            .unwrap()
            .json()
            .await
            .unwrap();
        if let Some(id) = res.get("agent_id").and_then(|v| v.as_str()) {
            ids.push(id.to_string());
        }
    }
    // Kill phase: delete each spawned agent.
    for id in ids.iter() {
        client
            .delete(format!("{}/api/agents/{}", server.base_url, id))
            .send()
            .await
            .unwrap();
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Spawn+kill {cycles} agents in {:.0}ms ({:.0}ms per cycle)",
        elapsed.as_millis(),
        elapsed.as_millis() as f64 / cycles as f64
    );
    // The registry must be empty once every agent has been deleted.
    let agents: serde_json::Value = client
        .get(format!("{}/api/agents", server.base_url))
        .send()
        .await
        .unwrap()
        .json()
        .await
        .unwrap();
    let remaining = agents.as_array().map_or(0, |a| a.len());
    assert_eq!(remaining, 0, "All agents should be killed");
}
/// Test: Prometheus metrics endpoint under sustained load.
#[tokio::test]
async fn load_metrics_sustained() {
    let server = start_test_server().await;
    let client = reqwest::Client::new();
    // Seed a few agents so the metrics output has real data.
    for i in 0..3 {
        let manifest = TEST_MANIFEST.replace("load-test-agent", &format!("metrics-agent-{i}"));
        client
            .post(format!("{}/api/agents", server.base_url))
            .json(&serde_json::json!({"manifest_toml": manifest}))
            .send()
            .await
            .unwrap();
    }
    // Hammer the metrics endpoint and verify every response is well-formed.
    let n = 200;
    let metrics_url = format!("{}/api/metrics", server.base_url);
    let start = Instant::now();
    for _ in 0..n {
        let res = client.get(&metrics_url).send().await.unwrap();
        assert_eq!(res.status().as_u16(), 200);
        let body = res.text().await.unwrap();
        assert!(body.contains("openfang_agents_active"));
    }
    let elapsed = start.elapsed();
    eprintln!(
        " [LOAD] Metrics {n} requests in {:.0}ms ({:.0} req/sec, {:.1}ms avg)",
        elapsed.as_millis(),
        n as f64 / elapsed.as_secs_f64(),
        elapsed.as_secs_f64() * 1000.0 / n as f64
    );
}