Initial commit

iven committed on 2026-03-01 16:24:24 +08:00
commit 92e5def702
492 changed files with 211343 additions and 0 deletions


@@ -0,0 +1,163 @@
//! Integration test: boot kernel -> spawn agent -> send message via Groq API.
//!
//! Run with: GROQ_API_KEY=gsk_... cargo test -p openfang-kernel --test integration_test -- --nocapture
use openfang_kernel::OpenFangKernel;
use openfang_types::agent::AgentManifest;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
fn test_config() -> KernelConfig {
let tmp = std::env::temp_dir().join("openfang-integration-test");
let _ = std::fs::remove_dir_all(&tmp);
std::fs::create_dir_all(&tmp).unwrap();
KernelConfig {
home_dir: tmp.clone(),
data_dir: tmp.join("data"),
default_model: DefaultModelConfig {
provider: "groq".to_string(),
model: "llama-3.3-70b-versatile".to_string(),
api_key_env: "GROQ_API_KEY".to_string(),
base_url: None,
},
..KernelConfig::default()
}
}
#[tokio::test]
async fn test_full_pipeline_with_groq() {
if std::env::var("GROQ_API_KEY").is_err() {
eprintln!("GROQ_API_KEY not set, skipping integration test");
return;
}
// Boot kernel
let config = test_config();
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
// Spawn agent
let manifest: AgentManifest = toml::from_str(
r#"
name = "test-agent"
version = "0.1.0"
description = "Integration test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are a test agent. Reply concisely in one sentence."
[capabilities]
tools = ["file_read"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let agent_id = kernel.spawn_agent(manifest).expect("Agent should spawn");
// Send message
let result = kernel
.send_message(agent_id, "Say hello in exactly 5 words.")
.await
.expect("Message should get a response");
println!("\n=== AGENT RESPONSE ===");
println!("{}", result.response);
println!(
"=== USAGE: {} tokens in, {} tokens out, {} iterations ===",
result.total_usage.input_tokens, result.total_usage.output_tokens, result.iterations
);
assert!(!result.response.is_empty(), "Response should not be empty");
assert!(
result.total_usage.input_tokens > 0,
"Should have used tokens"
);
// Kill agent
kernel.kill_agent(agent_id).expect("Agent should be killed");
kernel.shutdown();
}
#[tokio::test]
async fn test_multiple_agents_different_models() {
if std::env::var("GROQ_API_KEY").is_err() {
eprintln!("GROQ_API_KEY not set, skipping integration test");
return;
}
let config = test_config();
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
// Spawn agent 1: llama 70b
let manifest1: AgentManifest = toml::from_str(
r#"
name = "agent-llama70b"
version = "0.1.0"
description = "Llama 70B agent"
author = "test"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are Agent A. Always start your reply with 'A:'."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
// Spawn agent 2: llama 8b (faster, smaller)
let manifest2: AgentManifest = toml::from_str(
r#"
name = "agent-llama8b"
version = "0.1.0"
description = "Llama 8B agent"
author = "test"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.1-8b-instant"
system_prompt = "You are Agent B. Always start your reply with 'B:'."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let id1 = kernel.spawn_agent(manifest1).expect("Agent 1 should spawn");
let id2 = kernel.spawn_agent(manifest2).expect("Agent 2 should spawn");
// Send messages to both
let r1 = kernel
.send_message(id1, "What model are you?")
.await
.expect("Agent 1 response");
let r2 = kernel
.send_message(id2, "What model are you?")
.await
.expect("Agent 2 response");
println!("\n=== AGENT 1 (llama-70b) ===");
println!("{}", r1.response);
println!("\n=== AGENT 2 (llama-8b) ===");
println!("{}", r2.response);
assert!(!r1.response.is_empty());
assert!(!r2.response.is_empty());
// Cleanup
kernel.kill_agent(id1).unwrap();
kernel.kill_agent(id2).unwrap();
kernel.shutdown();
}


@@ -0,0 +1,201 @@
//! Multi-agent integration test: spawn 6 agents, send messages, verify all respond.
//!
//! Run with: GROQ_API_KEY=gsk_... cargo test -p openfang-kernel --test multi_agent_test -- --nocapture
use openfang_kernel::OpenFangKernel;
use openfang_types::agent::AgentManifest;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
fn test_config() -> KernelConfig {
let tmp = std::env::temp_dir().join("openfang-multi-agent-test");
let _ = std::fs::remove_dir_all(&tmp);
std::fs::create_dir_all(&tmp).unwrap();
KernelConfig {
home_dir: tmp.clone(),
data_dir: tmp.join("data"),
default_model: DefaultModelConfig {
provider: "groq".to_string(),
model: "llama-3.3-70b-versatile".to_string(),
api_key_env: "GROQ_API_KEY".to_string(),
base_url: None,
},
..KernelConfig::default()
}
}
fn load_manifest(toml_str: &str) -> AgentManifest {
toml::from_str(toml_str).expect("Should parse manifest")
}
#[tokio::test]
async fn test_six_agent_fleet() {
if std::env::var("GROQ_API_KEY").is_err() {
eprintln!("GROQ_API_KEY not set, skipping multi-agent test");
return;
}
let kernel = OpenFangKernel::boot_with_config(test_config()).expect("Kernel should boot");
// Define all 6 agents with different roles and models
let agents = vec![
(
"coder",
r#"
name = "coder"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are Coder. Reply with 'CODER:' prefix. Be concise."
[capabilities]
tools = ["file_read", "file_write"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"Write a one-line Rust function that adds two numbers.",
),
(
"researcher",
r#"
name = "researcher"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are Researcher. Reply with 'RESEARCHER:' prefix. Be concise."
[capabilities]
tools = ["web_fetch"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"What is Rust's primary advantage over C++? One sentence.",
),
(
"writer",
r#"
name = "writer"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are Writer. Reply with 'WRITER:' prefix. Be concise."
[capabilities]
tools = ["file_read", "file_write"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"Write a one-sentence tagline for an Agent Operating System.",
),
(
"ops",
r#"
name = "ops"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.1-8b-instant"
system_prompt = "You are Ops. Reply with 'OPS:' prefix. Be concise."
[capabilities]
tools = ["shell_exec"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"What would you check first if a server is running slowly?",
),
(
"analyst",
r#"
name = "analyst"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "You are Analyst. Reply with 'ANALYST:' prefix. Be concise."
[capabilities]
tools = ["file_read"]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"What are the top 3 metrics to track for an API service?",
),
(
"hello-world",
r#"
name = "hello-world"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.1-8b-instant"
system_prompt = "You are a friendly greeter. Reply with 'HELLO:' prefix. Be concise."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
"Greet the user in a fun way.",
),
];
println!("\n{}", "=".repeat(60));
println!(" OPENFANG MULTI-AGENT FLEET TEST");
println!(" Spawning {} agents...", agents.len());
println!("{}\n", "=".repeat(60));
// Spawn all agents
let mut agent_ids = Vec::new();
for (name, manifest_str, _) in &agents {
let manifest = load_manifest(manifest_str);
let id = kernel
.spawn_agent(manifest)
.unwrap_or_else(|e| panic!("Failed to spawn {name}: {e}"));
println!(" Spawned: {name:<12} -> {id}");
agent_ids.push(id);
}
assert_eq!(kernel.registry.count(), 6, "Should have 6 agents");
println!(
"\n All {} agents spawned. Sending messages...\n",
agents.len()
);
// Send messages to each agent sequentially (to respect Groq rate limits)
let mut results = Vec::new();
for (i, (name, _, message)) in agents.iter().enumerate() {
let result = kernel
.send_message(agent_ids[i], message)
.await
.unwrap_or_else(|e| panic!("Failed to message {name}: {e}"));
println!("--- {name} ---");
println!(" Q: {message}");
println!(" A: {}", result.response);
println!(
" [{} tokens in, {} tokens out, {} iters]",
result.total_usage.input_tokens, result.total_usage.output_tokens, result.iterations
);
println!();
assert!(
!result.response.is_empty(),
"{name} response should not be empty"
);
results.push(result);
}
// Summary
let total_input: u64 = results.iter().map(|r| r.total_usage.input_tokens).sum();
let total_output: u64 = results.iter().map(|r| r.total_usage.output_tokens).sum();
println!("============================================================");
println!(" FLEET SUMMARY");
println!(" Agents: {}", agents.len());
println!(" Total input: {} tokens", total_input);
println!(" Total output: {} tokens", total_output);
println!(" All responded: YES");
println!("============================================================");
// Cleanup
for id in agent_ids {
kernel.kill_agent(id).unwrap();
}
kernel.shutdown();
}


@@ -0,0 +1,410 @@
//! WASM agent integration tests.
//!
//! Tests the full pipeline: boot kernel → spawn agent with `module = "wasm:..."`
//! → send message → verify WASM module executes and returns response.
//!
//! These tests use real WASM execution — no mocks.
use openfang_kernel::OpenFangKernel;
use openfang_types::agent::AgentManifest;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
/// Minimal echo module.
///
/// Ideally it would read the "message" field from the input and wrap it as
/// `{"response": "..."}`, but WAT can't do real string manipulation, so the
/// module returns the entire input JSON as-is (which the kernel extracts
/// via serde).
const ECHO_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
;; Echo: return the input as-is (kernel will extract from JSON)
(i64.or
(i64.shl
(i64.extend_i32_u (local.get $ptr))
(i64.const 32)
)
(i64.extend_i32_u (local.get $len))
)
)
)
"#;
/// Module that always returns a fixed JSON response.
/// Writes `{"response":"hello from wasm"}` at offset 0 and returns it.
const HELLO_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 4096))
;; Fixed response bytes: {"response":"hello from wasm"}
(data (i32.const 0) "{\"response\":\"hello from wasm\"}")
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
;; Return pointer=0, length=30 (the fixed response)
(i64.const 30) ;; low 32 = len=30, high 32 = ptr=0
)
)
"#;
/// Module with infinite loop — tests fuel exhaustion enforcement.
const INFINITE_LOOP_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
(loop $inf
(br $inf)
)
(i64.const 0)
)
)
"#;
/// Host-call proxy: forwards input to host_call and returns the response.
const HOST_CALL_PROXY_WAT: &str = r#"
(module
(import "openfang" "host_call" (func $host_call (param i32 i32) (result i64)))
(memory (export "memory") 2)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $input_ptr i32) (param $input_len i32) (result i64)
(call $host_call (local.get $input_ptr) (local.get $input_len))
)
)
"#;
fn test_config(tmp: &tempfile::TempDir) -> KernelConfig {
KernelConfig {
home_dir: tmp.path().to_path_buf(),
data_dir: tmp.path().join("data"),
default_model: DefaultModelConfig {
provider: "ollama".to_string(),
model: "test".to_string(),
api_key_env: "OLLAMA_API_KEY".to_string(),
base_url: None,
},
..KernelConfig::default()
}
}
fn wasm_manifest(name: &str, module: &str) -> AgentManifest {
let toml_str = format!(
r#"
name = "{name}"
version = "0.1.0"
description = "WASM test agent"
author = "test"
module = "wasm:{module}"
[model]
provider = "ollama"
model = "test"
system_prompt = "WASM agent."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#
);
toml::from_str(&toml_str).unwrap()
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
/// Test that a WASM agent can be spawned and returns a response.
#[tokio::test]
async fn test_wasm_agent_hello_response() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("hello.wat"), HELLO_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest = wasm_manifest("wasm-hello", "hello.wat");
let agent_id = kernel.spawn_agent(manifest).unwrap();
let result = kernel
.send_message(agent_id, "Hi there!")
.await
.expect("WASM agent should execute");
assert_eq!(result.response, "hello from wasm");
assert_eq!(result.iterations, 1);
kernel.shutdown();
}
/// Test that a WASM echo module returns input data.
#[tokio::test]
async fn test_wasm_agent_echo() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("echo.wat"), ECHO_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest = wasm_manifest("wasm-echo", "echo.wat");
let agent_id = kernel.spawn_agent(manifest).unwrap();
let result = kernel
.send_message(agent_id, "test message")
.await
.expect("Echo agent should execute");
// Echo returns the entire input JSON, so the response should contain our message
assert!(
result.response.contains("test message"),
"Response should contain the input message, got: {}",
result.response
);
kernel.shutdown();
}
/// Test that WASM fuel exhaustion is caught and reported as an error.
#[tokio::test]
async fn test_wasm_agent_fuel_exhaustion() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("loop.wat"), INFINITE_LOOP_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest = wasm_manifest("wasm-loop", "loop.wat");
let agent_id = kernel.spawn_agent(manifest).unwrap();
let result = kernel.send_message(agent_id, "go").await;
assert!(
result.is_err(),
"Infinite loop should fail with fuel exhaustion"
);
let err_msg = format!("{}", result.unwrap_err());
assert!(
err_msg.contains("Fuel exhausted") || err_msg.contains("fuel") || err_msg.contains("WASM"),
"Error should mention fuel exhaustion, got: {err_msg}"
);
kernel.shutdown();
}
/// Test that a missing WASM module produces a clear error.
#[tokio::test]
async fn test_wasm_agent_missing_module() {
let tmp = tempfile::tempdir().unwrap();
// Don't write any .wat file
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest = wasm_manifest("wasm-missing", "nonexistent.wasm");
let agent_id = kernel.spawn_agent(manifest).unwrap();
let result = kernel.send_message(agent_id, "hello").await;
assert!(result.is_err(), "Missing module should fail");
let err_msg = format!("{}", result.unwrap_err());
assert!(
err_msg.contains("Failed to read") || err_msg.contains("nonexistent"),
"Error should mention the missing file, got: {err_msg}"
);
kernel.shutdown();
}
/// Test that host_call time_now works end-to-end through the kernel.
#[tokio::test]
async fn test_wasm_agent_host_call_time() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("proxy.wat"), HOST_CALL_PROXY_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
// Proxy module forwards input to host_call — send a time_now request
let toml_str = r#"
name = "wasm-proxy"
version = "0.1.0"
description = "Host call proxy"
author = "test"
module = "wasm:proxy.wat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Proxy."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
let manifest: AgentManifest = toml::from_str(toml_str).unwrap();
let agent_id = kernel.spawn_agent(manifest).unwrap();
// The proxy module expects JSON like {"method":"time_now","params":{}},
// but the kernel wraps messages as {"message":"...", "agent_id":"...", "agent_name":"..."},
// so the dispatch sees no recognized method and returns an "Unknown" result.
// This still proves the full pipeline works end-to-end.
let result = kernel
.send_message(agent_id, r#"{"method":"time_now","params":{}}"#)
.await
.expect("Proxy agent should execute");
// The response will contain the host_call dispatch result
assert!(!result.response.is_empty(), "Response should not be empty");
kernel.shutdown();
}
/// Test WASM agent with streaming (a non-streaming WASM result is surfaced
/// as a TextDelta followed by ContentComplete).
#[tokio::test]
async fn test_wasm_agent_streaming_fallback() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("hello.wat"), HELLO_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let kernel = Arc::new(kernel);
let manifest = wasm_manifest("wasm-stream", "hello.wat");
let agent_id = kernel.spawn_agent(manifest).unwrap();
let (mut rx, handle) = kernel
.send_message_streaming(agent_id, "Hi!", None)
.expect("Streaming should start");
// Collect all stream events
let mut events = vec![];
while let Some(event) = rx.recv().await {
events.push(event);
}
// Should have gotten a TextDelta + ContentComplete
assert!(
events.len() >= 2,
"Expected at least 2 stream events, got {}",
events.len()
);
let final_result = handle.await.unwrap().expect("Task should complete");
assert_eq!(final_result.response, "hello from wasm");
kernel.shutdown();
}
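// A consumer loop over the stream events used above might look like this
// (the event type and variant payloads are assumed for illustration, not
// taken from the kernel's API):
//
//     while let Some(event) = rx.recv().await {
//         match event {
//             StreamEvent::TextDelta(chunk) => print!("{chunk}"),
//             StreamEvent::ContentComplete => break,
//             _ => {}
//         }
//     }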
/// Test that spawning multiple WASM agents works concurrently.
#[tokio::test]
async fn test_multiple_wasm_agents() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("hello.wat"), HELLO_WAT).unwrap();
std::fs::write(tmp.path().join("echo.wat"), ECHO_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let hello_id = kernel
.spawn_agent(wasm_manifest("hello-agent", "hello.wat"))
.unwrap();
let echo_id = kernel
.spawn_agent(wasm_manifest("echo-agent", "echo.wat"))
.unwrap();
// Execute both
let hello_result = kernel.send_message(hello_id, "hi").await.unwrap();
let echo_result = kernel.send_message(echo_id, "test data").await.unwrap();
assert_eq!(hello_result.response, "hello from wasm");
assert!(echo_result.response.contains("test data"));
// Verify agent list shows both
let agents = kernel.registry.list();
assert_eq!(agents.len(), 2);
kernel.shutdown();
}
/// Test WASM agent alongside LLM agent (mixed fleet).
#[tokio::test]
async fn test_mixed_wasm_and_llm_agents() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("hello.wat"), HELLO_WAT).unwrap();
let config = test_config(&tmp);
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
// Spawn a WASM agent
let wasm_id = kernel
.spawn_agent(wasm_manifest("wasm-agent", "hello.wat"))
.unwrap();
// Spawn a regular LLM agent (won't actually call LLM since ollama isn't running,
// but it should spawn fine and coexist)
let llm_toml = r#"
name = "llm-agent"
version = "0.1.0"
description = "LLM test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "You are a test agent."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
let llm_manifest: AgentManifest = toml::from_str(llm_toml).unwrap();
let llm_id = kernel.spawn_agent(llm_manifest).unwrap();
// Verify both agents exist
let agents = kernel.registry.list();
assert_eq!(agents.len(), 2);
// WASM agent should work
let result = kernel.send_message(wasm_id, "hello").await.unwrap();
assert_eq!(result.response, "hello from wasm");
// LLM agent exists but we won't send it a message (no real LLM)
assert!(kernel.registry.get(llm_id).is_some());
// Kill WASM agent
kernel.kill_agent(wasm_id).unwrap();
assert_eq!(kernel.registry.list().len(), 1);
kernel.shutdown();
}


@@ -0,0 +1,404 @@
//! End-to-end workflow integration tests.
//!
//! Tests the full pipeline: boot kernel → spawn agents → create workflow →
//! execute workflow → verify outputs flow through the pipeline.
//!
//! LLM tests require GROQ_API_KEY. Non-LLM tests verify the kernel-level
//! workflow wiring without making real API calls.
use openfang_kernel::workflow::{
ErrorMode, StepAgent, StepMode, Workflow, WorkflowId, WorkflowStep,
};
use openfang_kernel::OpenFangKernel;
use openfang_types::agent::AgentManifest;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
fn test_config(provider: &str, model: &str, api_key_env: &str) -> KernelConfig {
// Persist the temp dir past this function: dropping the TempDir guard here
// would delete the directory before the kernel boots.
let tmp = tempfile::tempdir().unwrap().into_path();
KernelConfig {
home_dir: tmp.clone(),
data_dir: tmp.join("data"),
default_model: DefaultModelConfig {
provider: provider.to_string(),
model: model.to_string(),
api_key_env: api_key_env.to_string(),
base_url: None,
},
..KernelConfig::default()
}
}
fn spawn_test_agent(
kernel: &OpenFangKernel,
name: &str,
system_prompt: &str,
) -> openfang_types::agent::AgentId {
let manifest_str = format!(
r#"
name = "{name}"
version = "0.1.0"
description = "Workflow test agent: {name}"
author = "test"
module = "builtin:chat"
[model]
provider = "groq"
model = "llama-3.3-70b-versatile"
system_prompt = "{system_prompt}"
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#
);
let manifest: AgentManifest = toml::from_str(&manifest_str).unwrap();
kernel.spawn_agent(manifest).expect("Agent should spawn")
}
// ---------------------------------------------------------------------------
// Kernel-level workflow wiring tests (no LLM needed)
// ---------------------------------------------------------------------------
/// Test that workflow registration and agent resolution work at the kernel level.
#[tokio::test]
async fn test_workflow_register_and_resolve() {
let config = test_config("ollama", "test-model", "OLLAMA_API_KEY");
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let kernel = Arc::new(kernel);
// Spawn agents
let manifest: AgentManifest = toml::from_str(
r#"
name = "agent-alpha"
version = "0.1.0"
description = "Alpha"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Alpha."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let alpha_id = kernel.spawn_agent(manifest).unwrap();
let manifest2: AgentManifest = toml::from_str(
r#"
name = "agent-beta"
version = "0.1.0"
description = "Beta"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Beta."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let beta_id = kernel.spawn_agent(manifest2).unwrap();
// Create a 2-step workflow referencing agents by name
let workflow = Workflow {
id: WorkflowId::new(),
name: "alpha-beta-pipeline".to_string(),
description: "Tests agent resolution by name".to_string(),
steps: vec![
WorkflowStep {
name: "step-alpha".to_string(),
agent: StepAgent::ByName {
name: "agent-alpha".to_string(),
},
prompt_template: "Analyze: {{input}}".to_string(),
mode: StepMode::Sequential,
timeout_secs: 30,
error_mode: ErrorMode::Fail,
output_var: Some("alpha_out".to_string()),
},
WorkflowStep {
name: "step-beta".to_string(),
agent: StepAgent::ByName {
name: "agent-beta".to_string(),
},
prompt_template: "Summarize: {{input}} (alpha said: {{alpha_out}})".to_string(),
mode: StepMode::Sequential,
timeout_secs: 30,
error_mode: ErrorMode::Fail,
output_var: None,
},
],
created_at: chrono::Utc::now(),
};
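// The {{input}} / {{alpha_out}} placeholders are plain string substitutions
// against the run's variables. A minimal sketch of the expansion assumed
// here (the kernel's real templating may be richer):
//
//     fn render(template: &str, vars: &std::collections::HashMap<String, String>) -> String {
//         let mut out = template.to_string();
//         for (key, value) in vars {
//             out = out.replace(&format!("{{{{{key}}}}}"), value);
//         }
//         out
//     }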
let wf_id = kernel.register_workflow(workflow).await;
// Verify workflow is registered
let workflows = kernel.workflows.list_workflows().await;
assert_eq!(workflows.len(), 1);
assert_eq!(workflows[0].name, "alpha-beta-pipeline");
// Verify agents can be found by name
let alpha = kernel.registry.find_by_name("agent-alpha");
assert!(alpha.is_some());
assert_eq!(alpha.unwrap().id, alpha_id);
let beta = kernel.registry.find_by_name("agent-beta");
assert!(beta.is_some());
assert_eq!(beta.unwrap().id, beta_id);
// Verify workflow run can be created
let run_id = kernel
.workflows
.create_run(wf_id, "test input".to_string())
.await;
assert!(run_id.is_some());
let run = kernel.workflows.get_run(run_id.unwrap()).await.unwrap();
assert_eq!(run.input, "test input");
kernel.shutdown();
}
/// Test workflow with agent referenced by ID.
#[tokio::test]
async fn test_workflow_agent_by_id() {
let config = test_config("ollama", "test-model", "OLLAMA_API_KEY");
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest: AgentManifest = toml::from_str(
r#"
name = "id-agent"
version = "0.1.0"
description = "Test"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Test."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let agent_id = kernel.spawn_agent(manifest).unwrap();
let workflow = Workflow {
id: WorkflowId::new(),
name: "by-id-test".to_string(),
description: "".to_string(),
steps: vec![WorkflowStep {
name: "step1".to_string(),
agent: StepAgent::ById {
id: agent_id.to_string(),
},
prompt_template: "{{input}}".to_string(),
mode: StepMode::Sequential,
timeout_secs: 30,
error_mode: ErrorMode::Fail,
output_var: None,
}],
created_at: chrono::Utc::now(),
};
let wf_id = kernel.register_workflow(workflow).await;
// Can create run (agent resolution happens at execute time)
let run_id = kernel
.workflows
.create_run(wf_id, "hello".to_string())
.await;
assert!(run_id.is_some());
kernel.shutdown();
}
/// Test trigger registration and listing at kernel level.
#[tokio::test]
async fn test_trigger_registration_with_kernel() {
use openfang_kernel::triggers::TriggerPattern;
let config = test_config("ollama", "test-model", "OLLAMA_API_KEY");
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let manifest: AgentManifest = toml::from_str(
r#"
name = "trigger-agent"
version = "0.1.0"
description = "Trigger test"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Test."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#,
)
.unwrap();
let agent_id = kernel.spawn_agent(manifest).unwrap();
// Register triggers
let t1 = kernel
.register_trigger(
agent_id,
TriggerPattern::Lifecycle,
"Lifecycle event: {{event}}".to_string(),
0,
)
.unwrap();
let t2 = kernel
.register_trigger(
agent_id,
TriggerPattern::SystemKeyword {
keyword: "deploy".to_string(),
},
"Deploy event: {{event}}".to_string(),
5,
)
.unwrap();
// List all triggers
let all = kernel.list_triggers(None);
assert_eq!(all.len(), 2);
// List triggers for specific agent
let agent_triggers = kernel.list_triggers(Some(agent_id));
assert_eq!(agent_triggers.len(), 2);
// Remove one
assert!(kernel.remove_trigger(t1));
let remaining = kernel.list_triggers(None);
assert_eq!(remaining.len(), 1);
assert_eq!(remaining[0].id, t2);
kernel.shutdown();
}
// ---------------------------------------------------------------------------
// Full E2E with real LLM (skip if no GROQ_API_KEY)
// ---------------------------------------------------------------------------
/// End-to-end: boot kernel → spawn 2 agents → create 2-step workflow →
/// run it through the real Groq LLM → verify output flows from step 1 to step 2.
#[tokio::test]
async fn test_workflow_e2e_with_groq() {
if std::env::var("GROQ_API_KEY").is_err() {
eprintln!("GROQ_API_KEY not set, skipping E2E workflow test");
return;
}
let config = test_config("groq", "llama-3.3-70b-versatile", "GROQ_API_KEY");
let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
let kernel = Arc::new(kernel);
kernel.set_self_handle();
// Spawn two agents with distinct roles
let _analyst_id = spawn_test_agent(
&kernel,
"wf-analyst",
"You are an analyst. When given text, respond with exactly: ANALYSIS: followed by a one-sentence analysis.",
);
let _writer_id = spawn_test_agent(
&kernel,
"wf-writer",
"You are a writer. When given text, respond with exactly: SUMMARY: followed by a one-sentence summary.",
);
// Create a 2-step pipeline: analyst → writer
let workflow = Workflow {
id: WorkflowId::new(),
name: "analyst-writer-pipeline".to_string(),
description: "E2E integration test workflow".to_string(),
steps: vec![
WorkflowStep {
name: "analyze".to_string(),
agent: StepAgent::ByName {
name: "wf-analyst".to_string(),
},
prompt_template: "Analyze the following: {{input}}".to_string(),
mode: StepMode::Sequential,
timeout_secs: 60,
error_mode: ErrorMode::Fail,
output_var: None,
},
WorkflowStep {
name: "summarize".to_string(),
agent: StepAgent::ByName {
name: "wf-writer".to_string(),
},
prompt_template: "Summarize this analysis: {{input}}".to_string(),
mode: StepMode::Sequential,
timeout_secs: 60,
error_mode: ErrorMode::Fail,
output_var: None,
},
],
created_at: chrono::Utc::now(),
};
let wf_id = kernel.register_workflow(workflow).await;
// Run the workflow
let result = kernel
.run_workflow(
wf_id,
"The Rust programming language is growing rapidly.".to_string(),
)
.await;
assert!(
result.is_ok(),
"Workflow should complete: {:?}",
result.err()
);
let (run_id, output) = result.unwrap();
println!("\n=== WORKFLOW OUTPUT ===");
println!("{output}");
println!("======================\n");
assert!(!output.is_empty(), "Workflow output should not be empty");
// Verify the workflow run record
let run = kernel.workflows.get_run(run_id).await.unwrap();
assert!(matches!(
run.state,
openfang_kernel::workflow::WorkflowRunState::Completed
));
assert_eq!(run.step_results.len(), 2);
assert_eq!(run.step_results[0].step_name, "analyze");
assert_eq!(run.step_results[1].step_name, "summarize");
// Both steps should have used tokens
assert!(run.step_results[0].input_tokens > 0);
assert!(run.step_results[0].output_tokens > 0);
assert!(run.step_results[1].input_tokens > 0);
assert!(run.step_results[1].output_tokens > 0);
// List runs
let runs = kernel.workflows.list_runs(None).await;
assert_eq!(runs.len(), 1);
kernel.shutdown();
}