Files
openfang/crates/openfang-kernel/tests/wasm_agent_integration_test.rs
iven 92e5def702
Some checks failed
CI / Check / macos-latest (push) Has been cancelled
CI / Check / ubuntu-latest (push) Has been cancelled
CI / Check / windows-latest (push) Has been cancelled
CI / Test / macos-latest (push) Has been cancelled
CI / Test / ubuntu-latest (push) Has been cancelled
CI / Test / windows-latest (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Format (push) Has been cancelled
CI / Security Audit (push) Has been cancelled
CI / Secrets Scan (push) Has been cancelled
CI / Install Script Smoke Test (push) Has been cancelled
初始化提交
2026-03-01 16:24:24 +08:00

411 lines
13 KiB
Rust

//! WASM agent integration tests.
//!
//! Tests the full pipeline: boot kernel → spawn agent with `module = "wasm:..."`
//! → send message → verify WASM module executes and returns response.
//!
//! These tests use real WASM execution — no mocks.
use openfang_kernel::OpenFangKernel;
use openfang_types::agent::AgentManifest;
use openfang_types::config::{DefaultModelConfig, KernelConfig};
use std::sync::Arc;
/// Minimal echo module: returns its input buffer unchanged.
///
/// Exports:
/// - `memory`: one 64 KiB page.
/// - `alloc(size) -> ptr`: bump allocator starting at offset 1024 (never frees).
/// - `execute(ptr, len) -> i64`: packs the result as `(ptr << 32) | len`,
///   pointing straight back at the input region — the kernel then extracts
///   the response from that JSON via serde.
///
/// WAT has no string primitives, so no real JSON manipulation happens here;
/// the module simply hands the input JSON back to the host as-is.
const ECHO_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
;; Echo: return the input as-is (kernel will extract from JSON)
(i64.or
(i64.shl
(i64.extend_i32_u (local.get $ptr))
(i64.const 32)
)
(i64.extend_i32_u (local.get $len))
)
)
)
"#;
/// Module that always returns a fixed JSON response.
///
/// A data segment places `{"response":"hello from wasm"}` (30 bytes) at
/// memory offset 0, and `execute` returns `(ptr=0 << 32) | len=30`, i.e.
/// the constant 30. The bump allocator starts at 4096 so `alloc` calls
/// never overwrite the fixed response bytes in the low memory region.
const HELLO_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 4096))
;; Fixed response bytes: {"response":"hello from wasm"}
(data (i32.const 0) "{\"response\":\"hello from wasm\"}")
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
;; Return pointer=0, length=30 (the fixed response)
(i64.const 30) ;; low 32 = len=30, high 32 = ptr=0
)
)
"#;
/// Module with an infinite loop — tests fuel-exhaustion enforcement.
///
/// `execute` enters `(loop (br $inf))` and never reaches the trailing
/// `(i64.const 0)`, so the only way the call can end is the runtime
/// aborting it (e.g. when metered fuel runs out). The `alloc` export is
/// present only to satisfy the kernel's expected module interface.
const INFINITE_LOOP_WAT: &str = r#"
(module
(memory (export "memory") 1)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $ptr i32) (param $len i32) (result i64)
(loop $inf
(br $inf)
)
(i64.const 0)
)
)
"#;
/// Host-call proxy: forwards the input buffer to the imported
/// `openfang.host_call(ptr, len) -> i64` function and returns its packed
/// result directly (same `(ptr << 32) | len` convention as the other
/// modules — presumably; the packing is performed host-side here).
///
/// Declares 2 memory pages so the host has room to write its response.
const HOST_CALL_PROXY_WAT: &str = r#"
(module
(import "openfang" "host_call" (func $host_call (param i32 i32) (result i64)))
(memory (export "memory") 2)
(global $bump (mut i32) (i32.const 1024))
(func (export "alloc") (param $size i32) (result i32)
(local $ptr i32)
(local.set $ptr (global.get $bump))
(global.set $bump (i32.add (global.get $bump) (local.get $size)))
(local.get $ptr)
)
(func (export "execute") (param $input_ptr i32) (param $input_len i32) (result i64)
(call $host_call (local.get $input_ptr) (local.get $input_len))
)
)
"#;
/// Builds a minimal `KernelConfig` rooted in the given temp directory.
///
/// Points both `home_dir` and `data_dir` inside `tmp`, wires up a dummy
/// "ollama" model entry (no real backend needed), and takes everything
/// else from `KernelConfig::default()`.
fn test_config(tmp: &tempfile::TempDir) -> KernelConfig {
    let default_model = DefaultModelConfig {
        provider: "ollama".to_string(),
        model: "test".to_string(),
        api_key_env: "OLLAMA_API_KEY".to_string(),
        base_url: None,
    };
    KernelConfig {
        home_dir: tmp.path().to_path_buf(),
        data_dir: tmp.path().join("data"),
        default_model,
        ..KernelConfig::default()
    }
}
/// Builds an `AgentManifest` for a WASM-backed agent.
///
/// `module` is a filename (resolved by the kernel) that is embedded as
/// `module = "wasm:{module}"`; `name` becomes the agent name.
///
/// # Panics
/// Panics if the generated TOML fails to deserialize — that would be a bug
/// in this helper itself, not in the code under test, so the invariant is
/// stated via `expect` rather than a bare `unwrap`.
fn wasm_manifest(name: &str, module: &str) -> AgentManifest {
    let toml_str = format!(
        r#"
name = "{name}"
version = "0.1.0"
description = "WASM test agent"
author = "test"
module = "wasm:{module}"
[model]
provider = "ollama"
model = "test"
system_prompt = "WASM agent."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#
    );
    toml::from_str(&toml_str).expect("wasm_manifest helper generated invalid manifest TOML")
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
/// A spawned WASM agent must execute its module and return the fixed
/// response in exactly one iteration.
#[tokio::test]
async fn test_wasm_agent_hello_response() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("hello.wat"), HELLO_WAT).unwrap();
    let kernel =
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot");
    let agent_id = kernel
        .spawn_agent(wasm_manifest("wasm-hello", "hello.wat"))
        .unwrap();
    let result = kernel
        .send_message(agent_id, "Hi there!")
        .await
        .expect("WASM agent should execute");
    // HELLO_WAT always emits this exact payload, in a single pass.
    assert_eq!(result.response, "hello from wasm");
    assert_eq!(result.iterations, 1);
    kernel.shutdown();
}
/// The echo module hands its input buffer back, so the response must
/// contain the message we sent.
#[tokio::test]
async fn test_wasm_agent_echo() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("echo.wat"), ECHO_WAT).unwrap();
    let kernel =
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot");
    let agent_id = kernel
        .spawn_agent(wasm_manifest("wasm-echo", "echo.wat"))
        .unwrap();
    let result = kernel
        .send_message(agent_id, "test message")
        .await
        .expect("Echo agent should execute");
    // Echo returns the entire input JSON, so the response should contain our message
    assert!(
        result.response.contains("test message"),
        "Response should contain the input message, got: {}",
        result.response
    );
    kernel.shutdown();
}
/// Test that WASM fuel exhaustion is caught and reported as an error.
///
/// The agent runs `INFINITE_LOOP_WAT`, whose `execute` never terminates;
/// the kernel's fuel metering must abort the call and surface an error
/// instead of hanging this test forever.
#[tokio::test]
async fn test_wasm_agent_fuel_exhaustion() {
    let tmp = tempfile::tempdir().unwrap();
    std::fs::write(tmp.path().join("loop.wat"), INFINITE_LOOP_WAT).unwrap();
    let config = test_config(&tmp);
    let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
    let manifest = wasm_manifest("wasm-loop", "loop.wat");
    let agent_id = kernel.spawn_agent(manifest).unwrap();
    let result = kernel.send_message(agent_id, "go").await;
    assert!(
        result.is_err(),
        "Infinite loop should fail with fuel exhaustion"
    );
    // `to_string()` is the idiomatic form of `format!("{}", err)` for a
    // Display-able error.
    let err_msg = result.unwrap_err().to_string();
    assert!(
        err_msg.contains("Fuel exhausted") || err_msg.contains("fuel") || err_msg.contains("WASM"),
        "Error should mention fuel exhaustion, got: {err_msg}"
    );
    kernel.shutdown();
}
/// Test that a missing WASM module produces a clear error.
///
/// No `.wat` file is written to the temp dir, so the first message to the
/// agent must fail with an error naming the unreadable/missing file.
#[tokio::test]
async fn test_wasm_agent_missing_module() {
    let tmp = tempfile::tempdir().unwrap();
    // Don't write any .wat file
    let config = test_config(&tmp);
    let kernel = OpenFangKernel::boot_with_config(config).expect("Kernel should boot");
    let manifest = wasm_manifest("wasm-missing", "nonexistent.wasm");
    let agent_id = kernel.spawn_agent(manifest).unwrap();
    let result = kernel.send_message(agent_id, "hello").await;
    assert!(result.is_err(), "Missing module should fail");
    // `to_string()` is the idiomatic form of `format!("{}", err)` for a
    // Display-able error.
    let err_msg = result.unwrap_err().to_string();
    assert!(
        err_msg.contains("Failed to read") || err_msg.contains("nonexistent"),
        "Error should mention the missing file, got: {err_msg}"
    );
    kernel.shutdown();
}
/// Exercises the host_call path end-to-end: the proxy module forwards its
/// input to `host_call` and returns whatever the dispatcher produced.
#[tokio::test]
async fn test_wasm_agent_host_call_time() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("proxy.wat"), HOST_CALL_PROXY_WAT).unwrap();
    let kernel =
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot");
    // Proxy module forwards input to host_call — send a time_now request
    let manifest_toml = r#"
name = "wasm-proxy"
version = "0.1.0"
description = "Host call proxy"
author = "test"
module = "wasm:proxy.wat"
[model]
provider = "ollama"
model = "test"
system_prompt = "Proxy."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
    let manifest: AgentManifest = toml::from_str(manifest_toml).unwrap();
    let agent_id = kernel.spawn_agent(manifest).unwrap();
    // The proxy module expects JSON like {"method":"time_now","params":{}}
    // But our kernel wraps it as {"message":"...", "agent_id":"...", "agent_name":"..."}
    // So the proxy will try to dispatch with method=null which returns "Unknown"
    // This still proves the full pipeline works end-to-end
    let result = kernel
        .send_message(agent_id, r#"{"method":"time_now","params":{}}"#)
        .await
        .expect("Proxy agent should execute");
    // The response will contain the host_call dispatch result
    assert!(!result.response.is_empty(), "Response should not be empty");
    kernel.shutdown();
}
/// Streaming against a WASM agent falls back to a minimal event stream
/// (delta + completion) while the join handle still yields the full result.
#[tokio::test]
async fn test_wasm_agent_streaming_fallback() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("hello.wat"), HELLO_WAT).unwrap();
    let kernel = Arc::new(
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot"),
    );
    let agent_id = kernel
        .spawn_agent(wasm_manifest("wasm-stream", "hello.wat"))
        .unwrap();
    let (mut rx, handle) = kernel
        .send_message_streaming(agent_id, "Hi!", None)
        .expect("Streaming should start");
    // Drain the channel until the sender side closes.
    let mut events = Vec::new();
    while let Some(event) = rx.recv().await {
        events.push(event);
    }
    // A minimal stream is a TextDelta followed by a ContentComplete.
    assert!(
        events.len() >= 2,
        "Expected at least 2 stream events, got {}",
        events.len()
    );
    let final_result = handle.await.unwrap().expect("Task should complete");
    assert_eq!(final_result.response, "hello from wasm");
    kernel.shutdown();
}
/// Two WASM agents backed by different modules can coexist in one kernel,
/// each executing its own module independently.
#[tokio::test]
async fn test_multiple_wasm_agents() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("hello.wat"), HELLO_WAT).unwrap();
    std::fs::write(dir.path().join("echo.wat"), ECHO_WAT).unwrap();
    let kernel =
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot");
    let hello_id = kernel
        .spawn_agent(wasm_manifest("hello-agent", "hello.wat"))
        .unwrap();
    let echo_id = kernel
        .spawn_agent(wasm_manifest("echo-agent", "echo.wat"))
        .unwrap();
    // Execute both
    let hello_result = kernel.send_message(hello_id, "hi").await.unwrap();
    let echo_result = kernel.send_message(echo_id, "test data").await.unwrap();
    assert_eq!(hello_result.response, "hello from wasm");
    assert!(echo_result.response.contains("test data"));
    // Verify agent list shows both
    assert_eq!(kernel.registry.list().len(), 2);
    kernel.shutdown();
}
/// A WASM agent and a builtin LLM-backed agent can share one kernel
/// (mixed fleet); killing the WASM agent leaves the other registered.
#[tokio::test]
async fn test_mixed_wasm_and_llm_agents() {
    let dir = tempfile::tempdir().unwrap();
    std::fs::write(dir.path().join("hello.wat"), HELLO_WAT).unwrap();
    let kernel =
        OpenFangKernel::boot_with_config(test_config(&dir)).expect("Kernel should boot");
    // Spawn a WASM agent
    let wasm_id = kernel
        .spawn_agent(wasm_manifest("wasm-agent", "hello.wat"))
        .unwrap();
    // Spawn a regular LLM agent (won't actually call LLM since ollama isn't running,
    // but it should spawn fine and coexist)
    let chat_manifest_toml = r#"
name = "llm-agent"
version = "0.1.0"
description = "LLM test agent"
author = "test"
module = "builtin:chat"
[model]
provider = "ollama"
model = "test"
system_prompt = "You are a test agent."
[capabilities]
memory_read = ["*"]
memory_write = ["self.*"]
"#;
    let chat_manifest: AgentManifest = toml::from_str(chat_manifest_toml).unwrap();
    let llm_id = kernel.spawn_agent(chat_manifest).unwrap();
    // Verify both agents exist
    assert_eq!(kernel.registry.list().len(), 2);
    // WASM agent should work
    let result = kernel.send_message(wasm_id, "hello").await.unwrap();
    assert_eq!(result.response, "hello from wasm");
    // LLM agent exists but we won't send it a message (no real LLM)
    assert!(kernel.registry.get(llm_id).is_some());
    // Kill WASM agent
    kernel.kill_agent(wasm_id).unwrap();
    assert_eq!(kernel.registry.list().len(), 1);
    kernel.shutdown();
}