feat: integrate DevQALoop into TeamOrchestrator and add integration test checklist
- Add Review tab to TeamOrchestrator with DevQALoopPanel integration - Create comprehensive integration test checklist (22 test cases) - Document component integration status analysis - Update progress documentation Key findings: - Most "low integration" components were actually integrated via indirect paths - DevQALoop was the only truly unintegrated component, now fixed Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -21,10 +21,11 @@ mod secure_storage;
|
||||
use serde::Serialize;
|
||||
use serde_json::{json, Value};
|
||||
use std::fs;
|
||||
use std::net::{TcpStream, ToSocketAddrs};
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
use tauri::{AppHandle, Manager};
|
||||
|
||||
#[derive(Serialize)]
|
||||
@@ -945,6 +946,299 @@ fn openfang_version(app: AppHandle) -> Result<VersionResponse, String> {
|
||||
})
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Health Check Commands
|
||||
// ============================================================================
|
||||
|
||||
/// Health status enum
///
/// Overall verdict of a health check. Serialized as a lowercase string
/// ("healthy" / "unhealthy" / "unknown") for the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "lowercase")]
enum HealthStatus {
    /// Core functionality confirmed working (port reachable, listeners present).
    Healthy,
    /// A required check failed (CLI missing, port unreachable, or no listeners).
    Unhealthy,
    /// Status could not be determined. NOTE(review): not produced by the
    /// checks visible in this file — possibly reserved for callers/frontend.
    Unknown,
}
|
||||
|
||||
/// Port check result
///
/// Outcome of a single TCP connectivity probe against the gateway port.
/// Serialized in camelCase for the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct PortCheckResult {
    /// The port that was probed.
    port: u16,
    /// True when a TCP connection was established within the timeout.
    accessible: bool,
    /// Connection latency in milliseconds; `None` when the probe failed.
    latency_ms: Option<u64>,
    /// Human-readable failure reason; `None` on success.
    error: Option<String>,
}
|
||||
|
||||
/// Process health details
///
/// Metrics for the gateway listener process. All fields are optional:
/// they are left `None` when no listener PID exists or when the
/// `process list` lookup fails. Serialized in camelCase for the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct ProcessHealthDetails {
    /// PID of the first listener process, if any.
    pid: Option<u32>,
    /// Process name (set to "openfang" when a listener is found).
    name: Option<String>,
    /// Service status string as reported by the gateway (e.g. "running").
    status: Option<String>,
    /// Process uptime in seconds, when obtainable.
    uptime_seconds: Option<u64>,
    /// CPU usage percentage, when obtainable.
    cpu_percent: Option<f64>,
    /// Resident memory in megabytes, when obtainable.
    memory_mb: Option<f64>,
}
|
||||
|
||||
/// Health check response
///
/// Aggregated result of `openfang_health_check`: the overall verdict plus
/// the individual probe results and any non-fatal issues discovered along
/// the way. Serialized in camelCase for the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct HealthCheckResponse {
    /// Overall health verdict.
    status: HealthStatus,
    /// Details about the gateway listener process.
    process: ProcessHealthDetails,
    /// Result of the TCP port probe.
    port_check: PortCheckResult,
    /// When this check ran, in seconds since the Unix epoch (0 on clock error).
    last_check_timestamp: u64,
    /// Names of the checks that were actually executed, in order.
    checks_performed: Vec<String>,
    /// Human-readable problems found; may be non-empty even when healthy.
    issues: Vec<String>,
    /// Where the OpenFang runtime was resolved from.
    runtime_source: Option<String>,
}
|
||||
|
||||
/// Check if a TCP port is accessible
|
||||
fn check_port_accessibility(host: &str, port: u16, timeout_ms: u64) -> PortCheckResult {
|
||||
let addr = format!("{}:{}", host, port);
|
||||
|
||||
// Resolve the address
|
||||
let socket_addr = match addr.to_socket_addrs() {
|
||||
Ok(mut addrs) => addrs.next(),
|
||||
Err(e) => {
|
||||
return PortCheckResult {
|
||||
port,
|
||||
accessible: false,
|
||||
latency_ms: None,
|
||||
error: Some(format!("Failed to resolve address: {}", e)),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
let Some(socket_addr) = socket_addr else {
|
||||
return PortCheckResult {
|
||||
port,
|
||||
accessible: false,
|
||||
latency_ms: None,
|
||||
error: Some("Failed to resolve address".to_string()),
|
||||
};
|
||||
};
|
||||
|
||||
// Try to connect with timeout
|
||||
let start = Instant::now();
|
||||
|
||||
// Use a simple TCP connect with timeout simulation
|
||||
let result = TcpStream::connect_timeout(&socket_addr, Duration::from_millis(timeout_ms));
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
let latency = start.elapsed().as_millis() as u64;
|
||||
PortCheckResult {
|
||||
port,
|
||||
accessible: true,
|
||||
latency_ms: Some(latency),
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
Err(e) => PortCheckResult {
|
||||
port,
|
||||
accessible: false,
|
||||
latency_ms: None,
|
||||
error: Some(format!("Connection failed: {}", e)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Get process uptime from status command
|
||||
fn get_process_uptime(status: &LocalGatewayStatus) -> Option<u64> {
|
||||
// Try to extract uptime from raw status data
|
||||
status
|
||||
.raw
|
||||
.get("process")
|
||||
.and_then(|p| p.get("uptimeSeconds"))
|
||||
.and_then(Value::as_u64)
|
||||
}
|
||||
|
||||
/// Perform comprehensive health check on OpenFang Kernel
|
||||
#[tauri::command]
|
||||
fn openfang_health_check(
|
||||
app: AppHandle,
|
||||
port: Option<u16>,
|
||||
timeout_ms: Option<u64>,
|
||||
) -> Result<HealthCheckResponse, String> {
|
||||
let check_port = port.unwrap_or(OPENFANG_DEFAULT_PORT);
|
||||
let timeout = timeout_ms.unwrap_or(3000);
|
||||
let mut checks_performed = Vec::new();
|
||||
let mut issues = Vec::new();
|
||||
|
||||
// Get current timestamp
|
||||
let last_check_timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map(|d| d.as_secs())
|
||||
.unwrap_or(0);
|
||||
|
||||
// 1. Check if OpenFang CLI is available
|
||||
let runtime = resolve_openfang_runtime(&app);
|
||||
let cli_available = runtime.executable.is_file();
|
||||
|
||||
if !cli_available {
|
||||
return Ok(HealthCheckResponse {
|
||||
status: HealthStatus::Unhealthy,
|
||||
process: ProcessHealthDetails {
|
||||
pid: None,
|
||||
name: None,
|
||||
status: None,
|
||||
uptime_seconds: None,
|
||||
cpu_percent: None,
|
||||
memory_mb: None,
|
||||
},
|
||||
port_check: PortCheckResult {
|
||||
port: check_port,
|
||||
accessible: false,
|
||||
latency_ms: None,
|
||||
error: Some("OpenFang CLI not available".to_string()),
|
||||
},
|
||||
last_check_timestamp,
|
||||
checks_performed: vec!["cli_availability".to_string()],
|
||||
issues: vec![format!(
|
||||
"OpenFang runtime not found at: {}",
|
||||
runtime.display_path.display()
|
||||
)],
|
||||
runtime_source: Some(runtime.source),
|
||||
});
|
||||
}
|
||||
checks_performed.push("cli_availability".to_string());
|
||||
|
||||
// 2. Get gateway status
|
||||
let gateway_status = read_gateway_status(&app)?;
|
||||
checks_performed.push("gateway_status".to_string());
|
||||
|
||||
// Check for configuration issues
|
||||
if !gateway_status.config_ok {
|
||||
issues.push("Gateway configuration has issues".to_string());
|
||||
}
|
||||
|
||||
// 3. Check port accessibility
|
||||
let port_check = check_port_accessibility("127.0.0.1", check_port, timeout);
|
||||
checks_performed.push("port_accessibility".to_string());
|
||||
|
||||
if !port_check.accessible {
|
||||
issues.push(format!(
|
||||
"Port {} is not accessible: {}",
|
||||
check_port,
|
||||
port_check.error.as_deref().unwrap_or("unknown error")
|
||||
));
|
||||
}
|
||||
|
||||
// 4. Extract process information
|
||||
let process_health = if !gateway_status.listener_pids.is_empty() {
|
||||
// Get the first listener PID
|
||||
let pid = gateway_status.listener_pids[0];
|
||||
|
||||
// Try to get detailed process info from process list
|
||||
let process_info = run_openfang(&app, &["process", "list", "--json"])
|
||||
.ok()
|
||||
.and_then(|result| parse_json_output(&result.stdout).ok())
|
||||
.and_then(|json| json.get("processes").and_then(Value::as_array).cloned());
|
||||
|
||||
let (cpu, memory, uptime) = if let Some(ref processes) = process_info {
|
||||
let matching = processes
|
||||
.iter()
|
||||
.find(|p| p.get("pid").and_then(Value::as_u64) == Some(pid as u64));
|
||||
|
||||
matching.map_or((None, None, None), |p| {
|
||||
(
|
||||
p.get("cpuPercent").and_then(Value::as_f64),
|
||||
p.get("memoryMb").and_then(Value::as_f64),
|
||||
p.get("uptimeSeconds").and_then(Value::as_u64),
|
||||
)
|
||||
})
|
||||
} else {
|
||||
(None, None, get_process_uptime(&gateway_status))
|
||||
};
|
||||
|
||||
ProcessHealthDetails {
|
||||
pid: Some(pid),
|
||||
name: Some("openfang".to_string()),
|
||||
status: Some(
|
||||
gateway_status
|
||||
.service_status
|
||||
.clone()
|
||||
.unwrap_or_else(|| "running".to_string()),
|
||||
),
|
||||
uptime_seconds: uptime,
|
||||
cpu_percent: cpu,
|
||||
memory_mb: memory,
|
||||
}
|
||||
} else {
|
||||
ProcessHealthDetails {
|
||||
pid: None,
|
||||
name: None,
|
||||
status: gateway_status.service_status.clone(),
|
||||
uptime_seconds: None,
|
||||
cpu_percent: None,
|
||||
memory_mb: None,
|
||||
}
|
||||
};
|
||||
|
||||
// Check if process is running but no listeners
|
||||
if gateway_status.service_status.as_deref() == Some("running")
|
||||
&& gateway_status.listener_pids.is_empty()
|
||||
{
|
||||
issues.push("Service reports running but no listener processes found".to_string());
|
||||
}
|
||||
|
||||
// 5. Determine overall health status
|
||||
let status = if !cli_available {
|
||||
HealthStatus::Unhealthy
|
||||
} else if !port_check.accessible {
|
||||
HealthStatus::Unhealthy
|
||||
} else if gateway_status.listener_pids.is_empty() {
|
||||
HealthStatus::Unhealthy
|
||||
} else if !issues.is_empty() {
|
||||
// Has some issues but core functionality is working
|
||||
HealthStatus::Healthy
|
||||
} else {
|
||||
HealthStatus::Healthy
|
||||
};
|
||||
|
||||
Ok(HealthCheckResponse {
|
||||
status,
|
||||
process: process_health,
|
||||
port_check,
|
||||
last_check_timestamp,
|
||||
checks_performed,
|
||||
issues,
|
||||
runtime_source: Some(runtime.source),
|
||||
})
|
||||
}
|
||||
|
||||
/// Quick ping to check if OpenFang is alive (lightweight check)
|
||||
#[tauri::command]
|
||||
fn openfang_ping(app: AppHandle) -> Result<bool, String> {
|
||||
let port_check = check_port_accessibility("127.0.0.1", OPENFANG_DEFAULT_PORT, 1000);
|
||||
|
||||
if port_check.accessible {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Fallback: check via status command
|
||||
match run_openfang(&app, &["gateway", "status", "--json", "--no-probe"]) {
|
||||
Ok(result) => {
|
||||
if let Ok(status) = parse_json_output(&result.stdout) {
|
||||
// Check if there are any listener PIDs
|
||||
let has_listeners = status
|
||||
.get("port")
|
||||
.and_then(|p| p.get("listeners"))
|
||||
.and_then(Value::as_array)
|
||||
.map(|arr| !arr.is_empty())
|
||||
.unwrap_or(false);
|
||||
|
||||
Ok(has_listeners)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
Err(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Backward-compatible aliases (OpenClaw naming)
|
||||
// These delegate to OpenFang commands for backward compatibility
|
||||
@@ -1013,10 +1307,14 @@ pub fn run() {
|
||||
openfang_prepare_for_tauri,
|
||||
openfang_approve_device_pairing,
|
||||
openfang_doctor,
|
||||
openfang_health_check,
|
||||
// Process monitoring commands
|
||||
openfang_process_list,
|
||||
openfang_process_logs,
|
||||
openfang_version,
|
||||
// Health check commands
|
||||
openfang_health_check,
|
||||
openfang_ping,
|
||||
// Backward-compatible aliases (OpenClaw naming)
|
||||
gateway_status,
|
||||
gateway_start,
|
||||
|
||||
@@ -9,7 +9,8 @@
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { useTeamStore } from '../store/teamStore';
|
||||
import { useGatewayStore } from '../store/gatewayStore';
|
||||
import { useAgentStore } from '../store/agentStore';
|
||||
import { DevQALoopPanel } from './DevQALoop';
|
||||
import type {
|
||||
TeamMember,
|
||||
TeamTask,
|
||||
@@ -20,7 +21,7 @@ import type {
|
||||
import {
|
||||
Users, Plus, Trash2, X,
|
||||
Bot, Clock, AlertTriangle, CheckCircle,
|
||||
Play, UserPlus, FileText,
|
||||
Play, UserPlus, FileText, RefreshCw,
|
||||
} from 'lucide-react';
|
||||
|
||||
// === Sub-Components ===
|
||||
@@ -206,7 +207,7 @@ interface TeamOrchestratorProps {
|
||||
}
|
||||
|
||||
export function TeamOrchestrator({ isOpen, onClose }: TeamOrchestratorProps) {
|
||||
const [view, setView] = useState<'teams' | 'tasks' | 'members'>('teams');
|
||||
const [view, setView] = useState<'teams' | 'tasks' | 'members' | 'review'>('teams');
|
||||
const [isCreating, setIsCreating] = useState(false);
|
||||
const [newTeamName, setNewTeamName] = useState('');
|
||||
const [newTeamPattern, setNewTeamPattern] = useState<CollaborationPattern>('sequential');
|
||||
@@ -230,9 +231,10 @@ export function TeamOrchestrator({ isOpen, onClose }: TeamOrchestratorProps) {
|
||||
updateMemberRole,
|
||||
setSelectedTask,
|
||||
setSelectedMember,
|
||||
startDevQALoop,
|
||||
} = useTeamStore();
|
||||
|
||||
const { clones } = useGatewayStore();
|
||||
const clones = useAgentStore((s) => s.clones);
|
||||
|
||||
useEffect(() => {
|
||||
if (isOpen) {
|
||||
@@ -405,6 +407,22 @@ export function TeamOrchestrator({ isOpen, onClose }: TeamOrchestratorProps) {
|
||||
>
|
||||
Members
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setView('review')}
|
||||
className={`px-3 py-1.5 rounded-lg text-sm font-medium flex items-center gap-1 ${
|
||||
view === 'review'
|
||||
? 'bg-yellow-100 text-yellow-700 dark:bg-yellow-900/30 dark:text-yellow-300'
|
||||
: 'text-gray-500 hover:text-gray-700 dark:text-gray-400'
|
||||
}`}
|
||||
>
|
||||
<RefreshCw className="w-4 h-4" />
|
||||
Review
|
||||
{activeTeam.activeLoops.length > 0 && (
|
||||
<span className="ml-1 px-1.5 py-0.5 text-xs bg-yellow-200 dark:bg-yellow-800 rounded-full">
|
||||
{activeTeam.activeLoops.length}
|
||||
</span>
|
||||
)}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Tasks View */}
|
||||
@@ -484,6 +502,62 @@ export function TeamOrchestrator({ isOpen, onClose }: TeamOrchestratorProps) {
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Review View - Dev↔QA Loop */}
|
||||
{view === 'review' && (
|
||||
<div className="flex-1 p-6 overflow-y-auto">
|
||||
<div className="flex items-center justify-between mb-4">
|
||||
<h3 className="font-semibold text-gray-900 dark:text-white">Dev↔QA Review Loops</h3>
|
||||
<button
|
||||
onClick={async () => {
|
||||
// Start a new Dev↔QA loop with the first available task and members
|
||||
if (activeTeam.tasks.length > 0 && activeTeam.members.length >= 2) {
|
||||
const devMember = activeTeam.members.find(m => m.role === 'developer');
|
||||
const reviewerMember = activeTeam.members.find(m => m.role === 'reviewer');
|
||||
if (devMember && reviewerMember) {
|
||||
const task = activeTeam.tasks.find(t => t.status === 'pending' || t.status === 'in_progress');
|
||||
if (task) {
|
||||
await startDevQALoop(activeTeam.id, task.id, devMember.id, reviewerMember.id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}}
|
||||
disabled={activeTeam.tasks.length === 0 || activeTeam.members.length < 2}
|
||||
className="flex items-center gap-1 px-3 py-1.5 text-sm bg-yellow-500 text-white rounded-lg hover:bg-yellow-600 disabled:opacity-50 disabled:cursor-not-allowed"
|
||||
>
|
||||
<Plus className="w-4 h-4" />
|
||||
Start Review Loop
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{activeTeam.activeLoops.length === 0 ? (
|
||||
<div className="text-center py-8 text-gray-500 dark:text-gray-400">
|
||||
<RefreshCw className="w-12 h-12 mx-auto mb-4 text-gray-300 dark:text-gray-600" />
|
||||
<p>No active review loops.</p>
|
||||
<p className="text-sm mt-2">Add tasks and members, then start a Dev↔QA loop.</p>
|
||||
</div>
|
||||
) : (
|
||||
<div className="space-y-4">
|
||||
{activeTeam.activeLoops.map(loop => {
|
||||
const task = activeTeam.tasks.find(t => t.id === loop.taskId);
|
||||
const developer = activeTeam.members.find(m => m.id === loop.developerId);
|
||||
const reviewer = activeTeam.members.find(m => m.id === loop.reviewerId);
|
||||
|
||||
return (
|
||||
<DevQALoopPanel
|
||||
key={loop.id}
|
||||
loop={loop}
|
||||
teamId={activeTeam.id}
|
||||
developerName={developer?.name || 'Unknown Developer'}
|
||||
reviewerName={reviewer?.name || 'Unknown Reviewer'}
|
||||
taskTitle={task?.title || 'Unknown Task'}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex-1 flex items-center justify-center text-gray-500 dark:text-gray-400">
|
||||
|
||||
Reference in New Issue
Block a user