- Add test helper library with assertion functions (scripts/lib/test-helpers.sh)
- Add gateway integration test script (scripts/tests/gateway-test.sh)
- Add configuration validation tool (scripts/validate-config.ts)
- Add health-check.ts library with Tauri command wrappers
- Add HealthStatusIndicator component to ConnectionStatus.tsx
- Add E2E test specs for memory, settings, and team collaboration
- Update ZCLAW-DEEP-ANALYSIS.md to reflect actual project state

Key improvements:
- Store architecture now properly documented as migrated
- Tauri backend shown as 85-90% complete
- Component integration status clarified

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
246 lines
5.1 KiB
Bash
246 lines
5.1 KiB
Bash
#!/bin/bash
# ZCLAW Test Helper Functions
# Provides common utilities for test scripts.
# Intended to be sourced by test scripts, not executed directly.

# ANSI color codes used by the log_* helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
GRAY='\033[0;90m'
NC='\033[0m' # No Color

# Global counters, maintained by the assert_* functions and reported
# by print_summary / generate_json_report.
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0
# Output functions.
# _emit_tagged is an internal helper: prints "<color>[TAG]<reset> message".
# The log_* wrappers below are the public API.
_emit_tagged() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info() {
  _emit_tagged "$BLUE" "INFO" "$1"
}

log_success() {
  _emit_tagged "$GREEN" "PASS" "$1"
}

log_error() {
  _emit_tagged "$RED" "FAIL" "$1"
}

log_warning() {
  _emit_tagged "$YELLOW" "WARN" "$1"
}

log_skip() {
  _emit_tagged "$GRAY" "SKIP" "$1"
}
# Assertion functions.

# assert_equals <expected> <actual> <message>
# Records one test result; passes when the two values are string-equal.
# Returns 0 on pass, 1 on fail (and prints the mismatch details).
assert_equals() {
  local expected="$1" actual="$2" message="$3"

  TESTS_RUN=$((TESTS_RUN + 1))

  if [ "$expected" != "$actual" ]; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  Expected: $expected"
    echo "  Actual: $actual"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# assert_not_empty <value> <message>
# Records one test result; passes when <value> is a non-empty string.
# Returns 0 on pass, 1 on fail.
assert_not_empty() {
  local value="$1" message="$2"

  TESTS_RUN=$((TESTS_RUN + 1))

  if [ -z "$value" ]; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  Value is empty"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# assert_file_exists <file> <message>
# Records one test result; passes when <file> exists and is a regular file.
# Returns 0 on pass, 1 on fail.
assert_file_exists() {
  local file="$1" message="$2"

  TESTS_RUN=$((TESTS_RUN + 1))

  if [ ! -f "$file" ]; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  File not found: $file"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# assert_command_exists <cmd> <message>
# Records one test result; passes when <cmd> is resolvable (builtin,
# function, or executable on PATH). Returns 0 on pass, 1 on fail.
assert_command_exists() {
  local cmd="$1" message="$2"

  TESTS_RUN=$((TESTS_RUN + 1))

  if ! command -v "$cmd" > /dev/null 2>&1; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  Command not found: $cmd"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# assert_http_status <url> <expected_status> <message> [timeout]
# Records one test result; passes when a GET on <url> returns the expected
# HTTP status code within [timeout] seconds (default 10).
# Returns 0 on pass, 1 on fail.
assert_http_status() {
  local url="$1" expected_status="$2" message="$3"
  local timeout="${4:-10}"
  local status

  TESTS_RUN=$((TESTS_RUN + 1))

  # curl prints only the numeric status code; body is discarded.
  status=$(curl -s -o /dev/null -w "%{http_code}" --max-time "$timeout" "$url" 2>/dev/null)

  if [ "$status" != "$expected_status" ]; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  Expected HTTP $expected_status, got: $status"
    echo "  URL: $url"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# assert_port_open <host> <port> <message> [timeout]
# Records one test result; passes when a TCP connection to host:port
# succeeds within [timeout] seconds (default 5). Uses bash's /dev/tcp.
# Returns 0 on pass, 1 on fail.
assert_port_open() {
  local host="$1" port="$2" message="$3"
  local timeout="${4:-5}"

  TESTS_RUN=$((TESTS_RUN + 1))

  if ! timeout "$timeout" bash -c "echo > /dev/tcp/$host/$port" 2>/dev/null; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    log_error "$message"
    echo "  Port $port is not accessible on $host"
    return 1
  fi

  TESTS_PASSED=$((TESTS_PASSED + 1))
  log_success "$message"
  return 0
}
# Utility functions.

# wait_for_port <host> <port> [timeout] [message]
# Polls once per second until a TCP connection to host:port succeeds or
# [timeout] seconds (default 30) elapse, printing a progress dot per
# failed attempt. Returns 0 when reachable, 1 on timeout.
wait_for_port() {
  local host="$1"
  local port="$2"
  local timeout="${3:-30}"
  local message="${4:-Waiting for port $port...}"

  log_info "$message"

  local count=0
  while [ $count -lt $timeout ]; do
    if timeout 1 bash -c "echo > /dev/tcp/$host/$port" 2>/dev/null; then
      # Bug fix: terminate the progress-dot line on success too;
      # previously only the timeout path printed the newline, leaving
      # the cursor mid-line when the port came up after some dots.
      [ $count -gt 0 ] && echo ""
      return 0
    fi
    sleep 1
    count=$((count + 1))
    echo -n "."
  done
  echo ""
  return 1
}
# wait_for_http <url> [expected_status] [timeout] [message]
# Polls once per second until a GET on <url> returns [expected_status]
# (default 200) or [timeout] seconds (default 30) elapse, printing a
# progress dot per failed attempt. Returns 0 on match, 1 on timeout.
wait_for_http() {
  local url="$1"
  local expected_status="${2:-200}"
  local timeout="${3:-30}"
  local message="${4:-Waiting for HTTP response...}"

  log_info "$message"

  local count=0
  local status
  while [ $count -lt $timeout ]; do
    status=$(curl -s -o /dev/null -w "%{http_code}" --max-time 2 "$url" 2>/dev/null)
    if [ "$status" = "$expected_status" ]; then
      # Bug fix: terminate the progress-dot line on success too;
      # previously only the timeout path printed the newline.
      [ $count -gt 0 ] && echo ""
      return 0
    fi
    sleep 1
    count=$((count + 1))
    echo -n "."
  done
  echo ""
  return 1
}
# Summary functions.

# print_summary
# Prints a formatted summary of the global test counters.
# Returns 0 when no test failed, 1 otherwise.
print_summary() {
  local bar="==================================="

  echo ""
  echo "$bar"
  echo "  Test Summary"
  echo "$bar"
  echo "  Total: $TESTS_RUN"
  echo -e "  ${GREEN}Passed: $TESTS_PASSED${NC}"
  echo -e "  ${RED}Failed: $TESTS_FAILED${NC}"
  echo "$bar"

  # The test expression's status doubles as the return value:
  # 0 when TESTS_FAILED is 0, 1 otherwise.
  [ "$TESTS_FAILED" -eq 0 ]
}
# reset_counters
# Zero all global test counters (useful between independent suites).
reset_counters() {
  TESTS_RUN=0 TESTS_PASSED=0 TESTS_FAILED=0
}
# JSON report generation.

# generate_json_report <output_file> <test_name>
# Writes a small JSON summary of the current counters to <output_file>,
# stamped with the current UTC time in ISO-8601 format.
generate_json_report() {
  local output_file="$1"
  local test_name="$2"
  local timestamp
  timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

  # Bug fix: escape backslashes and double quotes in the test name so the
  # emitted document stays valid JSON even for names like: run "A" \ B.
  # Backslashes must be escaped first to avoid double-escaping the quotes.
  local escaped_name=${test_name//\\/\\\\}
  escaped_name=${escaped_name//\"/\\\"}

  local status="failed"
  [ "$TESTS_FAILED" -eq 0 ] && status="passed"

  cat > "$output_file" << EOF
{
  "testName": "$escaped_name",
  "timestamp": "$timestamp",
  "summary": {
    "total": $TESTS_RUN,
    "passed": $TESTS_PASSED,
    "failed": $TESTS_FAILED
  },
  "status": "$status"
}
EOF
}