## Sidebar Enhancement - Change tabs to icon + small label layout for better space utilization - Add Teams tab with team collaboration entry point ## Settings Page Improvements - Connect theme toggle to gatewayStore.saveQuickConfig for persistence - Remove OpenFang backend download section, simplify UI - Add time range filter to UsageStats (7d/30d/all) - Add stat cards with icons (sessions, messages, input/output tokens) - Add token usage overview bar chart - Add 8 ZCLAW system skill definitions with categories ## Bug Fixes - Fix ChannelList duplicate content with deduplication logic - Integrate CreateTriggerModal in TriggersPanel - Add independent SecurityStatusPanel with 12 default enabled layers - Change workflow view to use SchedulerPanel as unified entry ## New Components - CreateTriggerModal: Event trigger creation modal - HandApprovalModal: Hand approval workflow dialog - HandParamsForm: Enhanced Hand parameter form - SecurityLayersPanel: 16-layer security status display ## Architecture - Add TOML config parsing support (toml-utils.ts, config-parser.ts) - Add request timeout and retry mechanism (request-helper.ts) - Add secure token storage (secure-storage.ts, secure_storage.rs) ## Tests - Add unit tests for config-parser, toml-utils, request-helper - Add team-client and teamStore tests ## Documentation - Update SYSTEM_ANALYSIS.md with Phase 8 completion - UI completion: 100% (30/30 components) - API coverage: 93% (63/68 endpoints) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
263 lines · 6.3 KiB · TypeScript
/**
 * Config Parser Tests
 *
 * Tests for configuration parsing and validation.
 */
import { describe, it, expect } from 'vitest';
|
|
import {
|
|
configParser,
|
|
ConfigParseError,
|
|
ConfigValidationError,
|
|
} from '../src/lib/config-parser';
|
|
import type { OpenFangConfig } from '../src/types/config';
|
|
|
|
describe('configParser', () => {
|
|
const validToml = `
|
|
# Valid OpenFang configuration
|
|
[server]
|
|
host = "127.0.0.1"
|
|
port = 4200
|
|
|
|
websocket_port = 4200
|
|
websocket_path = "/ws"
|
|
|
|
[agent.defaults]
|
|
workspace = "~/.openfang/workspace"
|
|
default_model = "gpt-4"
|
|
|
|
[llm]
|
|
default_provider = "openai"
|
|
default_model = "gpt-4"
|
|
`;
|
|
|
|
describe('parseConfig', () => {
|
|
it('should parse valid TOML configuration', () => {
|
|
const config = configParser.parseConfig(validToml);
|
|
|
|
expect(config).toBeDefined();
|
|
expect(config.server).toEqual({
|
|
host: '127.0.0.1',
|
|
port: 4200,
|
|
websocket_port: 4200,
|
|
websocket_path: '/ws',
|
|
});
|
|
expect(config.agent).toBeDefined();
|
|
expect(config.agent.defaults).toEqual({
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: 'gpt-4',
|
|
});
|
|
expect(config.llm).toEqual({
|
|
default_provider: 'openai',
|
|
default_model: 'gpt-4',
|
|
});
|
|
});
|
|
|
|
});
|
|
|
|
describe('validateConfig', () => {
|
|
it('should validate correct configuration', () => {
|
|
const config: OpenFangConfig = {
|
|
server: {
|
|
host: '127.0.0.1',
|
|
port: 4200,
|
|
},
|
|
agent: {
|
|
defaults: {
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: 'gpt-4',
|
|
},
|
|
},
|
|
llm: {
|
|
default_provider: 'openai',
|
|
default_model: 'gpt-4',
|
|
},
|
|
};
|
|
|
|
const result = configParser.validateConfig(config);
|
|
|
|
expect(result.valid).toBe(true);
|
|
expect(result.errors).toHaveLength(0);
|
|
});
|
|
|
|
it('should detect missing required fields', () => {
|
|
const config = {
|
|
server: {
|
|
host: '127.0.0.1',
|
|
// missing port
|
|
},
|
|
};
|
|
|
|
const result = configParser.validateConfig(config);
|
|
|
|
expect(result.valid).toBe(false);
|
|
expect(result.errors.length).toBeGreaterThan(0);
|
|
});
|
|
|
|
it('should validate port range', () => {
|
|
const config = {
|
|
server: {
|
|
host: '127.0.0.1',
|
|
port: 99999, // invalid port
|
|
},
|
|
agent: {
|
|
defaults: {
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: 'gpt-4',
|
|
},
|
|
},
|
|
llm: {
|
|
default_provider: 'openai',
|
|
default_model: 'gpt-4',
|
|
},
|
|
};
|
|
|
|
const result = configParser.validateConfig(config);
|
|
|
|
expect(result.valid).toBe(false);
|
|
const portError = result.errors.find(e => e.path === 'server.port');
|
|
expect(portError).toBeDefined();
|
|
});
|
|
|
|
it('should detect empty required fields', () => {
|
|
const config = {
|
|
server: {
|
|
host: '',
|
|
port: 4200,
|
|
},
|
|
agent: {
|
|
defaults: {
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: '',
|
|
},
|
|
},
|
|
llm: {
|
|
default_provider: '',
|
|
default_model: 'gpt-4',
|
|
},
|
|
};
|
|
|
|
const result = configParser.validateConfig(config);
|
|
|
|
expect(result.valid).toBe(false);
|
|
});
|
|
});
|
|
|
|
describe('parseAndValidate', () => {
|
|
it('should parse and validate valid configuration', () => {
|
|
const config = configParser.parseAndValidate(validToml);
|
|
expect(config).toBeDefined();
|
|
expect(config.server.host).toBe('127.0.0.1');
|
|
});
|
|
|
|
it('should throw on invalid configuration', () => {
|
|
const invalidToml = `
|
|
[server]
|
|
host = "127.0.0.1"
|
|
# missing port
|
|
`;
|
|
|
|
expect(() => configParser.parseAndValidate(invalidToml)).toThrow(ConfigValidationError);
|
|
});
|
|
});
|
|
|
|
describe('stringifyConfig', () => {
|
|
it('should stringify configuration to TOML', () => {
|
|
const config: OpenFangConfig = {
|
|
server: {
|
|
host: '127.0.0.1',
|
|
port: 4200,
|
|
},
|
|
agent: {
|
|
defaults: {
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: 'gpt-4',
|
|
},
|
|
},
|
|
llm: {
|
|
default_provider: 'openai',
|
|
default_model: 'gpt-4',
|
|
},
|
|
};
|
|
|
|
const result = configParser.stringifyConfig(config);
|
|
expect(result).toContain('host = "127.0.0.1"');
|
|
expect(result).toContain('port = 4200');
|
|
});
|
|
});
|
|
|
|
describe('extractMetadata', () => {
|
|
it('should extract metadata from TOML content', () => {
|
|
const content = `
|
|
[server]
|
|
host = "127.0.0.1"
|
|
port = 4200
|
|
|
|
[llm.providers]
|
|
api_key = "\${API_KEY}"
|
|
`;
|
|
const metadata = configParser.extractMetadata(content, '/path/to/config.toml');
|
|
|
|
expect(metadata.path).toBe('/path/to/config.toml');
|
|
expect(metadata.name).toBe('config.toml');
|
|
expect(metadata.envVars).toContain('API_KEY');
|
|
expect(metadata.hasUnresolvedEnvVars).toBe(true);
|
|
});
|
|
|
|
it('should detect no env vars when none present', () => {
|
|
const content = `
|
|
[server]
|
|
host = "127.0.0.1"
|
|
port = 4200
|
|
`;
|
|
const metadata = configParser.extractMetadata(content, '/path/to/config.toml');
|
|
|
|
expect(metadata.envVars).toEqual([]);
|
|
expect(metadata.hasUnresolvedEnvVars).toBe(false);
|
|
});
|
|
});
|
|
|
|
describe('mergeWithDefaults', () => {
|
|
it('should merge partial config with defaults', () => {
|
|
const partial = {
|
|
server: {
|
|
port: 3000,
|
|
},
|
|
};
|
|
|
|
const result = configParser.mergeWithDefaults(partial);
|
|
|
|
expect(result.server?.port).toBe(3000);
|
|
expect(result.server?.host).toBe('127.0.0.1'); // from defaults
|
|
});
|
|
});
|
|
|
|
describe('isOpenFangConfig', () => {
|
|
it('should return true for valid config', () => {
|
|
const config: OpenFangConfig = {
|
|
server: {
|
|
host: '127.0.0.1',
|
|
port: 4200,
|
|
},
|
|
agent: {
|
|
defaults: {
|
|
workspace: '~/.openfang/workspace',
|
|
default_model: 'gpt-4',
|
|
},
|
|
},
|
|
llm: {
|
|
default_provider: 'openai',
|
|
default_model: 'gpt-4',
|
|
},
|
|
};
|
|
|
|
expect(configParser.isOpenFangConfig(config)).toBe(true);
|
|
});
|
|
|
|
it('should return false for invalid config', () => {
|
|
expect(configParser.isOpenFangConfig(null)).toBe(false);
|
|
expect(configParser.isOpenFangConfig({})).toBe(false);
|
|
});
|
|
});
|
|
});
|