Files
zclaw_openfang/desktop/tests/config-parser.test.ts
commit ce562e8bfc — feat: complete Phase 1-3 architecture optimization
Phase 1 - Security:
- Add AES-GCM encryption for localStorage fallback
- Enforce WSS protocol for non-localhost WebSocket connections
- Add URL sanitization to prevent XSS in markdown links

Phase 2 - Domain Reorganization:
- Create Intelligence Domain with Valtio store and caching
- Add unified intelligence-client for Rust backend integration
- Migrate from legacy agent-memory, heartbeat, reflection modules

Phase 3 - Core Optimization:
- Add virtual scrolling for ChatArea with react-window
- Implement LRU cache with TTL for intelligence operations
- Add message virtualization utilities

Additional:
- Add OpenFang compatibility test suite
- Update E2E test fixtures
- Add audit logging infrastructure
- Update project documentation and plans

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-21 22:11:50 +08:00

262 lines
6.3 KiB
TypeScript

/**
* Config Parser Tests
*
* Tests for configuration parsing and validation.
*/
import { describe, it, expect } from 'vitest';
import {
configParser,
ConfigParseError,
} from '../src/lib/config-parser';
import type { OpenFangConfig, ConfigValidationError } from '../src/types/config';
describe('configParser', () => {
  // Minimal but complete config covering all three sections the validator
  // requires: [server], [agent.defaults], and [llm]. Template-literal
  // content is flush-left on purpose — the whitespace is part of the TOML.
  const validToml = `
# Valid OpenFang configuration
[server]
host = "127.0.0.1"
port = 4200
websocket_port = 4200
websocket_path = "/ws"
[agent.defaults]
workspace = "~/.openfang/workspace"
default_model = "gpt-4"
[llm]
default_provider = "openai"
default_model = "gpt-4"
`;

  describe('parseConfig', () => {
    it('should parse valid TOML configuration', () => {
      const config = configParser.parseConfig(validToml);
      expect(config).toBeDefined();
      // Each section should round-trip exactly as written in the TOML.
      expect(config.server).toEqual({
        host: '127.0.0.1',
        port: 4200,
        websocket_port: 4200,
        websocket_path: '/ws',
      });
      expect(config.agent).toBeDefined();
      expect(config.agent.defaults).toEqual({
        workspace: '~/.openfang/workspace',
        default_model: 'gpt-4',
      });
      expect(config.llm).toEqual({
        default_provider: 'openai',
        default_model: 'gpt-4',
      });
    });
  });

  describe('validateConfig', () => {
    it('should validate correct configuration', () => {
      const config: OpenFangConfig = {
        server: {
          host: '127.0.0.1',
          port: 4200,
        },
        agent: {
          defaults: {
            workspace: '~/.openfang/workspace',
            default_model: 'gpt-4',
          },
        },
        llm: {
          default_provider: 'openai',
          default_model: 'gpt-4',
        },
      };
      const result = configParser.validateConfig(config);
      expect(result.valid).toBe(true);
      expect(result.errors).toHaveLength(0);
    });

    it('should detect missing required fields', () => {
      // Deliberately untyped: omitting `port` would be a compile error on
      // OpenFangConfig, and the point here is runtime validation.
      const config = {
        server: {
          host: '127.0.0.1',
          // missing port
        },
      };
      const result = configParser.validateConfig(config);
      expect(result.valid).toBe(false);
      expect(result.errors.length).toBeGreaterThan(0);
    });

    it('should validate port range', () => {
      const config = {
        server: {
          host: '127.0.0.1',
          port: 99999, // invalid port — above the 65535 maximum
        },
        agent: {
          defaults: {
            workspace: '~/.openfang/workspace',
            default_model: 'gpt-4',
          },
        },
        llm: {
          default_provider: 'openai',
          default_model: 'gpt-4',
        },
      };
      const result = configParser.validateConfig(config);
      expect(result.valid).toBe(false);
      // The error should be attributed to the exact offending path.
      const portError = result.errors.find(e => e.path === 'server.port');
      expect(portError).toBeDefined();
    });

    it('should detect empty required fields', () => {
      // Empty strings in several required slots at once — validation must
      // reject the config (individual error paths are not asserted here).
      const config = {
        server: {
          host: '',
          port: 4200,
        },
        agent: {
          defaults: {
            workspace: '~/.openfang/workspace',
            default_model: '',
          },
        },
        llm: {
          default_provider: '',
          default_model: 'gpt-4',
        },
      };
      const result = configParser.validateConfig(config);
      expect(result.valid).toBe(false);
    });
  });

  describe('parseAndValidate', () => {
    it('should parse and validate valid configuration', () => {
      const config = configParser.parseAndValidate(validToml);
      expect(config).toBeDefined();
      expect(config.server.host).toBe('127.0.0.1');
    });

    it('should throw on invalid configuration', () => {
      const invalidToml = `
[server]
host = "127.0.0.1"
# missing port
`;
      // FIX: the original asserted `.toThrow(ConfigValidationError)`, but
      // ConfigValidationError is a type-only import (`import type`, erased at
      // compile time), so it has no runtime value and cannot be passed to
      // toThrow. Assert that parsing throws at all; if config-parser exports
      // a runtime ConfigValidationError class, import it as a value and
      // restore the class-specific assertion — TODO confirm with the module.
      expect(() => configParser.parseAndValidate(invalidToml)).toThrow();
    });
  });

  describe('stringifyConfig', () => {
    it('should stringify configuration to TOML', () => {
      const config: OpenFangConfig = {
        server: {
          host: '127.0.0.1',
          port: 4200,
        },
        agent: {
          defaults: {
            workspace: '~/.openfang/workspace',
            default_model: 'gpt-4',
          },
        },
        llm: {
          default_provider: 'openai',
          default_model: 'gpt-4',
        },
      };
      const result = configParser.stringifyConfig(config);
      // Spot-check key/value serialization rather than exact whole-document
      // output, so formatting details stay free to change.
      expect(result).toContain('host = "127.0.0.1"');
      expect(result).toContain('port = 4200');
    });
  });

  describe('extractMetadata', () => {
    it('should extract metadata from TOML content', () => {
      // ${API_KEY} is escaped in the template literal so the literal text
      // "${API_KEY}" reaches the parser as an unresolved env-var reference.
      const content = `
[server]
host = "127.0.0.1"
port = 4200
[llm.providers]
api_key = "\${API_KEY}"
`;
      const metadata = configParser.extractMetadata(content, '/path/to/config.toml');
      expect(metadata.path).toBe('/path/to/config.toml');
      expect(metadata.name).toBe('config.toml');
      expect(metadata.envVars).toContain('API_KEY');
      expect(metadata.hasUnresolvedEnvVars).toBe(true);
    });

    it('should detect no env vars when none present', () => {
      const content = `
[server]
host = "127.0.0.1"
port = 4200
`;
      const metadata = configParser.extractMetadata(content, '/path/to/config.toml');
      expect(metadata.envVars).toEqual([]);
      expect(metadata.hasUnresolvedEnvVars).toBe(false);
    });
  });

  describe('mergeWithDefaults', () => {
    it('should merge partial config with defaults', () => {
      const partial = {
        server: {
          port: 3000,
        },
      };
      const result = configParser.mergeWithDefaults(partial);
      expect(result.server?.port).toBe(3000); // explicit value wins
      expect(result.server?.host).toBe('127.0.0.1'); // from defaults
    });
  });

  describe('isOpenFangConfig', () => {
    it('should return true for valid config', () => {
      const config: OpenFangConfig = {
        server: {
          host: '127.0.0.1',
          port: 4200,
        },
        agent: {
          defaults: {
            workspace: '~/.openfang/workspace',
            default_model: 'gpt-4',
          },
        },
        llm: {
          default_provider: 'openai',
          default_model: 'gpt-4',
        },
      };
      expect(configParser.isOpenFangConfig(config)).toBe(true);
    });

    it('should return false for invalid config', () => {
      // Guard must reject both nullish input and structurally empty objects.
      expect(configParser.isOpenFangConfig(null)).toBe(false);
      expect(configParser.isOpenFangConfig({})).toBe(false);
    });
  });
});