refactor(hands): 移除空壳 Hand — Whiteboard/Slideshow/Speech (Phase 5)
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

删除 3 个仅含 UI 占位的 Hand,清理 Rust 实现与前端引用:
- Rust: whiteboard.rs(422行) + slideshow.rs(797行) + speech.rs(442行)
- 前端: WhiteboardCanvas + SlideshowRenderer + speech-synth + 相关类型/常量
- 配置: 3 个 HAND.toml
- 净减 ~5400 行,Hands 9→6(启用) + Quiz/Browser/Researcher/Collector/Clip/Twitter/Reminder
This commit is contained in:
iven
2026-04-17 19:55:59 +08:00
parent 14fa7e150a
commit cb9e48f11d
21 changed files with 11 additions and 3014 deletions

View File

@@ -1,195 +0,0 @@
/**
* Speech Synthesis Service — Browser TTS via Web Speech API
*
* Provides text-to-speech playback using the browser's native SpeechSynthesis API.
* Zero external dependencies, works offline, supports Chinese and English voices.
*
* Architecture:
* - SpeechHand (Rust) returns tts_method + text + voice config
* - This service handles Browser TTS playback in the webview
* - OpenAI/Azure TTS is handled via backend API calls
*/
/** Options accepted by {@link SpeechSynthService.speak}. */
export interface SpeechSynthOptions {
  /** The text to speak aloud. */
  text: string;
  /** Voice `name` or `voiceURI`; omitted or 'default' auto-selects a voice for the language. */
  voice?: string;
  /** BCP-47 language tag (e.g. 'zh-CN'); speak() defaults this to 'zh-CN'. */
  language?: string;
  /** Speaking rate; speak() defaults to 1.0. */
  rate?: number;
  /** Voice pitch; speak() defaults to 1.0. */
  pitch?: number;
  /** Playback volume; speak() defaults to 1.0. */
  volume?: number;
}
/** Snapshot of the service's playback state, delivered to subscribers on every change. */
export interface SpeechSynthState {
  /** True while an utterance is being spoken (mirrors SpeechSynthesis.speaking). */
  playing: boolean;
  /** True while speech is paused (mirrors SpeechSynthesis.paused). */
  paused: boolean;
  /** Text of the utterance in progress, or null when idle. */
  currentText: string | null;
  /** Voices currently known to the browser; may populate asynchronously. */
  voices: SpeechSynthesisVoice[];
}
/** Listener invoked with a fresh state snapshot whenever playback or the voice list changes. */
type SpeechEventCallback = (state: SpeechSynthState) => void;
/**
 * Observable wrapper around the browser's native SpeechSynthesis API.
 *
 * Degrades gracefully when the API is unavailable (SSR, non-supporting
 * browsers): query methods return empty/false values and `speak()` rejects
 * with a descriptive error instead of throwing.
 */
class SpeechSynthService {
  /** Browser SpeechSynthesis handle, or null when the API is unavailable. */
  private synth: SpeechSynthesis | null = null;
  /** Utterance currently owned by the service; null when idle. */
  private currentUtterance: SpeechSynthesisUtterance | null = null;
  /** Subscribers notified on every state change. */
  private listeners: Set<SpeechEventCallback> = new Set();
  /** Cached voice list; refreshed when the browser fires `voiceschanged`. */
  private cachedVoices: SpeechSynthesisVoice[] = [];

  constructor() {
    // Guard for SSR / non-browser environments where `window` is undefined.
    if (typeof window !== 'undefined' && window.speechSynthesis) {
      this.synth = window.speechSynthesis;
      this.loadVoices();
      // Voices may load asynchronously (notably in Chrome), so reload on change.
      this.synth.onvoiceschanged = () => this.loadVoices();
    }
  }

  /** Refresh the voice cache from the browser and notify subscribers. */
  private loadVoices() {
    if (!this.synth) return;
    this.cachedVoices = this.synth.getVoices();
    this.notify();
  }

  /** Push the current state snapshot to every subscriber. */
  private notify() {
    const state = this.getState();
    this.listeners.forEach(cb => cb(state));
  }

  /**
   * Subscribe to state changes.
   * @returns An unsubscribe function; call it to stop receiving updates.
   */
  subscribe(callback: SpeechEventCallback): () => void {
    this.listeners.add(callback);
    return () => this.listeners.delete(callback);
  }

  /** Get a snapshot of the current playback state. */
  getState(): SpeechSynthState {
    return {
      playing: this.synth?.speaking ?? false,
      paused: this.synth?.paused ?? false,
      currentText: this.currentUtterance?.text ?? null,
      voices: this.cachedVoices,
    };
  }

  /** Check whether browser TTS is available in this environment. */
  isAvailable(): boolean {
    return this.synth != null;
  }

  /**
   * Get available voices, optionally filtered by language.
   * @param language BCP-47 tag; only the primary subtag is matched (e.g. 'zh' matches 'zh-CN').
   */
  getVoices(language?: string): SpeechSynthesisVoice[] {
    if (!language) return this.cachedVoices;
    const langPrefix = language.split('-')[0].toLowerCase();
    return this.cachedVoices.filter(v =>
      v.lang.toLowerCase().startsWith(langPrefix)
    );
  }

  /**
   * Speak text with the given options, cancelling any ongoing speech first.
   * @returns Resolves when playback finishes (or is cancelled via stop());
   *          rejects when TTS is unavailable or the browser reports an error.
   */
  speak(options: SpeechSynthOptions): Promise<void> {
    return new Promise((resolve, reject) => {
      if (!this.synth) {
        reject(new Error('Speech synthesis not available'));
        return;
      }
      // Cancel any ongoing speech before starting the new utterance.
      this.stop();
      const utterance = new SpeechSynthesisUtterance(options.text);
      this.currentUtterance = utterance;
      utterance.lang = options.language ?? 'zh-CN';
      // Use the explicitly requested voice when given, otherwise auto-select.
      if (options.voice && options.voice !== 'default') {
        const voice = this.cachedVoices.find(v =>
          v.name === options.voice || v.voiceURI === options.voice
        );
        if (voice) utterance.voice = voice;
      } else {
        this.selectBestVoice(utterance, options.language ?? 'zh-CN');
      }
      utterance.rate = options.rate ?? 1.0;
      utterance.pitch = options.pitch ?? 1.0;
      utterance.volume = options.volume ?? 1.0;
      utterance.onstart = () => {
        this.notify();
      };
      utterance.onend = () => {
        // Guard against stale callbacks: cancel() fires these asynchronously,
        // possibly after a newer speak() has replaced currentUtterance.
        if (this.currentUtterance === utterance) {
          this.currentUtterance = null;
        }
        this.notify();
        resolve();
      };
      utterance.onerror = (event) => {
        if (this.currentUtterance === utterance) {
          this.currentUtterance = null;
        }
        this.notify();
        // "canceled" is not a real error (happens on stop())
        if (event.error !== 'canceled') {
          reject(new Error(`Speech error: ${event.error}`));
        } else {
          resolve();
        }
      };
      this.synth.speak(utterance);
    });
  }

  /** Pause current speech. */
  pause() {
    this.synth?.pause();
    this.notify();
  }

  /** Resume paused speech. */
  resume() {
    this.synth?.resume();
    this.notify();
  }

  /** Stop current speech and clear the active utterance. */
  stop() {
    this.synth?.cancel();
    this.currentUtterance = null;
    this.notify();
  }

  /**
   * Auto-select the best cached voice for a language, preferring (in order)
   * high-quality names (Neural/Enhanced/Premium), local/offline voices,
   * then the first language match. Leaves the utterance untouched when no
   * voice matches the language's primary subtag.
   */
  private selectBestVoice(utterance: SpeechSynthesisUtterance, language: string) {
    const langPrefix = language.split('-')[0].toLowerCase();
    const candidates = this.cachedVoices.filter(v =>
      v.lang.toLowerCase().startsWith(langPrefix)
    );
    if (candidates.length === 0) return;
    const neural = candidates.find(v =>
      v.name.includes('Neural') || v.name.includes('Enhanced') || v.name.includes('Premium')
    );
    if (neural) {
      utterance.voice = neural;
      return;
    }
    const local = candidates.find(v => v.localService);
    if (local) {
      utterance.voice = local;
      return;
    }
    utterance.voice = candidates[0];
  }
}
// Singleton instance shared across the app; constructed eagerly at module load
// (safe under SSR: the constructor no-ops when `window` is undefined).
export const speechSynth = new SpeechSynthService();