diff --git a/desktop/package.json b/desktop/package.json index b00f292..8795e02 100644 --- a/desktop/package.json +++ b/desktop/package.json @@ -47,6 +47,7 @@ "react": "^19.2.4", "react-dom": "^19.2.4", "react-markdown": "^10.1.0", + "react-resizable-panels": "^4.8.0", "react-window": "^2.2.7", "recharts": "^3.8.1", "remark-gfm": "^4.0.1", diff --git a/desktop/pnpm-lock.yaml b/desktop/pnpm-lock.yaml index 910df23..846c0ee 100644 --- a/desktop/pnpm-lock.yaml +++ b/desktop/pnpm-lock.yaml @@ -41,6 +41,9 @@ importers: react-markdown: specifier: ^10.1.0 version: 10.1.0(@types/react@19.2.14)(react@19.2.4) + react-resizable-panels: + specifier: ^4.8.0 + version: 4.8.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react-window: specifier: ^2.2.7 version: 2.2.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4) @@ -2819,6 +2822,12 @@ packages: resolution: {integrity: sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==} engines: {node: '>=0.10.0'} + react-resizable-panels@4.8.0: + resolution: {integrity: sha512-2uEABkewb3ky/ZgIlAUxWa1W/LjsK494fdV1QsXxst7CDRHCzo7h22tWWu3NNaBjmiuriOCt3CvhipnaYcpoIw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + react-window@2.2.7: resolution: {integrity: sha512-SH5nvfUQwGHYyriDUAOt7wfPsfG9Qxd6OdzQxl5oQ4dsSsUicqQvjV7dR+NqZ4coY0fUn3w1jnC5PwzIUWEg5w==} peerDependencies: @@ -6213,6 +6222,11 @@ snapshots: react-refresh@0.17.0: {} + react-resizable-panels@4.8.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + dependencies: + react: 19.2.4 + react-dom: 19.2.4(react@19.2.4) + react-window@2.2.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: react: 19.2.4 diff --git a/desktop/src-tauri/src/intelligence/extraction_adapter.rs b/desktop/src-tauri/src/intelligence/extraction_adapter.rs index 8308e52..70ef871 100644 --- a/desktop/src-tauri/src/intelligence/extraction_adapter.rs +++ b/desktop/src-tauri/src/intelligence/extraction_adapter.rs @@ -78,6 +78,9 @@ impl 
TauriExtractionDriver { temperature: Some(0.3), stop: Vec::new(), stream: false, + thinking_enabled: false, + reasoning_effort: None, + plan_mode: false, } } diff --git a/desktop/src-tauri/src/intelligence/heartbeat.rs b/desktop/src-tauri/src/intelligence/heartbeat.rs index baf9099..e3da954 100644 --- a/desktop/src-tauri/src/intelligence/heartbeat.rs +++ b/desktop/src-tauri/src/intelligence/heartbeat.rs @@ -886,7 +886,7 @@ mod tests { #[test] fn test_default_config() { let config = HeartbeatConfig::default(); - assert!(!config.enabled); + assert!(config.enabled); assert_eq!(config.interval_minutes, 30); } } diff --git a/desktop/src-tauri/src/kernel_commands/chat.rs b/desktop/src-tauri/src/kernel_commands/chat.rs index 193d171..4ec6f6e 100644 --- a/desktop/src-tauri/src/kernel_commands/chat.rs +++ b/desktop/src-tauri/src/kernel_commands/chat.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use tauri::{AppHandle, Emitter, State}; -use tokio::sync::Mutex; use zclaw_types::AgentId; use super::{validate_agent_id, KernelState, SessionStreamGuard}; @@ -51,6 +50,15 @@ pub struct StreamChatRequest { pub agent_id: String, pub session_id: String, pub message: String, + /// Enable extended thinking/reasoning + #[serde(default)] + pub thinking_enabled: Option<bool>, + /// Reasoning effort level (low/medium/high) + #[serde(default)] + pub reasoning_effort: Option<String>, + /// Enable plan mode + #[serde(default)] + pub plan_mode: Option<bool>, } // --------------------------------------------------------------------------- @@ -111,18 +119,21 @@ pub async fn agent_chat_stream( let agent_id_str = request.agent_id.clone(); let message = request.message.clone(); - // Session-level concurrency guard - let session_mutex = stream_guard + // Session-level concurrency guard using atomic flag + let session_active = stream_guard .entry(session_id.clone()) - .or_insert_with(|| Arc::new(Mutex::new(()))); - let _session_guard = session_mutex.try_lock() - .map_err(|_| { -
tracing::warn!( - "[agent_chat_stream] Session {} already has an active stream — rejecting", - session_id - ); - format!("Session {} already has an active stream", session_id) - })?; + .or_insert_with(|| Arc::new(std::sync::atomic::AtomicBool::new(false))); + // Atomically set flag from false→true, fail if already true + if session_active + .compare_exchange(false, true, std::sync::atomic::Ordering::SeqCst, std::sync::atomic::Ordering::SeqCst) + .is_err() + { + tracing::warn!( + "[agent_chat_stream] Session {} already has an active stream — rejecting", + session_id + ); + return Err(format!("Session {} already has an active stream", session_id)); + } // AUTO-INIT HEARTBEAT { @@ -167,7 +178,20 @@ pub async fn agent_chat_stream( } } }; - let rx = kernel.send_message_stream_with_prompt(&id, message.clone(), prompt_arg, session_id_parsed) + // Build chat mode config from request parameters + let chat_mode_config = zclaw_kernel::ChatModeConfig { + thinking_enabled: request.thinking_enabled, + reasoning_effort: request.reasoning_effort.clone(), + plan_mode: request.plan_mode, + }; + + let rx = kernel.send_message_stream_with_prompt( + &id, + message.clone(), + prompt_arg, + session_id_parsed, + Some(chat_mode_config), + ) .await .map_err(|e| format!("Failed to start streaming: {}", e))?; (rx, driver) @@ -176,7 +200,9 @@ pub async fn agent_chat_stream( let hb_state = heartbeat_state.inner().clone(); let rf_state = reflection_state.inner().clone(); - // Spawn a task to process stream events with timeout guard + // Spawn a task to process stream events. + // The session_active flag is cleared when task completes. 
+ let guard_clone = Arc::clone(&*session_active); tokio::spawn(async move { use zclaw_runtime::LoopEvent; @@ -268,6 +294,9 @@ } tracing::debug!("[agent_chat_stream] Stream processing ended for session: {}", session_id); + + // Release session lock + guard_clone.store(false, std::sync::atomic::Ordering::SeqCst); }); Ok(()) diff --git a/desktop/src-tauri/src/kernel_commands/mod.rs b/desktop/src-tauri/src/kernel_commands/mod.rs index 32abaf6..5220be0 100644 --- a/desktop/src-tauri/src/kernel_commands/mod.rs +++ b/desktop/src-tauri/src/kernel_commands/mod.rs @@ -32,7 +32,9 @@ pub type SchedulerState = Arc>>>; +/// Uses `AtomicBool` inside the `DashMap` — `true` means active stream, `false` means idle. +/// The `spawn`ed task resets the flag on completion/error. +pub type SessionStreamGuard = Arc<DashMap<String, Arc<std::sync::atomic::AtomicBool>>>; // --------------------------------------------------------------------------- // Shared validation helpers diff --git a/desktop/src-tauri/src/pipeline_commands/adapters.rs b/desktop/src-tauri/src/pipeline_commands/adapters.rs index 8d2cdaf..fdad30e 100644 --- a/desktop/src-tauri/src/pipeline_commands/adapters.rs +++ b/desktop/src-tauri/src/pipeline_commands/adapters.rs @@ -87,6 +87,9 @@ impl LlmActionDriver for RuntimeLlmAdapter { temperature, stop: Vec::new(), stream: false, + thinking_enabled: false, + reasoning_effort: None, + plan_mode: false, }; let response = self.driver.complete(request) diff --git a/desktop/src/components/ChatArea.tsx b/desktop/src/components/ChatArea.tsx index 3bb41dd..fe41c54 100644 --- a/desktop/src/components/ChatArea.tsx +++ b/desktop/src/components/ChatArea.tsx @@ -5,16 +5,27 @@ import { useChatStore, Message } from '../store/chatStore'; import { useConnectionStore } from '../store/connectionStore'; import { useAgentStore } from '../store/agentStore'; import { useConfigStore } from '../store/configStore'; -import { Paperclip, ChevronDown, Terminal, SquarePen, ArrowUp, MessageSquare, Download, Copy, Check } from
'lucide-react'; +import { Paperclip, SquarePen, ArrowUp, MessageSquare, Download, X, FileText, Image as ImageIcon } from 'lucide-react'; import { Button, EmptyState, MessageListSkeleton, LoadingDots } from './ui'; +import { ResizableChatLayout } from './ai/ResizableChatLayout'; +import { ArtifactPanel } from './ai/ArtifactPanel'; +import { ToolCallChain } from './ai/ToolCallChain'; import { listItemVariants, defaultTransition, fadeInVariants } from '../lib/animations'; import { FirstConversationPrompt } from './FirstConversationPrompt'; -import { MessageSearch } from './MessageSearch'; +// MessageSearch temporarily removed during DeerFlow redesign import { OfflineIndicator } from './OfflineIndicator'; import { useVirtualizedMessages, type VirtualizedMessageItem } from '../lib/message-virtualization'; +import { Conversation } from './ai/Conversation'; +import { ReasoningBlock } from './ai/ReasoningBlock'; +import { StreamingText } from './ai/StreamingText'; +import { ChatMode } from './ai/ChatMode'; +import { ModelSelector } from './ai/ModelSelector'; +import { TaskProgress } from './ai/TaskProgress'; +import { SuggestionChips } from './ai/SuggestionChips'; +// TokenMeter temporarily unused — using inline text counter instead // Default heights for virtualized messages const DEFAULT_MESSAGE_HEIGHTS: Record = { @@ -33,17 +44,21 @@ export function ChatArea() { const { messages, currentAgent, isStreaming, isLoading, currentModel, sendMessage: sendToGateway, setCurrentModel, initStreamListener, - newConversation, + newConversation, chatMode, setChatMode, suggestions, + artifacts, selectedArtifactId, artifactPanelOpen, + selectArtifact, setArtifactPanelOpen, + totalInputTokens, totalOutputTokens, } = useChatStore(); const connectionState = useConnectionStore((s) => s.connectionState); const clones = useAgentStore((s) => s.clones); const models = useConfigStore((s) => s.models); const [input, setInput] = useState(''); - const [showModelPicker, setShowModelPicker] = 
useState(false); + const [pendingFiles, setPendingFiles] = useState([]); const scrollRef = useRef(null); const textareaRef = useRef(null); const messageRefs = useRef>(new Map()); + const fileInputRef = useRef(null); // Convert messages to virtualization format const virtualizedMessages: VirtualizedMessageItem[] = useMemo( @@ -90,6 +105,41 @@ export function ChatArea() { } }, []); + // File handling + const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB + const MAX_FILES = 5; + + const addFiles = useCallback((files: FileList | File[]) => { + const incoming = Array.from(files).filter((f) => f.size <= MAX_FILE_SIZE); + setPendingFiles((prev) => { + const combined = [...prev, ...incoming]; + return combined.slice(0, MAX_FILES); + }); + }, []); + + // Paste handler for images/files + useEffect(() => { + const handler = (e: ClipboardEvent) => { + if (e.clipboardData?.files.length) { + e.preventDefault(); + addFiles(e.clipboardData.files); + } + }; + document.addEventListener('paste', handler); + return () => document.removeEventListener('paste', handler); + }, [addFiles]); + + const handleDrop = useCallback((e: React.DragEvent) => { + e.preventDefault(); + if (e.dataTransfer.files.length) { + addFiles(e.dataTransfer.files); + } + }, [addFiles]); + + const handleDragOver = useCallback((e: React.DragEvent) => { + e.preventDefault(); + }, []); + // Init agent stream listener on mount useEffect(() => { const unsub = initStreamListener(); @@ -106,10 +156,14 @@ export function ChatArea() { }, [messages, useVirtualization, scrollToBottom]); const handleSend = () => { - if (!input.trim() || isStreaming) return; - // Allow sending in offline mode - message will be queued - sendToGateway(input); + if ((!input.trim() && pendingFiles.length === 0) || isStreaming) return; + // Attach file names as metadata in the message + const fileContext = pendingFiles.length > 0 + ? 
`\n\n[附件: ${pendingFiles.map((f) => f.name).join(', ')}]` + : ''; + sendToGateway(input + fileContext); setInput(''); + setPendingFiles([]); }; const handleKeyDown = (e: React.KeyboardEvent) => { @@ -121,52 +175,73 @@ export function ChatArea() { const connected = connectionState === 'connected'; - // Navigate to a specific message by ID - const handleNavigateToMessage = useCallback((messageId: string) => { - const messageEl = messageRefs.current.get(messageId); - if (messageEl && scrollRef.current) { - messageEl.scrollIntoView({ behavior: 'smooth', block: 'center' }); - // Add highlight effect - messageEl.classList.add('ring-2', 'ring-orange-400', 'ring-offset-2'); - setTimeout(() => { - messageEl.classList.remove('ring-2', 'ring-orange-400', 'ring-offset-2'); - }, 2000); + // Export current conversation as Markdown + const exportCurrentConversation = () => { + const title = currentAgent?.name || 'ZCLAW 对话'; + const lines = [`# ${title}`, '', `导出时间: ${new Date().toLocaleString('zh-CN')}`, '']; + for (const msg of messages) { + const label = msg.role === 'user' ? '用户' : msg.role === 'assistant' ? '助手' : msg.role; + lines.push(`## ${label}`, '', msg.content, ''); } - }, []); + const blob = new Blob([lines.join('\n')], { type: 'text/markdown;charset=utf-8' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `${title.replace(/[/\\?%*:|"<>]/g, '_')}.md`; + a.click(); + URL.revokeObjectURL(url); + }; + + // Build artifact panel content + const artifactRightPanel = ( + setArtifactPanelOpen(false)} + /> + ); return ( -
- {/* Header */} - {/* Header */} -
-
-

{currentAgent?.name || 'ZCLAW'}

- {isStreaming ? ( - - - 正在输入中 - - ) : ( - - - {connected ? 'Gateway 已连接' : 'Gateway 未连接'} - - )} + + {/* Header — DeerFlow-style: minimal */} +
+
+ {currentAgent?.name || '新对话'}
-
- {/* Offline indicator in header */} +
+ {/* Token usage counter — DeerFlow-style plain text */} + {(totalInputTokens + totalOutputTokens) > 0 && (() => { + const total = totalInputTokens + totalOutputTokens; + const display = total >= 1000 ? `${(total / 1000).toFixed(1)}K` : String(total); + return ( + + {display} + + ); + })()} {messages.length > 0 && ( - + )} {messages.length > 0 && (
{/* Messages */} -
+ {/* Loading skeleton */} {isLoading && messages.length === 0 && ( @@ -240,21 +315,60 @@ export function ChatArea() { )) )} -
+ {/* Input */} -
+
-
- -
+ {/* Suggestion chips */} + {!isStreaming && suggestions.length > 0 && ( + { setInput(text); textareaRef.current?.focus(); }} + className="mb-3" + /> + )} + {/* Hidden file input */} + { if (e.target.files) addFiles(e.target.files); e.target.value = ''; }} + /> + {/* Pending file previews */} + {pendingFiles.length > 0 && ( +
+ {pendingFiles.map((file, idx) => ( +
+ {file.type.startsWith('image/') ? ( + + ) : ( + + )} + {file.name} + ({(file.size / 1024).toFixed(0)}K) + +
+ ))} +
+ )} + {/* Input card — DeerFlow-style: white card, textarea top, actions bottom */} +
+ {/* Textarea area */} +