feat(desktop): DeerFlow visual redesign + stream hang fix + intelligence client
Some checks failed
CI / Lint & TypeCheck (push) Has been cancelled
CI / Unit Tests (push) Has been cancelled
CI / Build Frontend (push) Has been cancelled
CI / Rust Check (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled

DeerFlow frontend visual overhaul:
- Card-style input box (white rounded card, textarea top, actions bottom)
- Dropdown mode selector (闪速/思考/Pro/Ultra with icons+descriptions)
- Colored quick-action chips (小惊喜/写作/研究/收集/学习)
- Minimal top bar (title + token count + export)
- Warm gray color system (#faf9f6 bg, #f5f4f1 sidebar, #e8e6e1 border)
- DeerFlow-style sidebar (新对话/对话/智能体 nav)
- Reasoning block, tool call chain, task progress visualization
- Streaming text, model selector, suggestion chips components
- Resizable artifact panel with drag handle
- Virtualized message list for 100+ messages

Bug fixes:
- Stream hang: GatewayClient onclose code 1000 now calls onComplete
- WebView2 textarea border: CSS !important override for UA styles
- Gateway stream event handling (response/phase/tool_call types)

Intelligence client:
- Unified client with fallback drivers (compactor/heartbeat/identity/memory/reflection)
- Gateway API types and type conversions
This commit is contained in:
iven
2026-04-01 22:03:07 +08:00
parent e3b93ff96d
commit 73ff5e8c5e
43 changed files with 4817 additions and 905 deletions

View File

@@ -78,6 +78,9 @@ impl TauriExtractionDriver {
temperature: Some(0.3),
stop: Vec::new(),
stream: false,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
}
}

View File

@@ -886,7 +886,7 @@ mod tests {
#[test]
fn test_default_config() {
let config = HeartbeatConfig::default();
assert!(!config.enabled);
assert!(config.enabled);
assert_eq!(config.interval_minutes, 30);
}
}

View File

@@ -3,7 +3,6 @@
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use tauri::{AppHandle, Emitter, State};
use tokio::sync::Mutex;
use zclaw_types::AgentId;
use super::{validate_agent_id, KernelState, SessionStreamGuard};
@@ -51,6 +50,15 @@ pub struct StreamChatRequest {
pub agent_id: String,
pub session_id: String,
pub message: String,
/// Enable extended thinking/reasoning
#[serde(default)]
pub thinking_enabled: Option<bool>,
/// Reasoning effort level (low/medium/high)
#[serde(default)]
pub reasoning_effort: Option<String>,
/// Enable plan mode
#[serde(default)]
pub plan_mode: Option<bool>,
}
// ---------------------------------------------------------------------------
@@ -111,18 +119,21 @@ pub async fn agent_chat_stream(
let agent_id_str = request.agent_id.clone();
let message = request.message.clone();
// Session-level concurrency guard
let session_mutex = stream_guard
// Session-level concurrency guard using an atomic flag
let session_active = stream_guard
.entry(session_id.clone())
.or_insert_with(|| Arc::new(Mutex::new(())));
let _session_guard = session_mutex.try_lock()
.map_err(|_| {
tracing::warn!(
"[agent_chat_stream] Session {} already has an active stream — rejecting",
session_id
);
format!("Session {} already has an active stream", session_id)
})?;
.or_insert_with(|| Arc::new(std::sync::atomic::AtomicBool::new(false)));
// Atomically set flag from false→true, fail if already true
if session_active
.compare_exchange(false, true, std::sync::atomic::Ordering::SeqCst, std::sync::atomic::Ordering::SeqCst)
.is_err()
{
tracing::warn!(
"[agent_chat_stream] Session {} already has an active stream — rejecting",
session_id
);
return Err(format!("Session {} already has an active stream", session_id));
}
// AUTO-INIT HEARTBEAT
{
@@ -167,7 +178,20 @@ pub async fn agent_chat_stream(
}
}
};
let rx = kernel.send_message_stream_with_prompt(&id, message.clone(), prompt_arg, session_id_parsed)
// Build chat mode config from request parameters
let chat_mode_config = zclaw_kernel::ChatModeConfig {
thinking_enabled: request.thinking_enabled,
reasoning_effort: request.reasoning_effort.clone(),
plan_mode: request.plan_mode,
};
let rx = kernel.send_message_stream_with_prompt(
&id,
message.clone(),
prompt_arg,
session_id_parsed,
Some(chat_mode_config),
)
.await
.map_err(|e| format!("Failed to start streaming: {}", e))?;
(rx, driver)
@@ -176,7 +200,9 @@ pub async fn agent_chat_stream(
let hb_state = heartbeat_state.inner().clone();
let rf_state = reflection_state.inner().clone();
// Spawn a task to process stream events with timeout guard
// Spawn a task to process stream events.
// The session_active flag is cleared when the task completes.
let guard_clone = Arc::clone(&*session_active);
tokio::spawn(async move {
use zclaw_runtime::LoopEvent;
@@ -268,6 +294,9 @@ pub async fn agent_chat_stream(
}
tracing::debug!("[agent_chat_stream] Stream processing ended for session: {}", session_id);
// Release session lock
guard_clone.store(false, std::sync::atomic::Ordering::SeqCst);
});
Ok(())

View File

@@ -32,7 +32,9 @@ pub type SchedulerState = Arc<Mutex<Option<zclaw_kernel::scheduler::SchedulerSer
/// Session-level stream concurrency guard.
/// Prevents two concurrent `agent_chat_stream` calls from interleaving events
/// for the same session_id.
pub type SessionStreamGuard = Arc<dashmap::DashMap<String, Arc<Mutex<()>>>>;
/// Uses `AtomicBool` values in the `DashMap`: `true` means an active stream, `false` means idle.
/// The spawned task resets the flag to `false` on completion or error.
pub type SessionStreamGuard = Arc<dashmap::DashMap<String, Arc<std::sync::atomic::AtomicBool>>>;
// ---------------------------------------------------------------------------
// Shared validation helpers

View File

@@ -87,6 +87,9 @@ impl LlmActionDriver for RuntimeLlmAdapter {
temperature,
stop: Vec::new(),
stream: false,
thinking_enabled: false,
reasoning_effort: None,
plan_mode: false,
};
let response = self.driver.complete(request)