fix(desktop): DeerFlow UI — ChatArea refactor + ai-elements + dead CSS cleanup

ChatArea retry button uses setInput instead of direct sendToGateway,
fix bootstrap spinner stuck for non-logged-in users,
remove dead CSS (aurora-title/sidebar-open/quick-action-chips),
add ai components (ReasoningBlock/StreamingText/ChatMode/ModelSelector/TaskProgress),
add ClassroomPlayer + ResizableChatLayout + artifact panel

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
iven
2026-04-02 19:24:44 +08:00
parent d40c4605b2
commit 28299807b6
70 changed files with 4938 additions and 618 deletions

View File

@@ -10,6 +10,44 @@ use crate::config::SaaSConfig;
use crate::workers::WorkerDispatcher;
use crate::cache::AppCache;
// ============ SpawnLimiter ============
/// 可复用的并发限制器,基于 Arc<Semaphore>。
/// 复用 SSE_SPAWN_SEMAPHORE 模式,为 Worker、中间件等场景提供统一门控。
#[derive(Clone)]
pub struct SpawnLimiter {
semaphore: Arc<tokio::sync::Semaphore>,
name: &'static str,
}
impl SpawnLimiter {
pub fn new(name: &'static str, max_permits: usize) -> Self {
Self {
semaphore: Arc::new(tokio::sync::Semaphore::new(max_permits)),
name,
}
}
/// 尝试获取 permit满时返回 None适用于可丢弃的操作如 usage 记录)
pub fn try_acquire(&self) -> Option<tokio::sync::OwnedSemaphorePermit> {
self.semaphore.clone().try_acquire_owned().ok()
}
/// 异步等待 permit适用于不可丢弃的操作如 Worker 任务)
pub async fn acquire(&self) -> tokio::sync::OwnedSemaphorePermit {
self.semaphore
.clone()
.acquire_owned()
.await
.expect("SpawnLimiter semaphore closed unexpectedly")
}
pub fn name(&self) -> &'static str { self.name }
pub fn available(&self) -> usize { self.semaphore.available_permits() }
}
// ============ AppState ============
/// 全局应用状态,通过 Axum State 共享
#[derive(Clone)]
pub struct AppState {
@@ -33,10 +71,20 @@ pub struct AppState {
pub shutdown_token: CancellationToken,
/// 应用缓存: Model/Provider/队列计数器
pub cache: AppCache,
/// Worker spawn 并发限制器
pub worker_limiter: SpawnLimiter,
/// 限流事件批量累加器: key → 待写入计数
pub rate_limit_batch: Arc<dashmap::DashMap<String, i64>>,
}
impl AppState {
pub fn new(db: PgPool, config: SaaSConfig, worker_dispatcher: WorkerDispatcher, shutdown_token: CancellationToken) -> anyhow::Result<Self> {
pub fn new(
db: PgPool,
config: SaaSConfig,
worker_dispatcher: WorkerDispatcher,
shutdown_token: CancellationToken,
worker_limiter: SpawnLimiter,
) -> anyhow::Result<Self> {
let jwt_secret = config.jwt_secret()?;
let rpm = config.rate_limit.requests_per_minute;
Ok(Self {
@@ -50,6 +98,8 @@ impl AppState {
worker_dispatcher,
shutdown_token,
cache: AppCache::new(),
worker_limiter,
rate_limit_batch: Arc::new(dashmap::DashMap::new()),
})
}
@@ -96,4 +146,60 @@ impl AppState {
tracing::warn!("Failed to dispatch log_operation: {}", e);
}
}
/// Flushes accumulated rate-limit event counts to the database in one batch.
///
/// Uses a swap-to-zero protocol: each counter is atomically reset to 0 while
/// its value is captured, and the map entry is only deleted after the DB
/// insert succeeds. If the insert fails, the captured counts are added back
/// onto the (still-present) entries, so no events are lost — the middleware
/// keeps incrementing these same keys concurrently throughout.
///
/// `max_batch` caps how many distinct keys are drained per invocation.
pub async fn flush_rate_limit_batch(&self, max_batch: usize) {
// Phase 1: collect up to `max_batch` keys with non-zero counts, then reset
// each counter to zero instead of removing the entry. Keeping the entry
// means that if the DB write fails, new middleware increments continue to
// accumulate on the existing key and the restore below is a simple add.
let mut batch: Vec<(String, i64)> = Vec::with_capacity(max_batch.min(64));
let keys: Vec<String> = self.rate_limit_batch.iter()
.filter(|e| *e.value() > 0)
.take(max_batch)
.map(|e| e.key().clone())
.collect();
for key in &keys {
// Atomically swap the counter to 0, taking its current value.
// (get_mut holds the shard lock, so the read-then-zero is race-free.)
if let Some(mut entry) = self.rate_limit_batch.get_mut(key) {
if *entry > 0 {
batch.push((key.clone(), *entry));
*entry = 0; // zero out rather than delete
}
}
}
if batch.is_empty() { return; }
// Bulk insert via UNNEST: one round-trip for the whole batch.
let keys_buf: Vec<String> = batch.iter().map(|(k, _)| k.clone()).collect();
let counts: Vec<i64> = batch.iter().map(|(_, c)| *c).collect();
let result = sqlx::query(
"INSERT INTO rate_limit_events (key, window_start, count)
SELECT u.key, NOW(), u.cnt FROM UNNEST($1::text[], $2::bigint[]) AS u(key, cnt)"
)
.bind(&keys_buf)
.bind(&counts)
.execute(&self.db)
.await;
if let Err(e) = result {
// DB write failed: add the drained counts back so no data is lost.
tracing::warn!("[RateLimitBatch] flush failed ({} entries), restoring counts: {}", batch.len(), e);
for (key, count) in &batch {
if let Some(mut entry) = self.rate_limit_batch.get_mut(key) {
*entry += *count;
}
}
} else {
// DB write succeeded: delete the zeroed entries. remove_if skips any
// entry the middleware has already re-incremented in the meantime.
for (key, _) in &batch {
self.rate_limit_batch.remove_if(key, |_, v| *v == 0);
}
tracing::debug!("[RateLimitBatch] flushed {} entries", batch.len());
}
}
}