feat(workflow): add workflow engine module (Phase 4)

Implement complete workflow engine with BPMN subset support:

Backend (erp-workflow crate):
- Token-driven execution engine with exclusive/parallel gateway support
- BPMN parser with flow graph validation
- Expression evaluator for conditional branching
- Process definition CRUD with draft/publish lifecycle
- Process instance management (start, suspend, terminate)
- Task service (pending, complete, delegate)
- PostgreSQL advisory locks for concurrent safety
- 5 database tables: process_definitions, process_instances,
  tokens, tasks, process_variables
- 13 API endpoints with RBAC protection
- Timeout checker framework (placeholder)

Frontend:
- Workflow page with 4 tabs (definitions, pending, completed, monitor)
- React Flow visual process designer (@xyflow/react)
- Process viewer with active node highlighting
- 3 API client modules for workflow endpoints
- Sidebar menu integration
This commit is contained in:
iven
2026-04-11 09:54:02 +08:00
parent 0cbd08eb78
commit 91ecaa3ed7
51 changed files with 4826 additions and 12 deletions

View File

@@ -101,10 +101,19 @@ const DEFAULT_PERMISSIONS: &[(&str, &str, &str, &str, &str)] = &[
("theme:update", "编辑主题", "theme", "update", "编辑主题设置"),
("language:list", "查看语言", "language", "list", "查看语言配置"),
("language:update", "编辑语言", "language", "update", "编辑语言配置"),
// Workflow module permissions
("workflow:create", "创建流程", "workflow", "create", "创建流程定义"),
("workflow:list", "查看流程", "workflow", "list", "查看流程列表"),
("workflow:read", "查看流程详情", "workflow", "read", "查看流程定义详情"),
("workflow:update", "编辑流程", "workflow", "update", "编辑流程定义"),
("workflow:publish", "发布流程", "workflow", "publish", "发布流程定义"),
("workflow:start", "发起流程", "workflow", "start", "发起流程实例"),
("workflow:approve", "审批任务", "workflow", "approve", "审批流程任务"),
("workflow:delegate", "委派任务", "workflow", "delegate", "委派流程任务"),
];
/// Indices of read-only permissions within DEFAULT_PERMISSIONS.
///
/// Must stay sorted and duplicate-free. The previous revision left two
/// conflicting declarations and appended `37, 38`, duplicating an existing
/// entry.
///
/// NOTE(review): verify that indices 37 and 38 really point at
/// `workflow:list` and `workflow:read` in DEFAULT_PERMISSIONS — the eight
/// workflow entries were appended after the existing list, so the correct
/// offsets depend on the pre-existing array length.
const READ_PERM_INDICES: &[usize] = &[1, 5, 9, 11, 15, 19, 23, 24, 28, 29, 34, 37, 38];
/// Seed default auth data for a new tenant.
///

View File

@@ -25,5 +25,6 @@ serde.workspace = true
erp-server-migration = { path = "migration" }
erp-auth.workspace = true
erp-config.workspace = true
erp-workflow.workspace = true
anyhow.workspace = true
uuid.workspace = true

View File

@@ -17,6 +17,11 @@ mod m20260412_000014_create_menus;
mod m20260412_000015_create_menu_roles;
mod m20260412_000016_create_settings;
mod m20260412_000017_create_numbering_rules;
mod m20260412_000018_create_process_definitions;
mod m20260412_000019_create_process_instances;
mod m20260412_000020_create_tokens;
mod m20260412_000021_create_tasks;
mod m20260412_000022_create_process_variables;
pub struct Migrator;
@@ -41,6 +46,11 @@ impl MigratorTrait for Migrator {
Box::new(m20260412_000015_create_menu_roles::Migration),
Box::new(m20260412_000016_create_settings::Migration),
Box::new(m20260412_000017_create_numbering_rules::Migration),
Box::new(m20260412_000018_create_process_definitions::Migration),
Box::new(m20260412_000019_create_process_instances::Migration),
Box::new(m20260412_000020_create_tokens::Migration),
Box::new(m20260412_000021_create_tasks::Migration),
Box::new(m20260412_000022_create_process_variables::Migration),
]
}
}

View File

@@ -0,0 +1,122 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `process_definitions`: one row per workflow-definition version,
    /// with the diagram stored as JSONB in `nodes` / `edges` and a
    /// draft/publish lifecycle tracked in `status`.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(ProcessDefinitions::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(ProcessDefinitions::Id)
                            .uuid()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(ProcessDefinitions::TenantId).uuid().not_null())
                    .col(ColumnDef::new(ProcessDefinitions::Name).string().not_null())
                    .col(ColumnDef::new(ProcessDefinitions::Key).string().not_null())
                    // Business version of the definition, starts at 1.
                    .col(
                        ColumnDef::new(ProcessDefinitions::Version)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .col(ColumnDef::new(ProcessDefinitions::Category).string().null())
                    .col(ColumnDef::new(ProcessDefinitions::Description).text().null())
                    // NOTE(review): Expr::val("[]") emits a plain string
                    // default — confirm it coerces to jsonb on Postgres
                    // (otherwise Expr::cust("'[]'::jsonb") would be needed).
                    .col(
                        ColumnDef::new(ProcessDefinitions::Nodes)
                            .json_binary()
                            .not_null()
                            .default(Expr::val("[]")),
                    )
                    .col(
                        ColumnDef::new(ProcessDefinitions::Edges)
                            .json_binary()
                            .not_null()
                            .default(Expr::val("[]")),
                    )
                    // Lifecycle status; new definitions begin as drafts.
                    .col(
                        ColumnDef::new(ProcessDefinitions::Status)
                            .string()
                            .not_null()
                            .default("draft"),
                    )
                    .col(
                        ColumnDef::new(ProcessDefinitions::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(ProcessDefinitions::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(ColumnDef::new(ProcessDefinitions::CreatedBy).uuid().not_null())
                    .col(ColumnDef::new(ProcessDefinitions::UpdatedBy).uuid().not_null())
                    // Soft-delete marker; the unique index below excludes
                    // deleted rows.
                    .col(
                        ColumnDef::new(ProcessDefinitions::DeletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    // NOTE(review): renders as column `version_field`, while
                    // the sibling workflow tables use a `version` column for
                    // the same slot — verify the entity mapping agrees.
                    .col(
                        ColumnDef::new(ProcessDefinitions::VersionField)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .to_owned(),
            )
            .await?;
        manager
            .create_index(
                Index::create()
                    .name("idx_process_definitions_tenant_id")
                    .table(ProcessDefinitions::Table)
                    .col(ProcessDefinitions::TenantId)
                    .to_owned(),
            )
            .await?;
        // Partial unique index (WHERE deleted_at IS NULL) is not expressible
        // through the builder, so raw Postgres SQL is used.
        // NOTE(review): no IF NOT EXISTS — re-running `up` after a partial
        // failure will error here.
        manager.get_connection().execute(sea_orm::Statement::from_string(
            sea_orm::DatabaseBackend::Postgres,
            "CREATE UNIQUE INDEX idx_process_definitions_key_version ON process_definitions (tenant_id, key, version) WHERE deleted_at IS NULL".to_string(),
        )).await.map_err(|e| DbErr::Custom(e.to_string()))?;
        Ok(())
    }

    /// Drop `process_definitions` (its indexes go with it).
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(ProcessDefinitions::Table).to_owned())
            .await
    }
}

/// Column identifiers for the `process_definitions` table.
#[derive(DeriveIden)]
enum ProcessDefinitions {
    Table,
    Id,
    TenantId,
    Name,
    Key,
    Version,
    Category,
    Description,
    Nodes,
    Edges,
    Status,
    CreatedAt,
    UpdatedAt,
    CreatedBy,
    UpdatedBy,
    DeletedAt,
    VersionField,
}

View File

@@ -0,0 +1,124 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `process_instances`: one row per execution of a process
    /// definition, with an FK back to `process_definitions`.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(ProcessInstances::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(ProcessInstances::Id)
                            .uuid()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(ProcessInstances::TenantId).uuid().not_null())
                    .col(ColumnDef::new(ProcessInstances::DefinitionId).uuid().not_null())
                    // Optional caller-supplied correlation key.
                    .col(ColumnDef::new(ProcessInstances::BusinessKey).string().null())
                    // Instances start in the "running" state.
                    .col(
                        ColumnDef::new(ProcessInstances::Status)
                            .string()
                            .not_null()
                            .default("running"),
                    )
                    .col(ColumnDef::new(ProcessInstances::StartedBy).uuid().not_null())
                    .col(
                        ColumnDef::new(ProcessInstances::StartedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(ProcessInstances::CompletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .col(
                        ColumnDef::new(ProcessInstances::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(ProcessInstances::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(ColumnDef::new(ProcessInstances::CreatedBy).uuid().not_null())
                    .col(ColumnDef::new(ProcessInstances::UpdatedBy).uuid().not_null())
                    .col(
                        ColumnDef::new(ProcessInstances::DeletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    // Version counter, default 1 (presumably optimistic
                    // locking — consistent with the other workflow tables).
                    .col(
                        ColumnDef::new(ProcessInstances::Version)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .to_owned(),
            )
            .await?;
        // Serves monitor/list queries filtered by tenant and status.
        manager
            .create_index(
                Index::create()
                    .name("idx_instances_tenant_status")
                    .table(ProcessInstances::Table)
                    .col(ProcessInstances::TenantId)
                    .col(ProcessInstances::Status)
                    .to_owned(),
            )
            .await?;
        manager
            .create_foreign_key(
                ForeignKey::create()
                    .name("fk_instances_definition")
                    .from(ProcessInstances::Table, ProcessInstances::DefinitionId)
                    .to(ProcessDefinitions::Table, ProcessDefinitions::Id)
                    .to_owned(),
            )
            .await?;
        Ok(())
    }

    /// Drop `process_instances`.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(ProcessInstances::Table).to_owned())
            .await
    }
}

/// Column identifiers for the `process_instances` table.
#[derive(DeriveIden)]
enum ProcessInstances {
    Table,
    Id,
    TenantId,
    DefinitionId,
    BusinessKey,
    Status,
    StartedBy,
    StartedAt,
    CompletedAt,
    CreatedAt,
    UpdatedAt,
    CreatedBy,
    UpdatedBy,
    DeletedAt,
    Version,
}

/// Minimal iden for the referenced `process_definitions` table.
#[derive(DeriveIden)]
enum ProcessDefinitions {
    Table,
    Id,
}

View File

@@ -0,0 +1,90 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `tokens`: engine-internal execution markers, one per position a
    /// process instance currently (or previously) occupies. Unlike the other
    /// workflow tables, this one carries no audit or soft-delete columns.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Tokens::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Tokens::Id)
                            .uuid()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Tokens::TenantId).uuid().not_null())
                    .col(ColumnDef::new(Tokens::InstanceId).uuid().not_null())
                    // Diagram node the token sits on (string id from the
                    // definition's `nodes` JSON, not a table FK).
                    .col(ColumnDef::new(Tokens::NodeId).string().not_null())
                    // Tokens start "active"; the executor flips them to
                    // "consumed" when they advance.
                    .col(
                        ColumnDef::new(Tokens::Status)
                            .string()
                            .not_null()
                            .default("active"),
                    )
                    .col(
                        ColumnDef::new(Tokens::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Tokens::ConsumedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .to_owned(),
            )
            .await?;
        manager
            .create_index(
                Index::create()
                    .name("idx_tokens_instance")
                    .table(Tokens::Table)
                    .col(Tokens::InstanceId)
                    .to_owned(),
            )
            .await?;
        manager
            .create_foreign_key(
                ForeignKey::create()
                    .name("fk_tokens_instance")
                    .from(Tokens::Table, Tokens::InstanceId)
                    .to(ProcessInstances::Table, ProcessInstances::Id)
                    .to_owned(),
            )
            .await?;
        Ok(())
    }

    /// Drop `tokens`.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Tokens::Table).to_owned())
            .await
    }
}

/// Column identifiers for the `tokens` table.
#[derive(DeriveIden)]
enum Tokens {
    Table,
    Id,
    TenantId,
    InstanceId,
    NodeId,
    Status,
    CreatedAt,
    ConsumedAt,
}

/// Minimal iden for the referenced `process_instances` table.
#[derive(DeriveIden)]
enum ProcessInstances {
    Table,
    Id,
}

View File

@@ -0,0 +1,160 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `tasks`: user-facing work items tied to a process instance and
    /// the token that spawned them, plus lookup indexes and FKs.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Tasks::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Tasks::Id)
                            .uuid()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Tasks::TenantId).uuid().not_null())
                    .col(ColumnDef::new(Tasks::InstanceId).uuid().not_null())
                    .col(ColumnDef::new(Tasks::TokenId).uuid().not_null())
                    .col(ColumnDef::new(Tasks::NodeId).string().not_null())
                    .col(ColumnDef::new(Tasks::NodeName).string().null())
                    // Either a direct assignee or candidate groups (JSON
                    // array) may be set.
                    .col(ColumnDef::new(Tasks::AssigneeId).uuid().null())
                    .col(ColumnDef::new(Tasks::CandidateGroups).json_binary().null())
                    .col(
                        ColumnDef::new(Tasks::Status)
                            .string()
                            .not_null()
                            .default("pending"),
                    )
                    // Decision recorded on completion (e.g. approve/reject).
                    .col(ColumnDef::new(Tasks::Outcome).string().null())
                    .col(ColumnDef::new(Tasks::FormData).json_binary().null())
                    .col(
                        ColumnDef::new(Tasks::DueDate)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .col(
                        ColumnDef::new(Tasks::CompletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .col(
                        ColumnDef::new(Tasks::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Tasks::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(ColumnDef::new(Tasks::CreatedBy).uuid().not_null())
                    .col(ColumnDef::new(Tasks::UpdatedBy).uuid().not_null())
                    .col(
                        ColumnDef::new(Tasks::DeletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    // Version counter, default 1 (presumably optimistic
                    // locking — consistent with the other workflow tables).
                    .col(
                        ColumnDef::new(Tasks::Version)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .to_owned(),
            )
            .await?;
        // Serves "my pending tasks" queries (tenant + assignee + status).
        manager
            .create_index(
                Index::create()
                    .name("idx_tasks_assignee")
                    .table(Tasks::Table)
                    .col(Tasks::TenantId)
                    .col(Tasks::AssigneeId)
                    .col(Tasks::Status)
                    .to_owned(),
            )
            .await?;
        manager
            .create_index(
                Index::create()
                    .name("idx_tasks_instance")
                    .table(Tasks::Table)
                    .col(Tasks::InstanceId)
                    .to_owned(),
            )
            .await?;
        manager
            .create_foreign_key(
                ForeignKey::create()
                    .name("fk_tasks_instance")
                    .from(Tasks::Table, Tasks::InstanceId)
                    .to(ProcessInstances::Table, ProcessInstances::Id)
                    .to_owned(),
            )
            .await?;
        // NOTE(review): WfTokens must resolve to the table actually created
        // by m20260412_000020 (named `tokens`) — verify its iden rendering,
        // otherwise this FK creation fails at migration time.
        manager
            .create_foreign_key(
                ForeignKey::create()
                    .name("fk_tasks_token")
                    .from(Tasks::Table, Tasks::TokenId)
                    .to(WfTokens::Table, WfTokens::Id)
                    .to_owned(),
            )
            .await?;
        Ok(())
    }

    /// Drop `tasks` (its indexes and FKs go with it).
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Tasks::Table).to_owned())
            .await
    }
}

/// Column identifiers for the `tasks` table.
#[derive(DeriveIden)]
enum Tasks {
    Table,
    Id,
    TenantId,
    InstanceId,
    TokenId,
    NodeId,
    NodeName,
    AssigneeId,
    CandidateGroups,
    Status,
    Outcome,
    FormData,
    DueDate,
    CompletedAt,
    CreatedAt,
    UpdatedAt,
    CreatedBy,
    UpdatedBy,
    DeletedAt,
    Version,
}

/// Minimal iden for the referenced `process_instances` table.
#[derive(DeriveIden)]
enum ProcessInstances {
    Table,
    Id,
}
#[derive(DeriveIden)]
enum WfTokens {
Table,
Id,
}

View File

@@ -0,0 +1,84 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `process_variables`: one row per (instance, name) variable,
    /// stored in a typed-column layout (one value column per supported type).
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(ProcessVariables::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(ProcessVariables::Id)
                            .uuid()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(ProcessVariables::TenantId).uuid().not_null())
                    .col(ColumnDef::new(ProcessVariables::InstanceId).uuid().not_null())
                    .col(ColumnDef::new(ProcessVariables::Name).string().not_null())
                    // Discriminator selecting which value_* column is used.
                    .col(
                        ColumnDef::new(ProcessVariables::VarType)
                            .string()
                            .not_null()
                            .default("string"),
                    )
                    .col(ColumnDef::new(ProcessVariables::ValueString).text().null())
                    .col(ColumnDef::new(ProcessVariables::ValueNumber).double().null())
                    .col(ColumnDef::new(ProcessVariables::ValueBoolean).boolean().null())
                    .col(
                        ColumnDef::new(ProcessVariables::ValueDate)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .to_owned(),
            )
            .await?;
        // One variable name per instance; raw SQL mirrors the style of the
        // other workflow migrations.
        // NOTE(review): no IF NOT EXISTS — re-running `up` after a partial
        // failure will error here.
        manager.get_connection().execute(sea_orm::Statement::from_string(
            sea_orm::DatabaseBackend::Postgres,
            "CREATE UNIQUE INDEX idx_process_variables_instance_name ON process_variables (instance_id, name)".to_string(),
        )).await.map_err(|e| DbErr::Custom(e.to_string()))?;
        manager
            .create_foreign_key(
                ForeignKey::create()
                    .name("fk_variables_instance")
                    .from(ProcessVariables::Table, ProcessVariables::InstanceId)
                    .to(ProcessInstances::Table, ProcessInstances::Id)
                    .to_owned(),
            )
            .await?;
        Ok(())
    }

    /// Drop `process_variables`.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(ProcessVariables::Table).to_owned())
            .await
    }
}

/// Column identifiers for the `process_variables` table.
#[derive(DeriveIden)]
enum ProcessVariables {
    Table,
    Id,
    TenantId,
    InstanceId,
    Name,
    VarType,
    ValueString,
    ValueNumber,
    ValueBoolean,
    ValueDate,
}

/// Minimal iden for the referenced `process_instances` table.
#[derive(DeriveIden)]
enum ProcessInstances {
    Table,
    Id,
}

View File

@@ -105,10 +105,15 @@ async fn main() -> anyhow::Result<()> {
let config_module = erp_config::ConfigModule::new();
tracing::info!(module = config_module.name(), version = config_module.version(), "Config module initialized");
// Initialize workflow module
let workflow_module = erp_workflow::WorkflowModule::new();
tracing::info!(module = workflow_module.name(), version = workflow_module.version(), "Workflow module initialized");
// Initialize module registry and register modules
let registry = ModuleRegistry::new()
.register(auth_module)
.register(config_module);
.register(config_module)
.register(workflow_module);
tracing::info!(module_count = registry.modules().len(), "Modules registered");
// Register event handlers
@@ -146,6 +151,7 @@ async fn main() -> anyhow::Result<()> {
// Protected routes (JWT authentication required)
let protected_routes = erp_auth::AuthModule::protected_routes()
.merge(erp_config::ConfigModule::protected_routes())
.merge(erp_workflow::WorkflowModule::protected_routes())
.layer(middleware::from_fn(move |req, next| {
let secret = jwt_secret.clone();
async move { jwt_auth_middleware_fn(secret, req, next).await }

View File

@@ -60,3 +60,13 @@ impl FromRef<AppState> for erp_config::ConfigState {
}
}
}
/// Allow erp-workflow handlers to extract their required state without depending on erp-server.
impl FromRef<AppState> for erp_workflow::WorkflowState {
    fn from_ref(state: &AppState) -> Self {
        // Clones the shared handles out of AppState (presumably Arc-backed
        // and cheap to clone — verify if this is on a hot path).
        Self {
            db: state.db.clone(),
            event_bus: state.event_bus.clone(),
        }
    }
}

View File

@@ -5,12 +5,16 @@ edition.workspace = true
[dependencies]
erp-core.workspace = true
tokio.workspace = true
serde.workspace = true
serde_json.workspace = true
uuid.workspace = true
chrono.workspace = true
axum.workspace = true
sea-orm.workspace = true
tracing.workspace = true
tokio = { workspace = true, features = ["full"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
uuid = { workspace = true, features = ["v7", "serde"] }
chrono = { workspace = true, features = ["serde"] }
axum = { workspace = true }
sea-orm = { workspace = true, features = ["sqlx-postgres", "runtime-tokio-rustls", "with-uuid", "with-chrono", "with-json"] }
tracing = { workspace = true }
anyhow.workspace = true
thiserror.workspace = true
utoipa = { workspace = true, features = ["uuid", "chrono"] }
async-trait.workspace = true
validator.workspace = true

View File

@@ -0,0 +1,211 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use uuid::Uuid;
use validator::Validate;
// --- Flow diagram node/edge definitions ---

/// BPMN node type (the subset supported by the engine).
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum NodeType {
    StartEvent,
    EndEvent,
    UserTask,
    ServiceTask,
    ExclusiveGateway,
    ParallelGateway,
}

/// Flow diagram node definition.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct NodeDef {
    pub id: String,
    // Serialized as `type` to match the frontend diagram format.
    #[serde(rename = "type")]
    pub node_type: NodeType,
    pub name: String,
    // Direct assignee (presumably for UserTask nodes — not enforced here).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub assignee_id: Option<Uuid>,
    // Candidate group codes as an alternative to a direct assignee.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub candidate_groups: Option<Vec<String>>,
    // Service identifier (presumably for ServiceTask nodes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_type: Option<String>,
    /// Render position for the frontend designer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub position: Option<NodePosition>,
}

/// Canvas coordinates used by the frontend designer.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct NodePosition {
    pub x: f64,
    pub y: f64,
}

/// Flow diagram edge (sequence flow) definition.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct EdgeDef {
    pub id: String,
    pub source: String,
    pub target: String,
    /// Condition expression (for exclusive-gateway branches).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub condition: Option<String>,
    /// Render label for the frontend.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub label: Option<String>,
}

/// A complete flow diagram: nodes plus the edges connecting them.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct FlowDiagram {
    pub nodes: Vec<NodeDef>,
    pub edges: Vec<EdgeDef>,
}
// --- Process definition DTOs ---

/// Response body for a process definition.
#[derive(Debug, Serialize, ToSchema)]
pub struct ProcessDefinitionResp {
    pub id: Uuid,
    pub name: String,
    pub key: String,
    pub version: i32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub category: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    // Diagram payload passed through as raw JSON (stored as JSONB).
    pub nodes: serde_json::Value,
    pub edges: serde_json::Value,
    pub status: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

/// Request body for creating a process definition.
#[derive(Debug, Deserialize, Validate, ToSchema)]
pub struct CreateProcessDefinitionReq {
    #[validate(length(min = 1, max = 200, message = "流程名称不能为空"))]
    pub name: String,
    #[validate(length(min = 1, max = 100, message = "流程编码不能为空"))]
    pub key: String,
    pub category: Option<String>,
    pub description: Option<String>,
    pub nodes: Vec<NodeDef>,
    pub edges: Vec<EdgeDef>,
}

/// Request body for updating a process definition; every field is optional
/// (partial update).
#[derive(Debug, Deserialize, ToSchema)]
pub struct UpdateProcessDefinitionReq {
    pub name: Option<String>,
    pub category: Option<String>,
    pub description: Option<String>,
    pub nodes: Option<Vec<NodeDef>>,
    pub edges: Option<Vec<EdgeDef>>,
}
// --- Process instance DTOs ---

/// Response body for a process instance.
#[derive(Debug, Serialize, ToSchema)]
pub struct ProcessInstanceResp {
    pub id: Uuid,
    pub definition_id: Uuid,
    // Presumably populated by joining the definition — absent when not loaded.
    pub definition_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub business_key: Option<String>,
    pub status: String,
    pub started_by: Uuid,
    pub started_at: DateTime<Utc>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<DateTime<Utc>>,
    pub created_at: DateTime<Utc>,
    /// Positions of the currently active tokens.
    pub active_tokens: Vec<TokenResp>,
}

/// Request body for starting a new process instance.
#[derive(Debug, Deserialize, ToSchema)]
pub struct StartInstanceReq {
    pub definition_id: Uuid,
    pub business_key: Option<String>,
    /// Initial process variables.
    pub variables: Option<Vec<SetVariableReq>>,
}

// --- Token DTOs ---

/// Response body for an execution token.
#[derive(Debug, Serialize, ToSchema)]
pub struct TokenResp {
    pub id: Uuid,
    pub node_id: String,
    pub status: String,
    pub created_at: DateTime<Utc>,
}
// --- Task DTOs ---

/// Response body for a workflow task.
#[derive(Debug, Serialize, ToSchema)]
pub struct TaskResp {
    pub id: Uuid,
    pub instance_id: Uuid,
    pub token_id: Uuid,
    pub node_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub assignee_id: Option<Uuid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub candidate_groups: Option<serde_json::Value>,
    pub status: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outcome: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub form_data: Option<serde_json::Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub due_date: Option<DateTime<Utc>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<DateTime<Utc>>,
    pub created_at: DateTime<Utc>,
    /// Process definition name (for list display).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub definition_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub business_key: Option<String>,
}

/// Request body for completing a task with a decision outcome.
#[derive(Debug, Deserialize, ToSchema)]
pub struct CompleteTaskReq {
    pub outcome: String,
    pub form_data: Option<serde_json::Value>,
}

/// Request body for delegating a task to another user.
#[derive(Debug, Deserialize, ToSchema)]
pub struct DelegateTaskReq {
    pub delegate_to: Uuid,
}
// --- Process variable DTOs ---

/// Response body for a process variable. Exactly one of the `value_*` fields
/// is expected to be set, selected by `var_type` (mirrors the typed-column
/// storage layout).
#[derive(Debug, Serialize, ToSchema)]
pub struct ProcessVariableResp {
    pub id: Uuid,
    pub name: String,
    pub var_type: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_string: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_number: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_boolean: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_date: Option<DateTime<Utc>>,
}

/// Request body for setting a process variable; `value` is free-form JSON and
/// `var_type` is optional (presumably inferred when omitted — verify against
/// the variable service).
#[derive(Debug, Clone, Deserialize, ToSchema)]
pub struct SetVariableReq {
    pub name: String,
    pub var_type: Option<String>,
    pub value: serde_json::Value,
}

View File

@@ -0,0 +1,371 @@
use std::collections::HashMap;
use chrono::Utc;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, Set, ConnectionTrait,
PaginatorTrait,
};
use uuid::Uuid;
use crate::dto::NodeType;
use crate::engine::expression::ExpressionEvaluator;
use crate::engine::model::FlowGraph;
use crate::entity::{token, process_instance};
use crate::error::{WorkflowError, WorkflowResult};
/// Token-driven workflow execution engine.
///
/// Core responsibilities:
/// - create the first token at process start (from the StartEvent)
/// - advance a token to the next node when a task completes
/// - handle gateway fork / join logic
/// - complete the instance when an EndEvent is reached
pub struct FlowExecutor;

impl FlowExecutor {
    /// Start a process: create token(s) at the StartEvent's successor node.
    ///
    /// Returns the list of created token IDs.
    pub async fn start(
        instance_id: Uuid,
        tenant_id: Uuid,
        graph: &FlowGraph,
        variables: &HashMap<String, serde_json::Value>,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<Vec<Uuid>> {
        let start_id = graph
            .start_node_id
            .as_ref()
            .ok_or_else(|| WorkflowError::InvalidDiagram("流程图没有开始事件".to_string()))?;
        // Follow the StartEvent's outgoing edge to its successor.
        let outgoing = graph.get_outgoing_edges(start_id);
        if outgoing.is_empty() {
            return Err(WorkflowError::InvalidDiagram(
                "开始事件没有出边".to_string(),
            ));
        }
        // A StartEvent is expected to have a single outgoing edge; any extra
        // edges are silently ignored here.
        let first_edge = &outgoing[0];
        let target_node_id = &first_edge.target;
        Self::create_token_at_node(
            instance_id,
            tenant_id,
            target_node_id,
            graph,
            variables,
            txn,
        )
        .await
    }

    /// Advance a token: consume the current token and create new token(s) at
    /// the next node(s).
    ///
    /// Returns the IDs of the newly created tokens.
    pub async fn advance(
        token_id: Uuid,
        instance_id: Uuid,
        tenant_id: Uuid,
        graph: &FlowGraph,
        variables: &HashMap<String, serde_json::Value>,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<Vec<Uuid>> {
        // Load the current token.
        let current_token = token::Entity::find_by_id(token_id)
            .one(txn)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .ok_or_else(|| WorkflowError::NotFound(format!("Token 不存在: {token_id}")))?;
        if current_token.status != "active" {
            return Err(WorkflowError::InvalidState(format!(
                "Token 状态不是 active: {}",
                current_token.status
            )));
        }
        let node_id = current_token.node_id.clone();
        // Consume the current token before creating successors — the join
        // logic below relies on this ordering.
        let mut active: token::ActiveModel = current_token.into();
        active.status = Set("consumed".to_string());
        active.consumed_at = Set(Some(Utc::now()));
        active.update(txn).await.map_err(|e| WorkflowError::Validation(e.to_string()))?;
        // Route along the current node's outgoing edges.
        let outgoing = graph.get_outgoing_edges(&node_id);
        let current_node = graph.nodes.get(&node_id)
            .ok_or_else(|| WorkflowError::InvalidDiagram(format!("节点不存在: {node_id}")))?;
        match current_node.node_type {
            NodeType::ExclusiveGateway => {
                // Exclusive gateway: evaluate conditions, pick one branch.
                Self::advance_exclusive_gateway(
                    instance_id,
                    tenant_id,
                    &outgoing,
                    graph,
                    variables,
                    txn,
                )
                .await
            }
            NodeType::ParallelGateway => {
                // Parallel gateway: create a token for every outgoing edge.
                Self::advance_parallel_gateway(
                    instance_id,
                    tenant_id,
                    &outgoing,
                    graph,
                    variables,
                    txn,
                )
                .await
            }
            _ => {
                // Plain node: follow the outgoing edge(s).
                if outgoing.is_empty() {
                    // No outgoing edges (normally only an EndEvent ends here).
                    Ok(vec![])
                } else {
                    let mut new_tokens = Vec::new();
                    for edge in &outgoing {
                        let tokens = Self::create_token_at_node(
                            instance_id,
                            tenant_id,
                            &edge.target,
                            graph,
                            variables,
                            txn,
                        )
                        .await?;
                        new_tokens.extend(tokens);
                    }
                    Ok(new_tokens)
                }
            }
        }
    }

    /// Exclusive-gateway routing: evaluate each conditioned edge in order and
    /// take the first that holds; an unconditioned edge acts as the default.
    async fn advance_exclusive_gateway(
        instance_id: Uuid,
        tenant_id: Uuid,
        outgoing: &[&crate::engine::model::FlowEdge],
        graph: &FlowGraph,
        variables: &HashMap<String, serde_json::Value>,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<Vec<Uuid>> {
        let mut default_target: Option<&str> = None;
        let mut matched_target: Option<&str> = None;
        for edge in outgoing {
            if let Some(condition) = &edge.condition {
                match ExpressionEvaluator::eval(condition, variables) {
                    Ok(true) => {
                        matched_target = Some(&edge.target);
                        break;
                    }
                    Ok(false) => continue,
                    Err(_) => continue, // evaluation failed: skip this branch silently
                }
            } else {
                // An edge without a condition acts as the default branch.
                default_target = Some(&edge.target);
            }
        }
        let target = matched_target
            .or(default_target)
            .ok_or_else(|| WorkflowError::ExpressionError(
                "排他网关没有匹配的条件分支".to_string(),
            ))?;
        Self::create_token_at_node(instance_id, tenant_id, target, graph, variables, txn).await
    }

    /// Parallel-gateway fork: create a token on every outgoing edge.
    async fn advance_parallel_gateway(
        instance_id: Uuid,
        tenant_id: Uuid,
        outgoing: &[&crate::engine::model::FlowEdge],
        graph: &FlowGraph,
        variables: &HashMap<String, serde_json::Value>,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<Vec<Uuid>> {
        let mut new_tokens = Vec::new();
        for edge in outgoing {
            let tokens = Self::create_token_at_node(
                instance_id,
                tenant_id,
                &edge.target,
                graph,
                variables,
                txn,
            )
            .await?;
            new_tokens.extend(tokens);
        }
        Ok(new_tokens)
    }

    /// Create a token at the given node, applying node-type-specific logic
    /// (EndEvent completion, parallel-join waiting, or a plain active token).
    ///
    /// Returns a boxed future because the join path recurses back into this
    /// function and async recursion requires an explicit Box::pin.
    fn create_token_at_node<'a>(
        instance_id: Uuid,
        tenant_id: Uuid,
        node_id: &'a str,
        graph: &'a FlowGraph,
        variables: &'a HashMap<String, serde_json::Value>,
        txn: &'a impl ConnectionTrait,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = WorkflowResult<Vec<Uuid>>> + Send + 'a>> {
        Box::pin(async move {
            let node = graph.nodes.get(node_id)
                .ok_or_else(|| WorkflowError::InvalidDiagram(format!("节点不存在: {node_id}")))?;
            match node.node_type {
                NodeType::EndEvent => {
                    // Reached an EndEvent: no new token is created; instead
                    // check whether the whole instance is now complete.
                    Self::check_instance_completion(instance_id, tenant_id, txn).await?;
                    Ok(vec![])
                }
                NodeType::ParallelGateway
                    if Self::is_join_gateway(node_id, graph) =>
                {
                    // Parallel join: wait until all incoming branches arrive.
                    Self::handle_join_gateway(
                        instance_id,
                        tenant_id,
                        node_id,
                        graph,
                        variables,
                        txn,
                    )
                    .await
                }
                _ => {
                    // UserTask / ServiceTask / fork gateways etc.: create an
                    // active token sitting on this node.
                    let new_token_id = Uuid::now_v7();
                    let now = Utc::now();
                    let token_model = token::ActiveModel {
                        id: Set(new_token_id),
                        tenant_id: Set(tenant_id),
                        instance_id: Set(instance_id),
                        node_id: Set(node_id.to_string()),
                        status: Set("active".to_string()),
                        created_at: Set(now),
                        consumed_at: Set(None),
                    };
                    token_model
                        .insert(txn)
                        .await
                        .map_err(|e| WorkflowError::Validation(e.to_string()))?;
                    Ok(vec![new_token_id])
                }
            }
        })
    }

    /// A parallel gateway is treated as a join when it has more than one
    /// incoming edge. (The original comment also mentioned "incoming >
    /// outgoing", but only the incoming-edge count is actually checked.)
    fn is_join_gateway(node_id: &str, graph: &FlowGraph) -> bool {
        let incoming = graph.get_incoming_edges(node_id);
        incoming.len() > 1
    }

    /// Parallel-join handling: only continue past the gateway once every
    /// incoming branch has arrived.
    ///
    /// For each incoming edge's source node this requires a consumed-or-active
    /// token to exist AND no token at that source to still be active;
    /// otherwise the join "waits" by returning an empty token list.
    ///
    /// NOTE(review): this source-node heuristic may misbehave for diagrams
    /// where a source node is reached more than once (loops), and two
    /// branches completing concurrently could both pass the check — confirm
    /// that the advisory-lock layer serializes calls into this path.
    async fn handle_join_gateway(
        instance_id: Uuid,
        tenant_id: Uuid,
        node_id: &str,
        graph: &FlowGraph,
        variables: &HashMap<String, serde_json::Value>,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<Vec<Uuid>> {
        let incoming = graph.get_incoming_edges(node_id);
        // Require every incoming branch's source node to have produced a token.
        for edge in &incoming {
            let has_consumed = token::Entity::find()
                .filter(token::Column::InstanceId.eq(instance_id))
                .filter(token::Column::NodeId.eq(&edge.source))
                .filter(token::Column::Status.is_in(["consumed", "active"]))
                .one(txn)
                .await
                .map_err(|e| WorkflowError::Validation(e.to_string()))?;
            if has_consumed.is_none() {
                // A branch has not arrived yet — keep waiting.
                return Ok(vec![]);
            }
            // A still-active token at the source means that branch has not
            // advanced past it yet.
            let has_active = token::Entity::find()
                .filter(token::Column::InstanceId.eq(instance_id))
                .filter(token::Column::NodeId.eq(&edge.source))
                .filter(token::Column::Status.eq("active"))
                .one(txn)
                .await
                .map_err(|e| WorkflowError::Validation(e.to_string()))?;
            if has_active.is_some() {
                // A branch is still executing — keep waiting.
                return Ok(vec![]);
            }
        }
        // Every branch has completed: continue along the outgoing edges.
        let outgoing = graph.get_outgoing_edges(node_id);
        let mut new_tokens = Vec::new();
        for edge in &outgoing {
            let tokens = Self::create_token_at_node(
                instance_id,
                tenant_id,
                &edge.target,
                graph,
                variables,
                txn,
            )
            .await?;
            new_tokens.extend(tokens);
        }
        Ok(new_tokens)
    }

    /// If no active tokens remain for the instance, mark it completed and
    /// stamp `completed_at` / `updated_at`.
    async fn check_instance_completion(
        instance_id: Uuid,
        tenant_id: Uuid,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<()> {
        let active_count = token::Entity::find()
            .filter(token::Column::InstanceId.eq(instance_id))
            .filter(token::Column::Status.eq("active"))
            .count(txn)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        if active_count == 0 {
            // All tokens are done: flip the instance to completed. The filter
            // enforces tenant scoping and excludes soft-deleted instances.
            let instance = process_instance::Entity::find_by_id(instance_id)
                .one(txn)
                .await
                .map_err(|e| WorkflowError::Validation(e.to_string()))?
                .filter(|i| i.tenant_id == tenant_id && i.deleted_at.is_none())
                .ok_or_else(|| WorkflowError::NotFound(format!("流程实例不存在: {instance_id}")))?;
            let mut active: process_instance::ActiveModel = instance.into();
            active.status = Set("completed".to_string());
            active.completed_at = Set(Some(Utc::now()));
            active.updated_at = Set(Utc::now());
            active.update(txn).await.map_err(|e| WorkflowError::Validation(e.to_string()))?;
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,325 @@
use std::collections::HashMap;
use crate::error::{WorkflowError, WorkflowResult};
/// Simple condition-expression evaluator.
///
/// Supported comparison operators: >, >=, <, <=, ==, !=
/// Supports && and || logical combinators.
/// Operands may be variable names (looked up in the variables map) or
/// literals (numbers, strings).
///
/// Examples:
/// - `amount > 1000`
/// - `status == "approved"`
/// - `score >= 60 && attendance > 80`
pub struct ExpressionEvaluator;
impl ExpressionEvaluator {
    /// Evaluate a single condition expression against the variables map.
    ///
    /// Expression format: `{left} {op} {right}`, or compound expressions
    /// joined with `&&` / `||`. `||` is split first, giving it the loosest
    /// binding; Rust's `||` / `&&` short-circuit, so the right-hand side is
    /// only evaluated when needed.
    pub fn eval(expr: &str, variables: &HashMap<String, serde_json::Value>) -> WorkflowResult<bool> {
        let expr = expr.trim();
        // Logical OR: split at the first top-level `||` and recurse.
        if let Some(idx) = Self::find_logical_op(expr, "||") {
            let left = &expr[..idx];
            let right = &expr[idx + 2..];
            return Ok(Self::eval(left, variables)? || Self::eval(right, variables)?);
        }
        // Logical AND.
        if let Some(idx) = Self::find_logical_op(expr, "&&") {
            let left = &expr[..idx];
            let right = &expr[idx + 2..];
            return Ok(Self::eval(left, variables)? && Self::eval(right, variables)?);
        }
        // Otherwise a single comparison expression.
        Self::eval_comparison(expr, variables)
    }
/// 查找逻辑运算符位置,跳过引号内的内容。
fn find_logical_op(expr: &str, op: &str) -> Option<usize> {
let mut in_string = false;
let mut string_char = ' ';
let chars: Vec<char> = expr.chars().collect();
let op_chars: Vec<char> = op.chars().collect();
let op_len = op_chars.len();
for i in 0..chars.len().saturating_sub(op_len - 1) {
let c = chars[i];
if !in_string && (c == '"' || c == '\'') {
in_string = true;
string_char = c;
continue;
}
if in_string && c == string_char {
in_string = false;
continue;
}
if in_string {
continue;
}
if chars[i..].starts_with(&op_chars) {
return Some(i);
}
}
None
}
/// 求值单个比较表达式。
fn eval_comparison(expr: &str, variables: &HashMap<String, serde_json::Value>) -> WorkflowResult<bool> {
let operators = [">=", "<=", "!=", "==", ">", "<"];
for op in &operators {
if let Some(idx) = Self::find_comparison_op(expr, op) {
let left = expr[..idx].trim();
let right = expr[idx + op.len()..].trim();
let left_val = Self::resolve_value(left, variables)?;
let right_val = Self::resolve_value(right, variables)?;
return Self::compare(&left_val, &right_val, op);
}
}
Err(WorkflowError::ExpressionError(format!(
"无法解析表达式: '{}'",
expr
)))
}
/// 查找比较运算符位置,跳过引号内的内容。
fn find_comparison_op(expr: &str, op: &str) -> Option<usize> {
let mut in_string = false;
let mut string_char = ' ';
let bytes = expr.as_bytes();
let op_bytes = op.as_bytes();
let op_len = op_bytes.len();
for i in 0..bytes.len().saturating_sub(op_len - 1) {
let c = bytes[i] as char;
if !in_string && (c == '"' || c == '\'') {
in_string = true;
string_char = c;
continue;
}
if in_string && c == string_char {
in_string = false;
continue;
}
if in_string {
continue;
}
if bytes[i..].starts_with(op_bytes) {
// 确保不是被嵌在其他运算符里(如 != 中的 =
// 对于 > 和 < 检查后面不是 = 或 >
if op == ">" || op == "<" {
if i + op_len < bytes.len() {
let next = bytes[i + op_len] as char;
if next == '=' || (op == ">" && next == '>') {
continue;
}
}
// 也检查前面不是 ! 或 = 或 < 或 >
if i > 0 {
let prev = bytes[i - 1] as char;
if prev == '!' || prev == '=' || prev == '<' || prev == '>' {
continue;
}
}
}
// 对于 ==, >=, <=, != 确保前面不是 ! 或 = (避免匹配到 == 中的第二个 =)
// 这已经通过从长到短匹配处理了
return Some(i);
}
}
None
}
/// 解析值:字符串字面量、数字字面量或变量引用。
fn resolve_value(
token: &str,
variables: &HashMap<String, serde_json::Value>,
) -> WorkflowResult<serde_json::Value> {
let token = token.trim();
// 字符串字面量
if (token.starts_with('"') && token.ends_with('"'))
|| (token.starts_with('\'') && token.ends_with('\''))
{
return Ok(serde_json::Value::String(
token[1..token.len() - 1].to_string(),
));
}
// 数字字面量
if let Ok(n) = token.parse::<i64>() {
return Ok(serde_json::Value::Number(n.into()));
}
if let Ok(f) = token.parse::<f64>() {
if let Some(n) = serde_json::Number::from_f64(f) {
return Ok(serde_json::Value::Number(n));
}
}
// 布尔字面量
if token == "true" {
return Ok(serde_json::Value::Bool(true));
}
if token == "false" {
return Ok(serde_json::Value::Bool(false));
}
// 变量引用
if let Some(val) = variables.get(token) {
return Ok(val.clone());
}
Err(WorkflowError::ExpressionError(format!(
"未知的变量或值: '{}'",
token
)))
}
/// 比较两个 JSON 值。
fn compare(
left: &serde_json::Value,
right: &serde_json::Value,
op: &str,
) -> WorkflowResult<bool> {
match op {
"==" => Ok(Self::values_equal(left, right)),
"!=" => Ok(!Self::values_equal(left, right)),
">" => Ok(Self::values_compare(left, right)? == std::cmp::Ordering::Greater),
">=" => Ok(Self::values_compare(left, right)? != std::cmp::Ordering::Less),
"<" => Ok(Self::values_compare(left, right)? == std::cmp::Ordering::Less),
"<=" => Ok(Self::values_compare(left, right)? != std::cmp::Ordering::Greater),
_ => Err(WorkflowError::ExpressionError(format!(
"不支持的比较运算符: '{}'",
op
))),
}
}
fn values_equal(left: &serde_json::Value, right: &serde_json::Value) -> bool {
// 数值比较:允许整数和浮点数互比
if left.is_number() && right.is_number() {
return left.as_f64() == right.as_f64();
}
left == right
}
fn values_compare(
left: &serde_json::Value,
right: &serde_json::Value,
) -> WorkflowResult<std::cmp::Ordering> {
if left.is_number() && right.is_number() {
let l = left.as_f64().unwrap_or(0.0);
let r = right.as_f64().unwrap_or(0.0);
return Ok(l.partial_cmp(&r).unwrap_or(std::cmp::Ordering::Equal));
}
if let (Some(l), Some(r)) = (left.as_str(), right.as_str()) {
return Ok(l.cmp(r));
}
Err(WorkflowError::ExpressionError(format!(
"无法比较 {:?}{:?}",
left, right
)))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// Shared variable set used by every test case below.
    fn make_vars() -> HashMap<String, serde_json::Value> {
        HashMap::from([
            ("amount".to_string(), json!(1500)),
            ("status".to_string(), json!("approved")),
            ("score".to_string(), json!(85)),
            ("name".to_string(), json!("Alice")),
            ("active".to_string(), json!(true)),
        ])
    }

    #[test]
    fn test_number_greater_than() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount > 1000", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount > 2000", &v).unwrap());
    }

    #[test]
    fn test_number_less_than() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount < 2000", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount < 1000", &v).unwrap());
    }

    #[test]
    fn test_number_equals() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount == 1500", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount == 1000", &v).unwrap());
    }

    #[test]
    fn test_string_equals() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("status == \"approved\"", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("status == \"rejected\"", &v).unwrap());
    }

    #[test]
    fn test_string_not_equals() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("status != \"rejected\"", &v).unwrap());
    }

    #[test]
    fn test_greater_or_equal() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount >= 1500", &v).unwrap());
        assert!(ExpressionEvaluator::eval("amount >= 1000", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount >= 2000", &v).unwrap());
    }

    #[test]
    fn test_logical_and() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount > 1000 && score > 80", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount > 2000 && score > 80", &v).unwrap());
    }

    #[test]
    fn test_logical_or() {
        let v = make_vars();
        assert!(ExpressionEvaluator::eval("amount > 2000 || score > 80", &v).unwrap());
        assert!(!ExpressionEvaluator::eval("amount > 2000 || score > 90", &v).unwrap());
    }

    #[test]
    fn test_unknown_variable() {
        // Referencing a variable not in the map must surface an error.
        assert!(ExpressionEvaluator::eval("unknown > 0", &make_vars()).is_err());
    }

    #[test]
    fn test_invalid_expression() {
        // A bare token without any comparison operator is unparseable.
        assert!(ExpressionEvaluator::eval("justavariable", &make_vars()).is_err());
    }
}

View File

@@ -0,0 +1,5 @@
pub mod expression;
pub mod executor;
pub mod model;
pub mod parser;
pub mod timeout;

View File

@@ -0,0 +1,122 @@
use std::collections::HashMap;
use crate::dto::{EdgeDef, NodeDef, NodeType};
/// In-memory flow-graph model used by the execution engine.
///
/// Built from the DTO node/edge lists of a process definition; nodes and
/// edges are indexed by id, with adjacency lists kept separately so edge
/// lookups stay O(1).
#[derive(Debug, Clone)]
pub struct FlowGraph {
    /// node_id → FlowNode
    pub nodes: HashMap<String, FlowNode>,
    /// edge_id → FlowEdge
    pub edges: HashMap<String, FlowEdge>,
    /// node_id → ids of edges leaving that node
    pub outgoing: HashMap<String, Vec<String>>,
    /// node_id → ids of edges arriving at that node
    pub incoming: HashMap<String, Vec<String>>,
    /// node_id of the StartEvent (None when the diagram has none)
    pub start_node_id: Option<String>,
    /// node_ids of all EndEvents
    pub end_node_ids: Vec<String>,
}
/// In-memory node model.
#[derive(Debug, Clone)]
pub struct FlowNode {
    /// Node id, unique within a diagram.
    pub id: String,
    /// BPMN element kind (start/end event, user task, gateway, …).
    pub node_type: NodeType,
    /// Display name.
    pub name: String,
    /// Fixed assignee for a user task, when configured.
    pub assignee_id: Option<uuid::Uuid>,
    /// Candidate groups for a user task, when configured.
    pub candidate_groups: Option<Vec<String>>,
    /// Service-task type discriminator, when configured — NOTE(review):
    /// semantics depend on the executor; confirm there.
    pub service_type: Option<String>,
}
/// In-memory edge model.
#[derive(Debug, Clone)]
pub struct FlowEdge {
    /// Edge id, unique within a diagram.
    pub id: String,
    /// Source node id.
    pub source: String,
    /// Target node id.
    pub target: String,
    /// Optional condition expression for gateway branching.
    pub condition: Option<String>,
    /// Optional display label.
    pub label: Option<String>,
}
impl FlowGraph {
    /// Builds a `FlowGraph` from DTO node and edge lists.
    ///
    /// Edges whose source/target node does not exist get no adjacency
    /// entry (they remain only in `edges`); the parser rejects such
    /// diagrams before they reach the engine.
    pub fn build(nodes: &[NodeDef], edges: &[EdgeDef]) -> Self {
        let mut g = FlowGraph {
            nodes: HashMap::new(),
            edges: HashMap::new(),
            outgoing: HashMap::new(),
            incoming: HashMap::new(),
            start_node_id: None,
            end_node_ids: Vec::new(),
        };
        for def in nodes {
            // Record start/end anchors while indexing the node.
            if def.node_type == NodeType::StartEvent {
                g.start_node_id = Some(def.id.clone());
            } else if def.node_type == NodeType::EndEvent {
                g.end_node_ids.push(def.id.clone());
            }
            g.nodes.insert(
                def.id.clone(),
                FlowNode {
                    id: def.id.clone(),
                    node_type: def.node_type.clone(),
                    name: def.name.clone(),
                    assignee_id: def.assignee_id,
                    candidate_groups: def.candidate_groups.clone(),
                    service_type: def.service_type.clone(),
                },
            );
            g.outgoing.insert(def.id.clone(), Vec::new());
            g.incoming.insert(def.id.clone(), Vec::new());
        }
        for def in edges {
            g.edges.insert(
                def.id.clone(),
                FlowEdge {
                    id: def.id.clone(),
                    source: def.source.clone(),
                    target: def.target.clone(),
                    condition: def.condition.clone(),
                    label: def.label.clone(),
                },
            );
            // Only nodes indexed above receive adjacency entries.
            if let Some(list) = g.outgoing.get_mut(&def.source) {
                list.push(def.id.clone());
            }
            if let Some(list) = g.incoming.get_mut(&def.target) {
                list.push(def.id.clone());
            }
        }
        g
    }
    /// Resolves a list of edge ids into edge references.
    fn resolve_edges(&self, edge_ids: Option<&Vec<String>>) -> Vec<&FlowEdge> {
        edge_ids
            .map(|ids| ids.iter().filter_map(|eid| self.edges.get(eid)).collect())
            .unwrap_or_default()
    }
    /// Returns the edges leaving `node_id` (empty for unknown nodes).
    pub fn get_outgoing_edges(&self, node_id: &str) -> Vec<&FlowEdge> {
        self.resolve_edges(self.outgoing.get(node_id))
    }
    /// Returns the edges arriving at `node_id` (empty for unknown nodes).
    pub fn get_incoming_edges(&self, node_id: &str) -> Vec<&FlowEdge> {
        self.resolve_edges(self.incoming.get(node_id))
    }
}

View File

@@ -0,0 +1,258 @@
use crate::dto::{EdgeDef, NodeDef, NodeType};
use crate::engine::model::FlowGraph;
use crate::error::{WorkflowError, WorkflowResult};
/// Parses node and edge lists into a `FlowGraph` and validates its structure.
///
/// Rules enforced:
/// - diagram is non-empty, has exactly one StartEvent and at least one EndEvent
/// - node ids and edge ids are unique
/// - every edge references existing nodes
/// - the StartEvent has no incoming and at least one outgoing edge
/// - EndEvents have no outgoing edges
/// - gateways have at least one incoming and one outgoing edge; an exclusive
///   gateway with multiple outgoing edges needs at least one conditional edge
///   (one unconditional edge may act as the default branch)
///
/// # Errors
/// Returns `WorkflowError::InvalidDiagram` describing the first violation.
pub fn parse_and_validate(nodes: &[NodeDef], edges: &[EdgeDef]) -> WorkflowResult<FlowGraph> {
    // Basic check: at least one node.
    if nodes.is_empty() {
        return Err(WorkflowError::InvalidDiagram("流程图不能为空".to_string()));
    }
    // Exactly one StartEvent.
    let start_count = nodes.iter().filter(|n| n.node_type == NodeType::StartEvent).count();
    if start_count == 0 {
        return Err(WorkflowError::InvalidDiagram(
            "流程图必须包含一个开始事件".to_string(),
        ));
    }
    if start_count > 1 {
        return Err(WorkflowError::InvalidDiagram(
            "流程图只能包含一个开始事件".to_string(),
        ));
    }
    // At least one EndEvent.
    let end_count = nodes.iter().filter(|n| n.node_type == NodeType::EndEvent).count();
    if end_count == 0 {
        return Err(WorkflowError::InvalidDiagram(
            "流程图必须包含至少一个结束事件".to_string(),
        ));
    }
    // Node ids must be unique.
    let node_ids: std::collections::HashSet<&str> =
        nodes.iter().map(|n| n.id.as_str()).collect();
    if node_ids.len() != nodes.len() {
        return Err(WorkflowError::InvalidDiagram(
            "节点 ID 不能重复".to_string(),
        ));
    }
    // Edge ids must be unique. FlowGraph stores edges keyed by id: a
    // duplicate id would silently overwrite one edge while both ids stay in
    // the adjacency lists, so gateway traversal would see the surviving edge
    // twice. Reject the diagram up front instead.
    let edge_ids: std::collections::HashSet<&str> =
        edges.iter().map(|e| e.id.as_str()).collect();
    if edge_ids.len() != edges.len() {
        return Err(WorkflowError::InvalidDiagram(
            "连线 ID 不能重复".to_string(),
        ));
    }
    // Every edge must reference existing nodes.
    for e in edges {
        if !node_ids.contains(e.source.as_str()) {
            return Err(WorkflowError::InvalidDiagram(format!(
                "连线 {} 的源节点 {} 不存在",
                e.id, e.source
            )));
        }
        if !node_ids.contains(e.target.as_str()) {
            return Err(WorkflowError::InvalidDiagram(format!(
                "连线 {} 的目标节点 {} 不存在",
                e.id, e.target
            )));
        }
    }
    // Build the graph, then check edge-direction constraints on it.
    let graph = FlowGraph::build(nodes, edges);
    // StartEvent: no incoming edges, at least one outgoing.
    if let Some(start_id) = &graph.start_node_id {
        if !graph.get_incoming_edges(start_id).is_empty() {
            return Err(WorkflowError::InvalidDiagram(
                "开始事件不能有入边".to_string(),
            ));
        }
        if graph.get_outgoing_edges(start_id).is_empty() {
            return Err(WorkflowError::InvalidDiagram(
                "开始事件必须有出边".to_string(),
            ));
        }
    }
    // EndEvents: no outgoing edges.
    for end_id in &graph.end_node_ids {
        if !graph.get_outgoing_edges(end_id).is_empty() {
            return Err(WorkflowError::InvalidDiagram(
                "结束事件不能有出边".to_string(),
            ));
        }
    }
    // Gateways need at least one incoming and one outgoing edge.
    for node in nodes {
        match &node.node_type {
            NodeType::ExclusiveGateway | NodeType::ParallelGateway => {
                let inc = graph.get_incoming_edges(&node.id);
                let out = graph.get_outgoing_edges(&node.id);
                if inc.is_empty() {
                    return Err(WorkflowError::InvalidDiagram(format!(
                        "网关 '{}' 必须有至少一条入边",
                        node.name
                    )));
                }
                if out.is_empty() {
                    return Err(WorkflowError::InvalidDiagram(format!(
                        "网关 '{}' 必须有至少一条出边",
                        node.name
                    )));
                }
                // An exclusive gateway with several outgoing edges needs at
                // least one conditional edge (an unconditional edge may act
                // as the default branch).
                if node.node_type == NodeType::ExclusiveGateway && out.len() > 1 {
                    let with_condition: Vec<_> = out.iter().filter(|e| e.condition.is_some()).collect();
                    if with_condition.is_empty() {
                        return Err(WorkflowError::InvalidDiagram(format!(
                            "排他网关 '{}' 有多条出边但没有条件表达式",
                            node.name
                        )));
                    }
                }
            }
            _ => {}
        }
    }
    Ok(graph)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::dto::NodePosition;

    /// Start event fixture at a fixed canvas position.
    fn make_start() -> NodeDef {
        NodeDef {
            id: "start".into(),
            node_type: NodeType::StartEvent,
            name: "开始".into(),
            assignee_id: None,
            candidate_groups: None,
            service_type: None,
            position: Some(NodePosition { x: 100.0, y: 100.0 }),
        }
    }

    /// End event fixture at a fixed canvas position.
    fn make_end() -> NodeDef {
        NodeDef {
            id: "end".into(),
            node_type: NodeType::EndEvent,
            name: "结束".into(),
            assignee_id: None,
            candidate_groups: None,
            service_type: None,
            position: Some(NodePosition { x: 100.0, y: 300.0 }),
        }
    }

    /// User-task fixture without position or assignment.
    fn make_user_task(id: &str, name: &str) -> NodeDef {
        NodeDef {
            id: id.into(),
            node_type: NodeType::UserTask,
            name: name.into(),
            assignee_id: None,
            candidate_groups: None,
            service_type: None,
            position: None,
        }
    }

    /// Unconditional edge fixture.
    fn make_edge(id: &str, source: &str, target: &str) -> EdgeDef {
        EdgeDef {
            id: id.into(),
            source: source.into(),
            target: target.into(),
            condition: None,
            label: None,
        }
    }

    #[test]
    fn test_valid_linear_flow() {
        let nodes = vec![make_start(), make_user_task("task1", "审批"), make_end()];
        let edges = vec![
            make_edge("e1", "start", "task1"),
            make_edge("e2", "task1", "end"),
        ];
        let graph = parse_and_validate(&nodes, &edges).expect("linear flow should validate");
        assert_eq!(graph.start_node_id, Some("start".to_string()));
        assert_eq!(graph.end_node_ids, vec!["end".to_string()]);
    }

    #[test]
    fn test_no_start_event() {
        let nodes = vec![make_user_task("task1", "审批"), make_end()];
        let edges = vec![make_edge("e1", "task1", "end")];
        let err = parse_and_validate(&nodes, &edges).unwrap_err();
        assert!(err.to_string().contains("开始事件"));
    }

    #[test]
    fn test_no_end_event() {
        let nodes = vec![make_start(), make_user_task("task1", "审批")];
        let edges = vec![make_edge("e1", "start", "task1")];
        let err = parse_and_validate(&nodes, &edges).unwrap_err();
        assert!(err.to_string().contains("结束事件"));
    }

    #[test]
    fn test_duplicate_node_id() {
        // Second node reuses the id "start".
        let duplicate = NodeDef {
            id: "start".into(),
            node_type: NodeType::EndEvent,
            name: "结束".into(),
            assignee_id: None,
            candidate_groups: None,
            service_type: None,
            position: None,
        };
        let res = parse_and_validate(&[make_start(), duplicate], &[]);
        assert!(res.is_err());
    }

    #[test]
    fn test_end_event_with_outgoing() {
        let nodes = vec![make_start(), make_end()];
        let edges = vec![
            make_edge("e1", "start", "end"),
            // An end event must not have outgoing edges.
            make_edge("e2", "end", "start"),
        ];
        assert!(parse_and_validate(&nodes, &edges).is_err());
    }

    #[test]
    fn test_exclusive_gateway_without_conditions() {
        let gateway = NodeDef {
            id: "gw1".into(),
            node_type: NodeType::ExclusiveGateway,
            name: "判断".into(),
            assignee_id: None,
            candidate_groups: None,
            service_type: None,
            position: None,
        };
        let nodes = vec![make_start(), gateway, make_end()];
        // Two outgoing edges, neither carrying a condition.
        let edges = vec![
            make_edge("e1", "start", "gw1"),
            make_edge("e2", "gw1", "end"),
            make_edge("e3", "gw1", "end"),
        ];
        assert!(parse_and_validate(&nodes, &edges).is_err());
    }
}

View File

@@ -0,0 +1,36 @@
// 超时检查框架 — 占位实现
//
// 当前版本仅提供接口定义,实际超时检查逻辑将在后续迭代中实现。
// Task 表的 due_date 字段已支持设置超时时间。
use chrono::Utc;
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use uuid::Uuid;
use crate::entity::task;
use crate::error::WorkflowResult;
/// Timeout-check service (placeholder).
pub struct TimeoutChecker;

impl TimeoutChecker {
    /// Lists overdue, still-pending tasks for a tenant.
    ///
    /// Returns the ids of tasks whose `due_date` is before now, whose
    /// status is still `pending`, and which are not soft-deleted.
    pub async fn find_overdue_tasks(
        tenant_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<Vec<Uuid>> {
        let cutoff = Utc::now();
        let rows = task::Entity::find()
            .filter(task::Column::TenantId.eq(tenant_id))
            .filter(task::Column::Status.eq("pending"))
            .filter(task::Column::DueDate.lt(cutoff))
            .filter(task::Column::DeletedAt.is_null())
            .all(db)
            .await
            .map_err(|e| crate::error::WorkflowError::Validation(e.to_string()))?;
        Ok(rows.into_iter().map(|t| t.id).collect())
    }
}

View File

@@ -0,0 +1,5 @@
pub mod process_definition;
pub mod process_instance;
pub mod token;
pub mod task;
pub mod process_variable;

View File

@@ -0,0 +1,40 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// ORM model for the `process_definitions` table: a tenant-scoped,
/// versioned workflow definition whose diagram is stored as JSON.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "process_definitions")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    pub tenant_id: Uuid,
    /// Human-readable definition name.
    pub name: String,
    /// Business key — NOTE(review): presumably unique per tenant together
    /// with `version`; confirm against the definition service.
    pub key: String,
    pub version: i32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub category: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Diagram node list serialized as JSON.
    pub nodes: serde_json::Value,
    /// Diagram edge list serialized as JSON.
    pub edges: serde_json::Value,
    /// Lifecycle state string — NOTE(review): looks like draft/published
    /// per the publish endpoint; confirm the full value set.
    pub status: String,
    pub created_at: DateTimeUtc,
    pub updated_at: DateTimeUtc,
    pub created_by: Uuid,
    pub updated_by: Uuid,
    /// Soft-delete timestamp; `None` means the row is live.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deleted_at: Option<DateTimeUtc>,
}
/// One definition spawns many process instances.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::process_instance::Entity")]
    ProcessInstance,
}
impl Related<super::process_instance::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ProcessInstance.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,59 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// ORM model for the `process_instances` table: one execution of a
/// process definition within a tenant.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "process_instances")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    pub tenant_id: Uuid,
    /// The definition this instance was started from.
    pub definition_id: Uuid,
    /// Optional external business reference for correlation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub business_key: Option<String>,
    /// Lifecycle state string ("completed" when all tokens are consumed —
    /// NOTE(review): confirm the remaining values against the service layer).
    pub status: String,
    pub started_by: Uuid,
    pub started_at: DateTimeUtc,
    /// Set when the instance reaches completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<DateTimeUtc>,
    pub created_at: DateTimeUtc,
    pub updated_at: DateTimeUtc,
    pub created_by: Uuid,
    pub updated_by: Uuid,
    /// Soft-delete timestamp; `None` means the row is live.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deleted_at: Option<DateTimeUtc>,
    /// Version counter — presumably for optimistic locking; TODO confirm.
    pub version: i32,
}
/// An instance belongs to one definition and owns many tokens and tasks.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::process_definition::Entity",
        from = "Column::DefinitionId",
        to = "super::process_definition::Column::Id"
    )]
    ProcessDefinition,
    #[sea_orm(has_many = "super::token::Entity")]
    Token,
    #[sea_orm(has_many = "super::task::Entity")]
    Task,
}
impl Related<super::process_definition::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ProcessDefinition.def()
    }
}
impl Related<super::token::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Token.def()
    }
}
impl Related<super::task::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Task.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,39 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// ORM model for the `process_variables` table: a named variable attached
/// to a process instance, stored in a typed column per `var_type`.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "process_variables")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    pub tenant_id: Uuid,
    /// Owning process instance.
    pub instance_id: Uuid,
    /// Variable name, as referenced from condition expressions.
    pub name: String,
    /// Type discriminator — NOTE(review): presumably selects which of the
    /// `value_*` columns below is populated; confirm against the service.
    pub var_type: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_string: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_number: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_boolean: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value_date: Option<DateTimeUtc>,
}
/// Each variable belongs to exactly one process instance.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::process_instance::Entity",
        from = "Column::InstanceId",
        to = "super::process_instance::Column::Id"
    )]
    ProcessInstance,
}
impl Related<super::process_instance::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ProcessInstance.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,65 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// ORM model for the `tasks` table: a human work item produced when a
/// token reaches a user-task node.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "tasks")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    pub tenant_id: Uuid,
    /// Owning process instance.
    pub instance_id: Uuid,
    /// Token that produced this task.
    pub token_id: Uuid,
    /// Diagram node this task was created for.
    pub node_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    /// Direct assignee, when one is set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub assignee_id: Option<Uuid>,
    /// Candidate groups (JSON) eligible to claim the task.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub candidate_groups: Option<serde_json::Value>,
    /// Task state; "pending" while awaiting action (used by the timeout
    /// checker) — NOTE(review): confirm the remaining values.
    pub status: String,
    /// Approval outcome recorded on completion, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outcome: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub form_data: Option<serde_json::Value>,
    /// Deadline used by the timeout checker.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub due_date: Option<DateTimeUtc>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<DateTimeUtc>,
    pub created_at: DateTimeUtc,
    pub updated_at: DateTimeUtc,
    pub created_by: Uuid,
    pub updated_by: Uuid,
    /// Soft-delete timestamp; `None` means the row is live.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deleted_at: Option<DateTimeUtc>,
    /// Version counter — presumably for optimistic locking; TODO confirm.
    pub version: i32,
}
/// A task belongs to one process instance and one token.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::process_instance::Entity",
        from = "Column::InstanceId",
        to = "super::process_instance::Column::Id"
    )]
    ProcessInstance,
    #[sea_orm(
        belongs_to = "super::token::Entity",
        from = "Column::TokenId",
        to = "super::token::Column::Id"
    )]
    Token,
}
impl Related<super::process_instance::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ProcessInstance.def()
    }
}
impl Related<super::token::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Token.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,34 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// ORM model for the `tokens` table: an execution marker positioned at a
/// diagram node; an instance completes when no "active" tokens remain.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "tokens")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    pub tenant_id: Uuid,
    /// Owning process instance.
    pub instance_id: Uuid,
    /// Diagram node the token currently sits on.
    pub node_id: String,
    /// Token state; "active" while live — NOTE(review): confirm the
    /// consumed/other values against the executor.
    pub status: String,
    pub created_at: DateTimeUtc,
    /// Set when the token is consumed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub consumed_at: Option<DateTimeUtc>,
}
/// Each token belongs to exactly one process instance.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::process_instance::Entity",
        from = "Column::InstanceId",
        to = "super::process_instance::Column::Id"
    )]
    ProcessInstance,
}
impl Related<super::process_instance::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ProcessInstance.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,49 @@
use erp_core::error::AppError;
/// Workflow module error types.
///
/// Each variant carries a human-readable (Chinese) message; the mapping to
/// the HTTP-facing `AppError` lives further down in this file.
#[derive(Debug, thiserror::Error)]
pub enum WorkflowError {
    /// Input/state validation failed (also used to wrap database errors).
    #[error("验证失败: {0}")]
    Validation(String),
    /// The requested resource does not exist or is not visible.
    #[error("资源未找到: {0}")]
    NotFound(String),
    /// A process definition with the same identity already exists.
    #[error("流程定义已存在: {0}")]
    DuplicateDefinition(String),
    /// The submitted diagram failed structural validation.
    #[error("流程图无效: {0}")]
    InvalidDiagram(String),
    /// The operation is not allowed in the current process state.
    #[error("流程状态错误: {0}")]
    InvalidState(String),
    /// Condition-expression evaluation failed.
    #[error("表达式求值失败: {0}")]
    ExpressionError(String),
}
impl From<sea_orm::TransactionError<WorkflowError>> for WorkflowError {
fn from(err: sea_orm::TransactionError<WorkflowError>) -> Self {
match err {
sea_orm::TransactionError::Connection(err) => {
WorkflowError::Validation(err.to_string())
}
sea_orm::TransactionError::Transaction(inner) => inner,
}
}
}
/// Maps workflow errors onto the shared HTTP error type.
impl From<WorkflowError> for AppError {
    fn from(err: WorkflowError) -> Self {
        match err {
            WorkflowError::NotFound(msg) => AppError::NotFound(msg),
            WorkflowError::DuplicateDefinition(msg) => AppError::Conflict(msg),
            // Everything else is reported as a validation failure.
            WorkflowError::Validation(msg)
            | WorkflowError::InvalidDiagram(msg)
            | WorkflowError::InvalidState(msg)
            | WorkflowError::ExpressionError(msg) => AppError::Validation(msg),
        }
    }
}

/// Convenience result alias used throughout the workflow crate.
pub type WorkflowResult<T> = Result<T, WorkflowError>;

View File

@@ -0,0 +1,124 @@
use axum::Extension;
use axum::extract::{FromRef, Path, Query, State};
use axum::response::Json;
use validator::Validate;
use erp_core::error::AppError;
use erp_core::rbac::require_permission;
use erp_core::types::{ApiResponse, PaginatedResponse, Pagination, TenantContext};
use uuid::Uuid;
use crate::dto::{CreateProcessDefinitionReq, ProcessDefinitionResp, UpdateProcessDefinitionReq};
use crate::service::definition_service::DefinitionService;
use crate::workflow_state::WorkflowState;
/// GET /api/v1/workflow/definitions
///
/// Returns a paginated page of process definitions for the caller's
/// tenant. Requires the `workflow:list` permission.
pub async fn list_definitions<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Query(pagination): Query<Pagination>,
) -> Result<Json<ApiResponse<PaginatedResponse<ProcessDefinitionResp>>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:list")?;
    let (data, total) = DefinitionService::list(ctx.tenant_id, &pagination, &state.db).await?;
    let page_size = pagination.limit();
    let body = PaginatedResponse {
        data,
        total,
        page: pagination.page.unwrap_or(1),
        page_size,
        // Ceiling division: a partial trailing page still counts.
        total_pages: (total + page_size - 1) / page_size,
    };
    Ok(Json(ApiResponse::ok(body)))
}
/// POST /api/v1/workflow/definitions
///
/// Validates the request body and creates a new process definition.
/// Requires the `workflow:create` permission.
pub async fn create_definition<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Json(req): Json<CreateProcessDefinitionReq>,
) -> Result<Json<ApiResponse<ProcessDefinitionResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:create")?;
    if let Err(e) = req.validate() {
        return Err(AppError::Validation(e.to_string()));
    }
    let definition =
        DefinitionService::create(ctx.tenant_id, ctx.user_id, &req, &state.db, &state.event_bus)
            .await?;
    Ok(Json(ApiResponse::ok(definition)))
}
/// GET /api/v1/workflow/definitions/{id}
///
/// Fetches a single process definition scoped to the caller's tenant.
/// Requires the `workflow:read` permission.
pub async fn get_definition<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
) -> Result<Json<ApiResponse<ProcessDefinitionResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:read")?;
    let definition = DefinitionService::get_by_id(id, ctx.tenant_id, &state.db).await?;
    Ok(Json(ApiResponse::ok(definition)))
}
/// PUT /api/v1/workflow/definitions/{id}
///
/// Updates a process definition scoped to the caller's tenant.
/// Requires the `workflow:update` permission.
pub async fn update_definition<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
    Json(req): Json<UpdateProcessDefinitionReq>,
) -> Result<Json<ApiResponse<ProcessDefinitionResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:update")?;
    let definition =
        DefinitionService::update(id, ctx.tenant_id, ctx.user_id, &req, &state.db).await?;
    Ok(Json(ApiResponse::ok(definition)))
}
/// POST /api/v1/workflow/definitions/{id}/publish
///
/// Publishes a process definition, making it available for starting
/// instances. Requires the `workflow:publish` permission.
pub async fn publish_definition<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
) -> Result<Json<ApiResponse<ProcessDefinitionResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:publish")?;
    let definition =
        DefinitionService::publish(id, ctx.tenant_id, ctx.user_id, &state.db, &state.event_bus)
            .await?;
    Ok(Json(ApiResponse::ok(definition)))
}

View File

@@ -0,0 +1,112 @@
use axum::Extension;
use axum::extract::{FromRef, Path, Query, State};
use axum::response::Json;
use erp_core::error::AppError;
use erp_core::rbac::require_permission;
use erp_core::types::{ApiResponse, PaginatedResponse, Pagination, TenantContext};
use uuid::Uuid;
use crate::dto::{ProcessInstanceResp, StartInstanceReq};
use crate::service::instance_service::InstanceService;
use crate::workflow_state::WorkflowState;
/// POST /api/v1/workflow/instances
///
/// Starts a new process instance for the caller's tenant.
/// Requires the `workflow:start` permission.
pub async fn start_instance<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Json(req): Json<StartInstanceReq>,
) -> Result<Json<ApiResponse<ProcessInstanceResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:start")?;
    let instance =
        InstanceService::start(ctx.tenant_id, ctx.user_id, &req, &state.db, &state.event_bus)
            .await?;
    Ok(Json(ApiResponse::ok(instance)))
}
/// GET /api/v1/workflow/instances
///
/// Returns a paginated page of process instances for the caller's tenant.
/// Requires the `workflow:list` permission.
pub async fn list_instances<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Query(pagination): Query<Pagination>,
) -> Result<Json<ApiResponse<PaginatedResponse<ProcessInstanceResp>>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:list")?;
    let (data, total) = InstanceService::list(ctx.tenant_id, &pagination, &state.db).await?;
    let page_size = pagination.limit();
    let body = PaginatedResponse {
        data,
        total,
        page: pagination.page.unwrap_or(1),
        page_size,
        // Ceiling division: a partial trailing page still counts.
        total_pages: (total + page_size - 1) / page_size,
    };
    Ok(Json(ApiResponse::ok(body)))
}
/// GET /api/v1/workflow/instances/{id}
///
/// Fetches a single process instance scoped to the caller's tenant.
/// Requires the `workflow:read` permission.
pub async fn get_instance<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
) -> Result<Json<ApiResponse<ProcessInstanceResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:read")?;
    let instance = InstanceService::get_by_id(id, ctx.tenant_id, &state.db).await?;
    Ok(Json(ApiResponse::ok(instance)))
}
/// POST /api/v1/workflow/instances/{id}/suspend
///
/// Suspends a process instance (delegates to `InstanceService::suspend`,
/// scoped to the caller's tenant). Requires the `workflow:update`
/// permission. Returns an empty payload on success.
pub async fn suspend_instance<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
) -> Result<Json<ApiResponse<()>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:update")?;
    InstanceService::suspend(id, ctx.tenant_id, ctx.user_id, &state.db).await?;
    Ok(Json(ApiResponse::ok(())))
}
/// POST /api/v1/workflow/instances/{id}/terminate
///
/// Terminates a process instance (delegates to
/// `InstanceService::terminate`, scoped to the caller's tenant). Requires
/// the `workflow:update` permission. Returns an empty payload on success.
pub async fn terminate_instance<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
) -> Result<Json<ApiResponse<()>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:update")?;
    InstanceService::terminate(id, ctx.tenant_id, ctx.user_id, &state.db).await?;
    Ok(Json(ApiResponse::ok(())))
}

View File

@@ -0,0 +1,3 @@
// HTTP handlers for the workflow REST API.
pub mod definition_handler;
pub mod instance_handler;
pub mod task_handler;

View File

@@ -0,0 +1,113 @@
use axum::Extension;
use axum::extract::{FromRef, Path, Query, State};
use axum::response::Json;
use erp_core::error::AppError;
use erp_core::rbac::require_permission;
use erp_core::types::{ApiResponse, PaginatedResponse, Pagination, TenantContext};
use uuid::Uuid;
use crate::dto::{CompleteTaskReq, DelegateTaskReq, TaskResp};
use crate::service::task_service::TaskService;
use crate::workflow_state::WorkflowState;
/// GET /api/v1/workflow/tasks/pending
///
/// Returns a paginated page of the caller's pending tasks.
/// Requires the `workflow:approve` permission.
pub async fn list_pending_tasks<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Query(pagination): Query<Pagination>,
) -> Result<Json<ApiResponse<PaginatedResponse<TaskResp>>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:approve")?;
    let (data, total) =
        TaskService::list_pending(ctx.tenant_id, ctx.user_id, &pagination, &state.db).await?;
    let page_size = pagination.limit();
    let body = PaginatedResponse {
        data,
        total,
        page: pagination.page.unwrap_or(1),
        page_size,
        // Ceiling division: a partial trailing page still counts.
        total_pages: (total + page_size - 1) / page_size,
    };
    Ok(Json(ApiResponse::ok(body)))
}
/// GET /api/v1/workflow/tasks/completed
///
/// Returns a paginated page of the caller's completed tasks.
/// Requires the `workflow:approve` permission.
pub async fn list_completed_tasks<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Query(pagination): Query<Pagination>,
) -> Result<Json<ApiResponse<PaginatedResponse<TaskResp>>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:approve")?;
    let (data, total) =
        TaskService::list_completed(ctx.tenant_id, ctx.user_id, &pagination, &state.db).await?;
    let page_size = pagination.limit();
    let body = PaginatedResponse {
        data,
        total,
        page: pagination.page.unwrap_or(1),
        page_size,
        // Ceiling division: a partial trailing page still counts.
        total_pages: (total + page_size - 1) / page_size,
    };
    Ok(Json(ApiResponse::ok(body)))
}
/// POST /api/v1/workflow/tasks/{id}/complete
///
/// Completes a pending task on behalf of the current user; the service
/// layer also advances the owning process instance. Requires the
/// `workflow:approve` permission.
pub async fn complete_task<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
    Json(req): Json<CompleteTaskReq>,
) -> Result<Json<ApiResponse<TaskResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:approve")?;
    let completed =
        TaskService::complete(id, ctx.tenant_id, ctx.user_id, &req, &state.db, &state.event_bus)
            .await?;
    Ok(Json(ApiResponse::ok(completed)))
}
/// POST /api/v1/workflow/tasks/{id}/delegate
///
/// Reassigns a pending task to another user. Requires the
/// `workflow:delegate` permission.
pub async fn delegate_task<S>(
    State(state): State<WorkflowState>,
    Extension(ctx): Extension<TenantContext>,
    Path(id): Path<Uuid>,
    Json(req): Json<DelegateTaskReq>,
) -> Result<Json<ApiResponse<TaskResp>>, AppError>
where
    WorkflowState: FromRef<S>,
    S: Clone + Send + Sync + 'static,
{
    require_permission(&ctx, "workflow:delegate")?;
    let delegated = TaskService::delegate(id, ctx.tenant_id, ctx.user_id, &req, &state.db).await?;
    Ok(Json(ApiResponse::ok(delegated)))
}

View File

@@ -1 +1,16 @@
// erp-workflow: workflow engine module (Phase 4).
//
// Provides process definitions, process instance management, task
// approval, a token-driven execution engine, and support for the
// visual process designer.
pub mod dto;            // request/response data transfer objects
pub mod engine;         // parser, expression evaluator, flow executor
pub mod entity;         // SeaORM entities for the workflow tables
pub mod error;          // WorkflowError / WorkflowResult
pub mod handler;        // axum HTTP handlers
pub mod module;         // ErpModule integration (routing, lifecycle)
pub mod service;        // definition / instance / task services
pub mod workflow_state; // state extracted from AppState via FromRef

// Convenience re-exports for consumers of the crate.
pub use module::WorkflowModule;
pub use workflow_state::WorkflowState;

View File

@@ -0,0 +1,121 @@
use axum::Router;
use axum::routing::{get, post};
use uuid::Uuid;
use erp_core::error::AppResult;
use erp_core::events::EventBus;
use erp_core::module::ErpModule;
use crate::handler::{
definition_handler, instance_handler, task_handler,
};
/// Workflow module implementing the `ErpModule` trait.
///
/// Manages workflow definitions, process instances, tasks,
/// and the token-driven execution engine.
pub struct WorkflowModule;
impl WorkflowModule {
    /// Creates the module value (a unit struct; no configuration needed).
    pub fn new() -> Self {
        Self
    }
    /// Build protected (authenticated) routes for the workflow module.
    ///
    /// Route groups: definition CRUD + publish, instance lifecycle
    /// (start/list/get/suspend/terminate), and task actions
    /// (pending/completed listings, complete, delegate).
    ///
    /// NOTE(review): "protected" implies the server mounts these behind its
    /// auth middleware (handlers expect a `TenantContext` extension) —
    /// confirm at the call site.
    pub fn protected_routes<S>() -> Router<S>
    where
        crate::workflow_state::WorkflowState: axum::extract::FromRef<S>,
        S: Clone + Send + Sync + 'static,
    {
        Router::new()
            // Definition routes
            .route(
                "/workflow/definitions",
                get(definition_handler::list_definitions)
                    .post(definition_handler::create_definition),
            )
            .route(
                "/workflow/definitions/{id}",
                get(definition_handler::get_definition)
                    .put(definition_handler::update_definition),
            )
            .route(
                "/workflow/definitions/{id}/publish",
                post(definition_handler::publish_definition),
            )
            // Instance routes
            .route(
                "/workflow/instances",
                post(instance_handler::start_instance)
                    .get(instance_handler::list_instances),
            )
            .route(
                "/workflow/instances/{id}",
                get(instance_handler::get_instance),
            )
            .route(
                "/workflow/instances/{id}/suspend",
                post(instance_handler::suspend_instance),
            )
            .route(
                "/workflow/instances/{id}/terminate",
                post(instance_handler::terminate_instance),
            )
            // Task routes
            .route(
                "/workflow/tasks/pending",
                get(task_handler::list_pending_tasks),
            )
            .route(
                "/workflow/tasks/completed",
                get(task_handler::list_completed_tasks),
            )
            .route(
                "/workflow/tasks/{id}/complete",
                post(task_handler::complete_task),
            )
            .route(
                "/workflow/tasks/{id}/delegate",
                post(task_handler::delegate_task),
            )
    }
}
impl Default for WorkflowModule {
fn default() -> Self {
Self::new()
}
}
#[async_trait::async_trait]
impl ErpModule for WorkflowModule {
    /// Stable module identifier used for registration and dependency lookup.
    fn name(&self) -> &str {
        "workflow"
    }
    /// Module version, taken from the crate version at compile time.
    fn version(&self) -> &str {
        env!("CARGO_PKG_VERSION")
    }
    /// Modules that must be available before this one ("auth" supplies the
    /// tenant/permission context the handlers rely on).
    fn dependencies(&self) -> Vec<&str> {
        vec!["auth"]
    }
    // NOTE(review): routes appear to be mounted via
    // `WorkflowModule::protected_routes` by the server rather than through
    // this hook, so the router is deliberately returned unchanged — confirm.
    fn register_routes(&self, router: Router) -> Router {
        router
    }
    /// No event handlers are registered by this module yet.
    fn register_event_handlers(&self, _bus: &EventBus) {}
    /// Tenant provisioning hook — currently a no-op for the workflow module.
    async fn on_tenant_created(&self, _tenant_id: Uuid) -> AppResult<()> {
        Ok(())
    }
    /// Tenant teardown hook — currently a no-op for the workflow module.
    async fn on_tenant_deleted(&self, _tenant_id: Uuid) -> AppResult<()> {
        Ok(())
    }
    /// Enables downcasting from `&dyn ErpModule` to the concrete type.
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

View File

@@ -0,0 +1,269 @@
use chrono::Utc;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, Set,
};
use uuid::Uuid;
use crate::dto::{
CreateProcessDefinitionReq, ProcessDefinitionResp, UpdateProcessDefinitionReq,
};
use crate::engine::parser;
use crate::entity::process_definition;
use crate::error::{WorkflowError, WorkflowResult};
use erp_core::events::EventBus;
use erp_core::types::Pagination;
/// CRUD service for process definitions (draft → published lifecycle).
pub struct DefinitionService;
impl DefinitionService {
    /// Returns one page of the tenant's non-deleted definitions plus the
    /// total (pre-pagination) row count.
    pub async fn list(
        tenant_id: Uuid,
        pagination: &Pagination,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<(Vec<ProcessDefinitionResp>, u64)> {
        let paginator = process_definition::Entity::find()
            .filter(process_definition::Column::TenantId.eq(tenant_id))
            .filter(process_definition::Column::DeletedAt.is_null())
            .paginate(db, pagination.limit());
        let total = paginator
            .num_items()
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        // SeaORM pages are zero-based; the API's `page` is one-based.
        let page_index = pagination.page.unwrap_or(1).saturating_sub(1) as u64;
        let models = paginator
            .fetch_page(page_index)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        let resps: Vec<ProcessDefinitionResp> = models.iter().map(Self::model_to_resp).collect();
        Ok((resps, total))
    }
    /// Fetches a single, non-deleted definition scoped to the tenant.
    pub async fn get_by_id(
        id: Uuid,
        tenant_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<ProcessDefinitionResp> {
        let model = process_definition::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|m| m.tenant_id == tenant_id && m.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程定义不存在: {id}")))?;
        Ok(Self::model_to_resp(&model))
    }
    /// Creates a new definition in `draft` state (version 1) after
    /// validating the diagram, and publishes a
    /// `process_definition.created` event.
    pub async fn create(
        tenant_id: Uuid,
        operator_id: Uuid,
        req: &CreateProcessDefinitionReq,
        db: &sea_orm::DatabaseConnection,
        event_bus: &EventBus,
    ) -> WorkflowResult<ProcessDefinitionResp> {
        // Reject structurally invalid flow graphs up front.
        parser::parse_and_validate(&req.nodes, &req.edges)?;
        let now = Utc::now();
        let id = Uuid::now_v7();
        let nodes_json = serde_json::to_value(&req.nodes)
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        let edges_json = serde_json::to_value(&req.edges)
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        let model = process_definition::ActiveModel {
            id: Set(id),
            tenant_id: Set(tenant_id),
            name: Set(req.name.clone()),
            key: Set(req.key.clone()),
            version: Set(1),
            category: Set(req.category.clone()),
            description: Set(req.description.clone()),
            nodes: Set(nodes_json.clone()),
            edges: Set(edges_json.clone()),
            status: Set("draft".to_string()),
            created_at: Set(now),
            updated_at: Set(now),
            created_by: Set(operator_id),
            updated_by: Set(operator_id),
            deleted_at: Set(None),
        };
        model
            .insert(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        event_bus.publish(erp_core::events::DomainEvent::new(
            "process_definition.created",
            tenant_id,
            serde_json::json!({ "definition_id": id, "key": req.key }),
        ));
        // Reuse the JSON serialized (and persisted) above instead of
        // re-serializing the request with `unwrap_or_default()`, which
        // silently substituted an empty value on failure.
        Ok(ProcessDefinitionResp {
            id,
            name: req.name.clone(),
            key: req.key.clone(),
            version: 1,
            category: req.category.clone(),
            description: req.description.clone(),
            nodes: nodes_json,
            edges: edges_json,
            status: "draft".to_string(),
            created_at: now,
            updated_at: now,
        })
    }
    /// Updates a definition (only `draft` definitions are editable).
    ///
    /// When either nodes or edges change, the *effective* diagram — new
    /// parts combined with the stored counterpart — is re-validated, so a
    /// partial update (e.g. nodes without edges) can no longer persist an
    /// invalid graph. (Previously validation only ran when both were
    /// supplied together.)
    pub async fn update(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        req: &UpdateProcessDefinitionReq,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<ProcessDefinitionResp> {
        let model = process_definition::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|m| m.tenant_id == tenant_id && m.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程定义不存在: {id}")))?;
        if model.status != "draft" {
            return Err(WorkflowError::InvalidState(
                "只有 draft 状态的流程定义可以编辑".to_string(),
            ));
        }
        if req.nodes.is_some() || req.edges.is_some() {
            // Fall back to the stored side of the diagram for whichever half
            // the request did not supply.
            let stored_nodes: Vec<crate::dto::NodeDef>;
            let nodes_ref = match &req.nodes {
                Some(n) => n,
                None => {
                    stored_nodes = serde_json::from_value(model.nodes.clone())
                        .map_err(|e| WorkflowError::InvalidDiagram(format!("节点数据无效: {e}")))?;
                    &stored_nodes
                }
            };
            let stored_edges: Vec<crate::dto::EdgeDef>;
            let edges_ref = match &req.edges {
                Some(e) => e,
                None => {
                    stored_edges = serde_json::from_value(model.edges.clone())
                        .map_err(|e| WorkflowError::InvalidDiagram(format!("连线数据无效: {e}")))?;
                    &stored_edges
                }
            };
            parser::parse_and_validate(nodes_ref, edges_ref)?;
        }
        let mut active: process_definition::ActiveModel = model.into();
        if let Some(name) = &req.name {
            active.name = Set(name.clone());
        }
        if let Some(category) = &req.category {
            active.category = Set(Some(category.clone()));
        }
        if let Some(description) = &req.description {
            active.description = Set(Some(description.clone()));
        }
        if let Some(nodes) = &req.nodes {
            let nodes_json = serde_json::to_value(nodes)
                .map_err(|e| WorkflowError::Validation(e.to_string()))?;
            active.nodes = Set(nodes_json);
        }
        if let Some(edges) = &req.edges {
            let edges_json = serde_json::to_value(edges)
                .map_err(|e| WorkflowError::Validation(e.to_string()))?;
            active.edges = Set(edges_json);
        }
        active.updated_at = Set(Utc::now());
        active.updated_by = Set(operator_id);
        let updated = active
            .update(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        Ok(Self::model_to_resp(&updated))
    }
    /// Publishes a definition (draft → published) after re-validating its
    /// stored diagram, then emits a `process_definition.published` event.
    pub async fn publish(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        db: &sea_orm::DatabaseConnection,
        event_bus: &EventBus,
    ) -> WorkflowResult<ProcessDefinitionResp> {
        let model = process_definition::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|m| m.tenant_id == tenant_id && m.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程定义不存在: {id}")))?;
        if model.status != "draft" {
            return Err(WorkflowError::InvalidState(
                "只有 draft 状态的流程定义可以发布".to_string(),
            ));
        }
        // Re-validate the stored diagram before it becomes startable.
        let nodes: Vec<crate::dto::NodeDef> = serde_json::from_value(model.nodes.clone())
            .map_err(|e| WorkflowError::InvalidDiagram(format!("节点数据无效: {e}")))?;
        let edges: Vec<crate::dto::EdgeDef> = serde_json::from_value(model.edges.clone())
            .map_err(|e| WorkflowError::InvalidDiagram(format!("连线数据无效: {e}")))?;
        parser::parse_and_validate(&nodes, &edges)?;
        let mut active: process_definition::ActiveModel = model.into();
        active.status = Set("published".to_string());
        active.updated_at = Set(Utc::now());
        active.updated_by = Set(operator_id);
        let updated = active
            .update(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        event_bus.publish(erp_core::events::DomainEvent::new(
            "process_definition.published",
            tenant_id,
            serde_json::json!({ "definition_id": id }),
        ));
        Ok(Self::model_to_resp(&updated))
    }
    /// Soft-deletes a definition by stamping `deleted_at`.
    pub async fn delete(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<()> {
        let model = process_definition::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|m| m.tenant_id == tenant_id && m.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程定义不存在: {id}")))?;
        let mut active: process_definition::ActiveModel = model.into();
        active.deleted_at = Set(Some(Utc::now()));
        active.updated_at = Set(Utc::now());
        active.updated_by = Set(operator_id);
        active
            .update(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        Ok(())
    }
    /// Maps a database model to the API response shape.
    fn model_to_resp(m: &process_definition::Model) -> ProcessDefinitionResp {
        ProcessDefinitionResp {
            id: m.id,
            name: m.name.clone(),
            key: m.key.clone(),
            version: m.version,
            category: m.category.clone(),
            description: m.description.clone(),
            nodes: m.nodes.clone(),
            edges: m.edges.clone(),
            status: m.status.clone(),
            created_at: m.created_at,
            updated_at: m.updated_at,
        }
    }
}

View File

@@ -0,0 +1,353 @@
use std::collections::HashMap;
use chrono::Utc;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, Set,
TransactionTrait, ConnectionTrait,
};
use uuid::Uuid;
use crate::dto::{ProcessInstanceResp, StartInstanceReq, TokenResp};
use crate::engine::executor::FlowExecutor;
use crate::engine::parser;
use crate::entity::{process_definition, process_instance, process_variable, token};
use crate::error::{WorkflowError, WorkflowResult};
use erp_core::events::EventBus;
use erp_core::types::Pagination;
/// Process instance service: start, list, inspect, suspend, terminate.
pub struct InstanceService;
impl InstanceService {
    /// Starts a new process instance from a published definition.
    ///
    /// Creates the instance row, persists the initial variables, and runs
    /// the flow executor — all inside one transaction — then publishes a
    /// `process_instance.started` event and returns the fresh instance
    /// with its active tokens.
    pub async fn start(
        tenant_id: Uuid,
        operator_id: Uuid,
        req: &StartInstanceReq,
        db: &sea_orm::DatabaseConnection,
        event_bus: &EventBus,
    ) -> WorkflowResult<ProcessInstanceResp> {
        // Look up the definition, scoped to the tenant.
        let definition = process_definition::Entity::find_by_id(req.definition_id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|d| d.tenant_id == tenant_id && d.deleted_at.is_none())
            .ok_or_else(|| {
                WorkflowError::NotFound(format!("流程定义不存在: {}", req.definition_id))
            })?;
        if definition.status != "published" {
            return Err(WorkflowError::InvalidState(
                "只能启动已发布的流程定义".to_string(),
            ));
        }
        // Parse and validate the stored diagram into an executable graph.
        let nodes: Vec<crate::dto::NodeDef> = serde_json::from_value(definition.nodes.clone())
            .map_err(|e| WorkflowError::InvalidDiagram(format!("节点数据无效: {e}")))?;
        let edges: Vec<crate::dto::EdgeDef> = serde_json::from_value(definition.edges.clone())
            .map_err(|e| WorkflowError::InvalidDiagram(format!("连线数据无效: {e}")))?;
        let graph = parser::parse_and_validate(&nodes, &edges)?;
        // Collect the initial variables for the executor. Their declared
        // types are persisted separately inside the transaction below, so
        // only name → value is needed here. (The previous `var_type`
        // binding in this loop was unused.)
        let mut variables = HashMap::new();
        if let Some(vars) = &req.variables {
            for v in vars {
                variables.insert(v.name.clone(), v.value.clone());
            }
        }
        let instance_id = Uuid::now_v7();
        let now = Utc::now();
        // Copies moved into the transaction closure (Uuid is `Copy`).
        let instance_id_clone = instance_id;
        let tenant_id_clone = tenant_id;
        let operator_id_clone = operator_id;
        let business_key = req.business_key.clone();
        let definition_id = definition.id;
        let definition_name = definition.name.clone();
        let vars_to_save = req.variables.clone();
        db.transaction::<_, (), WorkflowError>(|txn| {
            let graph = graph.clone();
            let variables = variables.clone();
            Box::pin(async move {
                // Create the instance row in `running` state.
                let instance = process_instance::ActiveModel {
                    id: Set(instance_id_clone),
                    tenant_id: Set(tenant_id_clone),
                    definition_id: Set(definition_id),
                    business_key: Set(business_key),
                    status: Set("running".to_string()),
                    started_by: Set(operator_id_clone),
                    started_at: Set(now),
                    completed_at: Set(None),
                    created_at: Set(now),
                    updated_at: Set(now),
                    created_by: Set(operator_id_clone),
                    updated_by: Set(operator_id_clone),
                    deleted_at: Set(None),
                    version: Set(1),
                };
                instance.insert(txn).await.map_err(|e| WorkflowError::Validation(e.to_string()))?;
                // Persist the initial variables with their declared types.
                if let Some(vars) = vars_to_save {
                    for v in vars {
                        Self::save_variable(
                            instance_id_clone,
                            tenant_id_clone,
                            &v.name,
                            v.var_type.as_deref().unwrap_or("string"),
                            &v.value,
                            txn,
                        )
                        .await?;
                    }
                }
                // Kick off the token-driven executor from the start node.
                FlowExecutor::start(
                    instance_id_clone,
                    tenant_id_clone,
                    &graph,
                    &variables,
                    txn,
                )
                .await?;
                Ok(())
            })
        })
        .await?;
        event_bus.publish(erp_core::events::DomainEvent::new(
            "process_instance.started",
            tenant_id,
            serde_json::json!({ "instance_id": instance_id, "definition_id": definition.id }),
        ));
        // Re-read the committed instance so the response reflects whatever
        // the executor did (it may already have advanced or completed).
        let instance = process_instance::Entity::find_by_id(instance_id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .ok_or_else(|| WorkflowError::NotFound(format!("流程实例不存在: {instance_id}")))?;
        let active_tokens = Self::get_active_tokens(instance_id, db).await?;
        Ok(ProcessInstanceResp {
            id: instance.id,
            definition_id: instance.definition_id,
            definition_name: Some(definition_name),
            business_key: instance.business_key,
            status: instance.status,
            started_by: instance.started_by,
            started_at: instance.started_at,
            completed_at: instance.completed_at,
            created_at: instance.created_at,
            active_tokens,
        })
    }
    /// Returns one page of the tenant's instances plus the total count.
    ///
    /// NOTE(review): the per-row token and definition-name lookups are N+1
    /// queries; acceptable for small pages, worth batching if listings grow.
    pub async fn list(
        tenant_id: Uuid,
        pagination: &Pagination,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<(Vec<ProcessInstanceResp>, u64)> {
        let paginator = process_instance::Entity::find()
            .filter(process_instance::Column::TenantId.eq(tenant_id))
            .filter(process_instance::Column::DeletedAt.is_null())
            .paginate(db, pagination.limit());
        let total = paginator
            .num_items()
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        // SeaORM pages are zero-based; the API's `page` is one-based.
        let page_index = pagination.page.unwrap_or(1).saturating_sub(1) as u64;
        let models = paginator
            .fetch_page(page_index)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        let mut resps = Vec::new();
        for m in &models {
            let active_tokens = Self::get_active_tokens(m.id, db).await.unwrap_or_default();
            let def_name = process_definition::Entity::find_by_id(m.definition_id)
                .one(db)
                .await
                .ok()
                .flatten()
                .map(|d| d.name);
            resps.push(ProcessInstanceResp {
                id: m.id,
                definition_id: m.definition_id,
                definition_name: def_name,
                business_key: m.business_key.clone(),
                status: m.status.clone(),
                started_by: m.started_by,
                started_at: m.started_at,
                completed_at: m.completed_at,
                created_at: m.created_at,
                active_tokens,
            });
        }
        Ok((resps, total))
    }
    /// Fetches one instance with its definition name and active tokens.
    pub async fn get_by_id(
        id: Uuid,
        tenant_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<ProcessInstanceResp> {
        let instance = process_instance::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|i| i.tenant_id == tenant_id && i.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程实例不存在: {id}")))?;
        let def_name = process_definition::Entity::find_by_id(instance.definition_id)
            .one(db)
            .await
            .ok()
            .flatten()
            .map(|d| d.name);
        let active_tokens = Self::get_active_tokens(id, db).await?;
        Ok(ProcessInstanceResp {
            id: instance.id,
            definition_id: instance.definition_id,
            definition_name: def_name,
            business_key: instance.business_key,
            status: instance.status,
            started_by: instance.started_by,
            started_at: instance.started_at,
            completed_at: instance.completed_at,
            created_at: instance.created_at,
            active_tokens,
        })
    }
    /// Suspends a running instance (running → suspended).
    pub async fn suspend(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<()> {
        Self::change_status(id, tenant_id, operator_id, "running", "suspended", db).await
    }
    /// Terminates a running instance (running → terminated).
    pub async fn terminate(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<()> {
        Self::change_status(id, tenant_id, operator_id, "running", "terminated", db).await
    }
    /// Guarded status transition: errors unless the instance is currently
    /// in `from_status`.
    async fn change_status(
        id: Uuid,
        tenant_id: Uuid,
        operator_id: Uuid,
        from_status: &str,
        to_status: &str,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<()> {
        let instance = process_instance::Entity::find_by_id(id)
            .one(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?
            .filter(|i| i.tenant_id == tenant_id && i.deleted_at.is_none())
            .ok_or_else(|| WorkflowError::NotFound(format!("流程实例不存在: {id}")))?;
        if instance.status != from_status {
            return Err(WorkflowError::InvalidState(format!(
                "流程实例状态不是 {},无法变更为 {}",
                from_status, to_status
            )));
        }
        let mut active: process_instance::ActiveModel = instance.into();
        active.status = Set(to_status.to_string());
        active.updated_at = Set(Utc::now());
        active.updated_by = Set(operator_id);
        active
            .update(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        Ok(())
    }
    /// Lists the instance's tokens that are still in `active` state.
    pub async fn get_active_tokens(
        instance_id: Uuid,
        db: &sea_orm::DatabaseConnection,
    ) -> WorkflowResult<Vec<TokenResp>> {
        let tokens = token::Entity::find()
            .filter(token::Column::InstanceId.eq(instance_id))
            .filter(token::Column::Status.eq("active"))
            .all(db)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        Ok(tokens
            .iter()
            .map(|t| TokenResp {
                id: t.id,
                node_id: t.node_id.clone(),
                status: t.status.clone(),
                created_at: t.created_at,
            })
            .collect())
    }
    /// Persists a single process variable, storing the JSON value in the
    /// typed column selected by `var_type`.
    pub async fn save_variable(
        instance_id: Uuid,
        tenant_id: Uuid,
        name: &str,
        var_type: &str,
        value: &serde_json::Value,
        txn: &impl ConnectionTrait,
    ) -> WorkflowResult<()> {
        let id = Uuid::now_v7();
        // NOTE(review): no var_type currently yields a date, so `value_date`
        // is always `None`; it stays in the tuple so a future "date" arm
        // slots in naturally.
        let (value_string, value_number, value_boolean, value_date): (Option<String>, Option<f64>, Option<bool>, Option<chrono::DateTime<Utc>>) = match var_type {
            "string" => (value.as_str().map(|s| s.to_string()), None, None, None),
            "number" => (None, value.as_f64(), None, None),
            "boolean" => (None, None, value.as_bool(), None),
            // Unknown types fall back to the raw JSON text.
            _ => (Some(value.to_string()), None, None, None),
        };
        let model = process_variable::ActiveModel {
            id: Set(id),
            tenant_id: Set(tenant_id),
            instance_id: Set(instance_id),
            name: Set(name.to_string()),
            var_type: Set(var_type.to_string()),
            value_string: Set(value_string),
            value_number: Set(value_number),
            value_boolean: Set(value_boolean),
            // Was hard-coded `Set(None)`, leaving the computed binding dead;
            // wire the tuple value through instead.
            value_date: Set(value_date),
        };
        model
            .insert(txn)
            .await
            .map_err(|e| WorkflowError::Validation(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,3 @@
//! Service layer for the workflow module: definition CRUD and lifecycle,
//! instance management, and task handling.

pub mod definition_service;
pub mod instance_service;
pub mod task_service;

View File

@@ -0,0 +1,336 @@
use std::collections::HashMap;
use chrono::Utc;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, Set,
TransactionTrait,
};
use uuid::Uuid;
use crate::dto::{CompleteTaskReq, DelegateTaskReq, TaskResp};
use crate::engine::executor::FlowExecutor;
use crate::engine::parser;
use crate::entity::{process_definition, process_instance, task};
use crate::error::{WorkflowError, WorkflowResult};
use erp_core::events::EventBus;
use erp_core::types::Pagination;
/// 任务服务。
pub struct TaskService;
impl TaskService {
/// 查询当前用户的待办任务。
pub async fn list_pending(
tenant_id: Uuid,
assignee_id: Uuid,
pagination: &Pagination,
db: &sea_orm::DatabaseConnection,
) -> WorkflowResult<(Vec<TaskResp>, u64)> {
let paginator = task::Entity::find()
.filter(task::Column::TenantId.eq(tenant_id))
.filter(task::Column::AssigneeId.eq(assignee_id))
.filter(task::Column::Status.eq("pending"))
.filter(task::Column::DeletedAt.is_null())
.paginate(db, pagination.limit());
let total = paginator
.num_items()
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
let page_index = pagination.page.unwrap_or(1).saturating_sub(1) as u64;
let models = paginator
.fetch_page(page_index)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
let mut resps = Vec::new();
for m in &models {
let mut resp = Self::model_to_resp(m);
// 附加实例信息
if let Some(inst) = process_instance::Entity::find_by_id(m.instance_id)
.one(db)
.await
.ok()
.flatten()
{
resp.business_key = inst.business_key;
if let Some(def) = process_definition::Entity::find_by_id(inst.definition_id)
.one(db)
.await
.ok()
.flatten()
{
resp.definition_name = Some(def.name);
}
}
resps.push(resp);
}
Ok((resps, total))
}
/// 查询当前用户的已办任务。
pub async fn list_completed(
tenant_id: Uuid,
assignee_id: Uuid,
pagination: &Pagination,
db: &sea_orm::DatabaseConnection,
) -> WorkflowResult<(Vec<TaskResp>, u64)> {
let paginator = task::Entity::find()
.filter(task::Column::TenantId.eq(tenant_id))
.filter(task::Column::AssigneeId.eq(assignee_id))
.filter(task::Column::Status.is_in(["approved", "rejected", "delegated"]))
.filter(task::Column::DeletedAt.is_null())
.paginate(db, pagination.limit());
let total = paginator
.num_items()
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
let page_index = pagination.page.unwrap_or(1).saturating_sub(1) as u64;
let models = paginator
.fetch_page(page_index)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
let mut resps = Vec::new();
for m in &models {
let mut resp = Self::model_to_resp(m);
if let Some(inst) = process_instance::Entity::find_by_id(m.instance_id)
.one(db)
.await
.ok()
.flatten()
{
resp.business_key = inst.business_key;
if let Some(def) = process_definition::Entity::find_by_id(inst.definition_id)
.one(db)
.await
.ok()
.flatten()
{
resp.definition_name = Some(def.name);
}
}
resps.push(resp);
}
Ok((resps, total))
}
/// 完成任务:更新任务状态 + 推进 token。
pub async fn complete(
id: Uuid,
tenant_id: Uuid,
operator_id: Uuid,
req: &CompleteTaskReq,
db: &sea_orm::DatabaseConnection,
event_bus: &EventBus,
) -> WorkflowResult<TaskResp> {
let task_model = task::Entity::find_by_id(id)
.one(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?
.filter(|t| t.tenant_id == tenant_id && t.deleted_at.is_none())
.ok_or_else(|| WorkflowError::NotFound(format!("任务不存在: {id}")))?;
if task_model.status != "pending" {
return Err(WorkflowError::InvalidState(
"任务状态不是 pending无法完成".to_string(),
));
}
let instance_id = task_model.instance_id;
let token_id = task_model.token_id;
// 获取流程定义和流程图
let instance = process_instance::Entity::find_by_id(instance_id)
.one(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?
.filter(|i| i.tenant_id == tenant_id && i.deleted_at.is_none())
.ok_or_else(|| WorkflowError::NotFound(format!("流程实例不存在: {instance_id}")))?;
let definition = process_definition::Entity::find_by_id(instance.definition_id)
.one(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?
.filter(|d| d.tenant_id == tenant_id && d.deleted_at.is_none())
.ok_or_else(|| {
WorkflowError::NotFound(format!("流程定义不存在: {}", instance.definition_id))
})?;
let nodes: Vec<crate::dto::NodeDef> =
serde_json::from_value(definition.nodes.clone()).map_err(|e| {
WorkflowError::InvalidDiagram(format!("节点数据无效: {e}"))
})?;
let edges: Vec<crate::dto::EdgeDef> =
serde_json::from_value(definition.edges.clone()).map_err(|e| {
WorkflowError::InvalidDiagram(format!("连线数据无效: {e}"))
})?;
let graph = parser::parse_and_validate(&nodes, &edges)?;
// 准备变量(从 req.form_data 中提取)
let mut variables = HashMap::new();
if let Some(form) = &req.form_data {
if let Some(obj) = form.as_object() {
for (k, v) in obj {
variables.insert(k.clone(), v.clone());
}
}
}
// 在事务中更新任务 + 推进 token
let now = Utc::now();
let outcome = req.outcome.clone();
let form_data = req.form_data.clone();
db.transaction::<_, (), WorkflowError>(|txn| {
let graph = graph.clone();
let variables = variables.clone();
let task_model = task_model.clone();
Box::pin(async move {
// 更新任务状态
let mut active: task::ActiveModel = task_model.clone().into();
active.status = Set("completed".to_string());
active.outcome = Set(Some(outcome));
active.form_data = Set(form_data);
active.completed_at = Set(Some(now));
active.updated_at = Set(now);
active.updated_by = Set(operator_id);
active
.update(txn)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
// 推进 token
FlowExecutor::advance(
token_id,
instance_id,
tenant_id,
&graph,
&variables,
txn,
)
.await?;
Ok(())
})
})
.await?;
event_bus.publish(erp_core::events::DomainEvent::new(
"task.completed",
tenant_id,
serde_json::json!({ "task_id": id, "outcome": req.outcome }),
));
// 重新查询任务
let updated = task::Entity::find_by_id(id)
.one(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?
.ok_or_else(|| WorkflowError::NotFound(format!("任务不存在: {id}")))?;
Ok(Self::model_to_resp(&updated))
}
/// 委派任务给其他人。
pub async fn delegate(
id: Uuid,
tenant_id: Uuid,
operator_id: Uuid,
req: &DelegateTaskReq,
db: &sea_orm::DatabaseConnection,
) -> WorkflowResult<TaskResp> {
let task_model = task::Entity::find_by_id(id)
.one(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?
.filter(|t| t.tenant_id == tenant_id && t.deleted_at.is_none())
.ok_or_else(|| WorkflowError::NotFound(format!("任务不存在: {id}")))?;
if task_model.status != "pending" {
return Err(WorkflowError::InvalidState(
"任务状态不是 pending无法委派".to_string(),
));
}
let mut active: task::ActiveModel = task_model.into();
active.assignee_id = Set(Some(req.delegate_to));
active.updated_at = Set(Utc::now());
active.updated_by = Set(operator_id);
let updated = active
.update(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
Ok(Self::model_to_resp(&updated))
}
/// 创建任务记录(由执行引擎调用)。
pub async fn create_task(
instance_id: Uuid,
tenant_id: Uuid,
token_id: Uuid,
node_id: &str,
node_name: Option<&str>,
assignee_id: Option<Uuid>,
candidate_groups: Option<Vec<String>>,
db: &sea_orm::DatabaseConnection,
) -> WorkflowResult<Uuid> {
let id = Uuid::now_v7();
let now = Utc::now();
let system_user = Uuid::nil();
let model = task::ActiveModel {
id: Set(id),
tenant_id: Set(tenant_id),
instance_id: Set(instance_id),
token_id: Set(token_id),
node_id: Set(node_id.to_string()),
node_name: Set(node_name.map(|s| s.to_string())),
assignee_id: Set(assignee_id),
candidate_groups: Set(candidate_groups.map(|g| serde_json::to_value(g).unwrap_or_default())),
status: Set("pending".to_string()),
outcome: Set(None),
form_data: Set(None),
due_date: Set(None),
completed_at: Set(None),
created_at: Set(now),
updated_at: Set(now),
created_by: Set(system_user),
updated_by: Set(system_user),
deleted_at: Set(None),
version: Set(1),
};
model
.insert(db)
.await
.map_err(|e| WorkflowError::Validation(e.to_string()))?;
Ok(id)
}
fn model_to_resp(m: &task::Model) -> TaskResp {
TaskResp {
id: m.id,
instance_id: m.instance_id,
token_id: m.token_id,
node_id: m.node_id.clone(),
node_name: m.node_name.clone(),
assignee_id: m.assignee_id,
candidate_groups: m.candidate_groups.clone(),
status: m.status.clone(),
outcome: m.outcome.clone(),
form_data: m.form_data.clone(),
due_date: m.due_date,
completed_at: m.completed_at,
created_at: m.created_at,
definition_name: None,
business_key: None,
}
}
}

View File

@@ -0,0 +1,11 @@
use erp_core::events::EventBus;
use sea_orm::DatabaseConnection;
/// Workflow-specific state extracted from the server's AppState via `FromRef`.
///
/// Contains the database connection and event bus needed by workflow handlers.
#[derive(Clone)]
pub struct WorkflowState {
    // Shared SeaORM database connection used by all workflow services.
    pub db: DatabaseConnection,
    // Event bus on which workflow domain events
    // (definition created/published, instance started, task completed)
    // are published.
    pub event_bus: EventBus,
}