move rhailib to herolib
rhailib/_archive/orchestrator/Cargo.toml (new file, 51 lines)
@@ -0,0 +1,51 @@
[package]
name = "orchestrator"
version = "0.1.0"
edition = "2021"

[dependencies]
# Core async runtime
tokio = { version = "1", features = ["macros", "rt-multi-thread", "sync", "time"] }
async-trait = "0.1"
futures = "0.3"
futures-util = "0.3"

# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

# Error handling
thiserror = "1.0"

# Unique identifiers
uuid = { version = "1.6", features = ["v4", "serde"] }

# Time handling
chrono = { version = "0.4", features = ["serde"] }

# HTTP client
reqwest = { version = "0.11", features = ["json"] }

# WebSocket client
tokio-tungstenite = "0.20"

# Rhai scripting
rhai = "1.21.0"

# Database and models
heromodels = { path = "/Users/timurgordon/code/git.ourworld.tf/herocode/db/heromodels" }
heromodels_core = { path = "/Users/timurgordon/code/git.ourworld.tf/herocode/db/heromodels_core" }

# DSL integration for flow models
rhailib_dsl = { path = "../dsl" }

# Dispatcher integration
rhai_dispatcher = { path = "../dispatcher" }

# Logging
log = "0.4"
tracing = "0.1"
tracing-subscriber = "0.3"

[dev-dependencies]
tokio-test = "0.4"
rhailib/_archive/orchestrator/README.md (new file, 320 lines)
@@ -0,0 +1,320 @@
# Rationale for Orchestrator

We may have scripts that run asynchronously, depend on human input, or depend on other scripts completing. We want to be able to implement high-level workflows of Rhai scripts.

## Design

Directed Acyclic Graphs (DAGs) are a natural fit for representing workflows.

## Requirements

1. Uses Directed Acyclic Graphs (DAGs) to represent workflows.
2. Each step in the workflow defines the script to execute, the inputs to pass to it, and the outputs to expect from it.
3. Simplicity: step outcomes are binary (success or failure), and input/output parameters are simple key-value pairs.
4. Multiple steps can depend on the same step.
5. Scripts are executed using [RhaiDispatcher](../dispatcher/README.md). (A minimal flow satisfying these requirements is sketched below.)
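A minimal sketch of a two-step flow meeting these requirements, using the builder API that appears in `examples/basic_workflow.rs` in this crate (the step id of `1` in the script variable name is an assumption, matching the convention used in the examples):

```rust
use orchestrator::{OrchestratedFlow, OrchestratedFlowStep};

/// Build a two-step flow: `double` runs only after `sum` completes and
/// reads its output via the `dep_<id>_result` convention (explained below).
fn build_demo_flow() -> OrchestratedFlow {
    let sum = OrchestratedFlowStep::new("sum")
        .script("let result = 1 + 2;")
        .context_id("demo")
        .worker_id("worker_1");

    let double = OrchestratedFlowStep::new("double")
        // assumes `sum.id()` is 1, as in the examples in this repo
        .script("let result = dep_1_result * 2;")
        .depends_on(sum.id())
        .context_id("demo")
        .worker_id("worker_1");

    OrchestratedFlow::new("demo_flow")
        .add_step(sum)
        .add_step(double)
}
```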
## Architecture

The Orchestrator is a simple DAG-based workflow execution system that extends the heromodels flow structures to support workflows with dependencies and distributed script execution.

### Core Components

```mermaid
graph TB
    subgraph "Orchestrator"
        O[Orchestrator] --> RE[RhaiExecutor Trait]
        O --> DB[(Database)]
    end

    subgraph "Executor Implementations"
        RE --> RD[RhaiDispatcher]
        RE --> WS[WebSocketClient]
        RE --> HTTP[HttpClient]
        RE --> LOCAL[LocalExecutor]
    end

    subgraph "Data Models (heromodels)"
        F[Flow] --> FS[FlowStep]
        FS --> SR[SignatureRequirement]
    end

    subgraph "Infrastructure"
        RD --> RQ[Redis Queues]
        RD --> W[Workers]
        WS --> WSS[WebSocket Server]
        HTTP --> API[REST API]
    end
```
### Execution Abstraction

The orchestrator uses a trait-based approach for script execution, allowing different execution backends:

#### RhaiExecutor Trait
```rust
use async_trait::async_trait;
use rhai_dispatcher::{PlayRequestBuilder, RhaiTaskDetails, RhaiDispatcherError};

#[async_trait]
pub trait RhaiExecutor {
    async fn call(&self, request: PlayRequestBuilder<'_>) -> Result<RhaiTaskDetails, RhaiDispatcherError>;
}
```

#### Executor Implementations

**RhaiDispatcher Implementation:**
```rust
pub struct DispatcherExecutor {
    dispatcher: RhaiDispatcher,
}

#[async_trait]
impl RhaiExecutor for DispatcherExecutor {
    async fn call(&self, request: PlayRequestBuilder<'_>) -> Result<RhaiTaskDetails, RhaiDispatcherError> {
        // Use RhaiDispatcher to execute the script via Redis queues
        request.await_response().await
    }
}
```

**WebSocket Client Implementation:**
```rust
pub struct WebSocketExecutor {
    ws_client: WebSocketClient,
    endpoint: String,
}

#[async_trait]
impl RhaiExecutor for WebSocketExecutor {
    async fn call(&self, request: PlayRequestBuilder<'_>) -> Result<RhaiTaskDetails, RhaiDispatcherError> {
        // Build the PlayRequest and send it via WebSocket
        let play_request = request.build()?;

        // Send the script execution request via WebSocket
        let ws_message = serde_json::to_string(&play_request)?;
        self.ws_client.send(ws_message).await?;

        // Wait for the response and convert it to RhaiTaskDetails
        let response = self.ws_client.receive().await?;
        serde_json::from_str(&response).map_err(RhaiDispatcherError::from)
    }
}
```

**HTTP Client Implementation:**
```rust
pub struct HttpExecutor {
    http_client: reqwest::Client,
    base_url: String,
}

#[async_trait]
impl RhaiExecutor for HttpExecutor {
    async fn call(&self, request: PlayRequestBuilder<'_>) -> Result<RhaiTaskDetails, RhaiDispatcherError> {
        // Build the PlayRequest and send it via the HTTP API
        let play_request = request.build()?;

        // Send the script execution request via the HTTP API
        let response = self.http_client
            .post(&format!("{}/execute", self.base_url))
            .json(&play_request)
            .send()
            .await?;

        response.json().await.map_err(RhaiDispatcherError::from)
    }
}
```

**Local Executor Implementation:**
```rust
use rhai::Engine;

pub struct LocalExecutor {
    engine: Engine,
}

#[async_trait]
impl RhaiExecutor for LocalExecutor {
    async fn call(&self, request: PlayRequestBuilder<'_>) -> Result<RhaiTaskDetails, RhaiDispatcherError> {
        // Build the PlayRequest and execute it locally
        let play_request = request.build()?;

        // Execute the script directly in the local Rhai engine
        let (status, output, error) = match self.engine.eval::<String>(&play_request.script) {
            Ok(out) => ("completed".to_string(), Some(out), None),
            Err(e) => ("error".to_string(), None, Some(e.to_string())),
        };

        // Convert to the RhaiTaskDetails format
        let task_details = RhaiTaskDetails {
            task_id: play_request.id,
            script: play_request.script,
            status,
            output,
            error,
            created_at: chrono::Utc::now(),
            updated_at: chrono::Utc::now(),
            caller_id: "local".to_string(),
            context_id: play_request.context_id,
            worker_id: "local".to_string(),
        };

        Ok(task_details)
    }
}
```

### Data Model Extensions

Simple extensions to the existing heromodels flow structures:

#### Enhanced FlowStep Model
```rust
// Extends heromodels::models::flow::FlowStep
pub struct FlowStep {
    // ... existing heromodels::models::flow::FlowStep fields
    pub script: String,                   // Rhai script to execute
    pub depends_on: Vec<u32>,             // IDs of steps this step depends on
    pub context_id: String,               // Execution context (circle)
    pub worker_id: String,                // Worker that should execute this step
    pub inputs: HashMap<String, String>,  // Input parameters
    pub outputs: HashMap<String, String>, // Output results
}
```
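Outputs of completed dependencies are injected into a dependent step's `inputs` under `dep_<step_id>_<key>` names; that is why the example scripts in this repository read variables such as `dep_1_result`. A minimal sketch of the injection, mirroring `execute_step` in `src/orchestrator.rs` (how the inputs are ultimately bound into the Rhai scope is left to the interface implementation):

```rust
use std::collections::HashMap;

fn inject_dep_outputs(
    inputs: &mut HashMap<String, String>,
    dep_id: u32,
    dep_outputs: &HashMap<String, String>,
) {
    for (key, value) in dep_outputs {
        // e.g. step 1's "result" becomes "dep_1_result" for the dependent step
        inputs.insert(format!("dep_{}_{}", dep_id, key), value.clone());
    }
}
```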
### Execution Flow

```mermaid
sequenceDiagram
    participant Client as Client
    participant O as Orchestrator
    participant RE as RhaiExecutor
    participant DB as Database

    Client->>O: Submit Flow
    O->>DB: Store flow and steps
    O->>O: Find steps with no dependencies

    loop Until all steps complete
        O->>RE: Execute ready steps
        RE-->>O: Return results
        O->>DB: Update step status
        O->>O: Find newly ready steps
    end

    O->>Client: Flow completed
```
### Flexible Orchestrator Implementation

```rust
use rhai_dispatcher::{RhaiDispatcher, PlayRequestBuilder};
use std::collections::HashSet;
use std::sync::Arc;

pub struct Orchestrator<E: RhaiExecutor> {
    executor: E,
    database: Arc<Database>,
}

impl<E: RhaiExecutor> Orchestrator<E> {
    pub fn new(executor: E, database: Arc<Database>) -> Self {
        Self { executor, database }
    }

    pub async fn execute_flow(&self, flow: Flow) -> Result<(), OrchestratorError> {
        // 1. Store the flow in the database
        self.database.collection::<Flow>()?.set(&flow)?;

        // 2. Track pending and completed steps; steps with an empty
        //    `depends_on` list are immediately ready
        let mut pending_steps: Vec<FlowStep> = flow.steps.clone();
        let mut completed_steps: HashSet<u32> = HashSet::new();

        while !pending_steps.is_empty() {
            // Find ready steps (all dependencies completed)
            let ready_steps: Vec<FlowStep> = pending_steps
                .iter()
                .filter(|step| {
                    step.depends_on.iter().all(|dep_id| completed_steps.contains(dep_id))
                })
                .cloned()
                .collect();

            if ready_steps.is_empty() {
                return Err(OrchestratorError::NoReadySteps);
            }

            // Execute ready steps concurrently
            let mut tasks = Vec::new();
            for step in ready_steps {
                let executor = &self.executor;
                let task = async move {
                    // Create a PlayRequestBuilder for this step
                    let request = RhaiDispatcher::new_play_request()
                        .script(&step.script)
                        .context_id(&step.context_id)
                        .worker_id(&step.worker_id);

                    // Execute via the trait
                    let result = executor.call(request).await?;
                    Ok((step.base_data.id, result))
                };
                tasks.push(task);
            }

            // Wait for all ready steps to complete
            let results = futures::future::try_join_all(tasks).await?;

            // Update step status and mark steps as completed
            for (step_id, task_details) in results {
                if task_details.status == "completed" {
                    completed_steps.insert(step_id);
                    // Update step status in the database
                    // self.update_step_status(step_id, "completed", task_details.output).await?;
                } else {
                    return Err(OrchestratorError::StepFailed(step_id, task_details.error));
                }
            }

            // Remove completed steps from the pending set
            pending_steps.retain(|step| !completed_steps.contains(&step.base_data.id));
        }

        Ok(())
    }

    pub async fn get_flow_status(&self, flow_id: u32) -> Result<FlowStatus, OrchestratorError> {
        // Return the current status of the flow and all its steps
        let _flow = self.database.collection::<Flow>()?.get(flow_id)?;
        // A real implementation would check step statuses and derive the overall flow status
        Ok(FlowStatus::Running) // Placeholder
    }
}

pub enum OrchestratorError {
    DatabaseError(String),
    ExecutorError(RhaiDispatcherError),
    NoReadySteps,
    StepFailed(u32, Option<String>),
}

pub enum FlowStatus {
    Pending,
    Running,
    Completed,
    Failed,
}

// Usage examples:
// let orchestrator = Orchestrator::new(DispatcherExecutor::new(dispatcher), db);
// let orchestrator = Orchestrator::new(WebSocketExecutor::new(ws_client), db);
// let orchestrator = Orchestrator::new(HttpExecutor::new(http_client), db);
// let orchestrator = Orchestrator::new(LocalExecutor::new(engine), db);
```
### Key Features

1. **DAG Validation**: Ensures no circular dependencies exist in the `depends_on` relationships (a sketch of such a check follows this list)
2. **Parallel Execution**: Executes independent steps concurrently via multiple workers
3. **Simple Dependencies**: Each step lists the step IDs it depends on
4. **RhaiDispatcher Integration**: Uses the existing dispatcher for script execution
5. **Binary Outcomes**: Steps either succeed or fail (keeping it simple, as per the requirements)
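`validate_dag` is invoked by `execute_flow` in `src/orchestrator.rs`, but its implementation lives in the DSL crate and is not shown here. A minimal sketch of such a check, assuming only that each step exposes a unique id and its `depends_on` list, is Kahn's algorithm:

```rust
use std::collections::{HashMap, VecDeque};

/// Kahn's algorithm: true if the steps form a DAG (no dependency cycles).
/// Each entry is (step id, ids it depends on); ids are assumed unique.
fn is_dag(steps: &[(u32, Vec<u32>)]) -> bool {
    // In-degree = number of unmet dependencies per step
    let mut indegree: HashMap<u32, usize> =
        steps.iter().map(|(id, deps)| (*id, deps.len())).collect();
    let mut ready: VecDeque<u32> = indegree
        .iter()
        .filter(|(_, d)| **d == 0)
        .map(|(id, _)| *id)
        .collect();

    let mut visited = 0;
    while let Some(done) = ready.pop_front() {
        visited += 1;
        // "Complete" this step: unblock everything that depends on it
        for (id, deps) in steps {
            let n = deps.iter().filter(|d| **d == done).count();
            if n > 0 {
                let entry = indegree.get_mut(id).expect("step id present");
                *entry -= n;
                if *entry == 0 {
                    ready.push_back(*id);
                }
            }
        }
    }

    // Members of a cycle never reach in-degree 0, so they are never visited
    visited == steps.len()
}
```

Here `is_dag(&[(1, vec![]), (2, vec![1])])` is `true`, while the cycle `is_dag(&[(1, vec![2]), (2, vec![1])])` is `false`.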
This simple architecture provides DAG-based workflow execution while leveraging the existing rhailib infrastructure and keeping complexity minimal.
rhailib/_archive/orchestrator/examples/basic_workflow.rs (new file, 283 lines)
@@ -0,0 +1,283 @@
//! Basic workflow example demonstrating orchestrator usage

use orchestrator::{
    interface::LocalInterface,
    orchestrator::Orchestrator,
    OrchestratedFlow, OrchestratedFlowStep, FlowStatus,
};
use std::sync::Arc;
use std::collections::HashMap;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Initialize logging
    tracing_subscriber::fmt().init();

    // Create executor
    let executor = Arc::new(LocalInterface::new());

    // Create orchestrator
    let orchestrator = Orchestrator::new(executor);

    println!("🚀 Starting basic workflow example");

    // Example 1: Simple sequential workflow
    println!("\n📋 Example 1: Sequential Workflow");
    let sequential_flow = create_sequential_workflow();
    let flow_id = orchestrator.execute_flow(sequential_flow).await?;

    // Wait for completion and show results
    wait_and_show_results(&orchestrator, flow_id, "Sequential").await;

    // Example 2: Parallel workflow with convergence
    println!("\n📋 Example 2: Parallel Workflow");
    let parallel_flow = create_parallel_workflow();
    let flow_id = orchestrator.execute_flow(parallel_flow).await?;

    // Wait for completion and show results
    wait_and_show_results(&orchestrator, flow_id, "Parallel").await;

    // Example 3: Complex workflow with multiple dependencies
    println!("\n📋 Example 3: Complex Workflow");
    let complex_flow = create_complex_workflow();
    let flow_id = orchestrator.execute_flow(complex_flow).await?;

    // Wait for completion and show results
    wait_and_show_results(&orchestrator, flow_id, "Complex").await;

    // Clean up completed flows
    orchestrator.cleanup_completed_flows().await;

    println!("\n✅ All examples completed successfully!");

    Ok(())
}

/// Create a simple sequential workflow
fn create_sequential_workflow() -> OrchestratedFlow {
    let step1 = OrchestratedFlowStep::new("data_preparation")
        .script(r#"
            let data = [1, 2, 3, 4, 5];
            let sum = 0;
            for item in data {
                sum += item;
            }
            let result = sum;
        "#)
        .context_id("sequential_context")
        .worker_id("worker_1");

    let step2 = OrchestratedFlowStep::new("data_processing")
        .script(r#"
            let processed_data = dep_1_result * 2;
            let result = processed_data;
        "#)
        .depends_on(step1.id())
        .context_id("sequential_context")
        .worker_id("worker_2");

    let step3 = OrchestratedFlowStep::new("data_output")
        .script(r#"
            let final_result = "Processed value: " + dep_2_result;
            let result = final_result;
        "#)
        .depends_on(step2.id())
        .context_id("sequential_context")
        .worker_id("worker_3");

    OrchestratedFlow::new("sequential_workflow")
        .add_step(step1)
        .add_step(step2)
        .add_step(step3)
}

/// Create a parallel workflow with convergence
fn create_parallel_workflow() -> OrchestratedFlow {
    let step1 = OrchestratedFlowStep::new("fetch_user_data")
        .script(r#"
            let user_id = 12345;
            let user_name = "Alice";
            let result = user_name;
        "#)
        .context_id("parallel_context")
        .worker_id("user_service");

    let step2 = OrchestratedFlowStep::new("fetch_order_data")
        .script(r#"
            let order_id = 67890;
            let order_total = 99.99;
            let result = order_total;
        "#)
        .context_id("parallel_context")
        .worker_id("order_service");

    let step3 = OrchestratedFlowStep::new("fetch_inventory_data")
        .script(r#"
            let product_id = "ABC123";
            let stock_count = 42;
            let result = stock_count;
        "#)
        .context_id("parallel_context")
        .worker_id("inventory_service");

    let step4 = OrchestratedFlowStep::new("generate_report")
        .script(r#"
            let report = "User: " + dep_1_result +
                         ", Order Total: $" + dep_2_result +
                         ", Stock: " + dep_3_result + " units";
            let result = report;
        "#)
        .depends_on(step1.id())
        .depends_on(step2.id())
        .depends_on(step3.id())
        .context_id("parallel_context")
        .worker_id("report_service");

    OrchestratedFlow::new("parallel_workflow")
        .add_step(step1)
        .add_step(step2)
        .add_step(step3)
        .add_step(step4)
}

/// Create a complex workflow with multiple dependency levels
fn create_complex_workflow() -> OrchestratedFlow {
    // Level 1: Initial data gathering
    let step1 = OrchestratedFlowStep::new("load_config")
        .script(r#"
            let config = #{
                api_url: "https://api.example.com",
                timeout: 30,
                retries: 3
            };
            let result = config.api_url;
        "#)
        .context_id("complex_context")
        .worker_id("config_service");

    let step2 = OrchestratedFlowStep::new("authenticate")
        .script(r#"
            let token = "auth_token_12345";
            let expires_in = 3600;
            let result = token;
        "#)
        .context_id("complex_context")
        .worker_id("auth_service");

    // Level 2: Data fetching (depends on config and auth)
    let step3 = OrchestratedFlowStep::new("fetch_customers")
        .script(r#"
            let api_url = dep_1_result;
            let auth_token = dep_2_result;
            let customers = ["Customer A", "Customer B", "Customer C"];
            let result = customers.len();
        "#)
        .depends_on(step1.id())
        .depends_on(step2.id())
        .context_id("complex_context")
        .worker_id("customer_service");

    let step4 = OrchestratedFlowStep::new("fetch_products")
        .script(r#"
            let api_url = dep_1_result;
            let auth_token = dep_2_result;
            let products = ["Product X", "Product Y", "Product Z"];
            let result = products.len();
        "#)
        .depends_on(step1.id())
        .depends_on(step2.id())
        .context_id("complex_context")
        .worker_id("product_service");

    // Level 3: Data processing (depends on fetched data)
    let step5 = OrchestratedFlowStep::new("calculate_metrics")
        .script(r#"
            let customer_count = dep_3_result;
            let product_count = dep_4_result;
            let ratio = customer_count / product_count;
            let result = ratio;
        "#)
        .depends_on(step3.id())
        .depends_on(step4.id())
        .context_id("complex_context")
        .worker_id("analytics_service");

    // Level 4: Final reporting
    let step6 = OrchestratedFlowStep::new("generate_dashboard")
        .script(r#"
            let customer_count = dep_3_result;
            let product_count = dep_4_result;
            let ratio = dep_5_result;
            let dashboard = "Dashboard: " + customer_count + " customers, " +
                            product_count + " products, ratio: " + ratio;
            let result = dashboard;
        "#)
        .depends_on(step3.id())
        .depends_on(step4.id())
        .depends_on(step5.id())
        .context_id("complex_context")
        .worker_id("dashboard_service");

    OrchestratedFlow::new("complex_workflow")
        .add_step(step1)
        .add_step(step2)
        .add_step(step3)
        .add_step(step4)
        .add_step(step5)
        .add_step(step6)
}

/// Wait for flow completion and show results
async fn wait_and_show_results(
    orchestrator: &Orchestrator<LocalInterface>,
    flow_id: u32,
    workflow_name: &str,
) {
    println!("  ⏳ Executing {} workflow (ID: {})...", workflow_name, flow_id);

    // Poll for completion
    loop {
        tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;

        if let Some(execution) = orchestrator.get_flow_status(flow_id).await {
            match execution.status {
                FlowStatus::Completed => {
                    println!("  ✅ {} workflow completed successfully!", workflow_name);
                    println!(
                        "  📊 Executed {} steps in {:?}",
                        execution.completed_steps.len(),
                        execution.completed_at.unwrap() - execution.started_at
                    );

                    // Show step results
                    for (step_id, outputs) in &execution.step_results {
                        if let Some(result) = outputs.get("result") {
                            let step_name = execution.flow.orchestrated_steps
                                .iter()
                                .find(|s| s.id() == *step_id)
                                .map(|s| s.flow_step.name.as_str())
                                .unwrap_or("unknown");
                            println!("    📝 Step '{}': {}", step_name, result);
                        }
                    }
                    break;
                }
                FlowStatus::Failed => {
                    println!("  ❌ {} workflow failed!", workflow_name);
                    if !execution.failed_steps.is_empty() {
                        println!("  💥 Failed steps: {:?}", execution.failed_steps);
                    }
                    break;
                }
                FlowStatus::Running => {
                    print!(".");
                    std::io::Write::flush(&mut std::io::stdout()).unwrap();
                }
                FlowStatus::Pending => {
                    println!("  ⏸️ {} workflow is pending...", workflow_name);
                }
            }
        } else {
            println!("  ❓ {} workflow not found!", workflow_name);
            break;
        }
    }
}
rhailib/_archive/orchestrator/src/interface/dispatcher.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
//! Dispatcher interface implementation using RhaiDispatcher

use crate::RhaiInterface;
use async_trait::async_trait;
use rhai_dispatcher::{PlayRequest, RhaiDispatcher, RhaiDispatcherError};
use std::sync::Arc;

/// Dispatcher-based interface using RhaiDispatcher
pub struct DispatcherInterface {
    dispatcher: Arc<RhaiDispatcher>,
}

impl DispatcherInterface {
    /// Create a new dispatcher interface
    pub fn new(dispatcher: Arc<RhaiDispatcher>) -> Self {
        Self { dispatcher }
    }
}

#[async_trait]
impl RhaiInterface for DispatcherInterface {
    async fn submit_play_request(&self, play_request: &PlayRequest) -> Result<(), RhaiDispatcherError> {
        self.dispatcher.submit_play_request(play_request).await
    }

    async fn submit_play_request_and_await_result(&self, play_request: &PlayRequest) -> Result<String, RhaiDispatcherError> {
        self.dispatcher.submit_play_request_and_await_result(play_request).await
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_dispatcher_interface_creation() {
        // This test just verifies we can create the interface.
        // Note: actual testing would require a properly configured RhaiDispatcher;
        // for now, we'd create a mock or skip the actual dispatcher creation.

        // This is a placeholder test - adjust based on the actual RhaiDispatcher constructor:
        // let dispatcher = Arc::new(RhaiDispatcher::new());
        // let interface = DispatcherInterface::new(dispatcher);

        // Just verify the test compiles for now
        assert!(true);
    }

    #[tokio::test]
    async fn test_dispatcher_interface_methods() {
        // This test would verify the interface methods work correctly
        // when a proper RhaiDispatcher is available.

        let play_request = PlayRequest {
            script: "let x = 5; x + 3".to_string(),
        };

        // Placeholder assertions - would test actual functionality with a real dispatcher
        assert_eq!(play_request.script, "let x = 5; x + 3");
    }
}
rhailib/_archive/orchestrator/src/interface/local.rs (new file, 111 lines)
@@ -0,0 +1,111 @@
//! Local interface implementation for in-process script execution

use crate::RhaiInterface;
use async_trait::async_trait;
use rhai_dispatcher::{PlayRequest, RhaiDispatcherError};

/// Local interface for in-process script execution
pub struct LocalInterface {
    engine: rhai::Engine,
}

impl LocalInterface {
    /// Create a new local interface
    pub fn new() -> Self {
        let engine = rhai::Engine::new();
        Self { engine }
    }

    /// Create a new local interface with a custom engine
    pub fn with_engine(engine: rhai::Engine) -> Self {
        Self { engine }
    }
}

impl Default for LocalInterface {
    fn default() -> Self {
        Self::new()
    }
}

#[async_trait]
impl RhaiInterface for LocalInterface {
    async fn submit_play_request(&self, play_request: &PlayRequest) -> Result<(), RhaiDispatcherError> {
        // For the local interface, fire-and-forget doesn't make much sense;
        // we just execute and ignore the result.
        let _ = self.submit_play_request_and_await_result(play_request).await?;
        Ok(())
    }

    async fn submit_play_request_and_await_result(&self, play_request: &PlayRequest) -> Result<String, RhaiDispatcherError> {
        let mut scope = rhai::Scope::new();

        // Execute the script
        let result = self
            .engine
            .eval_with_scope::<rhai::Dynamic>(&mut scope, &play_request.script)
            .map_err(|e| RhaiDispatcherError::TaskNotFound(format!("Script execution error: {}", e)))?;

        // Return the result as a string
        if result.is_unit() {
            Ok(String::new())
        } else {
            Ok(result.to_string())
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_local_interface_basic() {
        let interface = LocalInterface::new();
        let play_request = PlayRequest {
            script: "let x = 5; x + 3".to_string(),
        };

        let result = interface.submit_play_request_and_await_result(&play_request).await;
        assert!(result.is_ok());

        let output = result.unwrap();
        assert_eq!(output, "8");
    }

    #[tokio::test]
    async fn test_local_interface_fire_and_forget() {
        let interface = LocalInterface::new();
        let play_request = PlayRequest {
            script: "let x = 5; x + 3".to_string(),
        };

        let result = interface.submit_play_request(&play_request).await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_local_interface_with_error() {
        let interface = LocalInterface::new();
        let play_request = PlayRequest {
            script: "invalid_syntax +++".to_string(),
        };

        let result = interface.submit_play_request_and_await_result(&play_request).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_local_interface_empty_result() {
        let interface = LocalInterface::new();
        let play_request = PlayRequest {
            script: "let x = 42;".to_string(),
        };

        let result = interface.submit_play_request_and_await_result(&play_request).await;
        assert!(result.is_ok());

        let output = result.unwrap();
        assert_eq!(output, "");
    }
}
rhailib/_archive/orchestrator/src/interface/mod.rs (new file, 9 lines)
@@ -0,0 +1,9 @@
//! Interface implementations for different backends

pub mod local;
pub mod ws;
pub mod dispatcher;

pub use local::*;
pub use ws::*;
pub use dispatcher::*;
rhailib/_archive/orchestrator/src/interface/ws.rs (new file, 117 lines)
@@ -0,0 +1,117 @@
//! WebSocket interface implementation for remote script execution.
//! NOTE: despite the module name, this implementation currently submits
//! requests over HTTP (via `reqwest`) rather than a WebSocket transport.

use crate::RhaiInterface;
use async_trait::async_trait;
use rhai_dispatcher::{PlayRequest, RhaiDispatcherError};
use reqwest::Client;
use serde_json::json;

/// WebSocket-based interface for remote script execution
pub struct WsInterface {
    client: Client,
    base_url: String,
}

impl WsInterface {
    /// Create a new WebSocket interface
    pub fn new(base_url: String) -> Self {
        Self {
            client: Client::new(),
            base_url,
        }
    }
}

#[async_trait]
impl RhaiInterface for WsInterface {
    async fn submit_play_request(&self, play_request: &PlayRequest) -> Result<(), RhaiDispatcherError> {
        let payload = json!({
            "script": play_request.script
        });

        let response = self
            .client
            .post(&format!("{}/submit", self.base_url))
            .json(&payload)
            .send()
            .await
            .map_err(|e| RhaiDispatcherError::TaskNotFound(format!("Network error: {}", e)))?;

        if response.status().is_success() {
            Ok(())
        } else {
            let error_text = response
                .text()
                .await
                .unwrap_or_else(|_| "Unknown error".to_string());
            Err(RhaiDispatcherError::TaskNotFound(format!("HTTP error: {}", error_text)))
        }
    }

    async fn submit_play_request_and_await_result(&self, play_request: &PlayRequest) -> Result<String, RhaiDispatcherError> {
        let payload = json!({
            "script": play_request.script
        });

        let response = self
            .client
            .post(&format!("{}/execute", self.base_url))
            .json(&payload)
            .send()
            .await
            .map_err(|e| RhaiDispatcherError::TaskNotFound(format!("Network error: {}", e)))?;

        if response.status().is_success() {
            let result: String = response
                .text()
                .await
                .map_err(|e| RhaiDispatcherError::TaskNotFound(format!("Response parsing error: {}", e)))?;
            Ok(result)
        } else {
            let error_text = response
                .text()
                .await
                .unwrap_or_else(|_| "Unknown error".to_string());
            Err(RhaiDispatcherError::TaskNotFound(format!("HTTP error: {}", error_text)))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ws_interface_creation() {
        let interface = WsInterface::new("http://localhost:8080".to_string());
        assert_eq!(interface.base_url, "http://localhost:8080");
    }

    #[tokio::test]
    async fn test_ws_interface_call_with_mock_server() {
        // This test would require a mock HTTP server;
        // for now, just test that we can create the interface.
        let interface = WsInterface::new("http://localhost:8080".to_string());

        let play_request = PlayRequest {
            script: "let x = 1;".to_string(),
        };

        // This will fail without a real server, but that's expected in unit tests
        let result = interface.submit_play_request_and_await_result(&play_request).await;
        assert!(result.is_err()); // Expected to fail without a server
    }

    #[tokio::test]
    async fn test_ws_interface_fire_and_forget() {
        let interface = WsInterface::new("http://localhost:8080".to_string());

        let play_request = PlayRequest {
            script: "let x = 1;".to_string(),
        };

        // This will fail without a real server, but that's expected in unit tests
        let result = interface.submit_play_request(&play_request).await;
        assert!(result.is_err()); // Expected to fail without a server
    }
}
rhailib/_archive/orchestrator/src/lib.rs (new file, 35 lines)
@@ -0,0 +1,35 @@
//! # Orchestrator
//!
//! A simple DAG-based workflow execution system that extends the heromodels flow structures
//! to support workflows with dependencies and distributed script execution.

use async_trait::async_trait;
use rhai_dispatcher::{PlayRequest, RhaiDispatcherError};

pub mod interface;
pub mod orchestrator;

pub use interface::*;
pub use orchestrator::*;

/// Trait for executing Rhai scripts through different backends.
/// Uses the same signature as RhaiDispatcher for consistency.
#[async_trait]
pub trait RhaiInterface {
    /// Submit a play request without waiting for the result (fire-and-forget)
    async fn submit_play_request(&self, play_request: &PlayRequest) -> Result<(), RhaiDispatcherError>;

    /// Submit a play request and await the result.
    /// Returns just the output string on success.
    async fn submit_play_request_and_await_result(&self, play_request: &PlayRequest) -> Result<String, RhaiDispatcherError>;
}

// Re-export the flow models from the DSL
pub use rhailib_dsl::flow::{OrchestratedFlow, OrchestratedFlowStep, OrchestratorError, FlowStatus};

// Conversion from RhaiDispatcherError to OrchestratorError
impl From<RhaiDispatcherError> for OrchestratorError {
    fn from(err: RhaiDispatcherError) -> Self {
        OrchestratorError::ExecutorError(err.to_string())
    }
}
rhailib/_archive/orchestrator/src/orchestrator.rs (new file, 418 lines)
@@ -0,0 +1,418 @@
//! Main orchestrator implementation for DAG-based workflow execution

use crate::{
    OrchestratedFlow, OrchestratedFlowStep, OrchestratorError, FlowStatus, RhaiInterface,
};
use rhai_dispatcher::PlayRequest;
use futures::future::try_join_all;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};

/// Main orchestrator for executing DAG-based workflows
pub struct Orchestrator<I: RhaiInterface> {
    /// Interface for running scripts
    interface: Arc<I>,

    /// Active flow executions
    active_flows: Arc<RwLock<HashMap<u32, FlowExecution>>>,
}

/// Represents an active flow execution
#[derive(Debug, Clone)]
pub struct FlowExecution {
    /// The flow being executed
    pub flow: OrchestratedFlow,

    /// Current status
    pub status: FlowStatus,

    /// Completed step IDs
    pub completed_steps: HashSet<u32>,

    /// Failed step IDs
    pub failed_steps: HashSet<u32>,

    /// Step results
    pub step_results: HashMap<u32, HashMap<String, String>>,

    /// Execution start time
    pub started_at: chrono::DateTime<chrono::Utc>,

    /// Execution end time
    pub completed_at: Option<chrono::DateTime<chrono::Utc>>,
}

impl FlowExecution {
    /// Create a new flow execution
    pub fn new(flow: OrchestratedFlow) -> Self {
        Self {
            flow,
            status: FlowStatus::Pending,
            completed_steps: HashSet::new(),
            failed_steps: HashSet::new(),
            step_results: HashMap::new(),
            started_at: chrono::Utc::now(),
            completed_at: None,
        }
    }

    /// Check if a step is ready to execute (all dependencies completed)
    pub fn is_step_ready(&self, step: &OrchestratedFlowStep) -> bool {
        if self.completed_steps.contains(&step.id()) || self.failed_steps.contains(&step.id()) {
            return false;
        }

        step.depends_on.iter().all(|dep_id| self.completed_steps.contains(dep_id))
    }

    /// Get all ready steps
    pub fn get_ready_steps(&self) -> Vec<&OrchestratedFlowStep> {
        self.flow
            .orchestrated_steps
            .iter()
            .filter(|step| self.is_step_ready(step))
            .collect()
    }

    /// Mark a step as completed
    pub fn complete_step(&mut self, step_id: u32, outputs: HashMap<String, String>) {
        self.completed_steps.insert(step_id);
        self.step_results.insert(step_id, outputs);

        // Check if the flow is complete
        if self.completed_steps.len() == self.flow.orchestrated_steps.len() {
            self.status = FlowStatus::Completed;
            self.completed_at = Some(chrono::Utc::now());
        }
    }

    /// Mark a step as failed
    pub fn fail_step(&mut self, step_id: u32) {
        self.failed_steps.insert(step_id);
        self.status = FlowStatus::Failed;
        self.completed_at = Some(chrono::Utc::now());
    }

    /// Check if the flow execution is finished
    pub fn is_finished(&self) -> bool {
        matches!(self.status, FlowStatus::Completed | FlowStatus::Failed)
    }
}

impl<I: RhaiInterface + Send + Sync + 'static> Orchestrator<I> {
    /// Create a new orchestrator
    pub fn new(interface: Arc<I>) -> Self {
        Self {
            interface,
            active_flows: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Start executing a flow
    pub async fn execute_flow(&self, flow: OrchestratedFlow) -> Result<u32, OrchestratorError> {
        let flow_id = flow.id();
        flow.validate_dag()?;

        info!("Starting execution of flow {} with {} steps", flow_id, flow.orchestrated_steps.len());

        // Create the flow execution
        let mut execution = FlowExecution::new(flow);
        execution.status = FlowStatus::Running;

        // Store the execution
        {
            let mut active_flows = self.active_flows.write().await;
            active_flows.insert(flow_id, execution);
        }

        // Start execution in the background
        let orchestrator = self.clone();
        tokio::spawn(async move {
            if let Err(e) = orchestrator.execute_flow_steps(flow_id).await {
                error!("Flow {} execution failed: {}", flow_id, e);

                // Mark the flow as failed
                let mut active_flows = orchestrator.active_flows.write().await;
                if let Some(execution) = active_flows.get_mut(&flow_id) {
                    execution.status = FlowStatus::Failed;
                    execution.completed_at = Some(chrono::Utc::now());
                }
            }
        });

        Ok(flow_id)
    }

    /// Execute flow steps using DAG traversal
    async fn execute_flow_steps(&self, flow_id: u32) -> Result<(), OrchestratorError> {
        loop {
            let ready_steps = {
                let active_flows = self.active_flows.read().await;
                let execution = active_flows
                    .get(&flow_id)
                    .ok_or(OrchestratorError::StepNotFound(flow_id))?;

                if execution.is_finished() {
                    info!("Flow {} execution completed with status: {:?}", flow_id, execution.status);
                    return Ok(());
                }

                execution.get_ready_steps().into_iter().cloned().collect::<Vec<_>>()
            };

            if ready_steps.is_empty() {
                // Check if we're deadlocked
                let active_flows = self.active_flows.read().await;
                let execution = active_flows
                    .get(&flow_id)
                    .ok_or(OrchestratorError::StepNotFound(flow_id))?;

                if !execution.is_finished() {
                    warn!("No ready steps found for flow {} - possible deadlock", flow_id);
                    return Err(OrchestratorError::NoReadySteps);
                }

                return Ok(());
            }

            debug!("Executing {} ready steps for flow {}", ready_steps.len(), flow_id);

            // Execute ready steps concurrently
            let step_futures = ready_steps.into_iter().map(|step| {
                let orchestrator = self.clone();
                async move {
                    orchestrator.execute_step(flow_id, step).await
                }
            });

            // Wait for all steps to complete
            let results = try_join_all(step_futures).await?;
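            // Note: `try_join_all` short-circuits on the first `Err`; the
            // remaining step futures are dropped and the error propagates out
            // of this function (the failing step has already been marked
            // failed inside `execute_step`).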

            // Update execution state
            {
                let mut active_flows = self.active_flows.write().await;
                let execution = active_flows
                    .get_mut(&flow_id)
                    .ok_or(OrchestratorError::StepNotFound(flow_id))?;

                for (step_id, outputs) in results {
                    execution.complete_step(step_id, outputs);
                }
            }

            // Small delay to prevent a tight loop
            tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
        }
    }

    /// Execute a single step
    async fn execute_step(
        &self,
        flow_id: u32,
        step: OrchestratedFlowStep,
    ) -> Result<(u32, HashMap<String, String>), OrchestratorError> {
        let step_id = step.id();
        info!("Executing step {} for flow {}", step_id, flow_id);

        // Prepare inputs with dependency outputs
        let mut inputs = step.inputs.clone();

        // Add outputs from dependency steps
        {
            let active_flows = self.active_flows.read().await;
            let execution = active_flows
                .get(&flow_id)
                .ok_or(OrchestratorError::StepNotFound(flow_id))?;

            for dep_id in &step.depends_on {
                if let Some(dep_outputs) = execution.step_results.get(dep_id) {
                    for (key, value) in dep_outputs {
                        inputs.insert(format!("dep_{}_{}", dep_id, key), value.clone());
                    }
                }
            }
        }

        // Create the play request
        let play_request = PlayRequest {
            id: format!("{}_{}", flow_id, step_id),
            worker_id: step.worker_id.clone(),
            context_id: step.context_id.clone(),
            script: step.script.clone(),
            timeout: std::time::Duration::from_secs(30), // Default timeout
        };

        // Execute the script
        match self.interface.submit_play_request_and_await_result(&play_request).await {
            Ok(output) => {
                info!("Step {} completed successfully", step_id);
                let mut outputs = HashMap::new();
                outputs.insert("result".to_string(), output);
                Ok((step_id, outputs))
            }
            Err(e) => {
                error!("Step {} failed: {}", step_id, e);

                // Mark the step as failed
                {
                    let mut active_flows = self.active_flows.write().await;
                    if let Some(execution) = active_flows.get_mut(&flow_id) {
                        execution.fail_step(step_id);
                    }
                }

                Err(OrchestratorError::StepFailed(step_id, Some(e.to_string())))
            }
        }
    }

    /// Get the status of a flow execution
    pub async fn get_flow_status(&self, flow_id: u32) -> Option<FlowExecution> {
        let active_flows = self.active_flows.read().await;
        active_flows.get(&flow_id).cloned()
    }

    /// Cancel a flow execution
    pub async fn cancel_flow(&self, flow_id: u32) -> Result<(), OrchestratorError> {
        let mut active_flows = self.active_flows.write().await;
        if let Some(execution) = active_flows.get_mut(&flow_id) {
            execution.status = FlowStatus::Failed;
            execution.completed_at = Some(chrono::Utc::now());
            info!("Flow {} cancelled", flow_id);
            Ok(())
        } else {
            Err(OrchestratorError::StepNotFound(flow_id))
        }
    }

    /// List all active flows
    pub async fn list_active_flows(&self) -> Vec<(u32, FlowStatus)> {
        let active_flows = self.active_flows.read().await;
        active_flows
            .iter()
            .map(|(id, execution)| (*id, execution.status.clone()))
            .collect()
    }

    /// Clean up completed flows
    pub async fn cleanup_completed_flows(&self) {
        let mut active_flows = self.active_flows.write().await;
        active_flows.retain(|_, execution| !execution.is_finished());
    }
}

impl<I: RhaiInterface + Send + Sync> Clone for Orchestrator<I> {
    fn clone(&self) -> Self {
        Self {
            interface: self.interface.clone(),
            active_flows: self.active_flows.clone(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::interface::LocalInterface;
    use std::collections::HashMap;

    #[tokio::test]
    async fn test_simple_flow_execution() {
        let interface = Arc::new(LocalInterface::new());
        let orchestrator = Orchestrator::new(interface);

        // Create a simple flow with two steps
        let step1 = OrchestratedFlowStep::new("step1")
            .script("let result = 10;")
            .context_id("test")
            .worker_id("worker1");

        let step2 = OrchestratedFlowStep::new("step2")
            .script("let result = dep_1_result + 5;")
            .depends_on(step1.id())
            .context_id("test")
            .worker_id("worker1");

        let flow = OrchestratedFlow::new("test_flow")
            .add_step(step1)
            .add_step(step2);

        // Execute the flow
        let flow_id = orchestrator.execute_flow(flow).await.unwrap();

        // Wait for completion
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        let status = orchestrator.get_flow_status(flow_id).await.unwrap();
        assert_eq!(status.status, FlowStatus::Completed);
        assert_eq!(status.completed_steps.len(), 2);
    }

    #[tokio::test]
    async fn test_parallel_execution() {
        let interface = Arc::new(LocalInterface::new());
        let orchestrator = Orchestrator::new(interface);

        // Create a flow with parallel steps
        let step1 = OrchestratedFlowStep::new("step1")
            .script("let result = 10;")
            .context_id("test")
            .worker_id("worker1");

        let step2 = OrchestratedFlowStep::new("step2")
            .script("let result = 20;")
            .context_id("test")
            .worker_id("worker2");

        let step3 = OrchestratedFlowStep::new("step3")
            .script("let result = dep_1_result + dep_2_result;")
            .depends_on(step1.id())
            .depends_on(step2.id())
            .context_id("test")
            .worker_id("worker3");

        let flow = OrchestratedFlow::new("parallel_flow")
            .add_step(step1)
            .add_step(step2)
            .add_step(step3);

        // Execute the flow
        let flow_id = orchestrator.execute_flow(flow).await.unwrap();

        // Wait for completion
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        let status = orchestrator.get_flow_status(flow_id).await.unwrap();
        assert_eq!(status.status, FlowStatus::Completed);
        assert_eq!(status.completed_steps.len(), 3);
    }

    #[test]
    fn test_flow_execution_state() {
        let step1 = OrchestratedFlowStep::new("step1").script("let x = 1;");
        let step2 = OrchestratedFlowStep::new("step2")
            .script("let y = 2;")
            .depends_on(step1.id());

        let flow = OrchestratedFlow::new("test_flow")
            .add_step(step1.clone())
            .add_step(step2.clone());

        let mut execution = FlowExecution::new(flow);

        // Initially, only step1 should be ready
        assert!(execution.is_step_ready(&step1));
        assert!(!execution.is_step_ready(&step2));

        // After completing step1, step2 should be ready
        execution.complete_step(step1.id(), HashMap::new());
        assert!(!execution.is_step_ready(&step1)); // Already completed
        assert!(execution.is_step_ready(&step2));

        // After completing step2, the flow should be complete
        execution.complete_step(step2.id(), HashMap::new());
        assert_eq!(execution.status, FlowStatus::Completed);
    }
}
rhailib/_archive/orchestrator/src/services.rs (new file, 42 lines)
@@ -0,0 +1,42 @@
//! Flow query services built on the Rhai interface

use crate::{OrchestratedFlow, Orchestrator, OrchestratorError, RhaiInterface};

impl<I: RhaiInterface + Send + Sync + 'static> Orchestrator<I> {
    // NOTE: these helpers assume a `new_play_request()` builder on the
    // interface, which is not part of the `RhaiInterface` trait as defined
    // in lib.rs.

    /// Get a flow by ID
    pub async fn get_flow(&self, flow_id: u32) -> Result<OrchestratedFlow, OrchestratorError> {
        self.interface
            .new_play_request()
            .script(format!("json_encode(get_flow({}))", flow_id))
            .submit_play_request_and_await_result()
            .await
            .map_err(OrchestratorError::from)
            .map(|result| serde_json::from_str(&result).unwrap())
    }

    /// Get all flows
    pub async fn get_flows(&self) -> Result<Vec<OrchestratedFlow>, OrchestratorError> {
        self.interface
            .new_play_request()
            .script("json_encode(get_flows())")
            .submit_play_request_and_await_result()
            .await
            .map_err(OrchestratorError::from)
            .map(|result| serde_json::from_str(&result).unwrap())
    }

    /// Get active flows
    pub async fn get_active_flows(&self) -> Result<Vec<OrchestratedFlow>, OrchestratorError> {
        // NOTE: currently identical to `get_flows`; no active-only filter yet
        self.interface
            .new_play_request()
            .script("json_encode(get_flows())")
            .submit_play_request_and_await_result()
            .await
            .map_err(OrchestratorError::from)
            .map(|result| serde_json::from_str(&result).unwrap())
    }
}