This commit is contained in:
Timur Gordon 2025-08-01 00:01:08 +02:00
parent 32c2cbe0cc
commit 8ed40ce99c
57 changed files with 2047 additions and 4113 deletions

Cargo.lock generated
View File

@ -396,6 +396,28 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "async-stream"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "async-trait"
version = "0.1.88"
@ -1154,6 +1176,21 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@ -1170,6 +1207,23 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.31"
@ -1199,10 +1253,13 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
@ -1437,24 +1494,6 @@ dependencies = [
"tokio",
]
[[package]]
name = "hero_dispatcher"
version = "0.1.0"
dependencies = [
"chrono",
"clap",
"colored",
"env_logger",
"hero_job",
"log",
"redis 0.25.4",
"rhai",
"serde",
"serde_json",
"tokio",
"uuid",
]
[[package]]
name = "hero_examples"
version = "0.1.0"
@ -1462,8 +1501,8 @@ dependencies = [
"chrono",
"colored",
"env_logger",
"hero_dispatcher",
"hero_job",
"hero_supervisor",
"log",
"redis 0.25.4",
"serde_json",
@ -1485,6 +1524,25 @@ dependencies = [
"uuid",
]
[[package]]
name = "hero_supervisor"
version = "0.1.0"
dependencies = [
"chrono",
"clap",
"colored",
"env_logger",
"hero_job",
"log",
"redis 0.25.4",
"rhai",
"serde",
"serde_json",
"tokio",
"uuid",
"zinit-client",
]
[[package]]
name = "hero_websocket_client"
version = "0.1.0"
@ -1532,8 +1590,8 @@ dependencies = [
"dotenv",
"env_logger",
"futures-util",
"hero_dispatcher",
"hero_job",
"hero_supervisor",
"heromodels",
"hex",
"hmac",
@ -2692,18 +2750,6 @@ dependencies = [
"tokio",
]
[[package]]
name = "rhailib_engine"
version = "0.1.0"
dependencies = [
"chrono",
"heromodels",
"heromodels-derive",
"heromodels_core",
"rhai",
"rhailib_dsl",
]
[[package]]
name = "rhailib_worker"
version = "0.1.0"
@ -2711,12 +2757,15 @@ dependencies = [
"chrono",
"clap",
"env_logger",
"hero_job",
"hero_supervisor",
"heromodels",
"heromodels-derive",
"heromodels_core",
"log",
"redis 0.25.4",
"rhai",
"rhai_dispatcher",
"rhailib_dsl",
"rhailib_engine",
"serde",
"serde_json",
"tokio",
@ -4181,6 +4230,24 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "zinit-client"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4121c3ba22f1b3ccc4546de32072c9530c7e2735b734641ada5280ac422ac9cd"
dependencies = [
"async-stream",
"async-trait",
"chrono",
"futures",
"rand 0.8.5",
"serde",
"serde_json",
"thiserror",
"tokio",
"tracing",
]
[[package]]
name = "zstd"
version = "0.13.3"

View File

@ -55,9 +55,9 @@ members = [
"interfaces/unix/server", "interfaces/unix/server",
"interfaces/websocket/client", "interfaces/websocket/client",
"interfaces/websocket/server", "interfaces/websocket/server",
"core/dispatcher", "core/supervisor",
"core/engine", "core/worker",
"core/worker", "core/job", "core/examples", "interfaces/websocket/examples", "core/job", "core/examples", "interfaces/websocket/examples",
"proxies/http", "proxies/http",
] ]
resolver = "2" # Recommended for new workspaces resolver = "2" # Recommended for new workspaces

View File

@ -1,4 +1,4 @@
# Hero
# Base Object and Actor Backend
Hero is a program that runs scripts in contexts on behalf of a peer. Hero aims to support a language sufficient to support all of one's digital actions. As such, hero can become a tool of digital sovereignty, allowing people and groups to own their own structured data and functionality to act on it.
@ -12,11 +12,11 @@ Hero is a program that runs scripts in contexts on behalf of a peer. Hero aims t
## Core
In its core, a [dispatcher](#dispatcher) dispatches jobs to execute scripts to [workers](#worker) over redis. Workers spawn appropriate engine instances to execute scripts within the defined [confines]() of the job.
In its core, a [supervisor](#supervisor) dispatches jobs to execute scripts to [workers](#worker) over redis. Workers spawn appropriate engine instances to execute scripts within the defined [confines]() of the job.
### Components
#### [Dispatcher](./core/dispatcher)
#### [Supervisor](./core/supervisor)
Component responsible for distributing jobs to workers over Redis.
@ -30,7 +30,7 @@ A unit of work that executes a Rhai or Hero script.
#### [Worker](./core/worker)
An entity that processes jobs dispatched by the dispatcher.
An entity that processes jobs dispatched by the supervisor.
## Interfaces

View File

@ -1,128 +0,0 @@
# Hero Dispatcher
A Redis-based job dispatcher for managing Rhai/HeroScript execution across distributed workers.
## Overview
The Hero Dispatcher provides a robust job queue system where:
- **Jobs** represent script execution requests (Rhai or HeroScript)
- **Creating a job** stores job parameters in Redis as an hset entry
- **Submitting a job** pushes the job ID to a worker's queue
- **Running a job** creates, submits, and awaits results on a dedicated reply queue
## Key Features
- **Asynchronous Operations**: Built with `tokio` for non-blocking I/O
- **Request-Reply Pattern**: Submit jobs and await results without polling
- **Configurable Jobs**: Set timeouts, retries, concurrency, and logging options
- **Worker Targeting**: Direct job routing to specific worker queues
- **Job Lifecycle**: Create, submit, monitor status, and retrieve results
## Core Components
### `DispatcherBuilder`
Builder for creating `Dispatcher` instances with caller ID, worker ID, context ID, and Redis URL.
### `Dispatcher`
Main interface for job management:
- `new_job()` - Create a new `JobBuilder`
- `create_job()` - Store job in Redis
- `run_job_and_await_result()` - Execute job and wait for completion
- `get_job_status()` - Check job execution status
- `get_job_output()` - Retrieve job results
### `JobBuilder`
Fluent builder for configuring jobs:
- `script()` - Set the script content
- `worker_id()` - Target specific worker
- `timeout()` - Set execution timeout
- `build()` - Create the job
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
### `Job`
Represents a script execution request with:
- Unique ID and timestamps
- Script content and target worker
- Execution settings (timeout, retries, concurrency)
- Logging configuration
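The bullets above map naturally onto a struct. A minimal sketch follows; the field names are illustrative, and the actual `Job` type is defined in the `hero_job` crate:
```rust
// Illustrative sketch only; the real definition lives in the hero_job crate.
pub struct Job {
    pub id: String,                   // unique job ID (also used in Redis keys)
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub script: String,               // Rhai or HeroScript source
    pub worker_id: String,            // target worker queue
    pub timeout: std::time::Duration, // execution timeout
    pub retries: u32,                 // retry count
    pub log_path: Option<String>,     // optional log file
}
```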
## Redis Schema
Jobs are stored using the `hero:` namespace:
- `hero:job:{job_id}` - Job parameters as Redis hash
- `hero:work_queue:{worker_id}` - Worker-specific job queues
- `hero:reply:{job_id}` - Dedicated reply queues for results
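As an illustration, once a job has been created and submitted, these entries can be inspected directly with `redis-cli` (the job ID and hash fields shown are hypothetical):
```bash
# Job parameters stored as a Redis hash
redis-cli HGETALL hero:job:01J1EXAMPLE

# Job IDs currently queued for a worker
redis-cli LRANGE hero:work_queue:worker-1 0 -1

# Block up to 30s for the result on the job's reply queue
redis-cli BLPOP hero:reply:01J1EXAMPLE 30
```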
## Prerequisites
- Redis server accessible by dispatcher and workers
## Usage Example
### Basic Job Creation and Submission
```rust
use hero_dispatcher::{DispatcherBuilder, DispatcherError};
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create dispatcher
let dispatcher = DispatcherBuilder::new()
.caller_id("my-app")
.worker_id("worker-1")
.context_id("my-context")
.redis_url("redis://127.0.0.1:6379")
.build()?;
// Create a job
let job = dispatcher
.new_job()
.script(r#"print("Hello from worker!"); "success""#)
.timeout(Duration::from_secs(30))
.build()?;
// Store job in Redis
dispatcher.create_job(&job).await?;
println!("Job {} created and stored in Redis", job.id);
// Run job and await result (requires worker)
match dispatcher.run_job_and_await_result(&job, "worker-1".to_string()).await {
Ok(result) => println!("Job completed: {}", result),
Err(DispatcherError::Timeout(_)) => println!("Job timed out"),
Err(e) => println!("Job failed: {}", e),
}
Ok(())
}
```
### Job Status Monitoring
```rust
// Check job status
match dispatcher.get_job_status(&job.id).await {
Ok(status) => println!("Job status: {:?}", status),
Err(e) => println!("Error getting status: {}", e),
}
// Get job output
match dispatcher.get_job_output(&job.id).await {
Ok(output) => println!("Job output: {:?}", output),
Err(e) => println!("Error getting output: {}", e),
}
```
## Examples
Run the comprehensive demo to see dispatcher functionality and Redis entries:
```bash
cargo run --example dispatcher_demo
```
Other examples:
- `timeout_example.rs` - Demonstrates timeout handling
Ensure Redis is running at `redis://127.0.0.1:6379`.

View File

@ -1,559 +0,0 @@
use hero_dispatcher::{Dispatcher, DispatcherBuilder, ScriptType};
use log::info;
use redis::AsyncCommands;
use std::collections::HashMap;
use std::time::Duration;
use tokio::time::sleep;
/// Comprehensive example demonstrating the Hero Dispatcher functionality.
///
/// This example shows:
/// 1. Creating a dispatcher instance
/// 2. Creating jobs with different configurations
/// 3. Submitting jobs to the queue
/// 4. Inspecting Redis entries created by the dispatcher
/// 5. Running jobs and awaiting results
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
println!("🚀 Hero Dispatcher Demo");
println!("======================\n");
// Create dispatcher client with worker vectors per script type
let dispatcher = DispatcherBuilder::new()
.caller_id("demo-caller")
.context_id("demo-context")
.heroscript_workers(vec!["hero-worker-1".to_string(), "hero-worker-2".to_string()])
.rhai_sal_workers(vec!["rhai-sal-worker-1".to_string()])
.rhai_dsl_workers(vec!["rhai-dsl-worker-1".to_string()])
.redis_url("redis://127.0.0.1/")
.build()?;
println!("✅ Dispatcher created with:");
println!(" - Caller ID: demo-caller");
println!(" - Worker ID: demo-worker");
println!(" - Context ID: demo-context\n");
// Create Redis connection for inspection
let redis_client = redis::Client::open("redis://127.0.0.1:6379")?;
let mut redis_conn = redis_client.get_multiplexed_async_connection().await?;
// Demo 1: Create a simple job
println!("📝 Demo 1: Creating a simple job");
println!("--------------------------------");
let job1 = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("Hello from job 1!");"#)
.timeout(Duration::from_secs(10))
.build()?;
println!("Job 1 created with ID: {}", job1.id);
// Create the job (stores in Redis)
dispatcher.create_job(&job1).await?;
println!("✅ Job 1 stored in Redis");
// Inspect Redis entries for this job
print_job_redis_entries(&mut redis_conn, &job1.id).await?;
println!();
// Demo 2: Create a job with custom settings
println!("📝 Demo 2: Creating a job with custom settings");
println!("----------------------------------------------");
let job2 = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script(r#"
let result = 42 * 2;
print("Calculation result: " + result);
result
"#)
.timeout(Duration::from_secs(30))
.build()?;
println!("Job 2 created with ID: {}", job2.id);
// Create the job
dispatcher.create_job(&job2).await?;
println!("✅ Job 2 stored in Redis");
// Inspect Redis entries
print_job_redis_entries(&mut redis_conn, &job2.id).await?;
println!();
// Demo 3: Environment Variables
println!("📝 Demo 3: Jobs with Environment Variables");
println!("------------------------------------------");
// Create environment variables map
let mut env_vars = HashMap::new();
env_vars.insert("API_KEY".to_string(), "secret-api-key-123".to_string());
env_vars.insert("DEBUG_MODE".to_string(), "true".to_string());
env_vars.insert("MAX_RETRIES".to_string(), "5".to_string());
env_vars.insert("SERVICE_URL".to_string(), "https://api.example.com".to_string());
let job_with_env = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"
print("Environment variables available:");
print("API_KEY: " + env.API_KEY);
print("DEBUG_MODE: " + env.DEBUG_MODE);
print("MAX_RETRIES: " + env.MAX_RETRIES);
print("SERVICE_URL: " + env.SERVICE_URL);
"Environment variables processed successfully"
"#)
.env_vars(env_vars.clone())
.timeout(Duration::from_secs(15))
.build()?;
println!("Job with environment variables created: {}", job_with_env.id);
// Store job in Redis
dispatcher.create_job(&job_with_env).await?;
println!("✅ Job with env vars stored in Redis");
// Show Redis entries including environment variables
print_job_redis_entries(&mut redis_conn, &job_with_env.id).await?;
// Demonstrate individual env var setting
let job_individual_env = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script("print('Single env var: ' + env.SINGLE_VAR); 'done'")
.env_var("SINGLE_VAR", "individual-value")
.env_var("ANOTHER_VAR", "another-value")
.build()?;
println!("Job with individual env vars created: {}", job_individual_env.id);
dispatcher.create_job(&job_individual_env).await?;
println!("✅ Job with individual env vars stored in Redis");
print_job_redis_entries(&mut redis_conn, &job_individual_env.id).await?;
println!();
// Demo 4: Create multiple jobs and show queue state
println!("📝 Demo 4: Creating multiple jobs and inspecting queue");
println!("----------------------------------------------------");
let mut job_ids = Vec::new();
for i in 3..=5 {
let script_type = match i {
3 => ScriptType::HeroScript,
4 => ScriptType::RhaiSAL,
5 => ScriptType::RhaiDSL,
_ => ScriptType::HeroScript,
};
let job = dispatcher
.new_job()
.script_type(script_type)
.script(&format!(r#"print("Job {} is running");"#, i))
.timeout(Duration::from_secs(15))
.build()?;
job_ids.push(job.id.clone());
dispatcher.create_job(&job).await?;
println!("✅ Job {} created with ID: {}", i, job.id);
}
// Show all Redis keys related to our jobs
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!();
// Demo 5: Show job status checking
println!("📝 Demo 5: Checking job statuses");
println!("--------------------------------");
for job_id in &job_ids {
match dispatcher.get_job_status(job_id).await {
Ok(status) => println!("Job {}: {:?}", job_id, status),
Err(e) => println!("Error getting status for job {}: {}", job_id, e),
}
}
println!();
// Demo 6: Simulate running a job and getting result (if worker is available)
println!("📝 Demo 6: Attempting to run job and await result");
println!("------------------------------------------------");
let simple_job = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("This job will complete quickly"); "success""#)
.timeout(Duration::from_secs(5))
.build()?;
println!("Created job for execution: {}", simple_job.id);
// Try to run the job (this will timeout if no worker is available)
match dispatcher.run_job_and_await_result(&simple_job).await {
Ok(result) => {
println!("✅ Job completed successfully!");
println!("Result: {}", result);
}
Err(e) => {
println!("⚠️ Job execution failed (likely no worker available): {}", e);
println!(" This is expected if no Hero worker is running");
}
}
// Demo 7: List all jobs
println!("📝 Demo 7: Listing all jobs");
println!("-------------------------");
let all_job_ids = match dispatcher.list_jobs().await {
Ok(job_ids) => {
println!("Found {} jobs:", job_ids.len());
for job_id in &job_ids {
println!(" - {}", job_id);
}
job_ids
}
Err(e) => {
println!("Error listing jobs: {}", e);
Vec::new()
}
};
println!();
// Demo 8: Create a job with log path and demonstrate logs functionality
println!("📝 Demo 8: Job with log path and logs retrieval");
println!("-----------------------------------------------");
let log_job = dispatcher
.new_job()
.script(r#"print("This job writes to logs"); "log_test""#)
.log_path("/tmp/hero_job_demo.log")
.timeout(Duration::from_secs(10))
.build()?;
println!("Created job with log path: {}", log_job.id);
dispatcher.create_job(&log_job).await?;
// Try to get logs (will be empty since job hasn't run)
match dispatcher.get_job_logs(&log_job.id).await {
Ok(Some(logs)) => println!("Job logs: {}", logs),
Ok(None) => println!("No logs available for job (expected - job hasn't run or no log file)"),
Err(e) => println!("Error getting logs: {}", e),
}
println!();
// Demo 9: Stop job functionality
println!("📝 Demo 9: Stopping a job");
println!("-------------------------");
if let Some(job_id) = all_job_ids.first() {
println!("Attempting to stop job: {}", job_id);
match dispatcher.stop_job(job_id).await {
Ok(()) => println!("✅ Stop request sent for job {}", job_id),
Err(e) => println!("Error stopping job: {}", e),
}
// Show stop queue
let stop_queue_key = "hero:stop_queue:demo-worker";
let stop_queue_length: i64 = redis_conn.llen(stop_queue_key).await?;
println!("📤 Stop queue length ({}): {}", stop_queue_key, stop_queue_length);
if stop_queue_length > 0 {
let stop_items: Vec<String> = redis_conn.lrange(stop_queue_key, 0, -1).await?;
println!("📋 Stop queue items:");
for (i, item) in stop_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
} else {
println!("No jobs available to stop");
}
println!();
// Demo 10: Final Redis state inspection
println!("📝 Demo 10: Final Redis state");
println!("----------------------------");
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!("\n🎉 Dispatcher demo completed!");
println!("💡 New features demonstrated:");
println!(" - list_jobs(): List all job IDs");
println!(" - stop_job(): Send stop request to worker");
println!(" - get_job_logs(): Retrieve job logs from file");
println!(" - log_path(): Configure log file for jobs");
println!("💡 To see job execution in action, start a Hero worker that processes the 'demo-worker' queue");
// Demo 11: Demonstrate new job management features
println!("📝 Demo 11: Job Management - Delete and Clear Operations");
println!("--------------------------------------------------------");
// List all current jobs
match dispatcher.list_jobs().await {
Ok(jobs) => {
println!("Current jobs in system: {:?}", jobs);
if !jobs.is_empty() {
// Delete the first job as an example
let job_to_delete = &jobs[0];
println!("Deleting job: {}", job_to_delete);
match dispatcher.delete_job(job_to_delete).await {
Ok(()) => println!("✅ Job {} deleted successfully", job_to_delete),
Err(e) => println!("❌ Error deleting job {}: {}", job_to_delete, e),
}
// Show updated job list
match dispatcher.list_jobs().await {
Ok(remaining_jobs) => println!("Remaining jobs: {:?}", remaining_jobs),
Err(e) => println!("Error listing jobs: {}", e),
}
}
}
Err(e) => println!("Error listing jobs: {}", e),
}
println!();
// Demonstrate clear all jobs
println!("Clearing all remaining jobs...");
match dispatcher.clear_all_jobs().await {
Ok(count) => println!("✅ Cleared {} jobs from Redis", count),
Err(e) => println!("❌ Error clearing jobs: {}", e),
}
// Verify all jobs are cleared
match dispatcher.list_jobs().await {
Ok(jobs) => {
if jobs.is_empty() {
println!("✅ All jobs successfully cleared from Redis");
} else {
println!("⚠️ Some jobs remain: {:?}", jobs);
}
}
Err(e) => println!("Error verifying job clearance: {}", e),
}
println!();
println!("🎉 Demo completed! The dispatcher now supports:");
println!(" • Script type routing (HeroScript, RhaiSAL, RhaiDSL)");
println!(" • Multiple workers per script type for load balancing");
println!(" • Automatic worker selection based on job script type");
println!(" • Job management: list, delete, and clear operations");
println!(" • Enhanced job logging and monitoring");
Ok(())
}
/// Print Redis entries for a specific job
async fn print_job_redis_entries(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<(), redis::RedisError> {
let job_key = format!("hero:job:{}", job_id);
println!("🔍 Redis entries for job {}:", job_id);
// Check if job hash exists
let exists: bool = conn.exists(&job_key).await?;
if exists {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&job_key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
println!(" 📋 Job data ({}): ", job_key);
for (field, value) in job_data {
println!(" {}: {}", field, value);
}
} else {
println!(" ⚠️ Key {} exists but is not a hash (type: {})", job_key, key_type);
}
} else {
println!(" ❌ No job data found at key: {}", job_key);
}
// Check work queue
let queue_key = "hero:work_queue:demo-worker";
let queue_length: i64 = conn.llen(queue_key).await?;
println!(" 📤 Work queue length ({}): {}", queue_key, queue_length);
if queue_length > 0 {
let queue_items: Vec<String> = conn.lrange(queue_key, 0, -1).await?;
println!(" 📋 Queue items:");
for (i, item) in queue_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
Ok(())
}
/// Print all dispatcher-related Redis keys
async fn print_all_dispatcher_redis_keys(
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<(), redis::RedisError> {
println!("🔍 All Hero Dispatcher Redis keys:");
// Get all keys with hero: prefix
let keys: Vec<String> = conn.keys("hero:*").await?;
if keys.is_empty() {
println!(" ❌ No Hero keys found in Redis");
return Ok(());
}
// Group keys by type
let mut job_keys = Vec::new();
let mut queue_keys = Vec::new();
let mut other_keys = Vec::new();
for key in keys {
if key.starts_with("hero:job:") {
job_keys.push(key);
} else if key.contains("queue") {
queue_keys.push(key);
} else {
other_keys.push(key);
}
}
// Print job keys
if !job_keys.is_empty() {
println!(" 📋 Job entries:");
for key in job_keys {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&key).await?;
println!(" {}: {} fields", key, job_data.len());
} else {
println!(" {}: {} (not a hash, skipping)", key, key_type);
}
}
}
// Print queue keys
if !queue_keys.is_empty() {
println!(" 📤 Queue entries:");
for key in queue_keys {
let length: i64 = conn.llen(&key).await?;
println!(" {}: {} items", key, length);
}
}
// Print other keys
if !other_keys.is_empty() {
println!(" 🔧 Other entries:");
for key in other_keys {
println!(" {}", key);
}
}
Ok(())
}

View File

@ -1,68 +0,0 @@
// Added error
// Duration is still used, Instant and sleep were removed
/// Comprehensive error type for all possible failures in the dispatcher client.
///
/// This enum covers all error scenarios that can occur during client operations,
/// from Redis connectivity issues to task execution timeouts.
#[derive(Debug)]
pub enum DispatcherError {
/// Redis connection or operation error
RedisError(redis::RedisError),
/// JSON serialization/deserialization error
SerializationError(serde_json::Error),
/// Task execution timeout - contains the task_id that timed out
Timeout(String),
/// Task not found after submission - contains the task_id (rare occurrence)
TaskNotFound(String),
/// Context ID is missing
ContextIdMissing,
/// Invalid input provided
InvalidInput(String),
/// Job operation error
JobError(hero_job::JobError),
}
impl From<redis::RedisError> for DispatcherError {
fn from(err: redis::RedisError) -> Self {
DispatcherError::RedisError(err)
}
}
impl From<serde_json::Error> for DispatcherError {
fn from(err: serde_json::Error) -> Self {
DispatcherError::SerializationError(err)
}
}
impl From<hero_job::JobError> for DispatcherError {
fn from(err: hero_job::JobError) -> Self {
DispatcherError::JobError(err)
}
}
impl std::fmt::Display for DispatcherError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
DispatcherError::RedisError(e) => write!(f, "Redis error: {}", e),
DispatcherError::SerializationError(e) => write!(f, "Serialization error: {}", e),
DispatcherError::Timeout(task_id) => {
write!(f, "Timeout waiting for task {} to complete", task_id)
}
DispatcherError::TaskNotFound(task_id) => {
write!(f, "Task {} not found after submission", task_id)
}
DispatcherError::ContextIdMissing => {
write!(f, "Context ID is missing")
}
DispatcherError::InvalidInput(msg) => {
write!(f, "Invalid input: {}", msg)
}
DispatcherError::JobError(e) => {
write!(f, "Job error: {}", e)
}
}
}
}
impl std::error::Error for DispatcherError {}
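The `From` implementations above are what let dispatcher code use the `?` operator across error sources. A minimal sketch, with a hypothetical helper function that is not part of this crate:
```rust
use redis::Connection;

// Hypothetical helper: both the Redis and JSON errors convert into
// DispatcherError automatically through the From impls defined above.
fn load_job_params(conn: &mut Connection, job_id: &str) -> Result<serde_json::Value, DispatcherError> {
    let raw: String = redis::cmd("HGET")
        .arg(format!("hero:job:{job_id}"))
        .arg("params")
        .query(conn)?;                        // redis::RedisError -> DispatcherError
    let params = serde_json::from_str(&raw)?; // serde_json::Error -> DispatcherError
    Ok(params)
}
```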

View File

@ -0,0 +1,5 @@
# Architecture
The supervisor runs actors and manages their lifecycle. It also dispatches jobs to workers and provides an API for job supervision. Jobs are dispatched to workers over Redis. Each job carries a script, the code the worker executes; two script formats are used: Rhai and HeroScript. Jobs also carry parameters such as timeout and priority for job management, plus context variables available to the script, such as CALLER_ID and CONTEXT_ID.

There are four types of workers: OSIS, SAL, V, and Python. OSIS and SAL workers run Rhai scripts, while V and Python workers run HeroScript. Each worker has its own queue, processes only jobs of its type, and executes scripts in its own way: the OSIS worker runs non-blocking Rhai scripts one after another on a single thread using the Rhai engine; the SAL worker runs blocking asynchronous Rhai scripts concurrently, spawning a new thread for each evaluation; V and Python workers execute HeroScript using a V or Python HeroScript engine.
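To make the routing concrete, here is a minimal sketch of how a job's script type could map to a worker queue. The `ScriptType` variants follow the worker names above, and the `hero:work_queue:` key prefix follows the scheme used elsewhere in this repository; the function itself is hypothetical:
```rust
/// Worker types described above; each consumes its own Redis queue.
#[derive(Debug, Clone, Copy)]
enum ScriptType {
    Osis,   // non-blocking Rhai, executed sequentially on one thread
    Sal,    // blocking async Rhai, one thread per evaluation
    V,      // HeroScript on the V engine
    Python, // HeroScript on the Python engine
}

/// Hypothetical mapping from script type to the worker's Redis queue key.
fn worker_queue(script_type: ScriptType) -> String {
    let worker = match script_type {
        ScriptType::Osis => "osis",
        ScriptType::Sal => "sal",
        ScriptType::V => "v",
        ScriptType::Python => "python",
    };
    format!("hero:work_queue:{worker}")
}

fn main() {
    // Dispatching a job then amounts to pushing its ID onto this queue.
    println!("{}", worker_queue(ScriptType::Sal)); // hero:work_queue:sal
}
```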

core/engine/Cargo.lock generated
View File

@ -1,794 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "ahash"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
dependencies = [
"cfg-if",
"const-random",
"getrandom 0.3.3",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bincode"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740"
dependencies = [
"bincode_derive",
"serde",
"unty",
]
[[package]]
name = "bincode_derive"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09"
dependencies = [
"virtue",
]
[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cc"
version = "1.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"wasm-bindgen",
"windows-link",
]
[[package]]
name = "const-random"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
dependencies = [
"const-random-macro",
]
[[package]]
name = "const-random-macro"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom 0.2.16",
"once_cell",
"tiny-keccak",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "engine"
version = "0.1.0"
dependencies = [
"chrono",
"heromodels",
"heromodels-derive",
"heromodels_core",
"rhai",
]
[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "heromodels"
version = "0.1.0"
dependencies = [
"bincode",
"chrono",
"heromodels-derive",
"heromodels_core",
"ourdb",
"rhai",
"rhai_client_macros",
"serde",
"serde_json",
"strum",
"strum_macros",
"tst",
"uuid",
]
[[package]]
name = "heromodels-derive"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "heromodels_core"
version = "0.1.0"
dependencies = [
"chrono",
"serde",
]
[[package]]
name = "iana-time-zone"
version = "0.1.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"log",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "instant"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "no-std-compat"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"
dependencies = [
"spin",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
dependencies = [
"portable-atomic",
]
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "portable-atomic"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.16",
]
[[package]]
name = "rhai"
version = "1.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2780e813b755850e50b178931aaf94ed24f6817f46aaaf5d21c13c12d939a249"
dependencies = [
"ahash",
"bitflags",
"instant",
"no-std-compat",
"num-traits",
"once_cell",
"rhai_codegen",
"rust_decimal",
"smallvec",
"smartstring",
"thin-vec",
]
[[package]]
name = "rhai_client_macros"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"rhai",
"syn",
]
[[package]]
name = "rhai_codegen"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "rust_decimal"
version = "1.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50"
dependencies = [
"arrayvec",
"num-traits",
]
[[package]]
name = "rustversion"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "smallvec"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
[[package]]
name = "smartstring"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
dependencies = [
"autocfg",
"static_assertions",
"version_check",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strum"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
[[package]]
name = "strum_macros"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn",
]
[[package]]
name = "syn"
version = "2.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thin-vec"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d"
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
dependencies = [
"crunchy",
]
[[package]]
name = "tst"
version = "0.1.0"
dependencies = [
"ourdb",
"thiserror",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unty"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
[[package]]
name = "uuid"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.3",
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "virtue"
version = "0.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "windows-core"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-implement"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-link"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
[[package]]
name = "windows-result"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
dependencies = [
"windows-link",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@ -1,38 +0,0 @@
[package]
name = "rhailib_engine"
version = "0.1.0"
edition = "2021"
description = "Central Rhai engine for heromodels"
[dependencies]
rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }
heromodels_core = { path = "../../../db/heromodels_core" }
chrono = "0.4"
heromodels-derive = { path = "../../../db/heromodels-derive" }
rhailib_dsl = { path = "../../../rhailib/src/dsl" }
[features]
default = ["calendar", "finance"]
calendar = []
finance = []
# Flow module is now updated to use our approach to Rhai engine registration
flow = []
legal = []
projects = []
biz = []
[[example]]
name = "calendar_example"
path = "examples/calendar/example.rs"
required-features = ["calendar"]
[[example]]
name = "flow_example"
path = "examples/flow/example.rs"
required-features = ["flow"]
[[example]]
name = "finance"
path = "examples/finance/example.rs"
required-features = ["finance"]

View File

@ -1,135 +0,0 @@
# HeroModels Rhai Engine (`engine`)
The `engine` crate provides a central Rhai scripting engine for the HeroModels project. It offers a unified way to interact with various HeroModels modules (like Calendar, Flow, Legal, etc.) through Rhai scripts, leveraging a shared database connection.
## Overview
This crate facilitates:
1. **Centralized Engine Creation**: A function `create_heromodels_engine` to instantiate a Rhai engine pre-configured with common settings and all enabled HeroModels modules.
2. **Modular Registration**: HeroModels modules (Calendar, Flow, etc.) can be registered with a Rhai engine based on feature flags.
3. **Script Evaluation Utilities**: Helper functions for compiling Rhai scripts into Abstract Syntax Trees (ASTs) and for evaluating scripts or ASTs.
4. **Mock Database**: Includes a `mock_db` module for testing and running examples without needing a live database.
## Core Components & Usage
### Library (`src/lib.rs`)
- **`create_heromodels_engine(db: Arc<OurDB>) -> Engine`**:
Creates and returns a new `rhai::Engine` instance. This engine is configured with default settings (e.g., max expression depths, string/array/map sizes) and then all available HeroModels modules (controlled by feature flags) are registered with it, using the provided `db` (an `Arc<OurDB>`) instance.
- **`register_all_modules(engine: &mut Engine, db: Arc<OurDB>)`**:
Registers all HeroModels modules for which features are enabled (e.g., `calendar`, `flow`, `legal`, `projects`, `biz`) with the given Rhai `engine`. Each module is passed the shared `db` instance.
- **`eval_script(engine: &Engine, script: &str) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
A utility function to directly evaluate a Rhai script string using the provided `engine`.
- **`compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>>`**:
Compiles a Rhai script string into an `AST` (Abstract Syntax Tree) for potentially faster repeated execution.
- **`run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
Runs a pre-compiled `AST` with a given `scope` using the provided `engine`.
- **`mock_db` module**:
Provides `create_mock_db()` which returns an `Arc<OurDB>` instance suitable for testing and examples. This allows scripts that interact with database functionalities to run without external database dependencies.
### Basic Usage
```rust
use std::sync::Arc;
use engine::{create_heromodels_engine, eval_script};
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB; // Actual DB type
// Create a mock database (or connect to a real one)
let db: Arc<OurDB> = create_mock_db();
// Create the Rhai engine with all enabled modules registered
let engine = create_heromodels_engine(db);
// Run a Rhai script
let script = r#"
// Example: Assuming 'calendar' feature is enabled
let cal = new_calendar("My Test Calendar");
cal.set_description("This is a test.");
print(`Created calendar: ${cal.get_name()}`);
cal.get_id() // Return the ID
"#;
match eval_script(&engine, script) {
Ok(val) => println!("Script returned: {:?}", val),
Err(err) => eprintln!("Script error: {}", err),
}
```
### Using Specific Modules Manually
If you need more fine-grained control or only want specific modules (and prefer not to rely solely on feature flags at compile time for `create_heromodels_engine`), you can initialize an engine and register modules manually:
```rust
use std::sync::Arc;
use rhai::Engine;
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB;
// Import the specific module registration function
use heromodels::models::calendar::register_calendar_rhai_module;
// Create a mock database
let db: Arc<OurDB> = create_mock_db();
// Create a new Rhai engine
let mut engine = Engine::new();
// Register only the calendar module
register_calendar_rhai_module(&mut engine, db.clone());
// Now you can use calendar-related functions in your scripts
let result = engine.eval::<String>(r#" let c = new_calendar("Solo Cal"); c.get_name() "#);
match result {
Ok(name) => println!("Calendar name: {}", name),
Err(err) => eprintln!("Error: {}", err),
}
```
## Examples
This crate includes several examples demonstrating how to use different HeroModels modules with Rhai. Each example typically requires its corresponding feature to be enabled.
- `calendar_example`: Working with calendars, events, and attendees (requires `calendar` feature).
- `flow_example`: Working with flows, steps, and signature requirements (requires `flow` feature).
- `finance_example`: Working with financial models (requires `finance` feature).
- *(Additional examples for `legal`, `projects`, `biz` would follow the same pattern if present).*
To run an example (e.g., `calendar_example`):
```bash
cargo run --example calendar_example --features calendar
```
*(Note: Examples in `Cargo.toml` already specify `required-features`, so simply `cargo run --example calendar_example` might suffice if those features are part of the default set or already enabled.)*
## Features
The crate uses feature flags to control which HeroModels modules are compiled and registered:
- `calendar`: Enables the Calendar module.
- `finance`: Enables the Finance module.
- `flow`: Enables the Flow module.
- `legal`: Enables the Legal module.
- `projects`: Enables the Projects module.
- `biz`: Enables the Business module.
The `default` features are `["calendar", "finance"]`. You can enable other modules by specifying them during the build or in your project's `Cargo.toml` if this `engine` crate is a dependency.
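For example, to build with additional modules enabled:
```bash
cargo build --features "flow legal projects biz"
```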
## Dependencies
Key dependencies include:
- `rhai`: The Rhai scripting engine.
- `heromodels`: Provides the core data models and database interaction logic, including the Rhai registration functions for each module.
- `heromodels_core`: Core utilities for HeroModels.
- `chrono`: For date/time utilities.
- `heromodels-derive`: Procedural macros used by HeroModels.
## License
This crate is part of the HeroModels project and shares its license.

View File

@ -1,16 +0,0 @@
fn main() {
// Tell Cargo to re-run this build script if the calendar/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/calendar/rhai.rs");
// Tell Cargo to re-run this build script if the flow/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/flow/rhai.rs");
// Tell Cargo to re-run this build script if the legal/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/legal/rhai.rs");
// Tell Cargo to re-run this build script if the projects/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/projects/rhai.rs");
// Tell Cargo to re-run this build script if the biz/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/biz/rhai.rs");
}

View File

@ -1,331 +0,0 @@
# Architecture of the `rhailib_engine` Crate
The `rhailib_engine` crate serves as the central Rhai scripting engine for the heromodels ecosystem. It provides a unified interface for creating, configuring, and executing Rhai scripts with access to all business domain modules through a feature-based architecture.
## Core Architecture
The engine acts as an orchestration layer that brings together the DSL modules and provides execution utilities:
```mermaid
graph TD
A[rhailib_engine] --> B[Engine Creation]
A --> C[Script Execution]
A --> D[Mock Database]
A --> E[Feature Management]
B --> B1[create_heromodels_engine]
B --> B2[Engine Configuration]
B --> B3[DSL Registration]
C --> C1[eval_script]
C --> C2[eval_file]
C --> C3[compile_script]
C --> C4[run_ast]
D --> D1[create_mock_db]
D --> D2[seed_mock_db]
D --> D3[Domain Data Seeding]
E --> E1[calendar]
E --> E2[finance]
E --> E3[flow]
E --> E4[legal]
E --> E5[projects]
E --> E6[biz]
B3 --> F[rhailib_dsl]
F --> G[All Domain Modules]
```
## Core Components
### 1. Engine Factory (`create_heromodels_engine`)
The primary entry point for creating a fully configured Rhai engine:
```rust
pub fn create_heromodels_engine() -> Engine
```
**Responsibilities:**
- Creates a new Rhai engine instance
- Configures engine limits and settings
- Registers all available DSL modules
- Returns a ready-to-use engine
**Configuration Settings:**
- **Expression Depth**: 128 levels for both expressions and functions
- **String Size Limit**: 10 MB maximum string size
- **Array Size Limit**: 10,000 elements maximum
- **Map Size Limit**: 10,000 key-value pairs maximum
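In terms of rhai's standard configuration API, these limits correspond roughly to the following calls (a sketch; the authoritative values are set inside `create_heromodels_engine`):
```rust
use rhai::Engine;

let mut engine = Engine::new();
engine.set_max_expr_depths(128, 128);         // expression and function nesting depth
engine.set_max_string_size(10 * 1024 * 1024); // 10 MB string limit
engine.set_max_array_size(10_000);            // 10,000 array elements
engine.set_max_map_size(10_000);              // 10,000 map entries
```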
### 2. Script Execution Utilities
#### Direct Script Evaluation
```rust
pub fn eval_script(engine: &Engine, script: &str) -> Result<Dynamic, Box<EvalAltResult>>
```
Executes Rhai script strings directly with immediate results.
#### File-Based Script Execution
```rust
pub fn eval_file(engine: &Engine, file_path: &Path) -> Result<Dynamic, Box<EvalAltResult>>
```
Loads and executes Rhai scripts from the filesystem, with error handling for missing or unreadable files.
#### Compiled Script Execution
```rust
pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<EvalAltResult>>
pub fn run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<Dynamic, Box<EvalAltResult>>
```
Compiles a script once into an `AST` so it can be executed repeatedly without re-parsing.
### 3. Mock Database System
#### Database Creation
```rust
pub fn create_mock_db() -> Arc<OurDB>
```
Creates an in-memory database instance for testing and examples.
#### Data Seeding
```rust
pub fn seed_mock_db(db: Arc<OurDB>)
```
Populates the mock database with representative data across all domains.
## Feature-Based Architecture
The engine uses Cargo features to control which domain modules are included:
### Available Features
- **`calendar`** (default): Calendar and event management
- **`finance`** (default): Financial accounts, assets, and marketplace
- **`flow`**: Workflow and approval processes
- **`legal`**: Contract and legal document management
- **`projects`**: Project and task management
- **`biz`**: Business operations and entities
### Feature Integration Pattern
```rust
#[cfg(feature = "calendar")]
use heromodels::models::calendar::*;
#[cfg(feature = "finance")]
use heromodels::models::finance::*;
```
This allows for:
- **Selective Compilation**: Only include needed functionality
- **Reduced Binary Size**: Exclude unused domain modules
- **Modular Deployment**: Different configurations for different use cases
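Module registration is gated the same way. A sketch using the per-domain registration functions (the calendar function is documented elsewhere in this repo; the finance counterpart is assumed by analogy):
```rust
#[cfg(feature = "calendar")]
{
    use heromodels::models::calendar::register_calendar_rhai_module;
    register_calendar_rhai_module(&mut engine, db.clone());
}
#[cfg(feature = "finance")]
{
    // Assumed to exist by analogy with the calendar module
    use heromodels::models::finance::register_finance_rhai_module;
    register_finance_rhai_module(&mut engine, db.clone());
}
```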
## Mock Database Architecture
### Database Structure
The mock database provides a complete testing environment:
```mermaid
graph LR
A[Mock Database] --> B[Calendar Data]
A --> C[Finance Data]
A --> D[Flow Data]
A --> E[Legal Data]
A --> F[Projects Data]
B --> B1[Calendars]
B --> B2[Events]
B --> B3[Attendees]
C --> C1[Accounts]
C --> C2[Assets - ERC20/ERC721]
C --> C3[Marketplace Listings]
D --> D1[Flows]
D --> D2[Flow Steps]
D --> D3[Signature Requirements]
E --> E1[Contracts]
E --> E2[Contract Revisions]
E --> E3[Contract Signers]
F --> F1[Projects]
F --> F2[Project Members]
F --> F3[Project Tags]
```
### Seeding Strategy
Each domain has its own seeding function that creates realistic test data:
#### Calendar Seeding
- Creates work calendars with descriptions
- Adds team meetings with attendees
- Sets up recurring events
#### Finance Seeding
- Creates demo trading accounts
- Generates ERC20 tokens and ERC721 NFTs
- Sets up marketplace listings with metadata
#### Flow Seeding (Feature-Gated)
- Creates document approval workflows
- Defines multi-step approval processes
- Sets up signature requirements
#### Legal Seeding (Feature-Gated)
- Creates service agreements
- Adds contract revisions and versions
- Defines contract signers and roles
#### Projects Seeding (Feature-Gated)
- Creates project instances with status tracking
- Assigns team members and priorities
- Adds project tags and categorization
## Error Handling Architecture
### Comprehensive Error Propagation
```rust
Result<Dynamic, Box<EvalAltResult>>
```
All functions return proper Rhai error types that include:
- **Script Compilation Errors**: Syntax and parsing issues
- **Runtime Errors**: Execution failures and exceptions
- **File System Errors**: File reading and path resolution issues
- **Database Errors**: Mock database operation failures
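A sketch of telling these cases apart at a call site (variant names from the `rhai` crate):
```rust
use rhai::EvalAltResult;

match eval_script(&engine, "let x = ;") {
    Ok(val) => println!("ok: {:?}", val),
    Err(err) => match *err {
        // Syntax/parsing problems surface before execution starts
        EvalAltResult::ErrorParsing(ref parse_err, pos) => {
            eprintln!("compile error at {}: {}", pos, parse_err)
        }
        // File reading failures are wrapped as system errors (see below)
        EvalAltResult::ErrorSystem(ref msg, _) => eprintln!("system error: {}", msg),
        // Everything else is a runtime failure
        other => eprintln!("runtime error: {}", other),
    },
}
```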
### Error Context Enhancement
File operations include enhanced error context:
```rust
Err(Box::new(EvalAltResult::ErrorSystem(
format!("Failed to read script file: {}", file_path.display()),
Box::new(io_err),
)))
```
## Performance Considerations
### Engine Configuration
Optimized settings for production use:
- **Memory Limits**: Prevent runaway script execution
- **Depth Limits**: Avoid stack overflow from deep recursion
- **Size Limits**: Control memory usage for large data structures
### Compilation Strategy
- **AST Caching**: Compile once, execute multiple times
- **Scope Management**: Efficient variable scope handling
- **Module Registration**: One-time registration at engine creation
### Mock Database Performance
- **In-Memory Storage**: Fast access for testing scenarios
- **Temporary Directories**: Automatic cleanup after use
- **Lazy Loading**: Data seeded only when needed
## Integration Patterns
### Script Development Workflow
```rust
// 1. Create engine with all modules
let engine = create_heromodels_engine();
// 2. Execute business logic scripts
let result = eval_script(&engine, r#"
let company = new_company()
.name("Tech Startup")
.business_type("startup");
save_company(company)
"#)?;
// 3. Handle results and errors
match result {
Ok(value) => println!("Success: {:?}", value),
Err(error) => eprintln!("Error: {}", error),
}
```
### Testing Integration
```rust
// 1. Create mock database
let db = create_mock_db();
seed_mock_db(db.clone());
// 2. Create engine
let engine = create_heromodels_engine();
// 3. Test scripts against seeded data
let script = r#"
let calendars = list_calendars();
calendars.len()
"#;
let count = eval_script(&engine, script)?;
```
### File-Based Script Execution
```rust
// Execute scripts from files
let result = eval_file(&engine, Path::new("scripts/business_logic.rhai"))?;
```
## Deployment Configurations
### Minimal Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["calendar"] }
```
### Full Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", features = ["calendar", "finance", "flow", "legal", "projects", "biz"] }
```
### Custom Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["finance", "biz"] }
```
## Security Considerations
### Script Execution Limits
- **Resource Limits**: Prevent resource exhaustion attacks
- **Execution Time**: Configurable timeouts for long-running scripts
- **Memory Bounds**: Controlled memory allocation
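One way to enforce such a budget is rhai's standard progress hook (a sketch; not necessarily how this crate wires it up):
```rust
use rhai::Engine;

let mut engine = Engine::new();

// Terminate any script that exceeds an operations budget
engine.on_progress(|ops| {
    if ops > 1_000_000 {
        Some("operation budget exceeded".into()) // returning Some(_) aborts the script
    } else {
        None // keep running
    }
});
```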
### Database Access
- **Mock Environment**: Safe testing without production data exposure
- **Temporary Storage**: Automatic cleanup prevents data persistence
- **Isolated Execution**: Each test run gets fresh database state
## Extensibility
### Adding New Domains
1. Create new feature flag in `Cargo.toml`
2. Add conditional imports for new models
3. Implement seeding function for test data
4. Register with DSL module system
### Custom Engine Configuration
```rust
let mut engine = Engine::new();
// Custom configuration
engine.set_max_expr_depths(256, 256);
// Register specific modules
rhailib_dsl::register_dsl_modules(&mut engine);
```
This architecture provides a robust, feature-rich foundation for Rhai script execution while maintaining flexibility, performance, and security.

View File

@ -1,101 +0,0 @@
// calendar_script.rhai
// Example Rhai script for working with Calendar models
// Constants for AttendanceStatus
const NO_RESPONSE = "NoResponse";
const ACCEPTED = "Accepted";
const DECLINED = "Declined";
const TENTATIVE = "Tentative";
// Create a new calendar using builder pattern
let my_calendar = new_calendar()
.name("Team Calendar")
.description("Calendar for team events and meetings");
print(`Created calendar: ${my_calendar.name} (${my_calendar.id})`);
// Add attendees to the event
let alice = new_attendee()
.with_contact_id(1)
.with_status(NO_RESPONSE);
let bob = new_attendee()
.with_contact_id(2)
.with_status(ACCEPTED);
let charlie = new_attendee()
.with_contact_id(3)
.with_status(TENTATIVE);
// Create a new event using builder pattern
// Note: Timestamps are in seconds since epoch
let now = timestamp_now();
let one_hour = 60 * 60;
let meeting = new_event()
.title("Weekly Sync")
.reschedule(now, now + one_hour)
.location("Conference Room A")
.description("Regular team sync meeting")
.add_attendee(alice)
.add_attendee(bob)
.add_attendee(charlie)
.save_event();
print(`Created event: ${meeting.title}`);
// delete_event removes the saved record; the in-memory `meeting` object stays usable below
meeting.delete_event();
print(`Deleted event: ${meeting.title}`);
// Print attendee info
let attendees = meeting.attendees;
print(`Event has ${attendees.len()} attendees`);
// Update Charlie's attendee status directly
meeting.update_attendee_status(3, ACCEPTED);
print(`Updated Charlie's status to: ${ACCEPTED}`);
// Add the event to the calendar
my_calendar.add_event_to_calendar(meeting);
// Print events info
print(`Added event to calendar`);
// Save the calendar to the database
let saved_calendar = my_calendar.save_calendar();
print(`Calendar saved to database with ID: ${saved_calendar.id}`);
// Retrieve the calendar from the database using the ID from the saved calendar
let retrieved_calendar = get_calendar_by_id(saved_calendar.id);
if retrieved_calendar != () {
print(`Retrieved calendar: ${retrieved_calendar.name}`);
print(`Retrieved calendar successfully`);
} else {
print("Failed to retrieve calendar from database");
}
// List all calendars in the database
let all_calendars = list_calendars();
print("\nListing all calendars in database:");
let calendar_count = 0;
for calendar in all_calendars {
print(` - Calendar: ${calendar.name} (ID: ${calendar.id})`);
calendar_count += 1;
}
print(`Total calendars: ${calendar_count}`);
// List all events in the database
let all_events = list_events();
print("\nListing all events in database:");
let event_count = 0;
for event in all_events {
print(` - Event: ${event.title} (ID: ${event.id})`);
event_count += 1;
}
print(`Total events: ${event_count}`);
// Helper function to get current timestamp
fn timestamp_now() {
// This would typically be provided by the host application
// For this example, we'll use a fixed timestamp
1685620800 // June 1, 2023, 12:00 PM
}

View File

@ -1,70 +0,0 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
mod mock;
use mock::seed_calendar_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Calendar Rhai Example");
println!("=====================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_calendar_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine(db.clone());
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let manifest_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let script_path = manifest_dir
.join("examples")
.join("calendar")
.join("calendar_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@ -1,60 +0,0 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with calendar data
pub fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let calendar = Calendar::new(None, "Work Calendar".to_string())
.description("My work schedule".to_string());
// Store the calendar in the database
let (calendar_id, mut saved_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
.build();
// Store the event in the database first to get its ID
let (event_id, saved_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
saved_calendar = saved_calendar.add_event(event_id as i64);
// Store the updated calendar in the database
let (_calendar_id, final_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&saved_calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
final_calendar.name,
final_calendar.get_id()
);
println!(
" - Added event: {} (ID: {})",
saved_event.title,
saved_event.get_id()
);
}

View File

@ -1,70 +0,0 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
use std::path::Path;
mod mock;
use mock::seed_finance_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Finance Rhai Example");
println!("===================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_finance_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine(db.clone());
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("finance_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@ -1,202 +0,0 @@
// finance_script.rhai
// Example Rhai script for working with Finance models
// Constants for AssetType
const NATIVE = "Native";
const ERC20 = "Erc20";
const ERC721 = "Erc721";
const ERC1155 = "Erc1155";
// Constants for ListingStatus
const ACTIVE = "Active";
const SOLD = "Sold";
const CANCELLED = "Cancelled";
const EXPIRED = "Expired";
// Constants for ListingType
const FIXED_PRICE = "FixedPrice";
const AUCTION = "Auction";
const EXCHANGE = "Exchange";
// Constants for BidStatus
const BID_ACTIVE = "Active";
const BID_ACCEPTED = "Accepted";
const BID_REJECTED = "Rejected";
const BID_CANCELLED = "Cancelled";
// Create a new account using builder pattern
let alice_account = new_account()
.name("Alice's Account")
.user_id(101)
.description("Alice's primary trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
print(`Created account: ${alice_account.get_name()} (User ID: ${alice_account.get_user_id()})`);
// Save the account to the database
let saved_alice = set_account(alice_account);
print(`Account saved to database with ID: ${saved_alice.get_id()}`);
// Create a new asset using builder pattern
let token_asset = new_asset()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(ERC20)
.decimals(18);
print(`Created asset: ${token_asset.get_name()} (${token_asset.get_amount()} ${token_asset.get_asset_type()})`);
// Save the asset to the database
let saved_token = set_asset(token_asset);
print(`Asset saved to database with ID: ${saved_token.get_id()}`);
// Add the asset to Alice's account
saved_alice = saved_alice.add_asset(saved_token.get_id());
saved_alice = set_account(saved_alice);
print(`Added asset ${saved_token.get_name()} to ${saved_alice.get_name()}`);
// Create a new NFT asset
let nft_asset = new_asset()
.name("Herocode #42")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(ERC721)
.decimals(0);
// Save the NFT to the database
let saved_nft = set_asset(nft_asset);
print(`NFT saved to database with ID: ${saved_nft.get_id()}`);
// Create Bob's account
let bob_account = new_account()
.name("Bob's Account")
.user_id(102)
.description("Bob's trading account")
.ledger("ethereum")
.address("0xfedcba0987654321fedcba0987654321fedcba09")
.pubkey("0x654321fedcba0987654321fedcba0987654321fe");
// Save Bob's account
let saved_bob = set_account(bob_account);
print(`Created and saved Bob's account with ID: ${saved_bob.get_id()}`);
// Create a listing for the NFT
let nft_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_nft.get_id())
.price(0.5)
.currency("ETH")
.listing_type(AUCTION)
.title("Rare Herocode NFT")
.description("One of a kind digital collectible")
.image_url("https://example.com/nft/42.png")
.expires_at(timestamp_now() + 86400) // 24 hours from now
.add_tag("rare")
.add_tag("collectible")
.add_tag("digital art")
.set_listing();
// Save the listing
print(`Created listing: ${nft_listing.get_title()} (ID: ${nft_listing.get_id()})`);
print(`Listing status: ${nft_listing.get_status()}, Type: ${nft_listing.get_listing_type()}`);
print(`Listing price: ${nft_listing.get_price()} ${nft_listing.get_currency()}`);
// Create a bid from Bob
let bob_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_bob.get_id())
.amount(1.5)
.currency("ETH")
.set_bid();
// Save the bid
print(`Created bid from ${saved_bob.get_name()} for ${bob_bid.get_amount()} ${bob_bid.get_currency()}`);
// Add the bid to the listing
nft_listing.add_bid(bob_bid);
nft_listing.set_listing();
print(`Added bid to listing ${nft_listing.get_title()}`);
// Create another bid with higher amount
let charlie_account = new_account()
.name("Charlie's Account")
.user_id(103)
.description("Charlie's trading account")
.ledger("ethereum")
.address("0x1122334455667788991122334455667788990011")
.pubkey("0x8877665544332211887766554433221188776655");
let saved_charlie = set_account(charlie_account);
print(`Created and saved Charlie's account with ID: ${saved_charlie.get_id()}`);
let charlie_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_charlie.get_id())
.amount(2.5)
.currency("ETH")
.set_bid();
print(`Created higher bid from ${saved_charlie.get_name()} for ${charlie_bid.get_amount()} ${charlie_bid.get_currency()}`);
// Add the higher bid to the listing
nft_listing.add_bid(charlie_bid)
.set_listing();
print(`Added higher bid to listing ${nft_listing.get_title()}`);
nft_listing.sale_price(2.5)
.set_listing();
// Complete the sale to the highest bidder (Charlie)
nft_listing.complete_sale(saved_charlie.get_id())
.set_listing();
print(`Completed sale of ${nft_listing.get_title()} to ${saved_charlie.get_name()}`);
print(`New listing status: ${nft_listing.get_status()}`);
// Retrieve the listing from the database
let retrieved_listing = get_listing_by_id(nft_listing.get_id());
print(`Retrieved listing: ${retrieved_listing.get_title()} (Status: ${retrieved_listing.get_status()})`);
// Create a fixed price listing
let token_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_token.get_id())
.price(100.0)
.currency("USDC")
.listing_type(FIXED_PRICE)
.title("HERO Tokens for Sale")
.description("100 HERO tokens at fixed price")
.set_listing();
// Save the fixed price listing
print(`Created fixed price listing: ${token_listing.get_title()} (ID: ${token_listing.get_id()})`);
// Cancel the listing
token_listing.cancel();
token_listing.set_listing();
print(`Cancelled listing: ${token_listing.get_title()}`);
print(`Listing status: ${token_listing.get_status()}`);
// Print summary of all accounts
print("\nAccount Summary:");
print(`Alice (ID: ${saved_alice.get_id()}): ${saved_alice.get_assets().len()} assets`);
print(`Bob (ID: ${saved_bob.get_id()}): ${saved_bob.get_assets().len()} assets`);
print(`Charlie (ID: ${saved_charlie.get_id()}): ${saved_charlie.get_assets().len()} assets`);
// Print summary of all listings
print("\nListing Summary:");
print(`NFT Auction (ID: ${nft_listing.get_id()}): ${nft_listing.get_status()}`);
print(`Token Sale (ID: ${token_listing.get_id()}): ${token_listing.get_status()}`);
// Print summary of all bids
print("\nBid Summary:");
print(`Bob's bid: ${bob_bid.get_amount()} ${bob_bid.get_currency()} (Status: ${bob_bid.get_status()})`);
print(`Charlie's bid: ${charlie_bid.get_amount()} ${charlie_bid.get_currency()} (Status: ${charlie_bid.get_status()})`);

View File

@ -1,111 +0,0 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with finance data
pub fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, mut updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
updated_account = updated_account.add_asset(token_id);
updated_account = updated_account.add_asset(nft_id);
// Update the account in the database
let (_, final_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&updated_account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("https://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
final_account.name,
final_account.get_id()
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name,
updated_token.get_id()
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name,
updated_nft.get_id()
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title,
updated_listing.get_id()
);
}

View File

@ -1,162 +0,0 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use rhai::Scope;
use std::path::Path;
mod mock;
use mock::seed_flow_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Flow Rhai Example");
println!("=================");
// Create a mock database
let db = create_mock_db();
// Seed the database with initial data
seed_flow_data(db.clone());
// Create the Rhai engine with all modules registered
let engine = create_heromodels_engine(db.clone());
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("flow_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
}
Err(err) => {
eprintln!("\nError running script: {}", err);
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)));
}
}
// Demonstrate direct Rust interaction with the Rhai-exposed flow functionality
println!("\nDirect Rust interaction with Rhai-exposed flow functionality");
println!("----------------------------------------------------------");
// Create a new scope
let mut scope = Scope::new();
// Create a new flow using the Rhai function
let result = engine.eval::<Flow>("new_flow(0, \"Direct Rust Flow\")");
match result {
Ok(mut flow) => {
println!(
"Created flow from Rust: {} (ID: {})",
flow.name,
flow.get_id()
);
// Set flow status using the builder pattern
flow = flow.status("active".to_string());
println!("Set flow status to: {}", flow.status);
// Create a new flow step using the Rhai function
let result = engine.eval::<FlowStep>("new_flow_step(0, 1)");
match result {
Ok(mut step) => {
println!(
"Created flow step from Rust: Step Order {} (ID: {})",
step.step_order,
step.get_id()
);
// Set step description
step = step.description("Direct Rust Step".to_string());
println!(
"Set step description to: {}",
step.description
.clone()
.unwrap_or_else(|| "None".to_string())
);
// Create a signature requirement using the Rhai function
let result = engine.eval::<SignatureRequirement>(
"new_signature_requirement(0, 1, \"Direct Rust Signer\", \"Please sign this document\")"
);
match result {
Ok(req) => {
println!(
"Created signature requirement from Rust: Public Key {} (ID: {})",
req.public_key,
req.get_id()
);
// Add the step to the flow using the builder pattern
flow = flow.add_step(step);
println!(
"Added step to flow. Flow now has {} steps",
flow.steps.len()
);
// Save the flow to the database using the Rhai function
let save_flow_script = "fn save_it(f) { return db::save_flow(f); }";
let save_flow_ast = engine.compile(save_flow_script).unwrap();
let result = engine.call_fn::<Flow>(
&mut scope,
&save_flow_ast,
"save_it",
(flow,),
);
match result {
Ok(saved_flow) => {
println!(
"Saved flow to database with ID: {}",
saved_flow.get_id()
);
}
Err(err) => eprintln!("Error saving flow: {}", err),
}
// Save the signature requirement to the database using the Rhai function
let save_req_script =
"fn save_it(r) { return db::save_signature_requirement(r); }";
let save_req_ast = engine.compile(save_req_script).unwrap();
let result = engine.call_fn::<SignatureRequirement>(
&mut scope,
&save_req_ast,
"save_it",
(req,),
);
match result {
Ok(saved_req) => {
println!(
"Saved signature requirement to database with ID: {}",
saved_req.get_id()
);
}
Err(err) => {
eprintln!("Error saving signature requirement: {}", err)
}
}
}
Err(err) => eprintln!("Error creating signature requirement: {}", err),
}
}
Err(err) => eprintln!("Error creating flow step: {}", err),
}
}
Err(err) => eprintln!("Error creating flow: {}", err),
}
Ok(())
}

View File

@ -1,111 +0,0 @@
// flow_script.rhai
// Example Rhai script for working with Flow models
// Constants for Flow status
const STATUS_DRAFT = "draft";
const STATUS_ACTIVE = "active";
const STATUS_COMPLETED = "completed";
const STATUS_CANCELLED = "cancelled";
// Create a new flow and set its properties via the setter functions
let my_flow = new_flow(0, "flow-123");
name(my_flow, "Document Approval Flow");
status(my_flow, STATUS_DRAFT);
print(`Created flow: ${get_flow_name(my_flow)} (ID: ${get_flow_id(my_flow)})`);
print(`Status: ${get_flow_status(my_flow)}`);
// Create flow steps and set their properties
let step1 = new_flow_step(0, 1);
description(step1, "Initial review by legal team");
status(step1, STATUS_DRAFT);
let step2 = new_flow_step(0, 2);
description(step2, "Approval by department head");
status(step2, STATUS_DRAFT);
let step3 = new_flow_step(0, 3);
description(step3, "Final signature by CEO");
status(step3, STATUS_DRAFT);
// Create signature requirements
let req1 = new_signature_requirement(0, get_flow_step_id(step1), "legal@example.com", "Please review this document");
signed_by(req1, "Legal Team");
status(req1, STATUS_DRAFT);
let req2 = new_signature_requirement(0, get_flow_step_id(step2), "dept@example.com", "Department approval needed");
signed_by(req2, "Department Head");
status(req2, STATUS_DRAFT);
let req3 = new_signature_requirement(0, get_flow_step_id(step3), "ceo@example.com", "Final approval required");
signed_by(req3, "CEO");
status(req3, STATUS_DRAFT);
print(`Created flow steps with signature requirements`);
// Add steps to the flow
let flow_with_steps = my_flow;
add_step(flow_with_steps, step1);
add_step(flow_with_steps, step2);
add_step(flow_with_steps, step3);
print(`Added steps to flow. Flow now has ${get_flow_steps(flow_with_steps).len()} steps`);
// Activate the flow
let active_flow = flow_with_steps;
status(active_flow, STATUS_ACTIVE);
print(`Updated flow status to: ${get_flow_status(active_flow)}`);
// Save the flow to the database
let saved_flow = db::save_flow(active_flow);
print(`Flow saved to database with ID: ${get_flow_id(saved_flow)}`);
// Save signature requirements to the database
let saved_req1 = db::save_signature_requirement(req1);
let saved_req2 = db::save_signature_requirement(req2);
let saved_req3 = db::save_signature_requirement(req3);
print(`Signature requirements saved to database with IDs: ${get_signature_requirement_id(saved_req1)}, ${get_signature_requirement_id(saved_req2)}, ${get_signature_requirement_id(saved_req3)}`);
// Retrieve the flow from the database
let retrieved_flow = db::get_flow_by_id(get_flow_id(saved_flow));
print(`Retrieved flow: ${get_flow_name(retrieved_flow)}`);
print(`It has ${get_flow_steps(retrieved_flow).len()} steps`);
// Complete the flow
let completed_flow = retrieved_flow;
status(completed_flow, STATUS_COMPLETED);
print(`Updated retrieved flow status to: ${get_flow_status(completed_flow)}`);
// Save the updated flow
db::save_flow(completed_flow);
print("Updated flow saved to database");
// List all flows in the database
let all_flows = db::list_flows();
print("\nListing all flows in database:");
let flow_count = 0;
for flow in all_flows {
print(` - Flow: ${get_flow_name(flow)} (ID: ${get_flow_id(flow)})`);
flow_count += 1;
}
print(`Total flows: ${flow_count}`);
// List all signature requirements
let all_reqs = db::list_signature_requirements();
print("\nListing all signature requirements in database:");
let req_count = 0;
for req in all_reqs {
print(` - Requirement for step ${get_signature_requirement_flow_step_id(req)} (ID: ${get_signature_requirement_id(req)})`);
req_count += 1;
}
print(`Total signature requirements: ${req_count}`);
// Clean up - delete the flow
db::delete_flow(get_flow_id(completed_flow));
print(`Deleted flow with ID: ${get_flow_id(completed_flow)}`);
// Clean up - delete signature requirements
db::delete_signature_requirement(get_signature_requirement_id(saved_req1));
db::delete_signature_requirement(get_signature_requirement_id(saved_req2));
db::delete_signature_requirement(get_signature_requirement_id(saved_req3));
print("Deleted all signature requirements");

View File

@ -1,65 +0,0 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
pub fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let flow = Flow::new(None, "Onboarding Flow".to_string())
.description("New employee onboarding process".to_string())
.status("active".to_string());
// Create a signature requirement first
let sig_req = SignatureRequirement::new(
None,
1,
"hr_manager_pubkey".to_string(),
"Please sign the employment contract".to_string(),
);
let (sig_req_id, saved_sig_req) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&sig_req)
.expect("Failed to store signature requirement");
// Create a flow step and add the signature requirement
let step = FlowStep::new(None, 1)
.description("Complete HR paperwork".to_string())
.add_signature_requirement(sig_req_id);
let (step_id, saved_step) = db
.collection::<FlowStep>()
.expect("Failed to get FlowStep collection")
.set(&step)
.expect("Failed to store flow step");
// Add the step to the flow
let flow_with_step = flow.add_step(step_id);
// Store the flow
let (_flow_id, saved_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow_with_step)
.expect("Failed to store flow");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
saved_flow.name,
saved_flow.get_id()
);
println!(
" - Added step with order: {} (ID: {})",
saved_step.step_order,
saved_step.get_id()
);
println!(
" - Added signature requirement for: {} (ID: {})",
saved_sig_req.public_key,
saved_sig_req.get_id()
);
}

View File

@ -1,374 +0,0 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db}; // Import both Db and Collection traits
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model; // Import Model trait to use build method
use std::env;
use std::sync::Arc;
// Import finance models
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
// Conditionally import other modules based on features
#[cfg(feature = "flow")]
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
#[cfg(feature = "legal")]
use heromodels::models::legal::{
Contract, ContractRevision, ContractSigner, ContractStatus, SignerStatus,
};
#[cfg(feature = "projects")]
use heromodels::models::projects::{ItemType, Priority, Project, Status as ProjectStatus};
/// Create a mock in-memory database for examples
pub fn create_mock_db() -> Arc<OurDB> {
// Create a temporary directory for the database files
let temp_dir = env::temp_dir().join("engine_examples");
std::fs::create_dir_all(&temp_dir).expect("Failed to create temp directory");
// Create a new OurDB instance with reset=true to ensure it's clean
let db = OurDB::new(temp_dir, true).expect("Failed to create OurDB instance");
Arc::new(db)
}
/// Seed the mock database with some initial data for all modules
pub fn seed_mock_db(db: Arc<OurDB>) {
// Seed calendar data
seed_calendar_data(db.clone());
// Seed finance data
seed_finance_data(db.clone());
// Seed flow data if the feature is enabled
#[cfg(feature = "flow")]
seed_flow_data(db.clone());
// Seed legal data if the feature is enabled
#[cfg(feature = "legal")]
seed_legal_data(db.clone());
// Seed projects data if the feature is enabled
#[cfg(feature = "projects")]
seed_projects_data(db.clone());
println!("Mock database seeded with initial data for all enabled modules.");
}
/// Seed the mock database with calendar data
fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let mut calendar = Calendar::new(None, "Work Calendar".to_string());
calendar.description = Some("My work schedule".to_string());
// Store the calendar in the database
let (_calendar_id, _updated_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
// Use the builder pattern for Event
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
// .add_attendee(Attendee::new(1))
// .add_attendee(Attendee::new(2))
.build();
// // Add attendees to the event using the builder pattern
// let attendee1 = Attendee::new(1);
// let attendee2 = Attendee::new(2);
// // Add attendees using the builder pattern
// event = event.add_attendee(attendee1);
// event = event.add_attendee(attendee2);
// Call build and capture the returned value
// let event = event.build();
// Store the event in the database first to get its ID
let (event_id, updated_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
calendar = calendar.add_event(event_id as i64);
// Store the calendar in the database
let (_calendar_id, updated_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
updated_calendar.name, updated_calendar.base_data.id
);
println!(
" - Added event: {} (ID: {})",
updated_event.title, updated_event.base_data.id
);
}
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let mut flow = Flow::new(0, "Document Approval".to_string());
// Set flow properties using the builder pattern
flow = flow.status("draft".to_string());
flow = flow.name("Document Approval Flow".to_string());
// Create flow steps
let mut step1 = FlowStep::new(0, 1);
step1 = step1.description("Initial review by legal team".to_string());
step1 = step1.status("pending".to_string());
let mut step2 = FlowStep::new(0, 2);
step2 = step2.description("Approval by department head".to_string());
step2 = step2.status("pending".to_string());
// Add signature requirements
let mut req1 = SignatureRequirement::new(
0,
1,
"Legal Team".to_string(),
"Please review this document".to_string(),
);
let mut req2 = SignatureRequirement::new(
0,
2,
"Department Head".to_string(),
"Please approve this document".to_string(),
);
// Add steps to flow
flow = flow.add_step(step1);
flow = flow.add_step(step2);
// Store in the database
let (_, updated_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow)
.expect("Failed to store flow");
// Store signature requirements in the database
let (_, updated_req1) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req1)
.expect("Failed to store signature requirement");
let (_, updated_req2) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req2)
.expect("Failed to store signature requirement");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
updated_flow.name, updated_flow.base_data.id
);
println!(" - Added {} steps", updated_flow.steps.len());
println!(
" - Added signature requirements with IDs: {} and {}",
updated_req1.base_data.id, updated_req2.base_data.id
);
}
/// Seed the mock database with legal data
#[cfg(feature = "legal")]
fn seed_legal_data(db: Arc<OurDB>) {
// Create a contract
let mut contract = Contract::new(None, "Service Agreement".to_string());
contract.description = Some("Agreement for software development services".to_string());
contract.status = ContractStatus::Draft;
// Create a revision
let revision = ContractRevision::new(
None,
"Initial draft".to_string(),
"https://example.com/contract/v1".to_string(),
);
// Create signers
let signer1 = ContractSigner::new(None, 1, "Client".to_string());
let signer2 = ContractSigner::new(None, 2, "Provider".to_string());
// Add revision and signers to contract
contract.add_revision(revision);
contract.add_signer(signer1);
contract.add_signer(signer2);
// Store in the database
let (_, updated_contract) = db
.collection::<Contract>()
.expect("Failed to get Contract collection")
.set(&contract)
.expect("Failed to store contract");
println!("Mock database seeded with legal data:");
println!(
" - Added contract: {} (ID: {})",
updated_contract.name, updated_contract.base_data.id
);
println!(
" - Added {} revisions and {} signers",
updated_contract.revisions.len(),
updated_contract.signers.len()
);
}
/// Seed the mock database with projects data
#[cfg(feature = "projects")]
fn seed_projects_data(db: Arc<OurDB>) {
// Create a project
let mut project = Project::new(None, "Website Redesign".to_string());
project.description = Some("Redesign the company website".to_string());
project.status = ProjectStatus::InProgress;
project.priority = Priority::High;
// Add members and tags
project.add_member_id(1);
project.add_member_id(2);
project.add_tag("design".to_string());
project.add_tag("web".to_string());
// Store in the database
let (_, updated_project) = db
.collection::<Project>()
.expect("Failed to get Project collection")
.set(&project)
.expect("Failed to store project");
println!("Mock database seeded with projects data:");
println!(
" - Added project: {} (ID: {})",
updated_project.name, updated_project.base_data.id
);
println!(
" - Status: {}, Priority: {}",
updated_project.status, updated_project.priority
);
println!(
" - Added {} members and {} tags",
updated_project.member_ids.len(),
updated_project.tags.len()
);
}
/// Seed the mock database with finance data
fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let mut account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
account = updated_account.add_asset(token_id);
account = account.add_asset(nft_id);
// Update the account in the database
let (_, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("hcttps://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
updated_account.name, updated_account.base_data.id
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name, updated_token.base_data.id
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name, updated_nft.base_data.id
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title, updated_listing.base_data.id
);
}

View File

@ -8,7 +8,7 @@ name = "supervisor_worker_demo"
 path = "supervisor_worker_demo.rs"
 [dependencies]
-hero_dispatcher = { path = "../dispatcher" }
+hero_supervisor = { path = "../supervisor" }
 hero_job = { path = "../job" }
 tokio = { version = "1.0", features = ["full"] }
 redis = { version = "0.25", features = ["tokio-comp"] }

View File

@ -1,5 +1,5 @@
 use colored::*;
-use hero_dispatcher::{DispatcherBuilder, ScriptType, JobStatus};
+use hero_supervisor::{SupervisorBuilder, ScriptType, JobStatus};
 use log::warn;
 use std::process::Stdio;
 use std::time::Duration;
@ -8,7 +8,7 @@ use tokio::time::sleep;
 /// Supervisor manages worker lifecycle and job execution
 pub struct Supervisor {
-    dispatcher: hero_dispatcher::Dispatcher,
+    supervisor: hero_supervisor::Supervisor,
     worker_processes: Vec<WorkerProcess>,
     redis_url: String,
 }
@ -22,9 +22,9 @@ pub struct WorkerProcess {
 }
 impl Supervisor {
-    /// Create a new supervisor with dispatcher configuration
+    /// Create a new supervisor with supervisor configuration
     pub async fn new(redis_url: String) -> Result<Self, Box<dyn std::error::Error>> {
-        let dispatcher = DispatcherBuilder::new()
+        let supervisor = SupervisorBuilder::new()
             .caller_id("supervisor")
             .context_id("demo-context")
             .redis_url(&redis_url)
@ -34,7 +34,7 @@ impl Supervisor {
             .build()?;
         Ok(Self {
-            dispatcher,
+            supervisor,
             worker_processes: Vec::new(),
             redis_url,
         })
@ -109,7 +109,7 @@ impl Supervisor {
     /// Submit a job and return the job ID
     pub async fn submit_job(&self, script_type: ScriptType, script: &str) -> Result<String, Box<dyn std::error::Error>> {
-        let job = self.dispatcher
+        let job = self.supervisor
             .new_job()
             .script_type(script_type.clone())
             .script(script)
@ -117,7 +117,7 @@ impl Supervisor {
             .build()?;
         let job_id = job.id.clone();
-        self.dispatcher.create_job(&job).await?;
+        self.supervisor.create_job(&job).await?;
         println!("{}", format!("📝 Job {} submitted for {}", job_id, script_type.as_str()).cyan());
         Ok(job_id)
@ -134,12 +134,12 @@ impl Supervisor {
             return Err("Job execution timeout".into());
         }
-        // Check job status using dispatcher methods
-        match self.dispatcher.get_job_status(job_id).await {
+        // Check job status using supervisor methods
+        match self.supervisor.get_job_status(job_id).await {
             Ok(status) => {
                 match status {
                     JobStatus::Finished => {
-                        if let Ok(Some(result)) = self.dispatcher.get_job_output(job_id).await {
+                        if let Ok(Some(result)) = self.supervisor.get_job_output(job_id).await {
                             println!("{}", format!("✅ Job {} completed successfully", job_id).green());
                             return Ok(result);
                         }
@ -163,12 +163,12 @@ impl Supervisor {
     /// List all jobs
     pub async fn list_jobs(&self) -> Result<Vec<String>, Box<dyn std::error::Error>> {
-        self.dispatcher.list_jobs().await.map_err(|e| e.into())
+        self.supervisor.list_jobs().await.map_err(|e| e.into())
     }
     /// Clear all jobs
     pub async fn clear_all_jobs(&self) -> Result<usize, Box<dyn std::error::Error>> {
-        self.dispatcher.clear_all_jobs().await.map_err(|e| e.into())
+        self.supervisor.clear_all_jobs().await.map_err(|e| e.into())
     }
     /// Get worker status

15
core/job/README.md Normal file
View File

@ -0,0 +1,15 @@
### `Job`
Represents a script execution request with:
- Unique ID and timestamps
- Script content and target worker
- Execution settings (timeout, retries, concurrency)
- Logging configuration
### `JobBuilder`
Fluent builder for configuring jobs:
- `script()` - Set the script content
- `worker_id()` - Target specific worker
- `timeout()` - Set execution timeout
- `build()` - Create the job
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
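A hypothetical end-to-end use of the builder (the entry point, method signatures, and worker name here are assumptions based on the list above, not verified API):
```rust
use hero_job::JobBuilder; // assumed export path
use std::time::Duration;

async fn run_demo() -> Result<(), Box<dyn std::error::Error>> {
    // Build a job, submit it, and wait for the worker's result
    let result = JobBuilder::new()
        .script(r#"print("hello from a worker");"#)
        .worker_id("osis_worker_0")       // hypothetical worker ID
        .timeout(Duration::from_secs(30))
        .await_response()                 // submit and wait, per the list above
        .await?;
    println!("worker replied: {}", result);
    Ok(())
}
```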

View File

@ -9,32 +9,36 @@ use thiserror::Error;
 /// Redis namespace prefix for all Hero job-related keys
 pub const NAMESPACE_PREFIX: &str = "hero:job:";
-/// Script type enumeration for different script engines
+/// Script type enumeration for different worker types
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub enum ScriptType {
-    /// HeroScript - Hero's native scripting language
-    HeroScript,
-    /// Rhai SAL - Rhai Script Abstraction Layer
-    RhaiSAL,
-    /// Rhai DSL - Rhai Domain Specific Language
-    RhaiDSL,
+    /// OSIS - A worker that executes Rhai/HeroScript
+    OSIS,
+    /// SAL - A worker that executes system abstraction layer functionalities in rhai
+    SAL,
+    /// V - A worker that executes heroscript in V
+    V,
+    /// Python - A worker that executes heroscript in python
+    Python,
 }
 impl ScriptType {
     /// Get the worker queue suffix for this script type
     pub fn worker_queue_suffix(&self) -> &'static str {
         match self {
-            ScriptType::HeroScript => "heroscript",
-            ScriptType::RhaiSAL => "rhai_sal",
-            ScriptType::RhaiDSL => "rhai_dsl",
+            ScriptType::OSIS => "osis",
+            ScriptType::SAL => "sal",
+            ScriptType::V => "v",
+            ScriptType::Python => "python",
         }
     }
     pub fn as_str(&self) -> &'static str {
         match self {
-            ScriptType::HeroScript => "heroscript",
-            ScriptType::RhaiSAL => "rhai_sal",
-            ScriptType::RhaiDSL => "rhai_dsl",
+            ScriptType::OSIS => "osis",
+            ScriptType::SAL => "sal",
+            ScriptType::V => "v",
+            ScriptType::Python => "python",
         }
     }
 }
@ -206,9 +210,10 @@ impl Job {
             .ok_or_else(|| JobError::MissingField("script_type".to_string()))?;
         let script_type = match script_type_str.as_str() {
-            "HeroScript" => ScriptType::HeroScript,
-            "RhaiSAL" => ScriptType::RhaiSAL,
-            "RhaiDSL" => ScriptType::RhaiDSL,
+            "OSIS" => ScriptType::OSIS,
+            "SAL" => ScriptType::SAL,
+            "V" => ScriptType::V,
+            "Python" => ScriptType::Python,
             _ => return Err(JobError::InvalidJobData(format!("Unknown script type: {}", script_type_str))),
         };

View File

@ -1,11 +1,11 @@
 [package]
-name = "hero_dispatcher"
+name = "hero_supervisor"
 version = "0.1.0"
 edition = "2021"
 [[bin]]
-name = "dispatcher"
-path = "cmd/dispatcher.rs"
+name = "supervisor"
+path = "cmd/supervisor.rs"
 [dependencies]
 clap = { version = "4.4", features = ["derive"] }
@ -19,6 +19,7 @@ log = "0.4"
 tokio = { version = "1", features = ["macros", "rt-multi-thread"] } # For async main in examples, and general async
 colored = "2.0"
 hero_job = { path = "../job" }
+zinit-client = "0.4.0"
 [dev-dependencies] # For examples later
 env_logger = "0.10"
@@ -0,0 +1,315 @@
# Worker Lifecycle Management
The Hero Supervisor includes comprehensive worker lifecycle management functionality using [Zinit](https://github.com/threefoldtech/zinit) as the process manager. This enables the supervisor to manage worker processes, perform health monitoring, and implement load balancing.
## Overview
The lifecycle management system provides:
- **Worker Process Management**: Start, stop, restart, and monitor worker binaries
- **Health Monitoring**: Automatic ping jobs every 10 minutes for idle workers
- **Load Balancing**: Dynamic scaling of workers based on demand
- **Service Dependencies**: Proper startup ordering with dependency management
- **Graceful Shutdown**: Clean termination of worker processes
## Architecture
```
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ Supervisor │ │ WorkerLifecycle │ │ Zinit │
│ │◄──►│ Manager │◄──►│ (Process │
│ (Job Dispatch) │ │ │ │ Manager) │
└─────────────────┘ └──────────────────┘ └─────────────────┘
│ │ │
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ Redis │ │ Health Monitor │ │ Worker Binaries │
│ (Job Queue) │ │ (Ping Jobs) │ │ (OSIS/SAL/V) │
└─────────────────┘ └──────────────────┘ └─────────────────┘
```
## Components
### WorkerConfig
Defines configuration for a worker binary:
```rust
use hero_supervisor::{WorkerConfig, ScriptType};
use std::path::PathBuf;
use std::collections::HashMap;
let config = WorkerConfig::new(
"osis_worker_0".to_string(),
PathBuf::from("/usr/local/bin/osis_worker"),
ScriptType::OSIS,
)
.with_args(vec![
"--redis-url".to_string(),
"redis://localhost:6379".to_string(),
"--worker-id".to_string(),
"osis_worker_0".to_string(),
])
.with_env({
let mut env = HashMap::new();
env.insert("RUST_LOG".to_string(), "info".to_string());
env.insert("WORKER_TYPE".to_string(), "osis".to_string());
env
})
.with_health_check("/usr/local/bin/osis_worker --health-check".to_string())
.with_dependencies(vec!["redis".to_string()]);
```
### WorkerLifecycleManager
Main component for managing worker lifecycles:
```rust
use hero_supervisor::{WorkerLifecycleManagerBuilder, Supervisor, SupervisorBuilder};
let supervisor = SupervisorBuilder::new()
.redis_url("redis://localhost:6379")
.caller_id("my_supervisor")
.context_id("production")
.build()?;
let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new("/var/run/zinit.sock".to_string())
.with_supervisor(supervisor.clone())
.add_worker(osis_worker_config)
.add_worker(sal_worker_config)
.add_worker(v_worker_config)
.build();
```
## Supported Script Types
The lifecycle manager supports all Hero script types:
- **OSIS**: Rhai/HeroScript execution workers
- **SAL**: System Abstraction Layer workers
- **V**: HeroScript execution in V language
- **Python**: HeroScript execution in Python
## Key Features
### 1. Worker Management
```rust
// Start all configured workers
lifecycle_manager.start_all_workers().await?;
// Stop all workers
lifecycle_manager.stop_all_workers().await?;
// Restart specific worker
lifecycle_manager.restart_worker("osis_worker_0").await?;
// Get worker status
let status = lifecycle_manager.get_worker_status("osis_worker_0").await?;
println!("Worker state: {:?}, PID: {}", status.state, status.pid);
```
### 2. Health Monitoring
The system automatically monitors worker health:
- Tracks last job execution time for each worker
- Sends ping jobs to workers idle for 10+ minutes
- Restarts workers that fail ping checks 3 times
- Updates job times when workers receive tasks
```rust
// Manual health check
lifecycle_manager.monitor_worker_health().await?;
// Update job time (called automatically by supervisor)
lifecycle_manager.update_worker_job_time("osis_worker_0");
// Start continuous health monitoring
lifecycle_manager.start_health_monitoring().await; // Runs forever
```
### 3. Dynamic Scaling
Scale workers up or down based on demand:
```rust
// Scale OSIS workers to 5 instances
lifecycle_manager.scale_workers(&ScriptType::OSIS, 5).await?;
// Scale down SAL workers to 1 instance
lifecycle_manager.scale_workers(&ScriptType::SAL, 1).await?;
// Check current running count
let count = lifecycle_manager.get_running_worker_count(&ScriptType::V).await;
println!("Running V workers: {}", count);
```
### 4. Service Dependencies
Workers can depend on other services:
```rust
let config = WorkerConfig::new(name, binary, script_type)
.with_dependencies(vec![
"redis".to_string(),
"database".to_string(),
"auth_service".to_string(),
]);
```
Zinit ensures dependencies start before the worker.
## Integration with Supervisor
The lifecycle manager integrates seamlessly with the supervisor:
```rust
use hero_supervisor::{Supervisor, SupervisorBuilder, WorkerLifecycleManagerBuilder, ScriptType};
// Create supervisor and lifecycle manager
let supervisor = SupervisorBuilder::new().build()?;
let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new(zinit_socket)
.with_supervisor(supervisor.clone())
.build();
// Start workers
lifecycle_manager.start_all_workers().await?;
// Create and execute jobs (supervisor automatically routes to workers)
let job = supervisor
.new_job()
.script_type(ScriptType::OSIS)
.script_content("println!(\"Hello World!\");".to_string())
.build()?;
let result = supervisor.run_job_and_await_result(&job).await?;
println!("Job result: {}", result);
```
## Zinit Service Configuration
The lifecycle manager automatically creates Zinit service configurations:
```yaml
# Generated service config for osis_worker_0
exec: "/usr/local/bin/osis_worker --redis-url redis://localhost:6379 --worker-id osis_worker_0"
test: "/usr/local/bin/osis_worker --health-check"
oneshot: false # Restart on exit
after:
- redis
env:
RUST_LOG: "info"
WORKER_TYPE: "osis"
```
## Error Handling
The system provides comprehensive error handling:
```rust
use hero_supervisor::SupervisorError;
match lifecycle_manager.start_worker(&config).await {
Ok(_) => println!("Worker started successfully"),
Err(SupervisorError::WorkerStartFailed(worker, reason)) => {
eprintln!("Failed to start {}: {}", worker, reason);
}
Err(e) => eprintln!("Other error: {}", e),
}
```
## Example Usage
See `examples/lifecycle_demo.rs` for a comprehensive demonstration:
```bash
# Run the lifecycle demo
cargo run --example lifecycle_demo
# Run with custom Redis URL
REDIS_URL=redis://localhost:6379 cargo run --example lifecycle_demo
```
## Prerequisites
1. **Zinit**: Install and run Zinit process manager
```bash
curl https://raw.githubusercontent.com/threefoldtech/zinit/refs/heads/master/install.sh | bash
zinit init --config /etc/zinit/ --socket /var/run/zinit.sock
```
2. **Redis**: Running Redis instance for job queues
```bash
redis-server
```
3. **Worker Binaries**: Compiled worker binaries for each script type
- `/usr/local/bin/osis_worker`
- `/usr/local/bin/sal_worker`
- `/usr/local/bin/v_worker`
- `/usr/local/bin/python_worker`
## Configuration Best Practices
1. **Resource Limits**: Configure appropriate resource limits in Zinit
2. **Health Checks**: Implement meaningful health check commands
3. **Dependencies**: Define proper service dependencies
4. **Environment**: Set appropriate environment variables
5. **Logging**: Configure structured logging for debugging
6. **Monitoring**: Use health monitoring for production deployments
## Troubleshooting
### Common Issues
1. **Zinit Connection Failed**
- Ensure Zinit is running: `ps aux | grep zinit`
- Check socket permissions: `ls -la /var/run/zinit.sock`
- Verify socket path in configuration
2. **Worker Start Failed**
- Check binary exists and is executable
- Verify dependencies are running
- Review Zinit logs: `zinit logs <service-name>`
3. **Health Check Failures**
- Implement proper health check endpoint in workers
- Verify health check command syntax
- Check worker responsiveness
4. **Redis Connection Issues**
- Ensure Redis is running and accessible
- Verify Redis URL configuration
- Check network connectivity
### Debug Commands
```bash
# Check Zinit status
zinit list
# View service logs
zinit logs osis_worker_0
# Check service status
zinit status osis_worker_0
# Monitor Redis queues
redis-cli keys "hero:job:*"
```
## Performance Considerations
- **Scaling**: Start with minimal workers and scale based on queue depth (see the sketch after this list)
- **Health Monitoring**: Adjust ping intervals based on workload patterns
- **Resource Usage**: Monitor CPU/memory usage of worker processes
- **Queue Depth**: Monitor Redis queue lengths for scaling decisions
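
As a sketch of queue-depth-driven scaling (see the Scaling item above), assuming the `scale_workers` API and `hero:` queue naming shown earlier; the key and thresholds are illustrative:

```rust
use redis::AsyncCommands;
use hero_supervisor::{ScriptType, WorkerLifecycleManager};

async fn scale_osis_by_depth(
    conn: &mut redis::aio::MultiplexedConnection,
    manager: &mut WorkerLifecycleManager,
) -> Result<(), Box<dyn std::error::Error>> {
    // Depth of the OSIS job queue (key follows the worker_queue naming above).
    let depth: i64 = conn.llen("hero:job:worker_queue:osis").await?;
    // Illustrative policy: one worker per 10 queued jobs, capped at 5.
    let target = ((depth / 10) + 1).min(5) as usize;
    manager.scale_workers(&ScriptType::OSIS, target).await?;
    Ok(())
}
```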
## Security
- **Process Isolation**: Zinit provides process isolation
- **User Permissions**: Run workers with appropriate user permissions
- **Network Security**: Secure Redis and Zinit socket access
- **Binary Validation**: Verify worker binary integrity before deployment
core/supervisor/README.md (new file)

@@ -0,0 +1,103 @@
# Hero Supervisor
The **Hero Supervisor** is responsible for supervising the lifecycle of workers and dispatching jobs to them via Redis queues.
## Overview
The system involves four primary actors:
1. **OSIS**: A worker that executes Rhai and HeroScript.
2. **SAL**: A worker that performs system abstraction layer functionalities using Rhai.
3. **V**: A worker that executes HeroScript in the V programming language.
4. **Python**: A worker that executes HeroScript in Python.
The Supervisor utilizes **zinit** to start and monitor these workers, ensuring they are running correctly.
### Key Features
- **Worker Lifecycle Supervision**: Oversee the lifecycle of workers, including starting, stopping, restarting, and load balancing based on job demand.
- **Job Supervision**: API for efficiently managing jobs dispatched to workers over Redis queues.
## Worker Lifecycle Supervision
The Supervisor oversees the lifecycle of the workers, ensuring they are operational and efficiently allocated. Load balancing is implemented to dynamically adjust the number of active workers based on job demand.
Additionally, the Supervisor implements health monitoring for workers: if a worker has not received a job for 10 minutes, the Supervisor sends it a ping job. The worker must respond promptly; if it fails to do so, the Supervisor restarts that worker.
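
A sketch of triggering that check by hand, using the `send_ping_job` call from the examples further below (the worker type chosen here is illustrative):

```rust
use hero_supervisor::{ScriptType, Supervisor};

// Sketch: ping the OSIS worker; a failure here is what the
// supervisor's health monitor treats as grounds for a restart.
async fn check_osis(supervisor: &Supervisor) -> bool {
    supervisor.send_ping_job(ScriptType::OSIS).await.is_ok()
}
```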
### Prerequisites
**Important**: Before running any lifecycle examples or using worker management features, you must start the Zinit daemon:
```bash
# Start Zinit daemon (required for worker lifecycle management)
sudo zinit init
# Or start Zinit with a custom socket path
sudo zinit --socket /var/run/zinit.sock init
```
**Note**: The Supervisor uses Zinit as the process manager for worker lifecycle operations. The default socket path is `/var/run/zinit.sock`, but you can configure a custom path using the `SupervisorBuilder::zinit_socket_path()` method.
**Troubleshooting**: If you get connection errors when running examples, ensure:
1. Zinit daemon is running (`zinit list` should work)
2. The socket path matches between Zinit and your Supervisor configuration
3. You have appropriate permissions to access the Zinit socket
### Supervisor API for Worker Lifecycle
The Supervisor provides the following methods for supervising the worker lifecycle (a usage sketch follows the list):
- **`start_worker()`**: Initializes and starts a specified worker.
- **`stop_worker()`**: Gracefully stops a specified worker.
- **`restart_worker()`**: Restarts a specified worker to ensure it operates correctly.
- **`get_worker_status()`**: Checks the status of a specific worker.
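
A minimal sketch of these calls, assuming an already-built `supervisor`; the worker name is hypothetical and should match your configuration:

```rust
use hero_supervisor::{Supervisor, SupervisorError};

async fn manage(supervisor: &Supervisor) -> Result<(), SupervisorError> {
    // Restart a worker and inspect it afterwards (name is illustrative).
    supervisor.restart_worker("osis_worker_1").await?;
    let status = supervisor.get_worker_status("osis_worker_1").await?;
    println!("state: {:?}", status.state);

    // Gracefully stop it again.
    supervisor.stop_worker("osis_worker_1").await?;
    Ok(())
}
```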
## Job Supervision
Jobs are dispatched to workers through their designated Redis queues, and the Supervisor provides an API for comprehensive job supervision.
### Supervisor API for Job Supervision
The Supervisor offers the following methods for handling jobs (a usage sketch follows the list):
- **`new_job()`**: Creates a new `JobBuilder` for configuring a job.
- **`create_job()`**: Stores a job in Redis.
- **`run_job_and_await_result()`**: Executes a job and waits for its completion.
- **`get_job_status()`**: Checks the current execution status of a job.
- **`get_job_output()`**: Retrieves the results of a completed job.
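
A sketch tying these together, following the job-building pattern used in the examples (script content and timeout are illustrative):

```rust
use std::time::Duration;
use hero_supervisor::{ScriptType, Supervisor, SupervisorError};

async fn run_one(supervisor: &Supervisor) -> Result<String, SupervisorError> {
    // new_job() -> build() -> run_job_and_await_result(), as listed above.
    let job = supervisor
        .new_job()
        .script_type(ScriptType::OSIS)
        .script_content("40 + 2".to_string())
        .timeout(Duration::from_secs(30))
        .build()?;
    supervisor.run_job_and_await_result(&job).await
}
```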
## Running Examples
The supervisor includes several examples demonstrating lifecycle management:
```bash
# 1. First, start the Zinit daemon
sudo zinit init
# 2. In another terminal, start Redis (if not already running)
redis-server
# 3. Run the lifecycle demo
cargo run --example simple_lifecycle_demo
# Or run the comprehensive lifecycle demo
cargo run --example lifecycle_demo
```
**Example Configuration**: The examples use these default paths:
- Redis: `redis://localhost:6379`
- Zinit socket: `/var/run/zinit.sock`
You can modify these in the example source code if your setup differs.
### Redis Schema for Job Supervision
Jobs are managed within the `hero:` namespace in Redis (inspection commands follow the list):
- **`hero:job:{job_id}`**: Stores job parameters as a Redis hash.
- **`hero:work_queue:{worker_id}`**: Contains worker-specific job queues for dispatching jobs.
- **`hero:reply:{job_id}`**: Dedicated queues for job results.
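
These keys can be inspected directly with `redis-cli`; the job and worker IDs below are placeholders:

```bash
# List all stored jobs
redis-cli keys "hero:job:*"

# Dump the hash fields of a single job (placeholder ID)
redis-cli hgetall "hero:job:<job_id>"

# Check the depth of a worker's queue (placeholder worker ID)
redis-cli llen "hero:work_queue:<worker_id>"

# Watch for a job result (blocks up to 30s; placeholder ID)
redis-cli blpop "hero:reply:<job_id>" 30
```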
## Prerequisites
- A Redis server must be accessible to both the Supervisor and the workers.
@@ -150,7 +150,7 @@ The client provides clear error messages for:
### Dependencies

- `rhai_supervisor`: Core client library for Redis-based script execution
- `redis`: Redis client for task queue communication
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure
@@ -1,5 +1,5 @@
use clap::Parser;
use hero_supervisor::{Supervisor, SupervisorBuilder, ScriptType};
use log::{error, info};
use colored::Colorize;
use std::io::{self, Write};

@@ -16,22 +16,10 @@ struct Args {
    #[arg(short = 'k', long = "context-id", help = "Context ID (execution context)")]
    context_id: String,

    /// Script type to execute (osis, sal, v, python)
    #[arg(short = 'T', long = "script-type", default_value = "osis", help = "Script type: osis, sal, v, or python")]
    script_type: String,

    /// Redis URL
    #[arg(short, long, default_value = "redis://localhost:6379", help = "Redis connection URL")]
    redis_url: String,

@@ -63,10 +51,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Configure logging based on verbosity level
    let log_config = match args.verbose {
        0 => "warn,hero_supervisor=warn",
        1 => "info,hero_supervisor=info",
        2 => "debug,hero_supervisor=debug",
        _ => "trace,hero_supervisor=trace",
    };
    std::env::set_var("RUST_LOG", log_config);

@@ -80,58 +68,33 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
        env_logger::init();
    }

    // Validate script type
    match args.script_type.to_lowercase().as_str() {
        "osis" | "sal" | "v" | "python" => {
            // Valid script types - no worker validation needed since we use hardcoded queues
        }
        _ => {
            error!("❌ Invalid script type: {}. Valid types: osis, sal, v, python", args.script_type);
            return Err(format!("Invalid script type: {}", args.script_type).into());
        }
    }

    if args.verbose > 0 {
        info!("🔗 Starting Hero Supervisor");
        info!("📋 Configuration:");
        info!("  Caller ID: {}", args.caller_id);
        info!("  Context ID: {}", args.context_id);
        info!("  Script Type: {}", args.script_type);
        info!("  Redis URL: {}", args.redis_url);
        info!("  Timeout: {}s", args.timeout);
        info!("  Using hardcoded worker queues for script type: {}", args.script_type);
        info!("");
    }

    // Create the supervisor client
    let client = SupervisorBuilder::new()
        .redis_url(&args.redis_url)
        .build()?;

@@ -165,7 +128,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}

async fn execute_script(
    client: &Supervisor,
    script: String,
    script_type_str: &str,
    timeout_secs: u64,

@@ -174,11 +137,12 @@ async fn execute_script(
    // Parse script type
    let script_type = match script_type_str.to_lowercase().as_str() {
        "osis" => ScriptType::OSIS,
        "sal" => ScriptType::SAL,
        "v" => ScriptType::V,
        "python" => ScriptType::Python,
        _ => {
            error!("❌ Invalid script type: {}. Valid types: osis, sal, v, python", script_type_str);
            return Err(format!("Invalid script type: {}", script_type_str).into());
        }
    };

@@ -208,18 +172,19 @@ async fn execute_script(
}

async fn run_interactive_mode(
    client: &Supervisor,
    script_type_str: &str,
    timeout_secs: u64,
    verbose: u8,
) -> Result<(), Box<dyn std::error::Error>> {
    // Parse script type
    let script_type = match script_type_str.to_lowercase().as_str() {
        "osis" => ScriptType::OSIS,
        "sal" => ScriptType::SAL,
        "v" => ScriptType::V,
        "python" => ScriptType::Python,
        _ => {
            error!("❌ Invalid script type: {}. Valid types: osis, sal, v, python", script_type_str);
            return Err(format!("Invalid script type: {}", script_type_str).into());
        }
    };
@@ -1,6 +1,6 @@
# Architecture of the `rhai_supervisor` Crate

The `rhai_supervisor` crate provides a Redis-based client library for submitting Rhai scripts to distributed worker services and awaiting their execution results. It implements a request-reply pattern using Redis as the message broker.

## Core Architecture

@@ -8,7 +8,7 @@ The client follows a builder pattern design with clear separation of concerns:
```mermaid
graph TD
    A[RhaiSupervisorBuilder] --> B[RhaiSupervisor]
    B --> C[PlayRequestBuilder]
    C --> D[PlayRequest]
    D --> E[Redis Task Queue]

@@ -35,9 +35,9 @@ graph TD
## Key Components

### 1. RhaiSupervisorBuilder

A builder pattern implementation for constructing `RhaiSupervisor` instances with proper configuration validation.

**Responsibilities:**
- Configure Redis connection URL

@@ -47,9 +47,9 @@ A builder pattern implementation for constructing `RhaiSupervisor` instances wit
**Key Methods:**
- `caller_id(id: &str)` - Sets the caller identifier
- `redis_url(url: &str)` - Configures Redis connection
- `build()` - Creates the final `RhaiSupervisor` instance

### 2. RhaiSupervisor

The main client interface that manages Redis connections and provides factory methods for creating play requests.

@@ -103,7 +103,7 @@ pub struct RhaiTaskDetails {
}
```

#### RhaiSupervisorError

Comprehensive error handling for various failure scenarios:
- `RedisError` - Redis connection/operation failures
- `SerializationError` - JSON serialization/deserialization issues
@@ -1,14 +1,14 @@
# Hero Supervisor Protocol

This document describes the Redis-based protocol used by the Hero Supervisor for job management and worker communication.

## Overview

The Hero Supervisor uses Redis as a message broker and data store for managing distributed job execution. Jobs are stored as Redis hashes, and communication with workers happens through Redis lists (queues).

## Redis Namespace

All supervisor-related keys use the `hero:` namespace prefix to avoid conflicts with other Redis usage.

## Data Structures

@@ -99,7 +99,7 @@ Worker -> Redis: LPUSH hero:reply:{job_id} {result}
### List Jobs

```rust
supervisor.list_jobs() -> Vec<String>
```

**Redis Operations:**
- `KEYS hero:job:*` - Get all job keys

@@ -107,14 +107,14 @@ dispatcher.list_jobs() -> Vec<String>
### Stop Job

```rust
supervisor.stop_job(job_id) -> Result<(), SupervisorError>
```

**Redis Operations:**
- `LPUSH hero:stop_queue:{worker_id} {job_id}` - Send stop request

### Get Job Status

```rust
supervisor.get_job_status(job_id) -> Result<JobStatus, SupervisorError>
```

**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data

@@ -122,7 +122,7 @@ dispatcher.get_job_status(job_id) -> Result<JobStatus, SupervisorError>
### Get Job Logs

```rust
supervisor.get_job_logs(job_id) -> Result<Option<String>, SupervisorError>
```

**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data

@@ -131,7 +131,7 @@ dispatcher.get_job_logs(job_id) -> Result<Option<String>, DispatcherError>
### Run Job and Await Result

```rust
supervisor.run_job_and_await_result(job, worker_id) -> Result<String, SupervisorError>
```

**Redis Operations:**
1. `HSET hero:job:{job_id} {job_fields}` - Store job
@@ -0,0 +1,239 @@
use hero_supervisor::{
Supervisor, SupervisorBuilder, WorkerConfig, WorkerLifecycleManager,
WorkerLifecycleManagerBuilder, ScriptType
};
use log::{info, warn, error};
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
use tokio::time::sleep;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::init();
info!("Starting Worker Lifecycle Management Demo");
// Configuration
let redis_url = "redis://localhost:6379";
let zinit_socket = "/var/run/zinit.sock";
// Create supervisor
let supervisor = SupervisorBuilder::new()
.redis_url(redis_url)
.caller_id("lifecycle_demo")
.context_id("demo_context")
.build()?;
// Configure workers for different script types
let mut worker_configs = Vec::new();
// OSIS workers (Rhai/HeroScript)
for i in 0..2 {
let config = WorkerConfig::new(
format!("osis_worker_{}", i),
PathBuf::from("/usr/local/bin/osis_worker"),
ScriptType::OSIS,
)
.with_args(vec![
"--redis-url".to_string(),
redis_url.to_string(),
"--worker-id".to_string(),
format!("osis_worker_{}", i),
])
.with_env({
let mut env = HashMap::new();
env.insert("RUST_LOG".to_string(), "info".to_string());
env.insert("WORKER_TYPE".to_string(), "osis".to_string());
env
})
.with_health_check("/usr/local/bin/osis_worker --health-check".to_string())
.with_dependencies(vec!["redis".to_string()]);
worker_configs.push(config);
}
// SAL workers (System Abstraction Layer)
for i in 0..3 {
let config = WorkerConfig::new(
format!("sal_worker_{}", i),
PathBuf::from("/usr/local/bin/sal_worker"),
ScriptType::SAL,
)
.with_args(vec![
"--redis-url".to_string(),
redis_url.to_string(),
"--worker-id".to_string(),
format!("sal_worker_{}", i),
])
.with_env({
let mut env = HashMap::new();
env.insert("RUST_LOG".to_string(), "info".to_string());
env.insert("WORKER_TYPE".to_string(), "sal".to_string());
env
})
.with_health_check("/usr/local/bin/sal_worker --health-check".to_string())
.with_dependencies(vec!["redis".to_string()]);
worker_configs.push(config);
}
// V workers (HeroScript in V language)
for i in 0..2 {
let config = WorkerConfig::new(
format!("v_worker_{}", i),
PathBuf::from("/usr/local/bin/v_worker"),
ScriptType::V,
)
.with_args(vec![
"--redis-url".to_string(),
redis_url.to_string(),
"--worker-id".to_string(),
format!("v_worker_{}", i),
])
.with_env({
let mut env = HashMap::new();
env.insert("RUST_LOG".to_string(), "info".to_string());
env.insert("WORKER_TYPE".to_string(), "v".to_string());
env
})
.with_health_check("/usr/local/bin/v_worker --health-check".to_string())
.with_dependencies(vec!["redis".to_string()]);
worker_configs.push(config);
}
// Create lifecycle manager
let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new(zinit_socket.to_string())
.with_supervisor(supervisor.clone());
// Add all worker configurations
for config in worker_configs {
lifecycle_manager = lifecycle_manager.add_worker(config);
}
let mut lifecycle_manager = lifecycle_manager.build();
// Demonstrate lifecycle operations
info!("=== Starting Worker Lifecycle Demo ===");
// 1. Start all workers
info!("1. Starting all workers...");
match lifecycle_manager.start_all_workers().await {
Ok(_) => info!("✅ All workers started successfully"),
Err(e) => {
error!("❌ Failed to start workers: {}", e);
return Err(e.into());
}
}
// Wait for workers to initialize
sleep(Duration::from_secs(5)).await;
// 2. Check worker status
info!("2. Checking worker status...");
match lifecycle_manager.get_all_worker_status().await {
Ok(status_map) => {
for (worker_name, status) in status_map {
info!(" Worker '{}': State={:?}, PID={}", worker_name, status.state, status.pid);
}
}
Err(e) => warn!("Failed to get worker status: {}", e),
}
// 3. Demonstrate scaling
info!("3. Demonstrating worker scaling...");
// Scale up OSIS workers
info!(" Scaling up OSIS workers to 3...");
if let Err(e) = lifecycle_manager.scale_workers(&ScriptType::OSIS, 3).await {
warn!("Failed to scale OSIS workers: {}", e);
}
sleep(Duration::from_secs(3)).await;
// Scale down SAL workers
info!(" Scaling down SAL workers to 1...");
if let Err(e) = lifecycle_manager.scale_workers(&ScriptType::SAL, 1).await {
warn!("Failed to scale SAL workers: {}", e);
}
sleep(Duration::from_secs(3)).await;
// 4. Check running worker counts
info!("4. Checking running worker counts after scaling...");
for script_type in [ScriptType::OSIS, ScriptType::SAL, ScriptType::V] {
let count = lifecycle_manager.get_running_worker_count(&script_type).await;
info!(" {:?}: {} workers running", script_type, count);
}
// 5. Demonstrate restart functionality
info!("5. Demonstrating worker restart...");
if let Err(e) = lifecycle_manager.restart_worker("osis_worker_0").await {
warn!("Failed to restart worker: {}", e);
} else {
info!(" ✅ Successfully restarted osis_worker_0");
}
sleep(Duration::from_secs(3)).await;
// 6. Simulate job dispatch and health monitoring
info!("6. Simulating job dispatch and health monitoring...");
// Update job time for a worker (simulating job dispatch)
lifecycle_manager.update_worker_job_time("sal_worker_0");
info!(" Updated job time for sal_worker_0");
// Perform health monitoring check
if let Err(e) = lifecycle_manager.monitor_worker_health().await {
warn!("Health monitoring failed: {}", e);
} else {
info!(" ✅ Health monitoring completed");
}
// 7. Create and execute a test job
info!("7. Creating and executing a test job...");
let test_job = supervisor
.new_job()
.script_type(ScriptType::OSIS)
.script_content("println!(\"Hello from worker!\");".to_string())
.timeout(Duration::from_secs(30))
.build()?;
match supervisor.run_job_and_await_result(&test_job).await {
Ok(result) => info!(" ✅ Job executed successfully: {}", result),
Err(e) => warn!(" ❌ Job execution failed: {}", e),
}
// 8. Demonstrate graceful shutdown
info!("8. Demonstrating graceful shutdown...");
// Stop specific workers
info!(" Stopping specific workers...");
for worker_name in ["osis_worker_1", "v_worker_0"] {
if let Err(e) = lifecycle_manager.stop_worker(worker_name).await {
warn!("Failed to stop worker {}: {}", worker_name, e);
} else {
info!(" ✅ Stopped worker: {}", worker_name);
}
}
sleep(Duration::from_secs(2)).await;
// Stop all remaining workers
info!(" Stopping all remaining workers...");
if let Err(e) = lifecycle_manager.stop_all_workers().await {
error!("Failed to stop all workers: {}", e);
} else {
info!(" ✅ All workers stopped successfully");
}
info!("=== Worker Lifecycle Demo Completed ===");
// Optional: Start health monitoring loop (commented out for demo)
// info!("Starting health monitoring loop (Ctrl+C to stop)...");
// lifecycle_manager.start_health_monitoring().await;
Ok(())
}
@@ -0,0 +1,74 @@
use hero_supervisor::SupervisorBuilder;
use tokio::time::{sleep, Duration};
use log::{info, error};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
info!("Starting Hero Supervisor Lifecycle Demo");
// Build supervisor with simplified worker configuration
// Workers are automatically launched during build
let supervisor = SupervisorBuilder::new()
.redis_url("redis://localhost:6379")
.zinit_socket_path("/var/run/zinit.sock")
.osis_worker("/usr/local/bin/osis_worker")
.sal_worker("/usr/local/bin/sal_worker")
.v_worker("/usr/local/bin/v_worker")
.worker_env_var("REDIS_URL", "redis://localhost:6379")
.worker_env_var("LOG_LEVEL", "info")
.build().await?;
info!("Supervisor created and workers launched successfully");
// Wait a moment for workers to start
sleep(Duration::from_secs(2)).await;
// Check worker status using the simplified API
info!("Checking worker status...");
let workers = supervisor.get_workers(&[]).await;
for worker in &workers {
let status_info = if worker.is_running {
format!("Running (PID: {})", worker.status.as_ref().map(|s| s.pid).unwrap_or(0))
} else {
"Stopped".to_string()
};
info!(" Worker '{}' ({:?}): {}", worker.config.name, worker.config.script_type, status_info);
}
// Demonstrate lifecycle operations with simplified API
info!("=== Worker Lifecycle Operations ===");
// 1. Demonstrate restart functionality
info!("1. Demonstrating worker restart...");
if let Err(e) = supervisor.restart_worker("osis_worker_1").await {
error!("Failed to restart worker: {}", e);
} else {
info!(" ✅ Successfully restarted osis_worker_1");
}
sleep(Duration::from_secs(2)).await;
// 2. Send a ping job for health checking
info!("2. Sending ping job for health checking...");
if let Err(e) = supervisor.send_ping_job(hero_job::ScriptType::OSIS).await {
error!("Ping job failed: {}", e);
} else {
info!(" ✅ Ping job completed successfully");
}
// 3. Demonstrate graceful shutdown
info!("3. Demonstrating graceful shutdown...");
// Stop specific workers
if let Err(e) = supervisor.stop_worker("osis_worker_1").await {
error!("Failed to stop worker: {}", e);
} else {
info!(" ✅ Worker stopped successfully");
}
info!("Demo completed successfully!");
Ok(())
}
@@ -1,5 +1,5 @@
use log::info;
use hero_supervisor::{SupervisorBuilder, SupervisorError, ScriptType};
use std::time::{Duration, Instant};

#[tokio::main]
@@ -9,11 +9,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
        .init();

    // Build the client using the new builder pattern
    let client = SupervisorBuilder::new()
        .caller_id("timeout-example-runner")
        .redis_url("redis://127.0.0.1/")
        .build()?;
    info!("Supervisor created.");

    let script_content = r#"
        // This script will never be executed by a worker because the recipient does not exist.

@@ -56,8 +56,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
            info!("Elapsed time: {:?}", elapsed);
            match e {
                SupervisorError::Timeout(task_id) => {
                    info!("Timeout Example PASSED: Correctly received SupervisorError::Timeout for task_id: {}", task_id);
                    // Ensure the elapsed time is close to the timeout duration
                    // Allow for some buffer for processing
                    assert!(

@@ -75,11 +75,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                }
                other_error => {
                    log::error!(
                        "Timeout Example FAILED: Expected SupervisorError::Timeout, but got other error: {:?}",
                        other_error
                    );
                    Err(format!(
                        "Expected SupervisorError::Timeout, got other error: {:?}",
                        other_error
                    )
                    .into())
@@ -0,0 +1,102 @@
// Added error
// Duration is still used, Instant and sleep were removed
/// Comprehensive error type for all possible failures in the Rhai client.
///
/// This enum covers all error scenarios that can occur during client operations,
/// from Redis connectivity issues to task execution timeouts.
#[derive(Debug)]
pub enum SupervisorError {
/// Redis connection or operation error
RedisError(redis::RedisError),
/// JSON serialization/deserialization error
SerializationError(serde_json::Error),
/// Task execution timeout - contains the task_id that timed out
Timeout(String),
/// Task not found after submission - contains the task_id (rare occurrence)
TaskNotFound(String),
/// Context ID is missing
ContextIdMissing,
/// Invalid input provided
InvalidInput(String),
/// Job operation error
JobError(hero_job::JobError),
/// Worker lifecycle management errors
WorkerStartFailed(String, String),
WorkerStopFailed(String, String),
WorkerRestartFailed(String, String),
WorkerStatusFailed(String, String),
WorkerNotFound(String),
PingJobFailed(String, String),
/// Zinit client operation error
ZinitError(String),
SupervisorNotConfigured,
}
impl From<redis::RedisError> for SupervisorError {
fn from(err: redis::RedisError) -> Self {
SupervisorError::RedisError(err)
}
}
impl From<serde_json::Error> for SupervisorError {
fn from(err: serde_json::Error) -> Self {
SupervisorError::SerializationError(err)
}
}
impl From<hero_job::JobError> for SupervisorError {
fn from(err: hero_job::JobError) -> Self {
SupervisorError::JobError(err)
}
}
impl std::fmt::Display for SupervisorError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SupervisorError::RedisError(e) => write!(f, "Redis error: {}", e),
SupervisorError::SerializationError(e) => write!(f, "Serialization error: {}", e),
SupervisorError::Timeout(task_id) => {
write!(f, "Timeout waiting for task {} to complete", task_id)
}
SupervisorError::TaskNotFound(task_id) => {
write!(f, "Task {} not found after submission", task_id)
}
SupervisorError::ContextIdMissing => {
write!(f, "Context ID is missing")
}
SupervisorError::InvalidInput(msg) => {
write!(f, "Invalid input: {}", msg)
}
SupervisorError::JobError(e) => {
write!(f, "Job error: {}", e)
}
SupervisorError::WorkerStartFailed(worker, reason) => {
write!(f, "Failed to start worker '{}': {}", worker, reason)
}
SupervisorError::WorkerStopFailed(worker, reason) => {
write!(f, "Failed to stop worker '{}': {}", worker, reason)
}
SupervisorError::WorkerRestartFailed(worker, reason) => {
write!(f, "Failed to restart worker '{}': {}", worker, reason)
}
SupervisorError::WorkerStatusFailed(worker, reason) => {
write!(f, "Failed to get status for worker '{}': {}", worker, reason)
}
SupervisorError::WorkerNotFound(worker) => {
write!(f, "Worker '{}' not found", worker)
}
SupervisorError::PingJobFailed(worker, reason) => {
write!(f, "Ping job failed for worker '{}': {}", worker, reason)
}
SupervisorError::ZinitError(msg) => {
write!(f, "Zinit error: {}", msg)
}
SupervisorError::SupervisorNotConfigured => {
write!(f, "Supervisor not configured for health monitoring")
}
}
}
}
impl std::error::Error for SupervisorError {}
@@ -3,7 +3,7 @@ use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;

use crate::{Supervisor, SupervisorError};
use hero_job::{Job, ScriptType};

/// Builder for constructing and submitting script execution requests.

@@ -16,12 +16,12 @@ use hero_job::{Job, ScriptType};
///
/// ```rust,no_run
/// use std::time::Duration;
/// use hero_supervisor::ScriptType;
///
/// # async fn example(client: &hero_supervisor::Supervisor) -> Result<String, hero_supervisor::SupervisorError> {
/// let result = client
///     .new_job()
///     .script_type(ScriptType::OSIS)
///     .script(r#"print("Hello, World!");"#)
///     .timeout(Duration::from_secs(30))
///     .await_response()

@@ -30,7 +30,7 @@ use hero_job::{Job, ScriptType};
/// # }
/// ```
pub struct JobBuilder<'a> {
    client: &'a Supervisor,
    request_id: String,
    context_id: String,
    caller_id: String,

@@ -46,14 +46,14 @@ pub struct JobBuilder<'a> {
}

impl<'a> JobBuilder<'a> {
    pub fn new(client: &'a Supervisor) -> Self {
        Self {
            client,
            request_id: "".to_string(),
            context_id: "".to_string(),
            caller_id: "".to_string(),
            script: "".to_string(),
            script_type: ScriptType::OSIS, // Default to OSIS
            timeout: Duration::from_secs(5),
            retries: 0,
            concurrent: false,

@@ -153,7 +153,7 @@ impl<'a> JobBuilder<'a> {
        self
    }

    pub fn build(self) -> Result<Job, SupervisorError> {
        let request_id = if self.request_id.is_empty() {
            // Generate a UUID for the request_id
            Uuid::new_v4().to_string()

@@ -162,11 +162,11 @@ impl<'a> JobBuilder<'a> {
        };

        if self.context_id.is_empty() {
            return Err(SupervisorError::ContextIdMissing);
        }

        if self.caller_id.is_empty() {
            return Err(SupervisorError::ContextIdMissing);
        }

        let now = Utc::now();

@@ -189,7 +189,7 @@ impl<'a> JobBuilder<'a> {
        })
    }

    pub async fn submit(self) -> Result<(), SupervisorError> {
        // Create job first, then use client reference
        let request_id = if self.request_id.is_empty() {
            Uuid::new_v4().to_string()

@@ -198,7 +198,7 @@ impl<'a> JobBuilder<'a> {
        };

        if self.context_id.is_empty() {
            return Err(SupervisorError::ContextIdMissing);
        }

        let now = Utc::now();

@@ -224,7 +224,7 @@ impl<'a> JobBuilder<'a> {
        Ok(())
    }

    pub async fn await_response(self) -> Result<String, SupervisorError> {
        // Create job first, then use client reference
        let request_id = if self.request_id.is_empty() {
            Uuid::new_v4().to_string()

@@ -233,7 +233,7 @@ impl<'a> JobBuilder<'a> {
        };

        if self.context_id.is_empty() {
            return Err(SupervisorError::ContextIdMissing);
        }

        let now = Utc::now();
@@ -1,121 +1,211 @@
use log::{debug, error, info, warn}; use log::{debug, error, info, warn};
use redis::AsyncCommands; use redis::AsyncCommands;
use std::collections::HashMap;
use std::time::Duration; use std::time::Duration;
use hero_job::NAMESPACE_PREFIX; use hero_job::NAMESPACE_PREFIX;
use zinit_client::ZinitClient;
mod job; mod job;
mod error; mod error;
mod lifecycle;
pub use crate::error::DispatcherError; pub use crate::error::SupervisorError;
pub use crate::job::JobBuilder; pub use crate::job::JobBuilder;
pub use crate::lifecycle::WorkerConfig;
// Re-export types from hero_job for public API // Re-export types from hero_job for public API
pub use hero_job::{Job, JobStatus, ScriptType}; pub use hero_job::{Job, JobStatus, ScriptType};
#[derive(Clone)] pub struct Supervisor {
pub struct Dispatcher {
redis_client: redis::Client, redis_client: redis::Client,
caller_id: String, zinit_client: ZinitClient,
context_id: String, builder_data: Option<SupervisorBuilderData>,
heroscript_workers: Vec<String>,
rhai_sal_workers: Vec<String>,
rhai_dsl_workers: Vec<String>,
} }
pub struct DispatcherBuilder { pub struct SupervisorBuilder {
redis_url: Option<String>, redis_url: Option<String>,
caller_id: Option<String>, zinit_socket_path: Option<String>,
context_id: Option<String>, osis_worker: Option<String>,
heroscript_workers: Vec<String>, sal_worker: Option<String>,
rhai_sal_workers: Vec<String>, v_worker: Option<String>,
rhai_dsl_workers: Vec<String>, python_worker: Option<String>,
worker_env_vars: HashMap<String, String>,
} }
impl DispatcherBuilder { /// Helper struct to pass builder data to worker launch method
struct SupervisorBuilderData {
osis_worker: Option<String>,
sal_worker: Option<String>,
v_worker: Option<String>,
python_worker: Option<String>,
worker_env_vars: HashMap<String, String>,
}
impl SupervisorBuilder {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
redis_url: None, redis_url: None,
caller_id: Some("default_caller".to_string()), zinit_socket_path: Some("/var/run/zinit.sock".to_string()),
context_id: Some("default_context".to_string()), osis_worker: None,
heroscript_workers: Vec::new(), sal_worker: None,
rhai_sal_workers: Vec::new(), v_worker: None,
rhai_dsl_workers: Vec::new(), python_worker: None,
worker_env_vars: HashMap::new(),
} }
} }
pub fn caller_id(mut self, caller_id: &str) -> Self {
self.caller_id = Some(caller_id.to_string());
self
}
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = Some(context_id.to_string());
self
}
pub fn heroscript_workers(mut self, workers: Vec<String>) -> Self {
self.heroscript_workers = workers;
self
}
pub fn rhai_sal_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_sal_workers = workers;
self
}
pub fn rhai_dsl_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_dsl_workers = workers;
self
}
pub fn redis_url(mut self, url: &str) -> Self { pub fn redis_url(mut self, url: &str) -> Self {
self.redis_url = Some(url.to_string()); self.redis_url = Some(url.to_string());
self self
} }
/// Builds the final `Dispatcher` instance. pub fn zinit_socket_path(mut self, path: &str) -> Self {
self.zinit_socket_path = Some(path.to_string());
self
}
pub fn osis_worker(mut self, binary_path: &str) -> Self {
self.osis_worker = Some(binary_path.to_string());
self
}
pub fn sal_worker(mut self, binary_path: &str) -> Self {
self.sal_worker = Some(binary_path.to_string());
self
}
pub fn v_worker(mut self, binary_path: &str) -> Self {
self.v_worker = Some(binary_path.to_string());
self
}
pub fn python_worker(mut self, binary_path: &str) -> Self {
self.python_worker = Some(binary_path.to_string());
self
}
pub fn worker_env_var(mut self, key: &str, value: &str) -> Self {
self.worker_env_vars.insert(key.to_string(), value.to_string());
self
}
pub fn worker_env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
self.worker_env_vars.extend(env_vars);
self
}
/// Builds the final `Supervisor` instance synchronously.
/// ///
/// This method validates the configuration and creates the Redis client. /// This method validates the configuration and creates the Redis client.
/// It will return an error if the caller ID is empty or if the Redis /// Worker launching is deferred to the `start_workers()` method.
/// connection cannot be established.
/// ///
/// # Returns /// # Returns
/// ///
/// * `Ok(Dispatcher)` - Successfully configured client /// * `Ok(Supervisor)` - Successfully configured client
/// * `Err(DispatcherError)` - Configuration or connection error /// * `Err(SupervisorError)` - Configuration or connection error
pub fn build(self) -> Result<Dispatcher, DispatcherError> { pub fn build(self) -> Result<Supervisor, SupervisorError> {
let url = self let url = self.redis_url
.redis_url
.unwrap_or_else(|| "redis://127.0.0.1/".to_string()); .unwrap_or_else(|| "redis://127.0.0.1/".to_string());
let client = redis::Client::open(url)?; let client = redis::Client::open(url)?;
Ok(Dispatcher {
let zinit_socket = self.zinit_socket_path
.unwrap_or_else(|| "/var/run/zinit.sock".to_string());
let zinit_client = ZinitClient::new(&zinit_socket);
// Store builder data for later use in start_workers()
let builder_data = SupervisorBuilderData {
osis_worker: self.osis_worker,
sal_worker: self.sal_worker,
v_worker: self.v_worker,
python_worker: self.python_worker,
worker_env_vars: self.worker_env_vars,
};
let supervisor = Supervisor {
redis_client: client, redis_client: client,
caller_id: self.caller_id.unwrap_or_else(|| "default_caller".to_string()), zinit_client,
context_id: self.context_id.unwrap_or_else(|| "default_context".to_string()), builder_data: Some(builder_data),
heroscript_workers: self.heroscript_workers, };
rhai_sal_workers: self.rhai_sal_workers,
rhai_dsl_workers: self.rhai_dsl_workers, Ok(supervisor)
})
} }
} }
impl Dispatcher { impl Supervisor {
/// Select a worker ID based on the script type using round-robin or first available /// Start all configured workers asynchronously.
fn select_worker_for_script_type(&self, script_type: &ScriptType) -> Result<String, DispatcherError> { /// This method should be called after build() to launch the workers.
let workers = match script_type { pub async fn start_workers(&self) -> Result<(), SupervisorError> {
ScriptType::HeroScript => &self.heroscript_workers, // Clean up any existing worker services first
ScriptType::RhaiSAL => &self.rhai_sal_workers, self.cleanup_existing_workers().await?;
ScriptType::RhaiDSL => &self.rhai_dsl_workers,
};
if workers.is_empty() { // Launch configured workers if builder data is available
return Err(DispatcherError::InvalidInput(format!( if let Some(builder_data) = &self.builder_data {
"No workers configured for script type: {:?}", script_type self.launch_configured_workers(builder_data).await?;
)));
} }
// For now, use simple round-robin by selecting first available worker Ok(())
// TODO: Implement proper load balancing }
Ok(workers[0].clone())
/// Clean up all worker services from zinit on program exit
pub async fn cleanup_and_shutdown(&self) -> Result<(), SupervisorError> {
info!("Cleaning up worker services before shutdown...");
let worker_names = vec![
"osis_worker_1",
"sal_worker_1",
"v_worker_1",
"python_worker_1"
];
for worker_name in worker_names {
if let Err(e) = self.stop_and_delete_worker(worker_name).await {
warn!("Failed to cleanup worker {}: {}", worker_name, e);
}
}
info!("Worker cleanup completed");
Ok(())
}
/// Clean up any existing worker services on startup
async fn cleanup_existing_workers(&self) -> Result<(), SupervisorError> {
info!("Cleaning up any existing worker services...");
let worker_names = vec![
"osis_worker_1",
"sal_worker_1",
"v_worker_1",
"python_worker_1"
];
for worker_name in worker_names {
// Try to stop and delete, but don't fail if they don't exist
let _ = self.stop_and_delete_worker(worker_name).await;
}
info!("Existing worker cleanup completed");
Ok(())
}
/// Stop and delete a worker service from zinit
async fn stop_and_delete_worker(&self, worker_name: &str) -> Result<(), SupervisorError> {
// First try to stop the worker
if let Err(e) = self.zinit_client.stop(worker_name).await {
debug!("Worker {} was not running or failed to stop: {}", worker_name, e);
}
// Then try to delete the service
if let Err(e) = self.zinit_client.delete(worker_name).await {
debug!("Worker {} service did not exist or failed to delete: {}", worker_name, e);
} else {
info!("Successfully deleted worker service: {}", worker_name);
}
Ok(())
}
/// Get the hardcoded worker queue key for the script type
fn get_worker_queue_key(&self, script_type: &ScriptType) -> String {
format!("{}worker_queue:{}", NAMESPACE_PREFIX, script_type.worker_queue_suffix())
} }
pub fn new_job(&self) -> JobBuilder {

@@ -127,7 +217,7 @@ impl Dispatcher {
        &self,
        conn: &mut redis::aio::MultiplexedConnection,
        job: &Job,
-    ) -> Result<(), DispatcherError> {
+    ) -> Result<(), SupervisorError> {
        debug!(
            "Submitting play request: {} for script type: {:?} with namespace prefix: {}",
            job.id, job.script_type, NAMESPACE_PREFIX

@@ -135,7 +225,7 @@ impl Dispatcher {
        // Use the shared Job struct's Redis storage method
        job.store_in_redis(conn).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to store job in Redis: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to store job in Redis: {}", e)))?;
        Ok(())
    }

@@ -145,13 +235,9 @@ impl Dispatcher {
        &self,
        conn: &mut redis::aio::MultiplexedConnection,
        job_id: String,
-        worker_id: String
-    ) -> Result<(), DispatcherError> {
-        let worker_queue_key = format!(
-            "{}{}",
-            NAMESPACE_PREFIX,
-            worker_id.replace(" ", "_").to_lowercase()
-        );
+        script_type: &ScriptType
+    ) -> Result<(), SupervisorError> {
+        let worker_queue_key = self.get_worker_queue_key(script_type);

        // lpush also infers its types, RV is typically i64 (length of list) or () depending on exact command variant
        // For `redis::AsyncCommands::lpush`, it's `RedisResult<R>` where R: FromRedisValue

@@ -169,7 +255,7 @@ impl Dispatcher {
        job_key: &String,
        reply_queue_key: &String,
        timeout: Duration,
-    ) -> Result<String, DispatcherError> {
+    ) -> Result<String, SupervisorError> {
        // BLPOP on the reply queue
        // The timeout for BLPOP is in seconds (integer)
        let blpop_timeout_secs = timeout.as_secs().max(1); // Ensure at least 1 second for BLPOP timeout

@@ -189,7 +275,7 @@ impl Dispatcher {
                );
                // Optionally, delete the reply queue
                let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
-                Err(DispatcherError::Timeout(job_key.clone()))
+                Err(SupervisorError::Timeout(job_key.clone()))
            }
            Err(e) => {
                // Redis error

@@ -199,7 +285,7 @@ impl Dispatcher {
                );
                // Optionally, delete the reply queue
                let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
-                Err(DispatcherError::RedisError(e))
+                Err(SupervisorError::RedisError(e))
            }
        }
    }

@@ -208,7 +294,7 @@ impl Dispatcher {
    pub async fn create_job(
        &self,
        job: &Job,
-    ) -> Result<(), DispatcherError> {
+    ) -> Result<(), SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        self.create_job_using_connection(

@@ -223,16 +309,13 @@ impl Dispatcher {
    pub async fn start_job(
        &self,
        job_id: &str,
-    ) -> Result<(), DispatcherError> {
+    ) -> Result<(), SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Load the job to get its script type
        let job = Job::load_from_redis(&mut conn, job_id).await?;

-        // Select worker based on script type
-        let worker_id = self.select_worker_for_script_type(&job.script_type)?;
-        self.start_job_using_connection(&mut conn, job_id.to_string(), worker_id).await?;
+        self.start_job_using_connection(&mut conn, job_id.to_string(), &job.script_type).await?;

        Ok(())
    }

@@ -240,9 +323,7 @@ impl Dispatcher {
    pub async fn run_job_and_await_result(
        &self,
        job: &Job
-    ) -> Result<String, DispatcherError> {
-        // Select worker based on script type
-        let worker_id = self.select_worker_for_script_type(&job.script_type)?;
+    ) -> Result<String, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, job.id); // Derived from the passed job_id

@@ -253,7 +334,7 @@ impl Dispatcher {
        )
        .await?;

-        self.start_job_using_connection(&mut conn, job.id.clone(), worker_id).await?;
+        self.start_job_using_connection(&mut conn, job.id.clone(), &job.script_type).await?;

        info!(
            "Task {} submitted. Waiting for result on queue {} with timeout {:?}...",

@@ -275,7 +356,7 @@ impl Dispatcher {
    pub async fn get_job_status(
        &self,
        job_id: &str,
-    ) -> Result<JobStatus, DispatcherError> {
+    ) -> Result<JobStatus, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);

@@ -310,7 +391,7 @@ impl Dispatcher {
    pub async fn get_job_output(
        &self,
        job_id: &str,
-    ) -> Result<Option<String>, DispatcherError> {
+    ) -> Result<Option<String>, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);

@@ -329,16 +410,16 @@ impl Dispatcher {
    }

    /// List all jobs in Redis
-    pub async fn list_jobs(&self) -> Result<Vec<String>, DispatcherError> {
+    pub async fn list_jobs(&self) -> Result<Vec<String>, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Use the shared Job struct's list method
        Job::list_all_job_ids(&mut conn).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to list jobs: {}", e)))
    }

    /// Stop a job by pushing its ID to the stop queue
-    pub async fn stop_job(&self, job_id: &str) -> Result<(), DispatcherError> {
+    pub async fn stop_job(&self, job_id: &str) -> Result<(), SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Get job details to determine script type and appropriate worker

@@ -346,19 +427,18 @@ impl Dispatcher {
        let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;

        if job_data.is_empty() {
-            return Err(DispatcherError::InvalidInput(format!("Job {} not found", job_id)));
+            return Err(SupervisorError::InvalidInput(format!("Job {} not found", job_id)));
        }

        // Parse script type from job data
        let script_type_str = job_data.get("script_type")
-            .ok_or_else(|| DispatcherError::InvalidInput("Job missing script_type field".to_string()))?;
+            .ok_or_else(|| SupervisorError::InvalidInput("Job missing script_type field".to_string()))?;

        let script_type: ScriptType = serde_json::from_str(&format!("\"{}\"", script_type_str))
-            .map_err(|e| DispatcherError::InvalidInput(format!("Invalid script type: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Invalid script type: {}", e)))?;

-        // Select appropriate worker for this script type
-        let worker_id = self.select_worker_for_script_type(&script_type)?;
-        let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, worker_id);
+        // Use hardcoded stop queue key for this script type
+        let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, script_type.worker_queue_suffix());

        // Push job ID to the stop queue
        conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?;

@@ -368,7 +448,7 @@ impl Dispatcher {
    }

    /// Get logs for a job by reading from its log file
-    pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, DispatcherError> {
+    pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);

@@ -400,48 +480,48 @@ impl Dispatcher {
    }

    /// Delete a specific job by ID
-    pub async fn delete_job(&self, job_id: &str) -> Result<(), DispatcherError> {
+    pub async fn delete_job(&self, job_id: &str) -> Result<(), SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Use the shared Job struct's delete method
        Job::delete_from_redis(&mut conn, job_id).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to delete job: {}", e)))?;

        info!("Job {} deleted successfully", job_id);
        Ok(())
    }

    /// Clear all jobs from Redis
-    pub async fn clear_all_jobs(&self) -> Result<usize, DispatcherError> {
+    pub async fn clear_all_jobs(&self) -> Result<usize, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Get all job IDs first
        let job_ids = Job::list_all_job_ids(&mut conn).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to list jobs: {}", e)))?;

        let count = job_ids.len();

        // Delete each job using the shared method
        for job_id in job_ids {
            Job::delete_from_redis(&mut conn, &job_id).await
-                .map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job {}: {}", job_id, e)))?;
+                .map_err(|e| SupervisorError::InvalidInput(format!("Failed to delete job {}: {}", job_id, e)))?;
        }

        Ok(count)
    }

    /// Check if all prerequisites for a job are completed
-    pub async fn check_prerequisites_completed(&self, job_id: &str) -> Result<bool, DispatcherError> {
+    pub async fn check_prerequisites_completed(&self, job_id: &str) -> Result<bool, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Load the job using the shared Job struct
        let job = Job::load_from_redis(&mut conn, job_id).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to load job: {}", e)))?;

        // Check each prerequisite job status
        for prereq_id in &job.prerequisites {
            let status = Job::get_status(&mut conn, prereq_id).await
-                .map_err(|e| DispatcherError::InvalidInput(format!("Failed to get prerequisite status: {}", e)))?;
+                .map_err(|e| SupervisorError::InvalidInput(format!("Failed to get prerequisite status: {}", e)))?;

            if status != JobStatus::Finished {
                return Ok(false); // Prerequisite not completed

@@ -452,12 +532,12 @@ impl Dispatcher {
    }

    /// Update job status and check dependent jobs for readiness
-    pub async fn update_job_status_and_check_dependents(&self, job_id: &str, new_status: JobStatus) -> Result<Vec<String>, DispatcherError> {
+    pub async fn update_job_status_and_check_dependents(&self, job_id: &str, new_status: JobStatus) -> Result<Vec<String>, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Update job status using shared Job method
        Job::update_status(&mut conn, job_id, new_status.clone()).await
-            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to update job status: {}", e)))?;
+            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to update job status: {}", e)))?;

        let mut ready_jobs = Vec::new();

@@ -465,12 +545,12 @@ impl Dispatcher {
        if new_status == JobStatus::Finished {
            // Load the job to get its dependents
            let job = Job::load_from_redis(&mut conn, job_id).await
-                .map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
+                .map_err(|e| SupervisorError::InvalidInput(format!("Failed to load job: {}", e)))?;

            // Check each dependent job
            for dependent_id in &job.dependents {
                let dependent_status = Job::get_status(&mut conn, dependent_id).await
-                    .map_err(|e| DispatcherError::InvalidInput(format!("Failed to get dependent status: {}", e)))?;
+                    .map_err(|e| SupervisorError::InvalidInput(format!("Failed to get dependent status: {}", e)))?;

                // Only check jobs that are waiting for prerequisites
                if dependent_status == JobStatus::WaitingForPrerequisites {

@@ -478,7 +558,7 @@ impl Dispatcher {
                    if self.check_prerequisites_completed(dependent_id).await? {
                        // Update status to dispatched and add to ready jobs
                        Job::update_status(&mut conn, dependent_id, JobStatus::Dispatched).await
-                            .map_err(|e| DispatcherError::InvalidInput(format!("Failed to update dependent status: {}", e)))?;
+                            .map_err(|e| SupervisorError::InvalidInput(format!("Failed to update dependent status: {}", e)))?;
                        ready_jobs.push(dependent_id.clone());
                    }
                }

@@ -489,7 +569,7 @@ impl Dispatcher {
    }

    /// Dispatch jobs that are ready (have all prerequisites completed)
-    pub async fn dispatch_ready_jobs(&self, ready_job_ids: Vec<String>) -> Result<(), DispatcherError> {
+    pub async fn dispatch_ready_jobs(&self, ready_job_ids: Vec<String>) -> Result<(), SupervisorError> {
        for job_id in ready_job_ids {
            // Get job data to determine script type and select worker
            let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

@@ -497,17 +577,17 @@ impl Dispatcher {
            let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;

            if let Some(script_type_str) = job_data.get("script_type") {
-                // Parse script type (stored as Debug format, e.g., "HeroScript")
+                // Parse script type (stored as Debug format, e.g., "OSIS")
                let script_type = match script_type_str.as_str() {
-                    "HeroScript" => ScriptType::HeroScript,
-                    "RhaiSAL" => ScriptType::RhaiSAL,
-                    "RhaiDSL" => ScriptType::RhaiDSL,
-                    _ => return Err(DispatcherError::InvalidInput(format!("Unknown script type: {}", script_type_str))),
+                    "OSIS" => ScriptType::OSIS,
+                    "SAL" => ScriptType::SAL,
+                    "V" => ScriptType::V,
+                    "Python" => ScriptType::Python,
+                    _ => return Err(SupervisorError::InvalidInput(format!("Unknown script type: {}", script_type_str))),
                };

-                // Select worker and dispatch job
-                let worker_id = self.select_worker_for_script_type(&script_type)?;
-                self.start_job_using_connection(&mut conn, job_id, worker_id).await?;
+                // Dispatch job using hardcoded queue
+                self.start_job_using_connection(&mut conn, job_id, &script_type).await?;
            }
        }

        Ok(())
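Taken together, the prerequisite/dependent methods form a small completion-driven workflow: finishing one job surfaces every dependent whose prerequisites are now all `Finished`, and those can be dispatched in the same pass. A minimal sketch of the intended call sequence (the `supervisor` handle and the surrounding async context are assumptions; error propagation is left to `?`):

```rust
use hero_job::JobStatus;
use hero_supervisor::{Supervisor, SupervisorError};

// Hypothetical driver: invoked whenever a worker reports a finished job.
async fn on_job_finished(
    supervisor: &Supervisor,
    job_id: &str,
) -> Result<(), SupervisorError> {
    // Mark the job finished and collect dependents that just became ready.
    let ready_jobs = supervisor
        .update_job_status_and_check_dependents(job_id, JobStatus::Finished)
        .await?;

    // Push each newly ready job onto its script type's hardcoded queue.
    supervisor.dispatch_ready_jobs(ready_jobs).await?;
    Ok(())
}
```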

View File

@@ -0,0 +1,368 @@
//! Worker lifecycle management functionality for the Hero Supervisor
//!
//! This module provides worker process lifecycle management using Zinit as the process manager.
//! All functionality is implemented as methods on the Supervisor struct for a clean API.
use log::{debug, error, info, warn};
use serde_json::json;
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
use zinit_client::{ZinitClient, ServiceStatus, ServiceState};
use hero_job::ScriptType;
use crate::{Supervisor, SupervisorError};
/// Information about a worker including its configuration and current status
#[derive(Debug, Clone)]
pub struct WorkerInfo {
pub config: WorkerConfig,
pub status: Option<ServiceStatus>,
pub is_running: bool,
}
/// Configuration for a worker binary
#[derive(Debug, Clone)]
pub struct WorkerConfig {
/// Name of the worker service
pub name: String,
/// Path to the worker binary
pub binary_path: PathBuf,
/// Script type this worker handles
pub script_type: ScriptType,
/// Command line arguments for the worker
pub args: Vec<String>,
/// Environment variables for the worker
pub env: HashMap<String, String>,
/// Whether this worker should restart on exit
pub restart_on_exit: bool,
/// Health check command (optional)
pub health_check: Option<String>,
/// Dependencies that must be running first
pub dependencies: Vec<String>,
}
impl WorkerConfig {
pub fn new(name: String, binary_path: PathBuf, script_type: ScriptType) -> Self {
Self {
name,
binary_path,
script_type,
args: Vec::new(),
env: HashMap::new(),
restart_on_exit: true,
health_check: None,
dependencies: Vec::new(),
}
}
pub fn with_args(mut self, args: Vec<String>) -> Self {
self.args = args;
self
}
pub fn with_env(mut self, env: HashMap<String, String>) -> Self {
self.env = env;
self
}
pub fn with_health_check(mut self, health_check: String) -> Self {
self.health_check = Some(health_check);
self
}
pub fn with_dependencies(mut self, dependencies: Vec<String>) -> Self {
self.dependencies = dependencies;
self
}
pub fn no_restart(mut self) -> Self {
self.restart_on_exit = false;
self
}
}
/// Worker lifecycle management methods for Supervisor
impl Supervisor {
/// Get all workers with their configuration and status - unified method
pub async fn get_workers(&self, worker_configs: &[WorkerConfig]) -> Vec<WorkerInfo> {
let mut workers = Vec::new();
for config in worker_configs {
let status = self.zinit_client.status(&config.name).await.ok();
let is_running = status.as_ref()
.map(|s| matches!(s.state, ServiceState::Running) && s.pid > 0)
.unwrap_or(false);
workers.push(WorkerInfo {
config: config.clone(),
status,
is_running,
});
}
workers
}
/// Start a worker using Zinit
pub async fn start_worker(
&self,
worker_config: &WorkerConfig,
) -> Result<(), SupervisorError> {
info!("Starting worker: {}", worker_config.name);
// Create service configuration for Zinit
let service_config = self.create_service_config(worker_config);
// Create the service in Zinit
self.zinit_client.create_service(&worker_config.name, service_config).await
.map_err(|e| SupervisorError::ZinitError(format!("Failed to create service: {}", e)))?;
// Start the service
self.zinit_client.start(&worker_config.name).await
.map_err(|e| SupervisorError::ZinitError(format!("Failed to start worker: {}", e)))?;
info!("Successfully started worker: {}", worker_config.name);
Ok(())
}
/// Stop a worker using Zinit
pub async fn stop_worker(
&self,
worker_name: &str,
) -> Result<(), SupervisorError> {
info!("Stopping worker: {}", worker_name);
match self.zinit_client.stop(worker_name).await {
Ok(_) => {
info!("Successfully stopped worker: {}", worker_name);
Ok(())
}
Err(e) => {
error!("Failed to stop worker {}: {}", worker_name, e);
Err(SupervisorError::WorkerStopFailed(worker_name.to_string(), e.to_string()))
}
}
}
/// Restart a worker using Zinit
pub async fn restart_worker(
&self,
worker_name: &str,
) -> Result<(), SupervisorError> {
info!("Restarting worker: {}", worker_name);
match self.zinit_client.restart(worker_name).await {
Ok(_) => {
info!("Successfully restarted worker: {}", worker_name);
Ok(())
}
Err(e) => {
error!("Failed to restart worker {}: {}", worker_name, e);
Err(SupervisorError::WorkerRestartFailed(worker_name.to_string(), e.to_string()))
}
}
}
/// Get status of a worker using Zinit
pub async fn get_worker_status(
&self,
worker_name: &str,
zinit_client: &ZinitClient,
) -> Result<ServiceStatus, SupervisorError> {
match zinit_client.status(worker_name).await {
Ok(status) => Ok(status),
Err(e) => {
error!("Failed to get status for worker {}: {}", worker_name, e);
Err(SupervisorError::WorkerStatusFailed(worker_name.to_string(), e.to_string()))
}
}
}
/// Get status of all workers
pub async fn get_all_worker_status(
&self,
worker_configs: &[WorkerConfig],
zinit_client: &ZinitClient,
) -> Result<HashMap<String, ServiceStatus>, SupervisorError> {
let mut status_map = HashMap::new();
for worker in worker_configs {
match zinit_client.status(&worker.name).await {
Ok(status) => {
status_map.insert(worker.name.clone(), status);
}
Err(e) => {
warn!("Failed to get status for worker {}: {}", worker.name, e);
}
}
}
Ok(status_map)
}
/// Start multiple workers
pub async fn start_workers(
&self,
worker_configs: &[WorkerConfig],
) -> Result<(), SupervisorError> {
info!("Starting {} workers", worker_configs.len());
for worker in worker_configs {
self.start_worker(worker).await?;
}
Ok(())
}
/// Stop multiple workers
pub async fn stop_workers(
&self,
worker_names: &[String],
) -> Result<(), SupervisorError> {
info!("Stopping {} workers", worker_names.len());
for worker_name in worker_names {
self.stop_worker(worker_name).await?;
}
Ok(())
}
/// Get count of running workers for a script type
pub async fn get_running_worker_count(
&self,
worker_configs: &[WorkerConfig],
script_type: &ScriptType,
zinit_client: &ZinitClient,
) -> usize {
let mut running_count = 0;
for worker in worker_configs {
if worker.script_type == *script_type {
if let Ok(status) = zinit_client.status(&worker.name).await {
if status.state == ServiceState::Running {
running_count += 1;
}
}
}
}
running_count
}
/// Send a ping job to a worker for health checking
pub async fn send_ping_job(
&self,
script_type: ScriptType,
) -> Result<(), SupervisorError> {
// Create a ping job
let ping_job = self
.new_job()
.script_type(script_type.clone())
.script("ping") // Simple ping script
.timeout(Duration::from_secs(30))
.build()?;
// Execute the ping job with a short timeout
match self.run_job_and_await_result(&ping_job).await {
Ok(_) => {
debug!("Ping job successful for script type: {:?}", script_type);
Ok(())
}
Err(e) => {
warn!("Ping job failed for script type {:?}: {}", script_type, e);
Err(SupervisorError::PingJobFailed(format!("{:?}", script_type), e.to_string()))
}
}
}
/// Create Zinit service configuration from worker config
fn create_service_config(&self, worker: &WorkerConfig) -> serde_json::Value {
let mut config = json!({
"exec": format!("{} {}",
worker.binary_path.display(),
worker.args.join(" ")
),
"oneshot": !worker.restart_on_exit,
});
if let Some(health_check) = &worker.health_check {
config["test"] = json!(health_check);
}
if !worker.dependencies.is_empty() {
config["after"] = json!(worker.dependencies);
}
// Add environment variables if any
if !worker.env.is_empty() {
config["env"] = json!(worker.env);
}
config
}
/// Launch workers based on SupervisorBuilder configuration
pub(crate) async fn launch_configured_workers(&self, builder: &crate::SupervisorBuilderData) -> Result<(), SupervisorError> {
use hero_job::ScriptType;
use std::path::PathBuf;
// Launch OSIS worker if configured
if let Some(binary_path) = &builder.osis_worker {
let worker_id = "osis_worker_1";
let mut config = WorkerConfig::new(
worker_id.to_string(),
PathBuf::from(binary_path),
ScriptType::OSIS
);
config.env.extend(builder.worker_env_vars.clone());
info!("Launching OSIS worker: {}", worker_id);
self.start_worker(&config).await?;
}
// Launch SAL worker if configured
if let Some(binary_path) = &builder.sal_worker {
let worker_id = "sal_worker_1";
let mut config = WorkerConfig::new(
worker_id.to_string(),
PathBuf::from(binary_path),
ScriptType::SAL
);
config.env.extend(builder.worker_env_vars.clone());
info!("Launching SAL worker: {}", worker_id);
self.start_worker(&config).await?;
}
// Launch V worker if configured
if let Some(binary_path) = &builder.v_worker {
let worker_id = "v_worker_1";
let mut config = WorkerConfig::new(
worker_id.to_string(),
PathBuf::from(binary_path),
ScriptType::V
);
config.env.extend(builder.worker_env_vars.clone());
info!("Launching V worker: {}", worker_id);
self.start_worker(&config).await?;
}
// Launch Python worker if configured
if let Some(binary_path) = &builder.python_worker {
let worker_id = "python_worker_1";
let mut config = WorkerConfig::new(
worker_id.to_string(),
PathBuf::from(binary_path),
ScriptType::Python
);
config.env.extend(builder.worker_env_vars.clone());
info!("Launching Python worker: {}", worker_id);
self.start_worker(&config).await?;
}
Ok(())
}
}
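As a usage sketch (not part of the diff), the builder-style `WorkerConfig` methods compose naturally when launching a one-off worker by hand. The binary path, arguments, env values, and health-check command below are placeholder assumptions, and the re-export of these types at the crate root is assumed:

```rust
use std::collections::HashMap;
use std::path::PathBuf;
use hero_job::ScriptType;
use hero_supervisor::{Supervisor, SupervisorError, WorkerConfig};

// Hypothetical launch helper; all concrete values are illustrative.
async fn launch_example_worker(supervisor: &Supervisor) -> Result<(), SupervisorError> {
    let mut env = HashMap::new();
    env.insert("REDIS_URL".to_string(), "redis://127.0.0.1/".to_string());

    let config = WorkerConfig::new(
        "osis_worker_1".to_string(),
        PathBuf::from("/usr/local/bin/osis_worker"),
        ScriptType::OSIS,
    )
    .with_args(vec!["--redis-url".into(), "redis://127.0.0.1/".into()])
    .with_env(env)
    .with_health_check("/usr/local/bin/osis_worker --health".to_string());

    // Registers the service with Zinit and starts it.
    supervisor.start_worker(&config).await
}
```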

View File

@@ -15,7 +15,7 @@ path = "cmd/worker.rs"
[dependencies]
redis = { version = "0.25.0", features = ["tokio-comp"] }
-rhai = { version = "1.18.0", default-features = false, features = ["sync", "decimal", "std"] } # Added "decimal" for broader script support
+rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }

@@ -24,6 +24,18 @@ env_logger = "0.10"
clap = { version = "4.4", features = ["derive"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # Though task_id is string, uuid might be useful
chrono = { version = "0.4", features = ["serde"] }
-rhai_dispatcher = { path = "../../../rhailib/src/dispatcher" }
-rhailib_engine = { path = "../engine" }
+hero_supervisor = { path = "../supervisor" }
+hero_job = { path = "../job" }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }
+heromodels_core = { path = "../../../db/heromodels_core" }
+heromodels-derive = { path = "../../../db/heromodels-derive" }
+rhailib_dsl = { path = "../../../rhailib/src/dsl" }
+
+[features]
+default = ["calendar", "finance"]
+calendar = []
+finance = []
+flow = []
+legal = []
+projects = []
+biz = []

View File

@@ -34,7 +34,7 @@ The `rhai_worker` crate implements a standalone worker service that listens for
   /path/to/worker --redis-url redis://127.0.0.1/ --circle-public-key 02...abc
   ```
2. The `run_worker_loop` connects to Redis and starts listening to its designated task queue (e.g., `rhai_tasks:02...abc`).
-3. A `rhai_dispatcher` submits a task by pushing a `task_id` to this queue and storing the script and other details in a Redis hash.
+3. A `rhai_supervisor` submits a task by pushing a `task_id` to this queue and storing the script and other details in a Redis hash.
4. The worker's `BLPOP` command picks up the `task_id`.
5. The worker retrieves the script from the corresponding `rhai_task_details:<task_id>` hash.
6. It updates the task's status to "processing".

@@ -46,7 +46,7 @@ The `rhai_worker` crate implements a standalone worker service that listens for
- A running Redis instance accessible by the worker.
- An orchestrator process (like `launcher`) to spawn the worker.
-- A `rhai_dispatcher` (or another system) to populate the Redis queues.
+- A `rhai_supervisor` (or another system) to populate the Redis queues.

## Building and Running
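A sketch of the submit side of the flow described in steps 2–3 above; only the `rhai_tasks:<key>` and `rhai_task_details:<task_id>` key shapes come from this README, and the hash field names are assumptions:

```rust
use redis::AsyncCommands;

// Hypothetical submit-side helper mirroring the queue/hash protocol above.
async fn submit_task(
    conn: &mut redis::aio::MultiplexedConnection,
    circle_public_key: &str,
    task_id: &str,
    script: &str,
) -> redis::RedisResult<()> {
    // Store the script and an initial status in the task details hash...
    conn.hset_multiple::<_, _, _, ()>(
        format!("rhai_task_details:{}", task_id),
        &[("script", script), ("status", "pending")],
    )
    .await?;

    // ...then hand the task ID to the worker's BLPOP loop.
    conn.lpush::<_, _, ()>(format!("rhai_tasks:{}", circle_public_key), task_id)
        .await
}
```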

View File

@@ -1,5 +1,5 @@
use clap::Parser;
-use rhailib_engine::create_heromodels_engine;
+use rhailib_worker::engine::create_heromodels_engine;
use rhailib_worker::spawn_rhai_worker;
use tokio::sync::mpsc;

View File

@@ -44,7 +44,7 @@ graph TD
- **Redis Integration**: Task queue management and communication
- **Rhai Engine**: Script execution with full DSL capabilities
-- **Client Integration**: Shared data structures with rhai_dispatcher
+- **Client Integration**: Shared data structures with rhai_supervisor
- **Heromodels**: Database and business logic integration
- **Async Runtime**: Tokio for high-performance concurrent processing

View File

@@ -1,6 +1,6 @@
-//! # Rhailib Engine
+//! # Rhai Engine Module
//!
-//! The central Rhai scripting engine for the heromodels ecosystem. This crate provides
+//! The central Rhai scripting engine for the heromodels ecosystem. This module provides
//! a unified interface for creating, configuring, and executing Rhai scripts with access
//! to all business domain modules.
//!

@@ -14,7 +14,7 @@
//! ## Quick Start
//!
//! ```rust
-//! use rhailib_engine::{create_heromodels_engine, eval_script};
+//! use rhailib_worker::engine::{create_heromodels_engine, eval_script};
//!
//! // Create a fully configured engine
//! let engine = create_heromodels_engine();

@@ -44,98 +44,53 @@ use rhailib_dsl;
use std::fs;
use std::path::Path;

-/// Mock database module for testing and examples
-pub mod mock_db;
-
/// Creates a fully configured Rhai engine with all available DSL modules.
///
-/// This function creates a new Rhai engine instance, configures it with appropriate
-/// limits and settings, and registers all available business domain modules based
-/// on enabled features.
-///
-/// # Engine Configuration
-///
-/// The engine is configured with the following limits:
-/// - **Expression Depth**: 128 levels for both expressions and functions
-/// - **String Size**: 10 MB maximum
-/// - **Array Size**: 10,000 elements maximum
-/// - **Map Size**: 10,000 key-value pairs maximum
-///
-/// # Registered Modules
-///
-/// All enabled DSL modules are automatically registered, including:
-/// - Business operations (companies, products, sales, shareholders)
-/// - Financial models (accounts, assets, marketplace)
-/// - Content management (collections, images, PDFs, books)
-/// - Workflow management (flows, steps, signatures)
-/// - And more based on enabled features
+/// This function creates a new Rhai engine and registers all available heromodels
+/// DSL modules based on the enabled features. The engine comes pre-configured
+/// with all necessary functions and types for business logic scripting.
///
/// # Returns
///
/// A fully configured `Engine` instance ready for script execution.
///
-/// # Features
-///
-/// The engine includes modules based on enabled Cargo features:
-/// - `calendar`: Calendar and event management functions
-/// - `finance`: Financial accounts, assets, and marketplace operations
-/// - `flow`: Workflow and approval process management
-/// - `legal`: Contract and legal document handling
-/// - `projects`: Project and task management
-/// - `biz`: General business operations and entities
-///
/// # Example
///
/// ```rust
-/// use rhailib_engine::create_heromodels_engine;
+/// use rhailib_worker::engine::create_heromodels_engine;
///
/// let engine = create_heromodels_engine();
///
-/// // Engine is now ready to execute scripts with access to all DSL functions
+/// // The engine is now ready to execute business logic scripts
/// let result = engine.eval::<String>(r#"
-///     let company = new_company().name("Test Corp");
-///     company.name
-/// "#).unwrap();
-/// assert_eq!(result, "Test Corp");
+///     "Hello from heromodels engine!"
+/// "#)?;
/// ```
-///
-/// # Performance Notes
-///
-/// The engine is optimized for production use with reasonable defaults for
-/// operation limits, expression depth, and memory usage. For benchmarking
-/// or special use cases, you may want to adjust these limits after creation.
pub fn create_heromodels_engine() -> Engine {
    let mut engine = Engine::new();

-    // Configure engine settings
-    engine.set_max_expr_depths(128, 128);
-    engine.set_max_string_size(10 * 1024 * 1024); // 10 MB
-    engine.set_max_array_size(10 * 1024); // 10K elements
-    engine.set_max_map_size(10 * 1024); // 10K elements
-
    // Register all heromodels Rhai modules
    rhailib_dsl::register_dsl_modules(&mut engine);

    engine
}

-// /// Register all heromodels Rhai modules with the engine
-// pub fn register_all_modules(engine: &mut Engine, db: Arc<OurDB>) {
-//     // Register the calendar module if the feature is enabled
-//     heromodels::models::access::register_access_rhai_module(engine, db.clone());
-//     #[cfg(feature = "calendar")]
-//     heromodels::models::calendar::register_calendar_rhai_module(engine, db.clone());
-//     heromodels::models::contact::register_contact_rhai_module(engine, db.clone());
-//     heromodels::models::library::register_library_rhai_module(engine, db.clone());
-//     heromodels::models::circle::register_circle_rhai_module(engine, db.clone());
-//     // Register the flow module if the feature is enabled
-//     #[cfg(feature = "flow")]
-//     heromodels::models::flow::register_flow_rhai_module(engine, db.clone());
-//     // // Register the finance module if the feature is enabled
-//     // #[cfg(feature = "finance")]
-//     // heromodels::models::finance::register_finance_rhai_module(engine, db.clone());
-//     // Register the legal module if the feature is enabled
-//     #[cfg(feature = "legal")]
-//     heromodels::models::legal::register_legal_rhai_module(engine, db.clone());
-//     // Register the projects module if the feature is enabled
-//     #[cfg(feature = "projects")]
-//     heromodels::models::projects::register_projects_rhai_module(engine, db.clone());
-//     // Register the biz module if the feature is enabled
-//     #[cfg(feature = "biz")]
-//     heromodels::models::biz::register_biz_rhai_module(engine, db.clone());
-//     println!("Heromodels Rhai modules registered successfully.");
-// }
-
/// Evaluates a Rhai script string and returns the result.
///
/// This function provides a convenient way to execute Rhai script strings directly

@@ -155,7 +110,7 @@ pub fn create_heromodels_engine() -> Engine {
/// # Example
///
/// ```rust
-/// use rhailib_engine::{create_heromodels_engine, eval_script};
+/// use rhailib_worker::engine::{create_heromodels_engine, eval_script};
///
/// let engine = create_heromodels_engine();
/// let result = eval_script(&engine, r#"

@@ -169,7 +124,7 @@ pub fn eval_script(
    engine: &Engine,
    script: &str,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
-    engine.eval::<rhai::Dynamic>(script)
+    engine.eval(script)
}

/// Evaluates a Rhai script from a file and returns the result.

@@ -191,7 +146,7 @@ pub fn eval_script(
/// # Example
///
/// ```rust
-/// use rhailib_engine::{create_heromodels_engine, eval_file};
+/// use rhailib_worker::engine::{create_heromodels_engine, eval_file};
/// use std::path::Path;
///
/// let engine = create_heromodels_engine();

@@ -207,13 +162,14 @@ pub fn eval_file(
    engine: &Engine,
    file_path: &Path,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
-    match fs::read_to_string(file_path) {
-        Ok(script_content) => engine.eval::<rhai::Dynamic>(&script_content),
-        Err(io_err) => Err(Box::new(EvalAltResult::ErrorSystem(
-            format!("Failed to read script file: {}", file_path.display()),
-            Box::new(io_err),
-        ))),
-    }
+    let script_content = fs::read_to_string(file_path).map_err(|e| {
+        Box::new(EvalAltResult::ErrorSystem(
+            format!("Failed to read script file '{}': {}", file_path.display(), e),
+            e.into(),
+        ))
+    })?;
+
+    engine.eval(&script_content)
}

/// Compiles a Rhai script string into an Abstract Syntax Tree (AST).

@@ -235,7 +191,7 @@ pub fn eval_file(
/// # Example
///
/// ```rust
-/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
+/// use rhailib_worker::engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();

@@ -277,7 +233,7 @@ pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>> {
/// # Example
///
/// ```rust
-/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
+/// use rhailib_worker::engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();
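For orientation, the relocated engine API is used the same way as before, only under the new module path. A minimal sketch, assuming `rhailib_worker` is available as a dependency:

```rust
use rhailib_worker::engine::{create_heromodels_engine, eval_script};

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    // Build the pre-configured engine with all enabled DSL modules.
    let engine = create_heromodels_engine();

    // Evaluate a trivial script and print its dynamic result.
    let result = eval_script(&engine, "40 + 2")?;
    println!("{}", result);
    Ok(())
}
```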

View File

@ -1,43 +1,185 @@
use chrono::Utc; use hero_job::{Job, JobStatus};
use log::{debug, error, info}; use log::{debug, error, info};
use redis::AsyncCommands; use redis::AsyncCommands;
use rhai::{Dynamic, Engine}; use rhai::{Dynamic, Engine};
use rhai_dispatcher::RhaiTaskDetails; // Import for constructing the reply message
use serde_json;
use std::collections::HashMap;
use tokio::sync::mpsc; // For shutdown signal use tokio::sync::mpsc; // For shutdown signal
use tokio::task::JoinHandle; // For serializing the reply message use tokio::task::JoinHandle;
const NAMESPACE_PREFIX: &str = "rhailib:"; /// Engine module containing Rhai engine creation and script execution utilities
pub mod engine;
const NAMESPACE_PREFIX: &str = "hero:job:";
const BLPOP_TIMEOUT_SECONDS: usize = 5; const BLPOP_TIMEOUT_SECONDS: usize = 5;
// This function updates specific fields in the Redis hash. /// Initialize Redis connection for the worker
// It doesn't need to know the full RhaiTaskDetails struct, only the field names. async fn initialize_redis_connection(
async fn update_task_status_in_redis( worker_id: &str,
conn: &mut redis::aio::MultiplexedConnection, redis_url: &str,
task_id: &str, ) -> Result<redis::aio::MultiplexedConnection, Box<dyn std::error::Error + Send + Sync>> {
status: &str, let redis_client = redis::Client::open(redis_url)
output: Option<String>, .map_err(|e| {
error_msg: Option<String>, error!("Worker for Worker ID '{}': Failed to open Redis client: {}", worker_id, e);
) -> redis::RedisResult<()> { e
let task_key = format!("{}{}", NAMESPACE_PREFIX, task_id); })?;
let mut updates: Vec<(&str, String)> = vec![
("status", status.to_string()), let redis_conn = redis_client.get_multiplexed_async_connection().await
("updatedAt", Utc::now().timestamp().to_string()), .map_err(|e| {
]; error!("Worker for Worker ID '{}': Failed to get Redis connection: {}", worker_id, e);
if let Some(out) = output { e
updates.push(("output", out)); })?;
info!("Worker for Worker ID '{}' successfully connected to Redis.", worker_id);
Ok(redis_conn)
}
/// Load job from Redis using Job struct
async fn load_job_from_redis(
redis_conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
worker_id: &str,
) -> Result<Job, Box<dyn std::error::Error + Send + Sync>> {
debug!("Worker '{}', Job {}: Loading job from Redis", worker_id, job_id);
match Job::load_from_redis(redis_conn, job_id).await {
Ok(job) => {
debug!("Worker '{}', Job {}: Successfully loaded job", worker_id, job_id);
Ok(job)
}
Err(e) => {
error!("Worker '{}', Job {}: Failed to load job from Redis: {}", worker_id, job_id, e);
Err(Box::new(e))
}
} }
if let Some(err) = error_msg { }
updates.push(("error", err));
/// Execute the Rhai script and update job status in Redis
async fn execute_script_and_update_status(
redis_conn: &mut redis::aio::MultiplexedConnection,
engine: &mut Engine,
job: &Job,
db_path: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let mut db_config = rhai::Map::new();
db_config.insert("DB_PATH".into(), db_path.to_string().into());
db_config.insert("CALLER_ID".into(), job.caller_id.clone().into());
db_config.insert("CONTEXT_ID".into(), job.context_id.clone().into());
engine.set_default_tag(Dynamic::from(db_config));
debug!("Worker for Context ID '{}': Evaluating script with Rhai engine.", job.context_id);
match engine.eval::<rhai::Dynamic>(&job.script) {
Ok(result) => {
let output_str = if result.is::<String>() {
result.into_string().unwrap()
} else {
result.to_string()
};
info!("Worker for Context ID '{}' job {} completed. Output: {}", job.context_id, job.id, output_str);
// Update job status to finished and set result
Job::update_status(redis_conn, &job.id, JobStatus::Finished).await
.map_err(|e| {
error!("Failed to update job {} status to finished: {}", job.id, e);
e
})?;
Job::set_result(redis_conn, &job.id, &output_str).await
.map_err(|e| {
error!("Failed to set job {} result: {}", job.id, e);
e
})?;
Ok(())
}
Err(e) => {
let error_str = format!("{:?}", *e);
error!("Worker for Context ID '{}' job {} script evaluation failed. Error: {}", job.context_id, job.id, error_str);
// Update job status to error and set error message
Job::update_status(redis_conn, &job.id, JobStatus::Error).await
.map_err(|e| {
error!("Failed to update job {} status to error: {}", job.id, e);
e
})?;
Job::set_error(redis_conn, &job.id, &error_str).await
.map_err(|e| {
error!("Failed to set job {} error: {}", job.id, e);
e
})?;
Ok(())
}
}
}
/// Clean up job from Redis if preserve_tasks is false
async fn cleanup_job(
redis_conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
context_id: &str,
preserve_tasks: bool,
) {
if !preserve_tasks {
if let Err(e) = Job::delete_from_redis(redis_conn, job_id).await {
error!("Worker for Context ID '{}', Job {}: Failed to delete job: {}", context_id, job_id, e);
} else {
debug!("Worker for Context ID '{}', Job {}: Cleaned up job.", context_id, job_id);
}
} else {
debug!("Worker for Context ID '{}', Job {}: Preserving job (preserve_tasks=true)", context_id, job_id);
}
}
/// Process a single job from the queue
async fn process_job(
redis_conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
worker_id: &str,
db_path: &str,
engine: &mut Engine,
preserve_tasks: bool,
) {
debug!("Worker '{}', Job {}: Processing started.", worker_id, job_id);
// Load job from Redis
match load_job_from_redis(redis_conn, job_id, worker_id).await {
Ok(job) => {
info!("Worker '{}' processing job_id: {}. Script: {:.50}...", job.context_id, job_id, job.script);
// Update status to started
debug!("Worker for Context ID '{}', Job {}: Attempting to update status to 'started'.", job.context_id, job_id);
if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Started).await {
error!("Worker for Context ID '{}', Job {}: Failed to update status to 'started': {}", job.context_id, job_id, e);
} else {
debug!("Worker for Context ID '{}', Job {}: Status updated to 'started'.", job.context_id, job_id);
}
// Execute the script and update status
if let Err(e) = execute_script_and_update_status(redis_conn, engine, &job, db_path).await {
error!("Worker for Context ID '{}', Job {}: Script execution failed: {}", job.context_id, job_id, e);
// Ensure job status is set to error if execution failed
if let Err(status_err) = Job::update_status(redis_conn, job_id, JobStatus::Error).await {
error!("Worker for Context ID '{}', Job {}: Failed to update status to error after execution failure: {}", job.context_id, job_id, status_err);
}
}
// Clean up job if needed
cleanup_job(redis_conn, job_id, &job.context_id, preserve_tasks).await;
}
Err(e) => {
error!("Worker '{}', Job {}: Failed to load job: {}", worker_id, job_id, e);
// Clean up invalid job if needed
if !preserve_tasks {
if let Err(del_err) = Job::delete_from_redis(redis_conn, job_id).await {
error!("Worker '{}', Job {}: Failed to delete invalid job: {}", worker_id, job_id, del_err);
}
} else {
debug!("Worker '{}', Job {}: Preserving invalid job (preserve_tasks=true)", worker_id, job_id);
}
}
} }
debug!(
"Updating task {} in Redis with status: {}, updates: {:?}",
task_id, status, updates
);
conn.hset_multiple::<_, _, _, ()>(&task_key, &updates)
.await?;
Ok(())
} }
pub fn spawn_rhai_worker( pub fn spawn_rhai_worker(
@ -45,8 +187,8 @@ pub fn spawn_rhai_worker(
db_path: String, db_path: String,
mut engine: Engine, mut engine: Engine,
redis_url: String, redis_url: String,
mut shutdown_rx: mpsc::Receiver<()>, // Add shutdown receiver mut shutdown_rx: mpsc::Receiver<()>,
preserve_tasks: bool, // Flag to control task cleanup preserve_tasks: bool,
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> { ) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
tokio::spawn(async move { tokio::spawn(async move {
let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id); let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id);
@ -54,43 +196,20 @@ pub fn spawn_rhai_worker(
"Rhai Worker for Worker ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.", "Rhai Worker for Worker ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
worker_id, redis_url, queue_key worker_id, redis_url, queue_key
); );
let redis_client = match redis::Client::open(redis_url.as_str()) { let mut redis_conn = initialize_redis_connection(&worker_id, &redis_url).await?;
Ok(client) => client,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to open Redis client: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
let mut redis_conn = match redis_client.get_multiplexed_async_connection().await {
Ok(conn) => conn,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to get Redis connection: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
info!(
"Worker for Worker ID '{}' successfully connected to Redis.",
worker_id
);
loop { loop {
let blpop_keys = vec![queue_key.clone()]; let blpop_keys = vec![queue_key.clone()];
tokio::select! { tokio::select! {
// Listen for shutdown signal // Listen for shutdown signal
_ = shutdown_rx.recv() => { _ = shutdown_rx.recv() => {
info!("Worker for Worker ID '{}': Shutdown signal received. Terminating loop.", worker_id.clone()); info!("Worker for Worker ID '{}': Shutdown signal received. Terminating loop.", worker_id);
break; break;
} }
// Listen for tasks from Redis // Listen for tasks from Redis
blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => { blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => {
debug!("Worker for Worker ID '{}': Attempting BLPOP on queue: {}", worker_id.clone(), queue_key); debug!("Worker for Worker ID '{}': Attempting BLPOP on queue: {}", worker_id, queue_key);
let response: Option<(String, String)> = match blpop_result { let response: Option<(String, String)> = match blpop_result {
Ok(resp) => resp, Ok(resp) => resp,
Err(e) => { Err(e) => {
@ -98,161 +217,17 @@ pub fn spawn_rhai_worker(
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>); return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
} }
}; };
if let Some((_queue_name_recv, task_id)) = response { if let Some((_queue_name_recv, job_id)) = response {
info!("Worker '{}' received task_id: {} from queue: {}", worker_id, task_id, _queue_name_recv); info!("Worker '{}' received job_id: {} from queue: {}", worker_id, job_id, _queue_name_recv);
debug!("Worker '{}', Task {}: Processing started.", worker_id, task_id); process_job(&mut redis_conn, &job_id, &worker_id, &db_path, &mut engine, preserve_tasks).await;
} else {
let task_details_key = format!("{}{}", NAMESPACE_PREFIX, task_id); debug!("Worker '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", worker_id, queue_key);
debug!("Worker '{}', Task {}: Attempting HGETALL from key: {}", worker_id, task_id, task_details_key);
let task_details_map_result: Result<HashMap<String, String>, _> =
redis_conn.hgetall(&task_details_key).await;
match task_details_map_result {
Ok(details_map) => {
debug!("Worker '{}', Task {}: HGETALL successful. Details: {:?}", worker_id, task_id, details_map);
let script_content_opt = details_map.get("script").cloned();
let created_at_str_opt = details_map.get("createdAt").cloned();
let caller_id = details_map.get("callerId").cloned().expect("callerId field missing from Redis hash");
let context_id = details_map.get("contextId").cloned().expect("contextId field missing from Redis hash");
if context_id.is_empty() {
error!("Worker '{}', Task {}: contextId field missing from Redis hash", worker_id, task_id);
return Err("contextId field missing from Redis hash".into());
}
if caller_id.is_empty() {
error!("Worker '{}', Task {}: callerId field missing from Redis hash", worker_id, task_id);
return Err("callerId field missing from Redis hash".into());
}
if let Some(script_content) = script_content_opt {
info!("Worker '{}' processing task_id: {}. Script: {:.50}...", context_id, task_id, script_content);
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to 'processing'.", context_id, task_id);
if let Err(e) = update_task_status_in_redis(&mut redis_conn, &task_id, "processing", None, None).await {
error!("Worker for Context ID '{}', Task {}: Failed to update status to 'processing': {}", context_id, task_id, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Status updated to 'processing'.", context_id, task_id);
}
let mut db_config = rhai::Map::new();
db_config.insert("DB_PATH".into(), db_path.clone().into());
db_config.insert("CALLER_ID".into(), caller_id.clone().into());
db_config.insert("CONTEXT_ID".into(), context_id.clone().into());
engine.set_default_tag(Dynamic::from(db_config)); // Or pass via CallFnOptions
debug!("Worker for Context ID '{}', Task {}: Evaluating script with Rhai engine.", context_id, task_id);
let mut final_status = "error".to_string(); // Default to error
let mut final_output: Option<String> = None;
let mut final_error_msg: Option<String> = None;
match engine.eval::<rhai::Dynamic>(&script_content) {
Ok(result) => {
let output_str = if result.is::<String>() {
// If the result is a string, we can unwrap it directly.
// This moves `result`, which is fine because it's the last time we use it in this branch.
result.into_string().unwrap()
} else {
result.to_string()
};
info!("Worker for Context ID '{}' task {} completed. Output: {}", context_id, task_id, output_str);
final_status = "completed".to_string();
final_output = Some(output_str);
}
Err(e) => {
let error_str = format!("{:?}", *e);
error!("Worker for Context ID '{}' task {} script evaluation failed. Error: {}", context_id, task_id, error_str);
final_error_msg = Some(error_str);
// final_status remains "error"
}
}
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to '{}'.", context_id, task_id, final_status);
if let Err(e) = update_task_status_in_redis(
&mut redis_conn,
&task_id,
&final_status,
final_output.clone(), // Clone for task hash update
final_error_msg.clone(), // Clone for task hash update
).await {
error!("Worker for Context ID '{}', Task {}: Failed to update final status to '{}': {}", context_id, task_id, final_status, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Final status updated to '{}'.", context_id, task_id, final_status);
}
// Send to reply queue if specified
let created_at = created_at_str_opt
.and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(Utc::now); // Fallback, though createdAt should exist
let reply_details = RhaiTaskDetails {
task_id: task_id.to_string(), // Add the task_id
script: script_content.clone(), // Include script for context in reply
status: final_status, // The final status
output: final_output, // The final output
error: final_error_msg, // The final error
created_at, // Original creation time
updated_at: Utc::now(), // Time of this final update/reply
caller_id: caller_id.clone(),
context_id: context_id.clone(),
worker_id: worker_id.clone(),
};
let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, task_id);
match serde_json::to_string(&reply_details) {
Ok(reply_json) => {
let lpush_result: redis::RedisResult<i64> = redis_conn.lpush(&reply_queue_key, &reply_json).await;
match lpush_result {
Ok(_) => debug!("Worker for Context ID '{}', Task {}: Successfully sent result to reply queue {}", context_id, task_id, reply_queue_key),
Err(e_lpush) => error!("Worker for Context ID '{}', Task {}: Failed to LPUSH result to reply queue {}: {}", context_id, task_id, reply_queue_key, e_lpush),
}
}
Err(e_json) => {
error!("Worker for Context ID '{}', Task {}: Failed to serialize reply details for queue {}: {}", context_id, task_id, reply_queue_key, e_json);
}
}
// Clean up task details based on preserve_tasks flag
if !preserve_tasks {
// The worker is responsible for cleaning up the task details hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete task details key '{}': {}", context_id, task_id, task_details_key, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Cleaned up task details key '{}'.", context_id, task_id, task_details_key);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving task details (preserve_tasks=true)", context_id, task_id);
}
} else { // Script content not found in hash
error!(
"Worker for Context ID '{}', Task {}: Script content not found in Redis hash. Details map: {:?}",
context_id, task_id, details_map
);
// Clean up invalid task details based on preserve_tasks flag
if !preserve_tasks {
// Even if the script is not found, the worker should clean up the invalid task hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete invalid task details key '{}': {}", context_id, task_id, task_details_key, e);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving invalid task details (preserve_tasks=true)", context_id, task_id);
}
}
}
Err(e) => {
error!(
"Worker '{}', Task {}: Failed to fetch details (HGETALL) from Redis for key {}. Error: {:?}",
worker_id, task_id, task_details_key, e
);
                }
            }
            } else {
                debug!("Worker '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", &worker_id, &queue_key);
            }
        }
        } // End of blpop_result match
    } // End of tokio::select!
    } // End of loop
    info!("Worker '{}' has shut down.", worker_id);
    Ok(())
    })
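The worker's reply path above implies a simple consumer contract: whoever submitted the task does a blocking pop on `{NAMESPACE_PREFIX}:reply:{task_id}` and deserializes the `RhaiTaskDetails` JSON. A minimal sketch of such a consumer, assuming `NAMESPACE_PREFIX` resolves to `"hero"` and a recent `redis` crate (the BLPOP timeout argument type varies across crate versions):

```rust
// Illustrative consumer for the reply queue the worker LPUSHes to above.
// Assumes NAMESPACE_PREFIX resolves to "hero"; adjust to the crate's constant.
use redis::AsyncCommands;

async fn await_reply(redis_url: &str, task_id: &str) -> redis::RedisResult<Option<String>> {
    let client = redis::Client::open(redis_url)?;
    let mut conn = client.get_multiplexed_async_connection().await?;
    // Same key shape the worker uses: "{NAMESPACE_PREFIX}:reply:{task_id}"
    let reply_queue_key = format!("hero:reply:{}", task_id);
    // BLPOP yields (key, payload), or nil if the 30-second timeout elapses.
    let popped: Option<(String, String)> = conn.blpop(&reply_queue_key, 30.0).await?;
    Ok(popped.map(|(_key, json)| json))
}
```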

View File

@ -595,7 +595,7 @@ dependencies = [
"log", "log",
"once_cell", "once_cell",
"redis", "redis",
"rhai_dispatcher", "rhai_supervisor",
"rustls", "rustls",
"rustls-pemfile", "rustls-pemfile",
"serde", "serde",
@ -1765,7 +1765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]] [[package]]
name = "rhai_dispatcher" name = "rhai_supervisor"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"chrono", "chrono",

View File

@ -584,7 +584,7 @@ dependencies = [
"once_cell", "once_cell",
"rand 0.8.5", "rand 0.8.5",
"redis", "redis",
"rhai_dispatcher", "rhai_supervisor",
"rustls", "rustls",
"rustls-pemfile", "rustls-pemfile",
"secp256k1", "secp256k1",
@ -1769,7 +1769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]] [[package]]
name = "rhai_dispatcher" name = "rhai_supervisor"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"chrono", "chrono",

View File

@ -44,7 +44,7 @@ redis = { workspace = true }
uuid = { workspace = true }
tokio = { workspace = true }
chrono = { workspace = true }
-hero_dispatcher = { path = "../../../core/dispatcher" }
+hero_supervisor = { path = "../../../core/supervisor" }
hero_job = { path = "../../../core/job" }
thiserror = { workspace = true }
heromodels = { path = "../../../../db/heromodels" }

View File

@ -4,9 +4,14 @@ An OpenRPC WebSocket Server to interface with the [cores](../../core) of authori
- [OpenRPC Specification](openrpc.json) defines the API.
- There are RPC Operations specified to authorize a websocket connection.
-- Authorized clients can execute Rhai scripts on the server.
+- Authorized clients can manage jobs.
- The server uses the [supervisor] to dispatch [jobs] to the [workers].

+## Circles
+
+Circles are contexts in which a hero can act. Each circle has a unique public key and a set of members.
+The server offers a separate path for each circle.

## Authentication

The server provides a robust authentication mechanism to ensure that only authorized clients can execute scripts. The entire flow is handled over the WebSocket connection using two dedicated JSON-RPC methods:
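A hypothetical sketch of what the two-message handshake could look like on the wire; the method names (`fetch_nonce`, `authenticate`) and the parameter shapes are illustrative assumptions, not taken from this repository:

```rust
// Hypothetical client-side view of the two-step handshake described above.
// Method names and parameter shapes are assumptions for illustration only.
use serde_json::json;

fn auth_messages(public_key: &str, signed_nonce_hex: &str) -> (serde_json::Value, serde_json::Value) {
    // Step 1: ask the server for a nonce bound to our public key.
    let fetch_nonce = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "fetch_nonce",
        "params": { "pubkey": public_key }
    });
    // Step 2: return the nonce signed with the matching secp256k1 private key.
    let authenticate = json!({
        "jsonrpc": "2.0",
        "id": 2,
        "method": "authenticate",
        "params": { "pubkey": public_key, "signature": signed_nonce_hex }
    });
    (fetch_nonce, authenticate)
}
```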

View File

@ -1,7 +1,5 @@
-use hero_websocket_server::{ServerBuilder, TlsConfigError};
+use hero_websocket_server::{ServerBuilder, ServerConfig};
use clap::Parser;
-use dotenv::dotenv;
-use log::info;

#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
@ -39,14 +37,62 @@ struct Args {
    #[clap(long, help = "Enable webhook handling")]
    webhooks: bool,

-   #[clap(long, value_parser, help = "Worker ID for the server")]
-   worker_id: String,
+   #[clap(short, long, value_parser, help = "Path to configuration file")]
+   config: Option<String>,
+
+   #[clap(long, help = "Generate a sample configuration file")]
+   generate_config: bool,
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let args = Args::parse();
// Handle config file generation
if args.generate_config {
let sample_config = ServerConfig::create_sample();
let config_path = "config.json";
match sample_config.to_file(config_path) {
Ok(_) => {
println!("✅ Sample configuration file generated: {}", config_path);
println!("📝 Edit the file to customize your server configuration.");
return Ok(());
}
Err(e) => {
eprintln!("❌ Failed to generate config file: {}", e);
std::process::exit(1);
}
}
}
// Load configuration from file if provided, otherwise use CLI args
let config = if let Some(config_path) = &args.config {
match ServerConfig::from_file(config_path) {
Ok(config) => {
println!("📄 Loaded configuration from: {}", config_path);
config
}
Err(e) => {
eprintln!("❌ Failed to load config file {}: {}", config_path, e);
std::process::exit(1);
}
}
} else {
// Create config from CLI arguments
ServerConfig {
host: args.host.clone(),
port: args.port,
redis_url: args.redis_url.clone(),
auth: args.auth,
tls: args.tls,
cert: args.cert.clone(),
key: args.key.clone(),
tls_port: args.tls_port,
webhooks: args.webhooks,
circles: std::collections::HashMap::new(), // Empty circles when using CLI
}
};
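For orientation, a plausible shape for `ServerConfig`, inferred from the call sites above (the CLI fallback fixes the field set; `from_file`, `to_file`, and `validate` are used but not shown here). The serde derive and the error types are assumptions:

```rust
// Sketch only: inferred from this commit's call sites, not copied from it.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};

#[derive(Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
    pub redis_url: String,
    pub auth: bool,
    pub tls: bool,
    pub cert: Option<String>,
    pub key: Option<String>,
    pub tls_port: Option<u16>,
    pub webhooks: bool,
    pub circles: HashMap<String, Vec<String>>,
}

impl ServerConfig {
    pub fn from_file(path: &str) -> Result<Self, Box<dyn std::error::Error>> {
        Ok(serde_json::from_str(&std::fs::read_to_string(path)?)?)
    }

    pub fn to_file(&self, path: &str) -> Result<(), Box<dyn std::error::Error>> {
        Ok(std::fs::write(path, serde_json::to_string_pretty(self)?)?)
    }

    pub fn validate(&self) -> Result<(), String> {
        // Mirrors the TLS check this commit removes from main.rs.
        if self.tls && (self.cert.is_none() || self.key.is_none()) {
            return Err("TLS is enabled but certificate or key path is missing".into());
        }
        Ok(())
    }
}
```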
    // Configure logging based on verbosity level
    let log_config = match args.verbose {
        0 => {
@ -78,39 +124,14 @@ async fn main() -> std::io::Result<()> {
        env_logger::init();
    }

-   // Validate TLS configuration
-   if args.tls && (args.cert.is_none() || args.key.is_none()) {
-       eprintln!("Error: TLS is enabled but certificate or key path is missing");
-       eprintln!("Use --cert and --key to specify certificate and key files");
+   // Validate configuration
+   if let Err(e) = config.validate() {
+       eprintln!("❌ Configuration validation failed: {}", e);
        std::process::exit(1);
    }

-   let mut builder = ServerBuilder::new()
-       .host(args.host.clone())
-       .port(args.port)
-       .redis_url(args.redis_url.clone())
-       .worker_id(args.worker_id.clone());
-   if args.auth {
-       builder = builder.with_auth();
-   }
-   if args.tls {
-       if let (Some(cert), Some(key)) = (args.cert.clone(), args.key.clone()) {
-           builder = builder.with_tls(cert, key);
-       } else {
-           eprintln!("Error: TLS is enabled but --cert or --key is missing.");
-           std::process::exit(1);
-       }
-   }
-   if let Some(tls_port) = args.tls_port {
-       builder = builder.with_tls_port(tls_port);
-   }
-   if args.webhooks {
-       builder = builder.with_webhooks();
-   }
+   // Build server from configuration
+   let builder = ServerBuilder::new().from_config(config.clone());

    let server = match builder.build() {
        Ok(server) => server,
@ -122,27 +143,36 @@ async fn main() -> std::io::Result<()> {
println!("🚀 Starting Circles WebSocket Server"); println!("🚀 Starting Circles WebSocket Server");
println!("📋 Configuration:"); println!("📋 Configuration:");
println!(" Host: {}", args.host); println!(" Host: {}", config.host);
println!(" Port: {}", args.port); println!(" Port: {}", config.port);
if let Some(tls_port) = args.tls_port { println!(" Redis URL: {}", config.redis_url);
if let Some(tls_port) = config.tls_port {
println!(" TLS Port: {}", tls_port); println!(" TLS Port: {}", tls_port);
} }
println!(" Authentication: {}", if args.auth { "ENABLED" } else { "DISABLED" }); println!(" Authentication: {}", if config.auth { "ENABLED" } else { "DISABLED" });
println!(" TLS/WSS: {}", if args.tls { "ENABLED" } else { "DISABLED" }); println!(" TLS/WSS: {}", if config.tls { "ENABLED" } else { "DISABLED" });
println!(" Webhooks: {}", if args.webhooks { "ENABLED" } else { "DISABLED" }); println!(" Webhooks: {}", if config.webhooks { "ENABLED" } else { "DISABLED" });
println!(" Circles configured: {}", config.circles.len());
if args.tls { if config.tls {
if let (Some(cert), Some(key)) = (&args.cert, &args.key) { if let (Some(cert), Some(key)) = (&config.cert, &config.key) {
println!(" Certificate: {}", cert); println!(" Certificate: {}", cert);
println!(" Private Key: {}", key); println!(" Private Key: {}", key);
} }
} }
if args.webhooks { if config.webhooks {
println!(" Webhook secrets loaded from environment variables:"); println!(" Webhook secrets loaded from environment variables:");
println!(" - STRIPE_WEBHOOK_SECRET"); println!(" - STRIPE_WEBHOOK_SECRET");
println!(" - IDENFY_WEBHOOK_SECRET"); println!(" - IDENFY_WEBHOOK_SECRET");
} }
if config.auth && !config.circles.is_empty() {
println!(" Configured circles:");
for (circle_name, members) in &config.circles {
println!(" - {}: {} members", circle_name, members.len());
}
}
println!(); println!();
let (server_task, _server_handle) = server.spawn_circle_server()?; let (server_task, _server_handle) = server.spawn_circle_server()?;

View File

@ -90,7 +90,7 @@ sequenceDiagram
    participant HS as HttpServer
    participant WH as Webhook Handler
    participant WV as Webhook Verifier
-   participant RC as RhaiDispatcher
+   participant RC as RhaiSupervisor
    participant Redis as Redis

    WS->>+HS: POST /webhooks/{provider}/{circle_pk}
@ -102,7 +102,7 @@ sequenceDiagram
    alt Signature Valid
        WH->>WH: Parse webhook payload (heromodels types)
-       WH->>+RC: Create RhaiDispatcher with caller_id
+       WH->>+RC: Create RhaiSupervisor with caller_id
        RC->>+Redis: Execute webhook script
        Redis-->>-RC: Script result
        RC-->>-WH: Execution result
@ -128,6 +128,6 @@ sequenceDiagram
| **Connection Type** | Persistent, bidirectional | HTTP request/response |
| **Authentication** | secp256k1 signature-based | HMAC signature verification |
| **State Management** | Stateful sessions via CircleWs actor | Stateless HTTP requests |
-| **Script Execution** | Direct via authenticated session | Via RhaiDispatcher with provider caller_id |
+| **Script Execution** | Direct via authenticated session | Via RhaiSupervisor with provider caller_id |
| **Use Case** | Interactive client applications | External service notifications |
| **Data Types** | JSON-RPC messages | Provider-specific webhook payloads (heromodels) |

View File

@ -19,8 +19,8 @@ graph TB
        E[Webhook Handler]
        F[Stripe Verifier]
        G[iDenfy Verifier]
-       H[Script Dispatcher]
-       I[RhaiDispatcherBuilder]
+       H[Script Supervisor]
+       I[RhaiSupervisorBuilder]
    end

    subgraph "Configuration"
@ -92,8 +92,8 @@ sequenceDiagram
    participant WS as Webhook Service
    participant CS as Circle Server
    participant WV as Webhook Verifier
-   participant SD as Script Dispatcher
-   participant RC as RhaiDispatcher
+   participant SD as Script Supervisor
+   participant RC as RhaiSupervisor
    participant RW as Rhai Worker

    WS->>CS: POST /webhooks/stripe/{circle_pk}
@ -113,7 +113,7 @@ sequenceDiagram
    alt Verification Success
        CS->>SD: Dispatch appropriate script
-       SD->>RC: Create RhaiDispatcherBuilder
+       SD->>RC: Create RhaiSupervisorBuilder
        RC->>RC: Set caller_id="stripe" or "idenfy"
        RC->>RC: Set recipient_id=circle_pk
        RC->>RC: Set script="stripe_webhook_received" or "idenfy_webhook_received"
@ -248,8 +248,8 @@ heromodels/src/models/
### Key Architectural Changes

- **Type Organization**: Webhook payload types moved to `heromodels` library for reusability
- **Modular Handlers**: Separate handler files for each webhook provider
-- **Simplified Architecture**: Removed unnecessary dispatcher complexity
-- **Direct Script Execution**: Handlers directly use `RhaiDispatcher` for script execution
+- **Simplified Architecture**: Removed unnecessary supervisor complexity
+- **Direct Script Execution**: Handlers directly use `RhaiSupervisor` for script execution

### Modified Files

- `src/lib.rs` - Add webhook routes and module imports
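A condensed sketch of that direct-execution path, modeled on the `SupervisorBuilder` chain that appears in the server code later in this commit; the timeout constant, the `"stripe"` caller id, and the error plumbing are illustrative:

```rust
// Sketch of a webhook handler executing a script via the supervisor.
// Builder calls mirror this commit's server code; return/error types assumed.
use std::time::Duration;
use hero_supervisor::{ScriptType, SupervisorBuilder};

async fn run_webhook_script(
    redis_url: &str,
    circle_pk: &str,
    script: &str,
) -> Result<String, Box<dyn std::error::Error>> {
    let supervisor = SupervisorBuilder::new()
        .redis_url(redis_url)
        .caller_id("stripe") // provider-derived caller_id
        .build()?;
    let output = supervisor
        .new_job()
        .context_id(circle_pk)           // the circle's public key
        .script_type(ScriptType::RhaiSAL)
        .script(script)
        .timeout(Duration::from_secs(30)) // stand-in for TASK_TIMEOUT_DURATION
        .await_response()
        .await?;
    Ok(output)
}
```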

View File

@ -1,5 +1,5 @@
use std::collections::HashMap;
-use crate::{Server, TlsConfigError};
+use crate::{Server, TlsConfigError, ServerConfig};

/// ServerBuilder for constructing Server instances with a fluent API
pub struct ServerBuilder {
@ -12,7 +12,7 @@ pub struct ServerBuilder {
    tls_port: Option<u16>,
    enable_auth: bool,
    enable_webhooks: bool,
-   circle_worker_id: String,
    circles: HashMap<String, Vec<String>>,
}
@ -28,7 +28,7 @@ impl ServerBuilder {
            tls_port: None,
            enable_auth: false,
            enable_webhooks: false,
-           circle_worker_id: "default".to_string(),
            circles: HashMap::new(),
        }
    }
@ -48,10 +48,7 @@ impl ServerBuilder {
        self
    }

-   pub fn worker_id(mut self, worker_id: impl Into<String>) -> Self {
-       self.circle_worker_id = worker_id.into();
-       self
-   }

    pub fn with_tls(mut self, cert_path: String, key_path: String) -> Self {
        self.enable_tls = true;
@ -79,6 +76,21 @@ impl ServerBuilder {
        self.circles = circles;
        self
    }
/// Load configuration from a ServerConfig instance
pub fn from_config(mut self, config: ServerConfig) -> Self {
self.host = config.host;
self.port = config.port;
self.redis_url = config.redis_url;
self.enable_auth = config.auth;
self.enable_tls = config.tls;
self.cert_path = config.cert;
self.key_path = config.key;
self.tls_port = config.tls_port;
self.enable_webhooks = config.webhooks;
self.circles = config.circles;
self
}
    pub fn build(self) -> Result<Server, TlsConfigError> {
        Ok(Server {
@ -91,13 +103,13 @@ impl ServerBuilder {
            tls_port: self.tls_port,
            enable_auth: self.enable_auth,
            enable_webhooks: self.enable_webhooks,
-           circle_worker_id: self.circle_worker_id,
            circle_name: "default".to_string(),
            circle_public_key: "default".to_string(),
            circles: self.circles,
            nonce_store: HashMap::new(),
            authenticated_pubkey: None,
-           dispatcher: None,
+           supervisor: None,
        })
    }
}
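Usage then collapses to a single call, as in the reworked `main.rs` above; a minimal sketch (error types assumed to implement `std::error::Error`):

```rust
// Minimal usage sketch of the new from_config entry point; error handling
// is condensed, and the config path is illustrative.
use hero_websocket_server::{ServerBuilder, ServerConfig};

fn build_server() -> Result<(), Box<dyn std::error::Error>> {
    let config = ServerConfig::from_file("config.json")?;
    config.validate()?;
    let _server = ServerBuilder::new().from_config(config).build()?;
    Ok(())
}
```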

View File

@ -1,7 +1,7 @@
use crate::Server;
use actix::prelude::*;
use actix_web_actors::ws;
-use hero_dispatcher::{Dispatcher, ScriptType};
+use hero_supervisor::{Supervisor, ScriptType};
use serde_json::{json, Value};
use std::time::Duration;
@ -82,7 +82,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -90,7 +90,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -102,7 +102,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.start_job(&job_id).await
+           supervisor.start_job(&job_id).await
        };

        ctx.spawn(
@ -190,7 +190,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -198,7 +198,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -210,7 +210,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.get_job_status(&job_id).await
+           supervisor.get_job_status(&job_id).await
        };

        ctx.spawn(
@ -279,7 +279,7 @@ impl Server {
            return;
        }

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -287,7 +287,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -299,7 +299,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.list_jobs().await
+           supervisor.list_jobs().await
        };

        ctx.spawn(
@ -403,7 +403,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -411,7 +411,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -423,7 +423,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher
+           supervisor
                .new_job()
                .context_id(&circle_pk)
                .script_type(ScriptType::RhaiSAL)
@ -518,7 +518,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -526,7 +526,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -538,7 +538,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.get_job_output(&job_id).await
+           supervisor.get_job_output(&job_id).await
        };

        ctx.spawn(
@ -625,7 +625,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -633,7 +633,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -645,7 +645,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.get_job_logs(&job_id).await
+           supervisor.get_job_logs(&job_id).await
        };

        ctx.spawn(
@ -733,7 +733,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -741,7 +741,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -753,7 +753,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.stop_job(&job_id).await
+           supervisor.stop_job(&job_id).await
        };

        ctx.spawn(
@ -840,7 +840,7 @@ impl Server {
            }
        };

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -848,7 +848,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -860,7 +860,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.delete_job(&job_id).await
+           supervisor.delete_job(&job_id).await
        };

        ctx.spawn(
@ -929,7 +929,7 @@ impl Server {
            return;
        }

-       let dispatcher = match self.dispatcher.clone() {
+       let supervisor = match self.supervisor.clone() {
            Some(d) => d,
            None => {
                let err_resp = JsonRpcResponse {
@ -937,7 +937,7 @@ impl Server {
                    result: None,
                    error: Some(JsonRpcError {
                        code: -32603,
-                       message: "Internal error: dispatcher not available".to_string(),
+                       message: "Internal error: supervisor not available".to_string(),
                        data: None,
                    }),
                    id: client_rpc_id,
@ -949,7 +949,7 @@ impl Server {
        let client_rpc_id_clone = client_rpc_id.clone();
        let fut = async move {
-           dispatcher.clear_all_jobs().await
+           supervisor.clear_all_jobs().await
        };

        ctx.spawn(
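Taken together, these handlers each delegate one job-control verb to the supervisor. A sketch of the resulting API surface as collected from the calls above; signatures are paraphrased, not copied:

```rust
// Sketch of the Supervisor job-control surface these handlers delegate to.
// Return types are elided; each call's result is handled by a JSON-RPC reply.
use hero_supervisor::Supervisor;

async fn drive_job(supervisor: &Supervisor, job_id: &str) {
    let _ = supervisor.start_job(job_id).await;      // begin execution
    let _ = supervisor.get_job_status(job_id).await; // poll state
    let _ = supervisor.get_job_output(job_id).await; // final output, if any
    let _ = supervisor.get_job_logs(job_id).await;   // accumulated logs
    let _ = supervisor.stop_job(job_id).await;       // request cancellation
    let _ = supervisor.delete_job(job_id).await;     // drop this job's records
    let _ = supervisor.list_jobs().await;            // enumerate known jobs
    let _ = supervisor.clear_all_jobs().await;       // wipe everything
}
```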

View File

@ -3,7 +3,7 @@ use actix_web::{web, App, Error, HttpRequest, HttpResponse, HttpServer};
use actix_web_actors::ws;
use log::{info, error}; // Added error for better logging
use once_cell::sync::Lazy;
-use hero_dispatcher::{Dispatcher, DispatcherBuilder, DispatcherError};
+use hero_supervisor::{Supervisor, SupervisorBuilder, SupervisorError};
use hero_job::{Job, JobStatus};
use rustls::pki_types::PrivateKeyDer;
use rustls::ServerConfig as RustlsServerConfig;
@ -211,7 +211,7 @@ pub struct Server {
    pub circles: HashMap<String, Vec<String>>,
    nonce_store: HashMap<String, NonceResponse>,
    authenticated_pubkey: Option<String>,
-   pub dispatcher: Option<Dispatcher>,
+   pub supervisor: Option<Supervisor>,
}

impl Server {
@ -552,15 +552,15 @@ impl Server {
        let fut = async move {
            let caller_id = public_key.unwrap_or_else(|| "anonymous".to_string());

-           match DispatcherBuilder::new()
+           match SupervisorBuilder::new()
                .redis_url(&redis_url_clone)
                .caller_id(&caller_id)
                .build() {
-               Ok(hero_dispatcher) => {
-                   hero_dispatcher
+               Ok(hero_supervisor) => {
+                   hero_supervisor
                        .new_job()
                        .context_id(&circle_pk_clone)
-                       .script_type(hero_dispatcher::ScriptType::RhaiSAL)
+                       .script_type(hero_supervisor::ScriptType::RhaiSAL)
                        .script(&script_content)
                        .timeout(TASK_TIMEOUT_DURATION)
                        .await_response()
@ -574,7 +574,7 @@ impl Server {
        fut.into_actor(self)
            .map(move |res, _act, ctx_inner| match res {
                Ok(output) => {
-                   // The dispatcher returns the actual string output from job execution
+                   // The supervisor returns the actual string output from job execution
                    let result_value = PlayResult { output };
                    let resp = JsonRpcResponse {
                        jsonrpc: "2.0".to_string(),
@ -586,7 +586,7 @@ impl Server {
                }
                Err(e) => {
                    let (code, message) = match e {
-                       DispatcherError::Timeout(task_id) => (
+                       SupervisorError::Timeout(task_id) => (
                            -32002,
                            format!(
                                "Timeout waiting for Rhai script (task: {})",