initial commit

Timur Gordon 2025-07-29 01:15:23 +02:00
commit 7d7ff0f0ab
108 changed files with 24713 additions and 0 deletions

3 .gitignore vendored Normal file

@@ -0,0 +1,3 @@
target
*.pem
.env

4207 Cargo.lock generated Normal file

File diff suppressed because it is too large

63 Cargo.toml Normal file

@@ -0,0 +1,63 @@
[package]
name = "hero"
version = "0.0.1"
edition = "2024"
[dependencies]
anyhow = "1.0"
chrono = { version = "0.4", features = ["serde"] }
env_logger = "0.10"
log = "0.4"
redis = { version = "0.25.0", features = ["tokio-comp"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal"] }
rhai = "1.21.0"
[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # For examples like dedicated_reply_queue_demo
tempfile = "3.10"
[[bench]]
name = "simple_rhai_bench"
harness = false
[workspace.dependencies]
actix = "0.13"
actix-web = { version = "4", features = ["rustls-0_23"] }
actix-web-actors = "4.1"
once_cell = "1.19.0"
anyhow = "1.0"
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.5.4", features = ["derive"] }
dotenv = "0.15"
env_logger = "0.10"
futures-channel = { version = "0.3" }
futures-util = { version = "0.3" }
hex = "0.4"
log = "0.4"
rand = "0.8"
redis = { version = "0.25.0", features = ["tokio-comp"] }
rhai = "1.21.0"
secp256k1 = { version = "0.27", features = ["rand-std", "recovery"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha3 = "0.10"
thiserror = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal"] }
url = "2.5"
uuid = { version = "1.6", features = ["v4", "serde"] }
[workspace]
members = [
    "interfaces/unix/client",
    "interfaces/unix/server",
    "interfaces/websocket/client",
    "interfaces/websocket/server",
    "core/dispatcher",
    "core/engine",
    "core/worker",
    "core/job",
    "core/examples",
    "interfaces/websocket/examples",
    "proxies/http",
]
resolver = "2" # Recommended for new workspaces

40 README.md Normal file

@@ -0,0 +1,40 @@
# Hero
Hero is a program that runs scripts in contexts on behalf of a peer. It aims to support a language expressive enough to cover all of one's digital actions; as such, Hero can become a tool of digital sovereignty, allowing people and groups to own their structured data and the functionality that acts on it.
## Terminology
| Term | Definition |
|---------------|-----------------------------------------------------------------------------|
| Hero | A program that runs scripts in various contexts on behalf of a peer. |
| Heroscript | A script that runs in a confined environment on behalf of a peer. |
| Rhai | A scripting language integrated into the Hero environment for script execution. |
## Core
At its core, a [dispatcher](#dispatcher) dispatches jobs that execute scripts to [workers](#worker) over Redis. Workers spawn appropriate engine instances to execute scripts within the confines defined by the job.
### Components
#### [Dispatcher](./core/dispatcher)
Component responsible for distributing jobs to workers over Redis.
#### [Engine](./core/engine)
A process that runs a script in a confined environment.
#### [Job](./core/job)
A unit of work that executes a Rhai or Hero script.
#### [Worker](./core/worker)
An entity that processes jobs dispatched by the dispatcher.
## Interfaces
### Websocket
### Unix


@@ -0,0 +1,71 @@
# Minimal Rhailib Benchmark
A simplified, minimal benchmarking tool for rhailib performance testing.
## Overview
This benchmark focuses on simplicity and direct timing measurements:
- Creates a single task (n=1) using a Lua script
- Measures latency using Redis timestamps
- Uses the existing worker binary
- ~85 lines of code total
## Usage
### Prerequisites
- Redis running on `127.0.0.1:6379`
- Worker binary built: `cd src/worker && cargo build --release`
### Run Benchmark
```bash
# From project root
cargo bench
```
### Expected Output
```
🧹 Cleaning up Redis...
🚀 Starting worker...
📝 Creating single task...
⏱️ Waiting for completion...
✅ Task completed in 23.45ms
🧹 Cleaning up...
```
## Files
- `simple_bench.rs` - Main benchmark binary (85 lines)
- `batch_task.lua` - Minimal Lua script for task creation (28 lines)
- `Cargo.toml` - Dependencies and binary configuration
- `README.md` - This file
## How It Works
1. **Cleanup**: Clear Redis queues and task details
2. **Start Worker**: Spawn single worker process
3. **Create Task**: Use Lua script to create one task with timestamp
4. **Wait & Measure**: Poll task until complete, calculate latency
5. **Cleanup**: Kill worker and clear Redis
## Latency Calculation
```
latency_ms = (updated_at - created_at) * 1000
```
Where:
- `created_at`: Unix timestamp (seconds) recorded when the task is created (Lua script)
- `updated_at`: Unix timestamp (seconds) recorded when the worker completes the task
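A minimal sketch of reading this back with the `redis` crate (the field names match what `batch_task.lua` writes; the helper itself is illustrative):
```rust
use redis::Commands;

/// Read one task's latency from its details hash.
/// Timestamps are stored by batch_task.lua in Unix seconds.
fn task_latency_ms(conn: &mut redis::Connection, task_key: &str) -> redis::RedisResult<f64> {
    let created_at: u64 = conn.hget(task_key, "createdAt")?;
    let updated_at: u64 = conn.hget(task_key, "updatedAt")?;
    Ok((updated_at - created_at) as f64 * 1000.0)
}
```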
## Future Iterations
- **Iteration 2**: Small batches (n=5, n=10)
- **Iteration 3**: Larger batches and script complexity
- **Iteration 4**: Performance optimizations
## Benefits
- **Easy to Understand**: Single file, linear flow
- **Direct Timing**: Redis timestamps, no complex stats
- **Fast to Modify**: No abstractions or frameworks
- **Reliable**: Simple Redis operations


@@ -0,0 +1,46 @@
-- Minimal Lua script for batch task creation (task_count defaults to 1)
-- Args: circle_name, rhai_script_content, task_count (optional, defaults to 1)
-- Returns: array of task keys for timing
if #ARGV < 2 then
return redis.error_reply("Usage: EVAL script 0 circle_name rhai_script_content [task_count]")
end
local circle_name = ARGV[1]
local rhai_script_content = ARGV[2]
local task_count = tonumber(ARGV[3]) or 1
-- Validate task_count
if task_count <= 0 or task_count > 10000 then
return redis.error_reply("task_count must be a positive integer between 1 and 10000")
end
-- Get current timestamp in Unix seconds (to match worker expectations)
local rhai_task_queue = 'rhai_tasks:' .. circle_name
local task_keys = {}
local current_time = redis.call('TIME')[1]
-- Create multiple tasks
for i = 1, task_count do
-- Generate unique task ID
local task_id = 'task_' .. redis.call('INCR', 'global_task_counter')
local task_details_key = 'rhai_task_details:' .. task_id
-- Create task details hash with creation timestamp
redis.call('HSET', task_details_key,
'script', rhai_script_content,
'status', 'pending',
'createdAt', current_time,
'updatedAt', current_time,
'task_sequence', tostring(i)
)
-- Queue the task for workers
redis.call('LPUSH', rhai_task_queue, task_id)
-- Add key to return array
table.insert(task_keys, task_details_key)
end
-- Return array of task keys for timing analysis
return task_keys


@@ -0,0 +1,183 @@
use criterion::{criterion_group, criterion_main, Criterion};
use redis::{Client, Commands};
use std::fs;
use std::process::{Child, Command, Stdio};
use std::thread;
use std::time::Duration;
const REDIS_URL: &str = "redis://127.0.0.1:6379";
const CIRCLE_NAME: &str = "bench_circle";
const SIMPLE_SCRIPT: &str = "new_event()\n .title(\"Weekly Sync\")\n .location(\"Conference Room A\")\n .description(\"Regular team sync meeting\")\n .save_event();";
fn cleanup_redis() -> Result<(), redis::RedisError> {
let client = Client::open(REDIS_URL)?;
let mut conn = client.get_connection()?;
// Clear task queue and any existing task details
let _: () = conn.del(format!("rhai_tasks:{}", CIRCLE_NAME))?;
let keys: Vec<String> = conn.scan_match("rhai_task_details:*")?.collect();
if !keys.is_empty() {
let _: () = conn.del(keys)?;
}
Ok(())
}
fn start_worker() -> Result<Child, std::io::Error> {
Command::new("cargo")
.args(&[
"run",
"--release",
"--bin",
"worker",
"--",
"--circle",
CIRCLE_NAME,
"--redis-url",
REDIS_URL,
"--worker-id",
"bench_worker",
"--preserve-tasks",
])
.current_dir("src/worker")
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn()
}
fn create_batch_tasks(task_count: usize) -> Result<Vec<String>, Box<dyn std::error::Error>> {
let client = Client::open(REDIS_URL)?;
let mut conn = client.get_connection()?;
// Load and execute Lua script
let lua_script = fs::read_to_string("benches/simple_rhai_bench/batch_task.lua")?;
let result: redis::Value = redis::cmd("EVAL")
.arg(lua_script)
.arg(0)
.arg(CIRCLE_NAME)
.arg(SIMPLE_SCRIPT)
.arg(task_count)
.query(&mut conn)?;
// Parse the task keys from the response
let task_keys = match result {
redis::Value::Bulk(items) => {
let mut keys = Vec::new();
for item in items {
if let redis::Value::Data(key_data) = item {
keys.push(String::from_utf8_lossy(&key_data).to_string());
}
}
keys
}
_ => {
return Err(format!("Unexpected Redis response type: {:?}", result).into());
}
};
Ok(task_keys)
}
fn wait_for_batch_completion(task_keys: &[String]) -> Result<f64, Box<dyn std::error::Error>> {
let client = Client::open(REDIS_URL)?;
let mut conn = client.get_connection()?;
let start_time = std::time::Instant::now();
let timeout = Duration::from_secs(30);
// Wait for all tasks to complete
loop {
let mut completed_count = 0;
let mut total_latency = 0u64;
for task_key in task_keys {
let status: Option<String> = conn.hget(task_key, "status")?;
match status.as_deref() {
Some("completed") | Some("error") => {
completed_count += 1;
// Get timing data
let created_at: u64 = conn.hget(task_key, "createdAt")?;
let updated_at: u64 = conn.hget(task_key, "updatedAt")?;
total_latency += updated_at - created_at;
}
_ => {} // Still pending or processing
}
}
if completed_count == task_keys.len() {
// All tasks completed, calculate average latency in milliseconds
let avg_latency_ms = (total_latency as f64 / task_keys.len() as f64) * 1000.0;
return Ok(avg_latency_ms);
}
// Check timeout
if start_time.elapsed() > timeout {
return Err(format!(
"Timeout waiting for batch completion. Completed: {}/{}",
completed_count,
task_keys.len()
)
.into());
}
thread::sleep(Duration::from_millis(100));
}
}
fn cleanup_worker(mut worker: Child) -> Result<(), std::io::Error> {
worker.kill()?;
worker.wait()?;
Ok(())
}
fn bench_single_rhai_task(c: &mut Criterion) {
// Setup: ensure worker is built
let _ = Command::new("cargo")
.args(&["build", "--release", "--bin", "worker"])
.current_dir("src/worker")
.output()
.expect("Failed to build worker");
// Clean up before starting
cleanup_redis().expect("Failed to cleanup Redis");
// Start worker once and reuse it
let worker = start_worker().expect("Failed to start worker");
thread::sleep(Duration::from_millis(1000)); // Give worker time to start
let mut group = c.benchmark_group("rhai_task_execution");
group.sample_size(10); // Reduce sample size
group.measurement_time(Duration::from_secs(10)); // Reduce measurement time
group.bench_function("batch_task_latency", |b| {
b.iter_custom(|iters| {
let mut total_latency = Duration::ZERO;
for _i in 0..iters {
// Clean up Redis between iterations
cleanup_redis().expect("Failed to cleanup Redis");
// Create a batch of tasks and measure average latency using Redis timestamps
let task_keys = create_batch_tasks(5000).expect("Failed to create batch tasks");
let avg_latency_ms = wait_for_batch_completion(&task_keys)
.expect("Failed to measure batch completion");
// Convert average latency to duration
total_latency += Duration::from_millis(avg_latency_ms as u64);
}
total_latency
});
});
group.finish();
// Cleanup worker
cleanup_worker(worker).expect("Failed to cleanup worker");
cleanup_redis().expect("Failed to cleanup Redis");
}
criterion_group!(benches, bench_single_rhai_task);
criterion_main!(benches);

1 core/dispatcher/.gitignore vendored Normal file

@@ -0,0 +1 @@
/target


@@ -0,0 +1,25 @@
[package]
name = "hero_dispatcher"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "dispatcher"
path = "cmd/dispatcher.rs"
[dependencies]
clap = { version = "4.4", features = ["derive"] }
env_logger = "0.10"
redis = { version = "0.25.0", features = ["tokio-comp"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uuid = { version = "1.6", features = ["v4", "serde"] }
chrono = { version = "0.4", features = ["serde"] }
log = "0.4"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] } # For async main in examples, and general async
colored = "2.0"
hero_job = { path = "../job" }
[dev-dependencies] # For examples later
env_logger = "0.10"
rhai = "1.18.0" # For examples that might need to show engine setup

128 core/dispatcher/README.md Normal file

@@ -0,0 +1,128 @@
# Hero Dispatcher
A Redis-based job dispatcher for managing Rhai/HeroScript execution across distributed workers.
## Overview
The Hero Dispatcher provides a robust job queue system where:
- **Jobs** represent script execution requests (Rhai or HeroScript)
- **Creating a job** stores the job parameters in Redis as a hash (`HSET`)
- **Submitting a job** pushes the job ID to a worker's queue
- **Running a job** creates, submits, and awaits results on a dedicated reply queue
## Key Features
- **Asynchronous Operations**: Built with `tokio` for non-blocking I/O
- **Request-Reply Pattern**: Submit jobs and await results without polling
- **Configurable Jobs**: Set timeouts, retries, concurrency, and logging options
- **Worker Targeting**: Direct job routing to specific worker queues
- **Job Lifecycle**: Create, submit, monitor status, and retrieve results
## Core Components
### `DispatcherBuilder`
Builder for creating `Dispatcher` instances with caller ID, worker ID, context ID, and Redis URL.
### `Dispatcher`
Main interface for job management:
- `new_job()` - Create a new `JobBuilder`
- `create_job()` - Store job in Redis
- `run_job_and_await_result()` - Execute job and wait for completion
- `get_job_status()` - Check job execution status
- `get_job_output()` - Retrieve job results
### `JobBuilder`
Fluent builder for configuring jobs:
- `script()` - Set the script content
- `worker_id()` - Target specific worker
- `timeout()` - Set execution timeout
- `build()` - Create the job
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
### `Job`
Represents a script execution request with:
- Unique ID and timestamps
- Script content and target worker
- Execution settings (timeout, retries, concurrency)
- Logging configuration
## Redis Schema
Jobs are stored using the `hero:` namespace:
- `hero:job:{job_id}` - Job parameters as Redis hash
- `hero:work_queue:{worker_id}` - Worker-specific job queues
- `hero:reply:{job_id}` - Dedicated reply queues for results
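As an illustration (a sketch using the `redis` crate; key names follow the schema above), a stored job and its queue can be inspected like this:
```rust
use redis::Commands;
use std::collections::HashMap;

fn inspect_job(redis_url: &str, job_id: &str, worker_id: &str) -> redis::RedisResult<()> {
    let client = redis::Client::open(redis_url)?;
    let mut conn = client.get_connection()?;
    // Job parameters live in a hash under the hero: namespace.
    let fields: HashMap<String, String> = conn.hgetall(format!("hero:job:{}", job_id))?;
    println!("job {job_id}: {fields:?}");
    // The worker queue is a plain list of pending job IDs.
    let depth: i64 = conn.llen(format!("hero:work_queue:{}", worker_id))?;
    println!("queue depth for {worker_id}: {depth}");
    Ok(())
}
```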
## Prerequisites
- Redis server accessible by dispatcher and workers
## Usage Example
### Basic Job Creation and Submission
```rust
use hero_dispatcher::{DispatcherBuilder, DispatcherError};
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create dispatcher
    let dispatcher = DispatcherBuilder::new()
        .caller_id("my-app")
        .worker_id("worker-1")
        .context_id("my-context")
        .redis_url("redis://127.0.0.1:6379")
        .build()?;

    // Create a job
    let job = dispatcher
        .new_job()
        .script(r#"print("Hello from worker!"); "success""#)
        .timeout(Duration::from_secs(30))
        .build()?;

    // Store job in Redis
    dispatcher.create_job(&job).await?;
    println!("Job {} created and stored in Redis", job.id);

    // Run job and await result (requires worker)
    match dispatcher.run_job_and_await_result(&job, "worker-1".to_string()).await {
        Ok(result) => println!("Job completed: {}", result),
        Err(DispatcherError::Timeout(_)) => println!("Job timed out"),
        Err(e) => println!("Job failed: {}", e),
    }

    Ok(())
}
```
### Job Status Monitoring
```rust
// Check job status
match dispatcher.get_job_status(&job.id).await {
    Ok(status) => println!("Job status: {:?}", status),
    Err(e) => println!("Error getting status: {}", e),
}

// Get job output
match dispatcher.get_job_output(&job.id).await {
    Ok(output) => println!("Job output: {:?}", output),
    Err(e) => println!("Error getting output: {}", e),
}
```
## Examples
Run the comprehensive demo to see dispatcher functionality and Redis entries:
```bash
cargo run --example dispatcher_demo
```
Other examples:
- `timeout_example.rs` - Demonstrates timeout handling
Ensure Redis is running at `redis://127.0.0.1:6379`.


@@ -0,0 +1,157 @@
# Rhai Client Binary
A command-line client for executing Rhai scripts on remote workers via Redis.
## Binary: `client`
### Installation
Build the binary:
```bash
cargo build --bin client --release
```
### Usage
```bash
# Basic usage - requires caller and circle keys
client --caller-key <CALLER_KEY> --circle-key <CIRCLE_KEY>
# Execute inline script
client -c <CALLER_KEY> -k <CIRCLE_KEY> --script "print('Hello World!')"
# Execute script from file
client -c <CALLER_KEY> -k <CIRCLE_KEY> --file script.rhai
# Use specific worker (defaults to circle key)
client -c <CALLER_KEY> -k <CIRCLE_KEY> -w <WORKER_KEY> --script "2 + 2"
# Custom Redis and timeout
client -c <CALLER_KEY> -k <CIRCLE_KEY> --redis-url redis://localhost:6379/1 --timeout 60
# Remove timestamps from logs
client -c <CALLER_KEY> -k <CIRCLE_KEY> --no-timestamp
# Increase verbosity
client -c <CALLER_KEY> -k <CIRCLE_KEY> -v --script "debug_info()"
```
### Command-Line Options
| Option | Short | Default | Description |
|--------|-------|---------|-------------|
| `--caller-key` | `-c` | **Required** | Caller public key (your identity) |
| `--circle-key` | `-k` | **Required** | Circle public key (execution context) |
| `--worker-key` | `-w` | `circle-key` | Worker public key (target worker) |
| `--redis-url` | `-r` | `redis://localhost:6379` | Redis connection URL |
| `--script` | `-s` | | Rhai script to execute |
| `--file` | `-f` | | Path to Rhai script file |
| `--timeout` | `-t` | `30` | Timeout for script execution (seconds) |
| `--no-timestamp` | | `false` | Remove timestamps from log output |
| `--verbose` | `-v` | | Increase verbosity (stackable) |
### Execution Modes
#### Inline Script Execution
```bash
# Execute a simple calculation
client -c caller_123 -k circle_456 -s "let result = 2 + 2; print(result);"
# Execute with specific worker
client -c caller_123 -k circle_456 -w worker_789 -s "get_user_data()"
```
#### Script File Execution
```bash
# Execute script from file
client -c caller_123 -k circle_456 -f examples/data_processing.rhai
# Execute with custom timeout
client -c caller_123 -k circle_456 -f long_running_script.rhai -t 120
```
#### Interactive Mode
```bash
# Enter interactive REPL mode (when no script or file provided)
client -c caller_123 -k circle_456
# Interactive mode with verbose logging
client -c caller_123 -k circle_456 -v --no-timestamp
```
### Interactive Mode
When no script (`-s`) or file (`-f`) is provided, the client enters interactive mode:
```
🔗 Starting Rhai Client
📋 Configuration:
Caller Key: caller_123
Circle Key: circle_456
Worker Key: circle_456
Redis URL: redis://localhost:6379
Timeout: 30s
✅ Connected to Redis at redis://localhost:6379
🎮 Entering interactive mode
Type Rhai scripts and press Enter to execute. Type 'exit' or 'quit' to close.
rhai> let x = 42; print(x);
Status: completed
Output: 42
rhai> exit
👋 Goodbye!
```
### Configuration Examples
#### Development Usage
```bash
# Simple development client
client -c dev_user -k dev_circle
# Development with clean logs
client -c dev_user -k dev_circle --no-timestamp -v
```
#### Production Usage
```bash
# Production client with specific worker
client \
--caller-key prod_user_123 \
--circle-key prod_circle_456 \
--worker-key prod_worker_789 \
--redis-url redis://redis-cluster:6379/0 \
--timeout 300 \
--file production_script.rhai
```
#### Batch Processing
```bash
# Process multiple scripts
for script in scripts/*.rhai; do
client -c batch_user -k batch_circle -f "$script" --no-timestamp
done
```
### Key Concepts
- **Caller Key**: Your identity - used for authentication and tracking
- **Circle Key**: Execution context - defines the environment/permissions
- **Worker Key**: Target worker - which worker should execute the script (defaults to circle key)
### Error Handling
The client provides clear error messages for:
- Missing required keys
- Redis connection failures
- Script execution timeouts
- Worker unavailability
- Script syntax errors
### Dependencies
- `rhai_dispatcher`: Core client library for Redis-based script execution
- `redis`: Redis client for task queue communication
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure
- `tokio`: Async runtime


@@ -0,0 +1,271 @@
use clap::Parser;
use hero_dispatcher::{Dispatcher, DispatcherBuilder, ScriptType};
use log::{error, info};
use colored::Colorize;
use std::io::{self, Write};
use std::time::Duration;
#[derive(Parser, Debug)]
#[command(author, version, about = "Rhai Client - Script execution client", long_about = None)]
struct Args {
/// Caller ID (your identity)
#[arg(short = 'c', long = "caller-id", help = "Caller ID (your identity)")]
caller_id: String,
/// Context ID (execution context)
#[arg(short = 'k', long = "context-id", help = "Context ID (execution context)")]
context_id: String,
/// Script type to execute (heroscript, rhai-sal, rhai-dsl)
#[arg(short = 'T', long = "script-type", default_value = "heroscript", help = "Script type: heroscript, rhai-sal, or rhai-dsl")]
script_type: String,
/// HeroScript workers (comma-separated)
#[arg(long = "hero-workers", default_value = "hero-worker-1", help = "HeroScript worker IDs (comma-separated)")]
hero_workers: String,
/// Rhai SAL workers (comma-separated)
#[arg(long = "rhai-sal-workers", default_value = "rhai-sal-worker-1", help = "Rhai SAL worker IDs (comma-separated)")]
rhai_sal_workers: String,
/// Rhai DSL workers (comma-separated)
#[arg(long = "rhai-dsl-workers", default_value = "rhai-dsl-worker-1", help = "Rhai DSL worker IDs (comma-separated)")]
rhai_dsl_workers: String,
/// Redis URL
#[arg(short, long, default_value = "redis://localhost:6379", help = "Redis connection URL")]
redis_url: String,
/// Rhai script to execute
#[arg(short, long, help = "Rhai script to execute")]
script: Option<String>,
/// Path to Rhai script file
#[arg(short, long, help = "Path to Rhai script file")]
file: Option<String>,
/// Timeout for script execution (in seconds)
#[arg(short, long, default_value = "30", help = "Timeout for script execution in seconds")]
timeout: u64,
/// Increase verbosity (can be used multiple times)
#[arg(short, long, action = clap::ArgAction::Count, help = "Increase verbosity (-v for debug, -vv for trace)")]
verbose: u8,
/// Disable timestamps in log output
#[arg(long, help = "Remove timestamps from log output")]
no_timestamp: bool,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
// Configure logging based on verbosity level
let log_config = match args.verbose {
0 => "warn,hero_dispatcher=warn",
1 => "info,hero_dispatcher=info",
2 => "debug,hero_dispatcher=debug",
_ => "trace,hero_dispatcher=trace",
};
std::env::set_var("RUST_LOG", log_config);
// Configure env_logger with or without timestamps
if args.no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
// Parse worker lists
let hero_workers: Vec<String> = args.hero_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
let rhai_sal_workers: Vec<String> = args.rhai_sal_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
let rhai_dsl_workers: Vec<String> = args.rhai_dsl_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
// Validate that at least one worker is provided for the selected script type
match args.script_type.to_lowercase().as_str() {
"heroscript" => {
if hero_workers.is_empty() {
error!("❌ No HeroScript workers provided. Use --hero-workers to specify at least one worker.");
return Err("At least one HeroScript worker must be provided".into());
}
}
"rhai-sal" => {
if rhai_sal_workers.is_empty() {
error!("❌ No Rhai SAL workers provided. Use --rhai-sal-workers to specify at least one worker.");
return Err("At least one Rhai SAL worker must be provided".into());
}
}
"rhai-dsl" => {
if rhai_dsl_workers.is_empty() {
error!("❌ No Rhai DSL workers provided. Use --rhai-dsl-workers to specify at least one worker.");
return Err("At least one Rhai DSL worker must be provided".into());
}
}
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", args.script_type);
return Err(format!("Invalid script type: {}", args.script_type).into());
}
}
if args.verbose > 0 {
info!("🔗 Starting Hero Dispatcher");
info!("📋 Configuration:");
info!(" Caller ID: {}", args.caller_id);
info!(" Context ID: {}", args.context_id);
info!(" Script Type: {}", args.script_type);
info!(" HeroScript Workers: {:?}", hero_workers);
info!(" Rhai SAL Workers: {:?}", rhai_sal_workers);
info!(" Rhai DSL Workers: {:?}", rhai_dsl_workers);
info!(" Redis URL: {}", args.redis_url);
info!(" Timeout: {}s", args.timeout);
info!("");
}
// Create the dispatcher client
let client = DispatcherBuilder::new()
.caller_id(&args.caller_id)
.context_id(&args.context_id)
.heroscript_workers(hero_workers)
.rhai_sal_workers(rhai_sal_workers)
.rhai_dsl_workers(rhai_dsl_workers)
.redis_url(&args.redis_url)
.build()?;
if args.verbose > 0 {
info!("✅ Connected to Redis at {}", args.redis_url);
}
// Determine execution mode
if let Some(script_content) = args.script {
// Execute inline script
if args.verbose > 0 {
info!("📜 Executing inline script");
}
execute_script(&client, script_content, &args.script_type, args.timeout).await?;
} else if let Some(file_path) = args.file {
// Execute script from file
if args.verbose > 0 {
info!("📁 Loading script from file: {}", file_path);
}
let script_content = std::fs::read_to_string(&file_path)
.map_err(|e| format!("Failed to read script file '{}': {}", file_path, e))?;
execute_script(&client, script_content, &args.script_type, args.timeout).await?;
} else {
// Interactive mode
info!("🎮 Entering interactive mode");
info!("Type Rhai scripts and press Enter to execute. Type 'exit' or 'quit' to close.");
run_interactive_mode(&client, &args.script_type, args.timeout, args.verbose).await?;
}
Ok(())
}
async fn execute_script(
client: &Dispatcher,
script: String,
script_type_str: &str,
timeout_secs: u64,
) -> Result<(), Box<dyn std::error::Error>> {
info!("⚡ Executing script: {:.50}...", script);
// Parse script type
let script_type = match script_type_str.to_lowercase().as_str() {
"heroscript" => ScriptType::HeroScript,
"rhai-sal" => ScriptType::RhaiSAL,
"rhai-dsl" => ScriptType::RhaiDSL,
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", script_type_str);
return Err(format!("Invalid script type: {}", script_type_str).into());
}
};
let timeout = Duration::from_secs(timeout_secs);
match client
.new_job()
.script_type(script_type)
.script(&script)
.timeout(timeout)
.await_response()
.await
{
Ok(result) => {
info!("✅ Script execution completed");
println!("{}", "Result:".green().bold());
println!("{}", result);
}
Err(e) => {
error!("❌ Script execution failed: {}", e);
return Err(Box::new(e));
}
}
Ok(())
}
async fn run_interactive_mode(
client: &Dispatcher,
script_type_str: &str,
timeout_secs: u64,
verbose: u8,
) -> Result<(), Box<dyn std::error::Error>> {
// Parse script type
let script_type = match script_type_str.to_lowercase().as_str() {
"heroscript" => ScriptType::HeroScript,
"rhai-sal" => ScriptType::RhaiSAL,
"rhai-dsl" => ScriptType::RhaiDSL,
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", script_type_str);
return Err(format!("Invalid script type: {}", script_type_str).into());
}
};
let timeout = Duration::from_secs(timeout_secs);
loop {
print!("rhai> ");
io::stdout().flush()?;
let mut input = String::new();
io::stdin().read_line(&mut input)?;
let input = input.trim();
if input.is_empty() {
continue;
}
if input == "exit" || input == "quit" {
info!("👋 Goodbye!");
break;
}
if verbose > 0 {
info!("⚡ Executing: {}", input);
}
match client
.new_job()
.script_type(script_type.clone())
.script(input)
.timeout(timeout)
.await_response()
.await
{
Ok(result) => {
println!("{}", result.green());
}
Err(e) => {
println!("{}", format!("error: {}", e).red());
}
}
println!(); // Add blank line for readability
}
Ok(())
}


@@ -0,0 +1,190 @@
# Architecture of the `rhai_dispatcher` Crate
The `rhai_dispatcher` crate provides a Redis-based client library for submitting Rhai scripts to distributed worker services and awaiting their execution results. It implements a request-reply pattern using Redis as the message broker.
## Core Architecture
The client follows a builder pattern design with clear separation of concerns:
```mermaid
graph TD
A[RhaiDispatcherBuilder] --> B[RhaiDispatcher]
B --> C[PlayRequestBuilder]
C --> D[PlayRequest]
D --> E[Redis Task Queue]
E --> F[Worker Service]
F --> G[Redis Reply Queue]
G --> H[Client Response]
subgraph "Client Components"
A
B
C
D
end
subgraph "Redis Infrastructure"
E
G
end
subgraph "External Services"
F
end
```
## Key Components
### 1. RhaiDispatcherBuilder
A builder pattern implementation for constructing `RhaiDispatcher` instances with proper configuration validation.
**Responsibilities:**
- Configure Redis connection URL
- Set caller ID for task attribution
- Validate configuration before building client
**Key Methods:**
- `caller_id(id: &str)` - Sets the caller identifier
- `redis_url(url: &str)` - Configures Redis connection
- `build()` - Creates the final `RhaiDispatcher` instance
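A construction sketch using these methods (the import path and `new()` constructor are assumptions based on this document):
```rust
// Sketch only: the exact paths and constructor are assumed from this document.
use rhai_dispatcher::{RhaiDispatcher, RhaiDispatcherBuilder};

fn build_client() -> Result<RhaiDispatcher, Box<dyn std::error::Error>> {
    let client = RhaiDispatcherBuilder::new()
        .caller_id("my-app")             // attribute tasks to this caller
        .redis_url("redis://127.0.0.1/") // Redis broker
        .build()?;                       // validates the configuration
    Ok(client)
}
```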
### 2. RhaiDispatcher
The main client interface that manages Redis connections and provides factory methods for creating play requests.
**Responsibilities:**
- Maintain Redis connection pool
- Provide factory methods for request builders
- Handle low-level Redis operations
- Manage task status queries
**Key Methods:**
- `new_play_request()` - Creates a new `PlayRequestBuilder`
- `get_task_status(task_id)` - Queries task status from Redis
- Internal methods for Redis operations
### 3. PlayRequestBuilder
A fluent builder for constructing and submitting script execution requests.
**Responsibilities:**
- Configure script execution parameters
- Handle script loading from files or strings
- Manage request timeouts
- Provide submission methods (fire-and-forget vs await-response)
**Key Methods:**
- `worker_id(id: &str)` - Target worker queue (determines which worker processes the task)
- `context_id(id: &str)` - Target context ID (determines execution context/circle)
- `script(content: &str)` - Set script content directly
- `script_path(path: &str)` - Load script from file
- `timeout(duration: Duration)` - Set execution timeout
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
**Architecture Note:** The decoupling of `worker_id` and `context_id` allows a single worker to process tasks for multiple contexts (circles), providing greater deployment flexibility.
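For example, one request might be configured as follows (a sketch; the method names follow the list above, and the import path and error type are assumptions):
```rust
use std::time::Duration;
// Sketch only: import paths and return types are assumed from this document.
use rhai_dispatcher::{RhaiDispatcher, RhaiDispatcherError};

async fn run_in_circle(client: &RhaiDispatcher) -> Result<String, RhaiDispatcherError> {
    client
        .new_play_request()
        .worker_id("worker-1")            // queue that receives the task
        .context_id("circle-a")           // context (circle) the script runs in
        .script("let x = 40; x + 2")
        .timeout(Duration::from_secs(10))
        .await_response()                 // submit and wait for the result
        .await
}
```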
### 4. Data Structures
#### RhaiTaskDetails
Represents the complete state of a task throughout its lifecycle.
```rust
pub struct RhaiTaskDetails {
    pub task_id: String,
    pub script: String,
    pub status: String, // "pending", "processing", "completed", "error"
    pub output: Option<String>,
    pub error: Option<String>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub caller_id: String,
}
```
#### RhaiDispatcherError
Comprehensive error handling for various failure scenarios:
- `RedisError` - Redis connection/operation failures
- `SerializationError` - JSON serialization/deserialization issues
- `Timeout` - Task execution timeouts
- `TaskNotFound` - Missing tasks after submission
## Communication Protocol
### Task Submission Flow
1. **Task Creation**: Client generates unique UUID for task identification
2. **Task Storage**: Task details stored in Redis hash: `rhailib:<task_id>`
3. **Queue Submission**: Task ID pushed to worker queue: `rhailib:<worker_id>`
4. **Reply Queue Setup**: Client listens on: `rhailib:reply:<task_id>`
### Redis Key Patterns
- **Task Storage**: `rhailib:<task_id>` (Redis Hash)
- **Worker Queues**: `rhailib:<worker_id>` (Redis List)
- **Reply Queues**: `rhailib:reply:<task_id>` (Redis List)
### Message Flow Diagram
```mermaid
sequenceDiagram
participant C as Client
participant R as Redis
participant W as Worker
C->>R: HSET rhailib:task_id (task details)
C->>R: LPUSH rhailib:worker_id task_id
C->>R: BLPOP rhailib:reply:task_id (blocking)
W->>R: BRPOP rhailib:worker_id (blocking)
W->>W: Execute Rhai Script
W->>R: LPUSH rhailib:reply:task_id (result)
R->>C: Return result from BLPOP
C->>R: DEL rhailib:reply:task_id (cleanup)
```
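A minimal client-side sketch of this flow using the `redis` crate (key names follow the patterns above; the task hash is reduced to two fields, and the `f64` blocking timeout matches redis 0.25):
```rust
use redis::AsyncCommands;

/// Sketch of the submit-and-await flow; error handling and hash fields are abbreviated.
async fn submit_and_await(
    conn: &mut redis::aio::MultiplexedConnection,
    worker_id: &str,
    task_id: &str,
    script: &str,
    timeout_secs: f64,
) -> redis::RedisResult<Option<String>> {
    // 1. Store the task details hash.
    let _: () = conn
        .hset_multiple(
            format!("rhailib:{task_id}"),
            &[("script", script), ("status", "pending")],
        )
        .await?;
    // 2. Push the task ID onto the worker's queue.
    let _: () = conn.lpush(format!("rhailib:{worker_id}"), task_id).await?;
    // 3. Block on the dedicated reply queue; None means we timed out.
    let reply: Option<(String, String)> = conn
        .blpop(format!("rhailib:reply:{task_id}"), timeout_secs)
        .await?;
    // 4. Clean up the reply queue.
    let _: () = conn.del(format!("rhailib:reply:{task_id}")).await?;
    Ok(reply.map(|(_queue, result)| result))
}
```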
## Concurrency and Async Design
The client is built on `tokio` for asynchronous operations:
- **Connection Pooling**: Uses Redis multiplexed connections for efficiency
- **Non-blocking Operations**: All Redis operations are async
- **Timeout Handling**: Configurable timeouts with proper cleanup
- **Error Propagation**: Comprehensive error handling with context
## Configuration and Deployment
### Prerequisites
- Redis server accessible to both client and workers
- Proper network connectivity between components
- Sufficient Redis memory for task storage
### Configuration Options
- **Redis URL**: Connection string for Redis instance
- **Caller ID**: Unique identifier for client instance
- **Timeouts**: Per-request timeout configuration
- **Worker Targeting**: Direct worker queue addressing
## Security Considerations
- **Task Isolation**: Each task uses unique identifiers
- **Queue Separation**: Worker-specific queues prevent cross-contamination
- **Cleanup**: Automatic cleanup of reply queues after completion
- **Error Handling**: Secure error propagation without sensitive data leakage
## Performance Characteristics
- **Scalability**: Horizontal scaling through multiple worker instances
- **Throughput**: Limited by Redis performance and network latency
- **Memory Usage**: Efficient with connection pooling and cleanup
- **Latency**: Low latency for local Redis deployments
## Integration Points
The client integrates with:
- **Worker Services**: Via Redis queue protocol
- **Monitoring Systems**: Through structured logging
- **Application Code**: Via builder pattern API
- **Configuration Systems**: Through environment variables and builders


@@ -0,0 +1,272 @@
# Hero Dispatcher Protocol
This document describes the Redis-based protocol used by the Hero Dispatcher for job management and worker communication.
## Overview
The Hero Dispatcher uses Redis as a message broker and data store for managing distributed job execution. Jobs are stored as Redis hashes, and communication with workers happens through Redis lists (queues).
## Redis Namespace
All dispatcher-related keys use the `hero:` namespace prefix to avoid conflicts with other Redis usage.
## Data Structures
### Job Storage
Jobs are stored as Redis hashes with the following key pattern:
```
hero:job:{job_id}
```
**Job Hash Fields:**
- `id`: Unique job identifier (UUID v4)
- `caller_id`: Identifier of the client that created the job
- `worker_id`: Target worker identifier
- `context_id`: Execution context identifier
- `script`: Script content to execute (Rhai or HeroScript)
- `timeout`: Execution timeout in seconds
- `retries`: Number of retry attempts
- `concurrent`: Whether to execute in separate thread (true/false)
- `log_path`: Optional path to log file for job output
- `created_at`: Job creation timestamp (ISO 8601)
- `updated_at`: Job last update timestamp (ISO 8601)
- `status`: Current job status (dispatched/started/error/finished)
- `env_vars`: Environment variables as JSON object (optional)
- `prerequisites`: JSON array of job IDs that must complete before this job (optional)
- `dependents`: JSON array of job IDs that depend on this job completing (optional)
- `output`: Job execution result (set by worker)
- `error`: Error message if job failed (set by worker)
- `dependencies`: List of job IDs that this job depends on
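A sketch of how these fields could map onto a Rust struct (field names follow the list above; the serde derives and option-ality are assumptions, not the crate's actual definition):
```rust
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Illustrative mapping of the job hash fields; not the crate's actual type.
#[derive(Debug, Serialize, Deserialize)]
pub struct Job {
    pub id: String,                             // UUID v4
    pub caller_id: String,
    pub worker_id: String,
    pub context_id: String,
    pub script: String,                         // Rhai or HeroScript
    pub timeout: u64,                           // seconds
    pub retries: u32,
    pub concurrent: bool,                       // execute in separate thread
    pub log_path: Option<String>,
    pub created_at: DateTime<Utc>,              // ISO 8601
    pub updated_at: DateTime<Utc>,
    pub status: String,                         // dispatched | started | error | finished
    pub env_vars: Option<HashMap<String, String>>,
    pub prerequisites: Option<Vec<String>>,     // must complete before this job
    pub dependents: Option<Vec<String>>,        // depend on this job completing
    pub output: Option<String>,                 // set by worker
    pub error: Option<String>,                  // set by worker
    pub dependencies: Vec<String>,              // job IDs this job depends on
}
```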
### Job Dependencies
Jobs can have dependencies on other jobs, which are stored in the `dependencies` field. A job will not be dispatched until all its dependencies have completed successfully.
### Work Queues
Jobs are queued for execution using Redis lists:
```
hero:work_queue:{worker_id}
```
Workers listen on their specific queue using `BLPOP` for job IDs to process.
### Stop Queues
Job stop requests are sent through dedicated stop queues:
```
hero:stop_queue:{worker_id}
```
Workers monitor these queues to receive stop requests for running jobs.
### Reply Queues
For synchronous job execution, dedicated reply queues are used:
```
hero:reply:{job_id}
```
Workers send results to these queues when jobs complete.
## Job Lifecycle
### 1. Job Creation
```
Client -> Redis: HSET hero:job:{job_id} {job_fields}
```
### 2. Job Submission
```
Client -> Redis: LPUSH hero:work_queue:{worker_id} {job_id}
```
### 3. Job Processing
```
Worker -> Redis: BLPOP hero:work_queue:{worker_id}
Worker -> Redis: HSET hero:job:{job_id} status "started"
Worker: Execute script
Worker -> Redis: HSET hero:job:{job_id} status "finished" output "{result}"
```
### 4. Job Completion (Async)
```
Worker -> Redis: LPUSH hero:reply:{job_id} {result}
```
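A worker-side sketch of steps 3 and 4 using the `redis` crate (a simplification of the loop in the Worker Protocol section below; script execution is elided):
```rust
use redis::AsyncCommands;

/// Sketch: take one job from the queue, mark it, record a result, and reply.
async fn process_one_job(
    conn: &mut redis::aio::MultiplexedConnection,
    worker_id: &str,
) -> redis::RedisResult<()> {
    // Block until a job ID arrives on this worker's queue (0.0 = wait forever).
    let (_queue, job_id): (String, String) = conn
        .blpop(format!("hero:work_queue:{worker_id}"), 0.0)
        .await?;
    let job_key = format!("hero:job:{job_id}");
    let _: () = conn.hset(&job_key, "status", "started").await?;

    // Execute the script here (elided), then record the result.
    let output = "script result";
    let _: () = conn
        .hset_multiple(&job_key, &[("status", "finished"), ("output", output)])
        .await?;

    // Notify any synchronous caller waiting on the dedicated reply queue.
    let _: () = conn.lpush(format!("hero:reply:{job_id}"), output).await?;
    Ok(())
}
```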
## API Operations
### List Jobs
```rust
dispatcher.list_jobs() -> Vec<String>
```
**Redis Operations:**
- `KEYS hero:job:*` - Get all job keys
- Extract job IDs from key names
### Stop Job
```rust
dispatcher.stop_job(job_id) -> Result<(), DispatcherError>
```
**Redis Operations:**
- `LPUSH hero:stop_queue:{worker_id} {job_id}` - Send stop request
### Get Job Status
```rust
dispatcher.get_job_status(job_id) -> Result<JobStatus, DispatcherError>
```
**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data
- Parse `status` field
### Get Job Logs
```rust
dispatcher.get_job_logs(job_id) -> Result<Option<String>, DispatcherError>
```
**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data
- Read `log_path` field
- Read log file from filesystem
### Run Job and Await Result
```rust
dispatcher.run_job_and_await_result(job, worker_id) -> Result<String, DispatcherError>
```
**Redis Operations:**
1. `HSET hero:job:{job_id} {job_fields}` - Store job
2. `LPUSH hero:work_queue:{worker_id} {job_id}` - Submit job
3. `BLPOP hero:reply:{job_id} {timeout}` - Wait for result
## Worker Protocol
### Job Processing Loop
```rust
loop {
    // 1. Wait for job
    job_id = BLPOP hero:work_queue:{worker_id}

    // 2. Get job details
    job_data = HGETALL hero:job:{job_id}

    // 3. Update status
    HSET hero:job:{job_id} status "started"

    // 4. Check for stop requests
    if LLEN hero:stop_queue:{worker_id} > 0 {
        stop_job_id = LPOP hero:stop_queue:{worker_id}
        if stop_job_id == job_id {
            HSET hero:job:{job_id} status "error" error "stopped"
            continue
        }
    }

    // 5. Execute script
    result = execute_script(job_data.script)

    // 6. Update job with result
    HSET hero:job:{job_id} status "finished" output result

    // 7. Send reply if needed
    if reply_queue_exists(hero:reply:{job_id}) {
        LPUSH hero:reply:{job_id} result
    }
}
```
### Stop Request Handling
Workers should periodically check the stop queue during long-running jobs:
```rust
if LLEN hero:stop_queue:{worker_id} > 0 {
    stop_requests = LRANGE hero:stop_queue:{worker_id} 0 -1
    if stop_requests.contains(current_job_id) {
        // Stop current job execution
        HSET hero:job:{current_job_id} status "error" error "stopped_by_request"
        // Remove stop request
        LREM hero:stop_queue:{worker_id} 1 current_job_id
        return
    }
}
```
## Error Handling
### Job Timeouts
- Client sets timeout when creating job
- Worker should respect timeout and stop execution
- If timeout exceeded: `HSET hero:job:{job_id} status "error" error "timeout"`
### Worker Failures
- If worker crashes, job remains in "started" status
- Monitoring systems can detect stale jobs and retry
- Jobs can be requeued: `LPUSH hero:work_queue:{worker_id} {job_id}`
### Redis Connection Issues
- Clients should implement retry logic with exponential backoff
- Workers should reconnect and resume processing
- Use Redis persistence to survive Redis restarts
## Monitoring and Observability
### Queue Monitoring
```bash
# Check work queue length
LLEN hero:work_queue:{worker_id}
# Check stop queue length
LLEN hero:stop_queue:{worker_id}
# List all jobs
KEYS hero:job:*
# Get job details
HGETALL hero:job:{job_id}
```
### Metrics to Track
- Jobs created per second
- Jobs completed per second
- Average job execution time
- Queue depths
- Worker availability
- Error rates by job type
## Security Considerations
### Redis Security
- Use Redis AUTH for authentication
- Enable TLS for Redis connections
- Restrict Redis network access
- Use Redis ACLs to limit worker permissions
### Job Security
- Validate script content before execution
- Sandbox script execution environment
- Limit resource usage (CPU, memory, disk)
- Log all job executions for audit
### Log File Security
- Ensure log paths are within allowed directories
- Validate log file permissions
- Rotate and archive logs regularly
- Sanitize sensitive data in logs
## Performance Considerations
### Redis Optimization
- Use Redis pipelining for batch operations
- Configure appropriate Redis memory limits
- Use Redis clustering for high availability
- Monitor Redis memory usage and eviction
### Job Optimization
- Keep job payloads small
- Use efficient serialization formats
- Batch similar jobs when possible
- Implement job prioritization if needed
### Worker Optimization
- Pool worker connections to Redis
- Use async I/O for Redis operations
- Implement graceful shutdown handling
- Monitor worker resource usage


@@ -0,0 +1,559 @@
use hero_dispatcher::{DispatcherBuilder, ScriptType};
use redis::AsyncCommands;
use std::collections::HashMap;
use std::time::Duration;
/// Comprehensive example demonstrating the Hero Dispatcher functionality.
///
/// This example shows:
/// 1. Creating a dispatcher instance
/// 2. Creating jobs with different configurations
/// 3. Submitting jobs to the queue
/// 4. Inspecting Redis entries created by the dispatcher
/// 5. Running jobs and awaiting results
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
println!("🚀 Hero Dispatcher Demo");
println!("======================\n");
// Create dispatcher client with worker vectors per script type
let dispatcher = DispatcherBuilder::new()
.caller_id("demo-caller")
.context_id("demo-context")
.heroscript_workers(vec!["hero-worker-1".to_string(), "hero-worker-2".to_string()])
.rhai_sal_workers(vec!["rhai-sal-worker-1".to_string()])
.rhai_dsl_workers(vec!["rhai-dsl-worker-1".to_string()])
.redis_url("redis://127.0.0.1/")
.build()?;
println!("✅ Dispatcher created with:");
println!(" - Caller ID: demo-caller");
println!(" - Worker ID: demo-worker");
println!(" - Context ID: demo-context\n");
// Create Redis connection for inspection
let redis_client = redis::Client::open("redis://127.0.0.1:6379")?;
let mut redis_conn = redis_client.get_multiplexed_async_connection().await?;
// Demo 1: Create a simple job
println!("📝 Demo 1: Creating a simple job");
println!("--------------------------------");
let job1 = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("Hello from job 1!");"#)
.timeout(Duration::from_secs(10))
.build()?;
println!("Job 1 created with ID: {}", job1.id);
// Create the job (stores in Redis)
dispatcher.create_job(&job1).await?;
println!("✅ Job 1 stored in Redis");
// Inspect Redis entries for this job
print_job_redis_entries(&mut redis_conn, &job1.id).await?;
println!();
// Demo 2: Create a job with custom settings
println!("📝 Demo 2: Creating a job with custom settings");
println!("----------------------------------------------");
let job2 = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script(r#"
let result = 42 * 2;
print("Calculation result: " + result);
result
"#)
.timeout(Duration::from_secs(30))
.build()?;
println!("Job 2 created with ID: {}", job2.id);
// Create the job
dispatcher.create_job(&job2).await?;
println!("✅ Job 2 stored in Redis");
// Inspect Redis entries
print_job_redis_entries(&mut redis_conn, &job2.id).await?;
println!();
// Demo 3: Environment Variables
println!("📝 Demo 3: Jobs with Environment Variables");
println!("------------------------------------------");
// Create environment variables map
let mut env_vars = HashMap::new();
env_vars.insert("API_KEY".to_string(), "secret-api-key-123".to_string());
env_vars.insert("DEBUG_MODE".to_string(), "true".to_string());
env_vars.insert("MAX_RETRIES".to_string(), "5".to_string());
env_vars.insert("SERVICE_URL".to_string(), "https://api.example.com".to_string());
let job_with_env = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"
print("Environment variables available:");
print("API_KEY: " + env.API_KEY);
print("DEBUG_MODE: " + env.DEBUG_MODE);
print("MAX_RETRIES: " + env.MAX_RETRIES);
print("SERVICE_URL: " + env.SERVICE_URL);
"Environment variables processed successfully"
"#)
.env_vars(env_vars.clone())
.timeout(Duration::from_secs(15))
.build()?;
println!("Job with environment variables created: {}", job_with_env.id);
// Store job in Redis
dispatcher.create_job(&job_with_env).await?;
println!("✅ Job with env vars stored in Redis");
// Show Redis entries including environment variables
print_job_redis_entries(&mut redis_conn, &job_with_env.id).await?;
// Demonstrate individual env var setting
let job_individual_env = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script("print('Single env var: ' + env.SINGLE_VAR); 'done'")
.env_var("SINGLE_VAR", "individual-value")
.env_var("ANOTHER_VAR", "another-value")
.build()?;
println!("Job with individual env vars created: {}", job_individual_env.id);
dispatcher.create_job(&job_individual_env).await?;
println!("✅ Job with individual env vars stored in Redis");
print_job_redis_entries(&mut redis_conn, &job_individual_env.id).await?;
println!();
// Demo 4: Create multiple jobs and show queue state
println!("📝 Demo 4: Creating multiple jobs and inspecting queue");
println!("----------------------------------------------------");
let mut job_ids = Vec::new();
for i in 3..=5 {
let script_type = match i {
3 => ScriptType::HeroScript,
4 => ScriptType::RhaiSAL,
5 => ScriptType::RhaiDSL,
_ => ScriptType::HeroScript,
};
let job = dispatcher
.new_job()
.script_type(script_type)
.script(&format!(r#"print("Job {} is running");"#, i))
.timeout(Duration::from_secs(15))
.build()?;
job_ids.push(job.id.clone());
dispatcher.create_job(&job).await?;
println!("✅ Job {} created with ID: {}", i, job.id);
}
// Show all Redis keys related to our jobs
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!();
// Demo 5: Show job status checking
println!("📝 Demo 5: Checking job statuses");
println!("--------------------------------");
for job_id in &job_ids {
match dispatcher.get_job_status(job_id).await {
Ok(status) => println!("Job {}: {:?}", job_id, status),
Err(e) => println!("Error getting status for job {}: {}", job_id, e),
}
}
println!();
// Demo 6: Simulate running a job and getting result (if worker is available)
println!("📝 Demo 6: Attempting to run job and await result");
println!("------------------------------------------------");
let simple_job = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("This job will complete quickly"); "success""#)
.timeout(Duration::from_secs(5))
.build()?;
println!("Created job for execution: {}", simple_job.id);
// Try to run the job (this will timeout if no worker is available)
match dispatcher.run_job_and_await_result(&simple_job).await {
Ok(result) => {
println!("✅ Job completed successfully!");
println!("Result: {}", result);
}
Err(e) => {
println!("⚠️ Job execution failed (likely no worker available): {}", e);
println!(" This is expected if no Hero worker is running");
}
}
// Demo 7: List all jobs
println!("📝 Demo 7: Listing all jobs");
println!("-------------------------");
let all_job_ids = match dispatcher.list_jobs().await {
Ok(job_ids) => {
println!("Found {} jobs:", job_ids.len());
for job_id in &job_ids {
println!(" - {}", job_id);
}
job_ids
}
Err(e) => {
println!("Error listing jobs: {}", e);
Vec::new()
}
};
println!();
// Demo 8: Create a job with log path and demonstrate logs functionality
println!("📝 Demo 8: Job with log path and logs retrieval");
println!("-----------------------------------------------");
let log_job = dispatcher
.new_job()
.script(r#"print("This job writes to logs"); "log_test""#)
.log_path("/tmp/hero_job_demo.log")
.timeout(Duration::from_secs(10))
.build()?;
println!("Created job with log path: {}", log_job.id);
dispatcher.create_job(&log_job).await?;
// Try to get logs (will be empty since job hasn't run)
match dispatcher.get_job_logs(&log_job.id).await {
Ok(Some(logs)) => println!("Job logs: {}", logs),
Ok(None) => println!("No logs available for job (expected - job hasn't run or no log file)"),
Err(e) => println!("Error getting logs: {}", e),
}
println!();
// Demo 9: Stop job functionality
println!("📝 Demo 9: Stopping a job");
println!("-------------------------");
if let Some(job_id) = all_job_ids.first() {
println!("Attempting to stop job: {}", job_id);
match dispatcher.stop_job(job_id).await {
Ok(()) => println!("✅ Stop request sent for job {}", job_id),
Err(e) => println!("Error stopping job: {}", e),
}
// Show stop queue
let stop_queue_key = "hero:stop_queue:demo-worker";
let stop_queue_length: i64 = redis_conn.llen(stop_queue_key).await?;
println!("📤 Stop queue length ({}): {}", stop_queue_key, stop_queue_length);
if stop_queue_length > 0 {
let stop_items: Vec<String> = redis_conn.lrange(stop_queue_key, 0, -1).await?;
println!("📋 Stop queue items:");
for (i, item) in stop_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
} else {
println!("No jobs available to stop");
}
println!();
// Demo 10: Final Redis state inspection
println!("📝 Demo 10: Final Redis state");
println!("----------------------------");
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!("\n🎉 Dispatcher demo completed!");
println!("💡 New features demonstrated:");
println!(" - list_jobs(): List all job IDs");
println!(" - stop_job(): Send stop request to worker");
println!(" - get_job_logs(): Retrieve job logs from file");
println!(" - log_path(): Configure log file for jobs");
println!("💡 To see job execution in action, start a Hero worker that processes the 'demo-worker' queue");
// Demo 11: Demonstrate new job management features
println!("📝 Demo 11: Job Management - Delete and Clear Operations");
println!("--------------------------------------------------------");
// List all current jobs
match dispatcher.list_jobs().await {
Ok(jobs) => {
println!("Current jobs in system: {:?}", jobs);
if !jobs.is_empty() {
// Delete the first job as an example
let job_to_delete = &jobs[0];
println!("Deleting job: {}", job_to_delete);
match dispatcher.delete_job(job_to_delete).await {
Ok(()) => println!("✅ Job {} deleted successfully", job_to_delete),
Err(e) => println!("❌ Error deleting job {}: {}", job_to_delete, e),
}
// Show updated job list
match dispatcher.list_jobs().await {
Ok(remaining_jobs) => println!("Remaining jobs: {:?}", remaining_jobs),
Err(e) => println!("Error listing jobs: {}", e),
}
}
}
Err(e) => println!("Error listing jobs: {}", e),
}
println!();
// Demonstrate clear all jobs
println!("Clearing all remaining jobs...");
match dispatcher.clear_all_jobs().await {
Ok(count) => println!("✅ Cleared {} jobs from Redis", count),
Err(e) => println!("❌ Error clearing jobs: {}", e),
}
// Verify all jobs are cleared
match dispatcher.list_jobs().await {
Ok(jobs) => {
if jobs.is_empty() {
println!("✅ All jobs successfully cleared from Redis");
} else {
println!("⚠️ Some jobs remain: {:?}", jobs);
}
}
Err(e) => println!("Error verifying job clearance: {}", e),
}
println!();
println!("🎉 Demo completed! The dispatcher now supports:");
println!(" • Script type routing (HeroScript, RhaiSAL, RhaiDSL)");
println!(" • Multiple workers per script type for load balancing");
println!(" • Automatic worker selection based on job script type");
println!(" • Job management: list, delete, and clear operations");
println!(" • Enhanced job logging and monitoring");
Ok(())
}
/// Print Redis entries for a specific job
async fn print_job_redis_entries(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<(), redis::RedisError> {
let job_key = format!("hero:job:{}", job_id);
println!("🔍 Redis entries for job {}:", job_id);
// Check if job hash exists
let exists: bool = conn.exists(&job_key).await?;
if exists {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&job_key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
println!(" 📋 Job data ({}): ", job_key);
for (field, value) in job_data {
println!(" {}: {}", field, value);
}
} else {
println!(" ⚠️ Key {} exists but is not a hash (type: {})", job_key, key_type);
}
} else {
println!(" ❌ No job data found at key: {}", job_key);
}
// Check work queue
let queue_key = "hero:work_queue:demo-worker";
let queue_length: i64 = conn.llen(queue_key).await?;
println!(" 📤 Work queue length ({}): {}", queue_key, queue_length);
if queue_length > 0 {
let queue_items: Vec<String> = conn.lrange(queue_key, 0, -1).await?;
println!(" 📋 Queue items:");
for (i, item) in queue_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
Ok(())
}
/// Print all dispatcher-related Redis keys
async fn print_all_dispatcher_redis_keys(
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<(), redis::RedisError> {
println!("🔍 All Hero Dispatcher Redis keys:");
// Get all keys with hero: prefix
let keys: Vec<String> = conn.keys("hero:*").await?;
if keys.is_empty() {
println!(" ❌ No Hero keys found in Redis");
return Ok(());
}
// Group keys by type
let mut job_keys = Vec::new();
let mut queue_keys = Vec::new();
let mut other_keys = Vec::new();
for key in keys {
if key.starts_with("hero:job:") {
job_keys.push(key);
} else if key.contains("queue") {
queue_keys.push(key);
} else {
other_keys.push(key);
}
}
// Print job keys
if !job_keys.is_empty() {
println!(" 📋 Job entries:");
for key in job_keys {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&key).await?;
println!(" {}: {} fields", key, job_data.len());
} else {
println!(" {}: {} (not a hash, skipping)", key, key_type);
}
}
}
// Print queue keys
if !queue_keys.is_empty() {
println!(" 📤 Queue entries:");
for key in queue_keys {
let length: i64 = conn.llen(&key).await?;
println!(" {}: {} items", key, length);
}
}
// Print other keys
if !other_keys.is_empty() {
println!(" 🔧 Other entries:");
for key in other_keys {
println!(" {}", key);
}
}
Ok(())
}

View File

@ -0,0 +1,90 @@
use log::info;
use hero_dispatcher::{DispatcherBuilder, DispatcherError, ScriptType};
use std::time::{Duration, Instant};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::builder()
.filter_level(log::LevelFilter::Info)
.init();
// Build the client using the new builder pattern
let client = DispatcherBuilder::new()
.caller_id("timeout-example-runner")
.redis_url("redis://127.0.0.1/")
.build()?;
info!("Dispatcher created.");
let script_content = r#"
// This script will never be executed by a worker because the recipient does not exist.
let x = 10;
let y = x + 32;
y
"#;
// The worker_id points to a worker queue that doesn't have a worker.
let non_existent_recipient = "non_existent_worker_for_timeout_test";
let very_short_timeout = Duration::from_secs(2);
info!(
"Submitting script to non-existent recipient '{}' with a timeout of {:?}...",
non_existent_recipient, very_short_timeout
);
let start_time = Instant::now();
// Use the new JobBuilder
let result = client
.new_job()
.script_type(ScriptType::HeroScript)
.script(script_content)
.timeout(very_short_timeout)
.await_response()
.await;
match result {
Ok(details) => {
log::error!(
"Timeout Example FAILED: Expected a timeout, but got Ok: {:?}",
details
);
Err("Expected timeout, but task completed successfully.".into())
}
Err(e) => {
let elapsed = start_time.elapsed();
info!("Timeout Example: Received error as expected: {}", e);
info!("Elapsed time: {:?}", elapsed);
match e {
DispatcherError::Timeout(task_id) => {
info!("Timeout Example PASSED: Correctly received DispatcherError::Timeout for task_id: {}", task_id);
// Ensure the elapsed time is close to the timeout duration
// Allow for some buffer for processing
assert!(
elapsed >= very_short_timeout
&& elapsed < very_short_timeout + Duration::from_secs(1),
"Elapsed time {:?} should be close to timeout {:?}",
elapsed,
very_short_timeout
);
info!(
"Elapsed time {:?} is consistent with timeout duration {:?}.",
elapsed, very_short_timeout
);
Ok(())
}
other_error => {
log::error!(
"Timeout Example FAILED: Expected DispatcherError::Timeout, but got other error: {:?}",
other_error
);
Err(format!(
"Expected DispatcherError::Timeout, got other error: {:?}",
other_error
)
.into())
}
}
}
}
}

View File

@ -0,0 +1,57 @@
/// Comprehensive error type for all possible failures in the Rhai client.
///
/// This enum covers all error scenarios that can occur during client operations,
/// from Redis connectivity issues to task execution timeouts.
#[derive(Debug)]
pub enum DispatcherError {
/// Redis connection or operation error
RedisError(redis::RedisError),
/// JSON serialization/deserialization error
SerializationError(serde_json::Error),
/// Task execution timeout - contains the task_id that timed out
Timeout(String),
/// Task not found after submission - contains the task_id (rare occurrence)
TaskNotFound(String),
/// Context ID is missing
ContextIdMissing,
/// Invalid input provided
InvalidInput(String),
}
impl From<redis::RedisError> for DispatcherError {
fn from(err: redis::RedisError) -> Self {
DispatcherError::RedisError(err)
}
}
impl From<serde_json::Error> for DispatcherError {
fn from(err: serde_json::Error) -> Self {
DispatcherError::SerializationError(err)
}
}
impl std::fmt::Display for DispatcherError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
DispatcherError::RedisError(e) => write!(f, "Redis error: {}", e),
DispatcherError::SerializationError(e) => write!(f, "Serialization error: {}", e),
DispatcherError::Timeout(task_id) => {
write!(f, "Timeout waiting for task {} to complete", task_id)
}
DispatcherError::TaskNotFound(task_id) => {
write!(f, "Task {} not found after submission", task_id)
}
DispatcherError::ContextIdMissing => {
write!(f, "Context ID is missing")
}
DispatcherError::InvalidInput(msg) => {
write!(f, "Invalid input: {}", msg)
}
}
}
}
impl std::error::Error for DispatcherError {}

261
core/dispatcher/src/job.rs Normal file
View File

@ -0,0 +1,261 @@
use chrono::Utc;
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;
use crate::{Dispatcher, DispatcherError};
use hero_job::{Job, ScriptType};
/// Builder for constructing and submitting script execution requests.
///
/// This builder provides a fluent interface for configuring script execution
/// parameters and offers two submission modes: fire-and-forget (`submit()`)
/// and request-reply (`await_response()`).
///
/// # Example
///
/// ```rust,no_run
/// use std::time::Duration;
/// use hero_dispatcher::ScriptType;
///
/// # async fn example(client: &hero_dispatcher::Dispatcher) -> Result<String, hero_dispatcher::DispatcherError> {
/// let result = client
/// .new_job()
/// .script_type(ScriptType::HeroScript)
/// .script(r#"print("Hello, World!");"#)
/// .timeout(Duration::from_secs(30))
/// .await_response()
/// .await?;
/// # Ok(result)
/// # }
/// ```
pub struct JobBuilder<'a> {
client: &'a Dispatcher,
request_id: String,
context_id: String,
caller_id: String,
script: String,
script_type: ScriptType,
timeout: Duration,
retries: u32,
concurrent: bool,
log_path: Option<String>,
env_vars: HashMap<String, String>,
prerequisites: Vec<String>,
dependents: Vec<String>
}
impl<'a> JobBuilder<'a> {
pub fn new(client: &'a Dispatcher) -> Self {
Self {
client,
request_id: "".to_string(),
context_id: client.context_id.clone(),
caller_id: client.caller_id.clone(),
script: "".to_string(),
script_type: ScriptType::HeroScript, // Default to HeroScript
timeout: Duration::from_secs(5),
retries: 0,
concurrent: false,
log_path: None,
env_vars: HashMap::new(),
prerequisites: Vec::new(),
dependents: Vec::new(),
}
}
pub fn request_id(mut self, request_id: &str) -> Self {
self.request_id = request_id.to_string();
self
}
pub fn script_type(mut self, script_type: ScriptType) -> Self {
self.script_type = script_type;
self
}
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = context_id.to_string();
self
}
pub fn script(mut self, script: &str) -> Self {
self.script = script.to_string();
self
}
    /// Read the script contents from a file path.
    ///
    /// Panics if the file cannot be read.
    pub fn script_path(mut self, script_path: &str) -> Self {
        self.script = std::fs::read_to_string(script_path)
            .expect("failed to read script file");
        self
    }
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
pub fn log_path(mut self, log_path: &str) -> Self {
self.log_path = Some(log_path.to_string());
self
}
/// Set a single environment variable
pub fn env_var(mut self, key: &str, value: &str) -> Self {
self.env_vars.insert(key.to_string(), value.to_string());
self
}
/// Set multiple environment variables from a HashMap
pub fn env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
self.env_vars.extend(env_vars);
self
}
/// Clear all environment variables
pub fn clear_env_vars(mut self) -> Self {
self.env_vars.clear();
self
}
/// Add a prerequisite job ID that must complete before this job can run
pub fn prerequisite(mut self, job_id: &str) -> Self {
self.prerequisites.push(job_id.to_string());
self
}
/// Set multiple prerequisite job IDs
pub fn prerequisites(mut self, job_ids: Vec<String>) -> Self {
self.prerequisites.extend(job_ids);
self
}
/// Add a dependent job ID that depends on this job completing
pub fn dependent(mut self, job_id: &str) -> Self {
self.dependents.push(job_id.to_string());
self
}
/// Set multiple dependent job IDs
pub fn dependents(mut self, job_ids: Vec<String>) -> Self {
self.dependents.extend(job_ids);
self
}
/// Clear all prerequisites
pub fn clear_prerequisites(mut self) -> Self {
self.prerequisites.clear();
self
}
/// Clear all dependents
pub fn clear_dependents(mut self) -> Self {
self.dependents.clear();
self
}
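    /// A minimal sketch of chaining dependency configuration before building
    /// a job. The job IDs here are illustrative placeholders, not values the
    /// dispatcher defines.
    ///
    /// ```rust,no_run
    /// # use hero_dispatcher::Dispatcher;
    /// # fn example(client: &Dispatcher) -> Result<(), hero_dispatcher::DispatcherError> {
    /// let job = client
    ///     .new_job()
    ///     .script(r#"print("step b");"#)
    ///     .prerequisite("job-a")
    ///     .dependent("job-c")
    ///     .build()?;
    /// # Ok(())
    /// # }
    /// ```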
    pub fn build(self) -> Result<Job, DispatcherError> {
        let request_id = if self.request_id.is_empty() {
            // Generate a UUID for the request_id
            Uuid::new_v4().to_string()
        } else {
            self.request_id
        };
        if self.context_id.is_empty() {
            return Err(DispatcherError::ContextIdMissing);
        }
        if self.caller_id.is_empty() {
            return Err(DispatcherError::InvalidInput(
                "caller_id is missing".to_string(),
            ));
        }
        let now = Utc::now();
        Ok(Job {
            id: request_id,
            caller_id: self.caller_id,
            context_id: self.context_id,
            script: self.script,
            script_type: self.script_type,
            timeout: self.timeout,
            retries: self.retries as u8,
            concurrent: self.concurrent,
            log_path: self.log_path,
            env_vars: self.env_vars,
            prerequisites: self.prerequisites,
            dependents: self.dependents,
            created_at: now,
            updated_at: now,
        })
    }
    /// Fire-and-forget: store the job in Redis without waiting for a result.
    pub async fn submit(self) -> Result<(), DispatcherError> {
        // Keep the client reference before `build` consumes the builder.
        let client = self.client;
        let job = self.build()?;
        client.create_job(&job).await
    }
    /// Request-reply: create the job, dispatch it to a worker, and await the result.
    pub async fn await_response(self) -> Result<String, DispatcherError> {
        let client = self.client;
        let job = self.build()?;
        client.run_job_and_await_result(&job).await
    }
}

498
core/dispatcher/src/lib.rs Normal file
View File

@ -0,0 +1,498 @@
use log::{debug, error, info, warn};
use redis::AsyncCommands;
use std::time::Duration;
use hero_job::NAMESPACE_PREFIX;
mod job;
mod error;
pub use crate::error::DispatcherError;
pub use crate::job::JobBuilder;
// Re-export types from hero_job for public API
pub use hero_job::{Job, JobStatus, ScriptType};
pub struct Dispatcher {
redis_client: redis::Client,
caller_id: String,
context_id: String,
heroscript_workers: Vec<String>,
rhai_sal_workers: Vec<String>,
rhai_dsl_workers: Vec<String>,
}
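/// Fluent builder for [`Dispatcher`].
///
/// A minimal configuration sketch; the worker queue names are illustrative
/// placeholders, not defaults defined by this crate.
///
/// ```rust,no_run
/// use hero_dispatcher::DispatcherBuilder;
///
/// # fn main() -> Result<(), hero_dispatcher::DispatcherError> {
/// let dispatcher = DispatcherBuilder::new()
///     .caller_id("my-caller")
///     .context_id("my-context")
///     .redis_url("redis://127.0.0.1/")
///     .heroscript_workers(vec!["heroscript_worker_1".to_string()])
///     .build()?;
/// # Ok(())
/// # }
/// ```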
pub struct DispatcherBuilder {
redis_url: Option<String>,
caller_id: Option<String>,
context_id: Option<String>,
heroscript_workers: Vec<String>,
rhai_sal_workers: Vec<String>,
rhai_dsl_workers: Vec<String>,
}
impl DispatcherBuilder {
pub fn new() -> Self {
Self {
redis_url: None,
caller_id: Some("default_caller".to_string()),
context_id: Some("default_context".to_string()),
heroscript_workers: Vec::new(),
rhai_sal_workers: Vec::new(),
rhai_dsl_workers: Vec::new(),
}
}
pub fn caller_id(mut self, caller_id: &str) -> Self {
self.caller_id = Some(caller_id.to_string());
self
}
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = Some(context_id.to_string());
self
}
pub fn heroscript_workers(mut self, workers: Vec<String>) -> Self {
self.heroscript_workers = workers;
self
}
pub fn rhai_sal_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_sal_workers = workers;
self
}
pub fn rhai_dsl_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_dsl_workers = workers;
self
}
pub fn redis_url(mut self, url: &str) -> Self {
self.redis_url = Some(url.to_string());
self
}
/// Builds the final `Dispatcher` instance.
///
    /// This method creates the Redis client from the configured URL. It
    /// returns an error if the URL cannot be parsed; missing caller and
    /// context IDs fall back to their defaults.
///
/// # Returns
///
/// * `Ok(Dispatcher)` - Successfully configured client
/// * `Err(DispatcherError)` - Configuration or connection error
pub fn build(self) -> Result<Dispatcher, DispatcherError> {
let url = self
.redis_url
.unwrap_or_else(|| "redis://127.0.0.1/".to_string());
let client = redis::Client::open(url)?;
Ok(Dispatcher {
redis_client: client,
caller_id: self.caller_id.unwrap_or_else(|| "default_caller".to_string()),
context_id: self.context_id.unwrap_or_else(|| "default_context".to_string()),
heroscript_workers: self.heroscript_workers,
rhai_sal_workers: self.rhai_sal_workers,
rhai_dsl_workers: self.rhai_dsl_workers,
})
}
}
impl Dispatcher {
    /// Select a worker ID for the given script type.
    ///
    /// Currently returns the first configured worker; proper load balancing
    /// is still a TODO (see below).
fn select_worker_for_script_type(&self, script_type: &ScriptType) -> Result<String, DispatcherError> {
let workers = match script_type {
ScriptType::HeroScript => &self.heroscript_workers,
ScriptType::RhaiSAL => &self.rhai_sal_workers,
ScriptType::RhaiDSL => &self.rhai_dsl_workers,
};
if workers.is_empty() {
return Err(DispatcherError::InvalidInput(format!(
"No workers configured for script type: {:?}", script_type
)));
}
// For now, use simple round-robin by selecting first available worker
// TODO: Implement proper load balancing
Ok(workers[0].clone())
}
pub fn new_job(&self) -> JobBuilder {
JobBuilder::new(self)
}
// Internal helper to submit script details and push to work queue
async fn create_job_using_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job: &Job,
) -> Result<(), DispatcherError> {
        debug!(
            "Submitting job {} (script type: {:?}, namespace prefix: {})",
            job.id, job.script_type, NAMESPACE_PREFIX
        );
// Use the shared Job struct's Redis storage method
job.store_in_redis(conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to store job in Redis: {}", e)))?;
Ok(())
}
// Internal helper to submit script details and push to work queue
async fn start_job_using_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job_id: String,
worker_id: String
) -> Result<(), DispatcherError> {
let worker_queue_key = format!(
"{}{}",
NAMESPACE_PREFIX,
worker_id.replace(" ", "_").to_lowercase()
);
        // lpush returns the new list length; propagate any Redis error
        // instead of silently discarding the result.
        let _: i64 = conn.lpush(&worker_queue_key, job_id).await?;
        Ok(())
}
// Internal helper to await response from worker
async fn await_response_from_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job_key: &String,
reply_queue_key: &String,
timeout: Duration,
) -> Result<String, DispatcherError> {
        // BLPOP on the reply queue; the timeout is passed in seconds
        // (the redis crate accepts a fractional f64 here).
        let blpop_timeout_secs = timeout.as_secs().max(1); // Ensure at least 1 second
match conn
.blpop::<&String, Option<(String, String)>>(reply_queue_key, blpop_timeout_secs as f64)
.await
{
Ok(Some((_queue, result_message_str))) => {
Ok(result_message_str)
}
Ok(None) => {
// BLPOP timed out
warn!(
"Timeout waiting for result on reply queue {} for job {}",
reply_queue_key, job_key
);
// Optionally, delete the reply queue
let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
Err(DispatcherError::Timeout(job_key.clone()))
}
Err(e) => {
// Redis error
error!(
"Redis error on BLPOP for reply queue {}: {}",
reply_queue_key, e
);
// Optionally, delete the reply queue
let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
Err(DispatcherError::RedisError(e))
}
}
}
    /// Store a job in Redis without dispatching it to a worker queue.
    pub async fn create_job(
        &self,
        job: &Job,
    ) -> Result<(), DispatcherError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        self.create_job_using_connection(&mut conn, job).await?;
        Ok(())
    }
    /// Create the job, dispatch it to a worker selected by script type, and
    /// await the result on a dedicated reply queue.
pub async fn run_job_and_await_result(
&self,
job: &Job
) -> Result<String, DispatcherError> {
// Select worker based on script type
let worker_id = self.select_worker_for_script_type(&job.script_type)?;
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, job.id); // Derived from the job ID
        self.create_job_using_connection(&mut conn, job).await?;
        self.start_job_using_connection(&mut conn, job.id.clone(), worker_id).await?;
info!(
"Task {} submitted. Waiting for result on queue {} with timeout {:?}...",
job.id, // This is the UUID
reply_queue_key,
job.timeout
);
self.await_response_from_connection(
&mut conn,
&job.id,
&reply_queue_key,
job.timeout,
)
.await
}
// Method to get job status
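    /// Polling sketch: check a submitted job's status and, once it has
    /// finished, fetch its output (see `get_job_output` below).
    ///
    /// ```rust,no_run
    /// # use hero_dispatcher::{Dispatcher, DispatcherError, JobStatus};
    /// # async fn poll(d: &Dispatcher, job_id: &str) -> Result<(), DispatcherError> {
    /// if d.get_job_status(job_id).await? == JobStatus::Finished {
    ///     if let Some(output) = d.get_job_output(job_id).await? {
    ///         println!("job output: {}", output);
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```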
pub async fn get_job_status(
&self,
job_id: &str,
) -> Result<JobStatus, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let result_map: Option<std::collections::HashMap<String, String>> =
conn.hgetall(&job_key).await?;
match result_map {
Some(map) => {
let status_str = map.get("status").cloned().unwrap_or_else(|| {
warn!("Task {}: 'status' field missing from Redis hash, defaulting to empty.", job_id);
String::new()
});
let status = match status_str.as_str() {
"dispatched" => JobStatus::Dispatched,
"started" => JobStatus::Started,
"error" => JobStatus::Error,
"finished" => JobStatus::Finished,
_ => JobStatus::Dispatched, // default
};
Ok(status)
}
None => {
warn!("Job {} not found in Redis", job_id);
Ok(JobStatus::Dispatched) // default for missing jobs
}
}
}
// Method to get job output
pub async fn get_job_output(
&self,
job_id: &str,
) -> Result<Option<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let result_map: Option<std::collections::HashMap<String, String>> =
conn.hgetall(&job_key).await?;
match result_map {
Some(map) => {
Ok(map.get("output").cloned())
}
None => {
warn!("Job {} not found in Redis", job_id);
Ok(None)
}
}
}
/// List all jobs in Redis
pub async fn list_jobs(&self) -> Result<Vec<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Use the shared Job struct's list method
Job::list_all_job_ids(&mut conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))
}
/// Stop a job by pushing its ID to the stop queue
pub async fn stop_job(&self, job_id: &str) -> Result<(), DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Get job details to determine script type and appropriate worker
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
if job_data.is_empty() {
return Err(DispatcherError::InvalidInput(format!("Job {} not found", job_id)));
}
// Parse script type from job data
let script_type_str = job_data.get("script_type")
.ok_or_else(|| DispatcherError::InvalidInput("Job missing script_type field".to_string()))?;
let script_type: ScriptType = serde_json::from_str(&format!("\"{}\"", script_type_str))
.map_err(|e| DispatcherError::InvalidInput(format!("Invalid script type: {}", e)))?;
// Select appropriate worker for this script type
let worker_id = self.select_worker_for_script_type(&script_type)?;
let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, worker_id);
// Push job ID to the stop queue
conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?;
info!("Job {} added to stop queue {} for script type {:?}", job_id, stop_queue_key, script_type);
Ok(())
}
/// Get logs for a job by reading from its log file
pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
// Get the job data to find the log path
let result_map: Option<std::collections::HashMap<String, String>> =
conn.hgetall(&job_key).await?;
match result_map {
Some(map) => {
if let Some(log_path) = map.get("log_path") {
// Try to read the log file
match std::fs::read_to_string(log_path) {
Ok(contents) => Ok(Some(contents)),
Err(e) => {
warn!("Failed to read log file {}: {}", log_path, e);
Ok(None)
}
}
} else {
// No log path configured for this job
Ok(None)
}
}
None => {
warn!("Job {} not found in Redis", job_id);
Ok(None)
}
}
}
/// Delete a specific job by ID
pub async fn delete_job(&self, job_id: &str) -> Result<(), DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Use the shared Job struct's delete method
Job::delete_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job: {}", e)))?;
info!("Job {} deleted successfully", job_id);
Ok(())
}
/// Clear all jobs from Redis
pub async fn clear_all_jobs(&self) -> Result<usize, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Get all job IDs first
let job_ids = Job::list_all_job_ids(&mut conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))?;
let count = job_ids.len();
// Delete each job using the shared method
for job_id in job_ids {
Job::delete_from_redis(&mut conn, &job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job {}: {}", job_id, e)))?;
}
Ok(count)
}
/// Check if all prerequisites for a job are completed
pub async fn check_prerequisites_completed(&self, job_id: &str) -> Result<bool, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Load the job using the shared Job struct
let job = Job::load_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
// Check each prerequisite job status
for prereq_id in &job.prerequisites {
let status = Job::get_status(&mut conn, prereq_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to get prerequisite status: {}", e)))?;
if status != JobStatus::Finished {
return Ok(false); // Prerequisite not completed
}
}
Ok(true) // All prerequisites completed (or no prerequisites)
}
/// Update job status and check dependent jobs for readiness
pub async fn update_job_status_and_check_dependents(&self, job_id: &str, new_status: JobStatus) -> Result<Vec<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Update job status using shared Job method
Job::update_status(&mut conn, job_id, new_status.clone()).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to update job status: {}", e)))?;
let mut ready_jobs = Vec::new();
// If job finished, check dependent jobs
if new_status == JobStatus::Finished {
// Load the job to get its dependents
let job = Job::load_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
// Check each dependent job
for dependent_id in &job.dependents {
let dependent_status = Job::get_status(&mut conn, dependent_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to get dependent status: {}", e)))?;
// Only check jobs that are waiting for prerequisites
if dependent_status == JobStatus::WaitingForPrerequisites {
// Check if all prerequisites are now completed
if self.check_prerequisites_completed(dependent_id).await? {
// Update status to dispatched and add to ready jobs
Job::update_status(&mut conn, dependent_id, JobStatus::Dispatched).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to update dependent status: {}", e)))?;
ready_jobs.push(dependent_id.clone());
}
}
}
}
Ok(ready_jobs)
}
/// Dispatch jobs that are ready (have all prerequisites completed)
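    ///
    /// Typical completion-flow sketch: when a job finishes, update its status,
    /// collect any dependents that became ready, and dispatch them.
    ///
    /// ```rust,no_run
    /// # use hero_dispatcher::{Dispatcher, DispatcherError, JobStatus};
    /// # async fn on_finished(d: &Dispatcher, job_id: &str) -> Result<(), DispatcherError> {
    /// let ready = d
    ///     .update_job_status_and_check_dependents(job_id, JobStatus::Finished)
    ///     .await?;
    /// d.dispatch_ready_jobs(ready).await?;
    /// # Ok(())
    /// # }
    /// ```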
pub async fn dispatch_ready_jobs(&self, ready_job_ids: Vec<String>) -> Result<(), DispatcherError> {
for job_id in ready_job_ids {
// Get job data to determine script type and select worker
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
if let Some(script_type_str) = job_data.get("script_type") {
// Parse script type (stored as Debug format, e.g., "HeroScript")
let script_type = match script_type_str.as_str() {
"HeroScript" => ScriptType::HeroScript,
"RhaiSAL" => ScriptType::RhaiSAL,
"RhaiDSL" => ScriptType::RhaiDSL,
_ => return Err(DispatcherError::InvalidInput(format!("Unknown script type: {}", script_type_str))),
};
// Select worker and dispatch job
let worker_id = self.select_worker_for_script_type(&script_type)?;
self.start_job_using_connection(&mut conn, job_id, worker_id).await?;
}
}
Ok(())
}
}

794
core/engine/Cargo.lock generated Normal file
View File

@ -0,0 +1,794 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "ahash"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
dependencies = [
"cfg-if",
"const-random",
"getrandom 0.3.3",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bincode"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740"
dependencies = [
"bincode_derive",
"serde",
"unty",
]
[[package]]
name = "bincode_derive"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09"
dependencies = [
"virtue",
]
[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cc"
version = "1.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"wasm-bindgen",
"windows-link",
]
[[package]]
name = "const-random"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
dependencies = [
"const-random-macro",
]
[[package]]
name = "const-random-macro"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom 0.2.16",
"once_cell",
"tiny-keccak",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "engine"
version = "0.1.0"
dependencies = [
"chrono",
"heromodels",
"heromodels-derive",
"heromodels_core",
"rhai",
]
[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "heromodels"
version = "0.1.0"
dependencies = [
"bincode",
"chrono",
"heromodels-derive",
"heromodels_core",
"ourdb",
"rhai",
"rhai_client_macros",
"serde",
"serde_json",
"strum",
"strum_macros",
"tst",
"uuid",
]
[[package]]
name = "heromodels-derive"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "heromodels_core"
version = "0.1.0"
dependencies = [
"chrono",
"serde",
]
[[package]]
name = "iana-time-zone"
version = "0.1.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"log",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "instant"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "no-std-compat"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"
dependencies = [
"spin",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
dependencies = [
"portable-atomic",
]
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "portable-atomic"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.16",
]
[[package]]
name = "rhai"
version = "1.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2780e813b755850e50b178931aaf94ed24f6817f46aaaf5d21c13c12d939a249"
dependencies = [
"ahash",
"bitflags",
"instant",
"no-std-compat",
"num-traits",
"once_cell",
"rhai_codegen",
"rust_decimal",
"smallvec",
"smartstring",
"thin-vec",
]
[[package]]
name = "rhai_client_macros"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"rhai",
"syn",
]
[[package]]
name = "rhai_codegen"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "rust_decimal"
version = "1.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50"
dependencies = [
"arrayvec",
"num-traits",
]
[[package]]
name = "rustversion"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "smallvec"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
[[package]]
name = "smartstring"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
dependencies = [
"autocfg",
"static_assertions",
"version_check",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strum"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
[[package]]
name = "strum_macros"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn",
]
[[package]]
name = "syn"
version = "2.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thin-vec"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d"
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
dependencies = [
"crunchy",
]
[[package]]
name = "tst"
version = "0.1.0"
dependencies = [
"ourdb",
"thiserror",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unty"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
[[package]]
name = "uuid"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.3",
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "virtue"
version = "0.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "windows-core"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-implement"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-link"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
[[package]]
name = "windows-result"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
dependencies = [
"windows-link",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

38
core/engine/Cargo.toml Normal file
View File

@ -0,0 +1,38 @@
[package]
name = "rhailib_engine"
version = "0.1.0"
edition = "2021"
description = "Central Rhai engine for heromodels"
[dependencies]
rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }
heromodels_core = { path = "../../../db/heromodels_core" }
chrono = "0.4"
heromodels-derive = { path = "../../../db/heromodels-derive" }
rhailib_dsl = { path = "../../../rhailib/src/dsl" }
[features]
default = ["calendar", "finance"]
calendar = []
finance = []
# Flow module is now updated to use our approach to Rhai engine registration
flow = []
legal = []
projects = []
biz = []
[[example]]
name = "calendar_example"
path = "examples/calendar/example.rs"
required-features = ["calendar"]
[[example]]
name = "flow_example"
path = "examples/flow/example.rs"
required-features = ["flow"]
[[example]]
name = "finance"
path = "examples/finance/example.rs"
required-features = ["finance"]

135
core/engine/README.md Normal file
View File

@ -0,0 +1,135 @@
# HeroModels Rhai Engine (`engine`)
The `engine` crate provides a central Rhai scripting engine for the HeroModels project. It offers a unified way to interact with various HeroModels modules (like Calendar, Flow, Legal, etc.) through Rhai scripts, leveraging a shared database connection.
## Overview
This crate facilitates:
1. **Centralized Engine Creation**: A function `create_heromodels_engine` to instantiate a Rhai engine pre-configured with common settings and all enabled HeroModels modules.
2. **Modular Registration**: HeroModels modules (Calendar, Flow, etc.) can be registered with a Rhai engine based on feature flags.
3. **Script Evaluation Utilities**: Helper functions for compiling Rhai scripts into Abstract Syntax Trees (ASTs) and for evaluating scripts or ASTs.
4. **Mock Database**: Includes a `mock_db` module for testing and running examples without needing a live database.
## Core Components & Usage
### Library (`src/lib.rs`)
- **`create_heromodels_engine(db: Arc<OurDB>) -> Engine`**:
Creates and returns a new `rhai::Engine` instance. This engine is configured with default settings (e.g., max expression depths, string/array/map sizes) and then all available HeroModels modules (controlled by feature flags) are registered with it, using the provided `db` (an `Arc<OurDB>`) instance.
- **`register_all_modules(engine: &mut Engine, db: Arc<OurDB>)`**:
Registers all HeroModels modules for which features are enabled (e.g., `calendar`, `flow`, `legal`, `projects`, `biz`) with the given Rhai `engine`. Each module is passed the shared `db` instance.
- **`eval_script(engine: &Engine, script: &str) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
A utility function to directly evaluate a Rhai script string using the provided `engine`.
- **`compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>>`**:
Compiles a Rhai script string into an `AST` (Abstract Syntax Tree) for potentially faster repeated execution.
- **`run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
Runs a pre-compiled `AST` with a given `scope` using the provided `engine`.
- **`mock_db` module**:
Provides `create_mock_db()` which returns an `Arc<OurDB>` instance suitable for testing and examples. This allows scripts that interact with database functionalities to run without external database dependencies.
### Basic Usage
```rust
use std::sync::Arc;
use engine::{create_heromodels_engine, eval_script};
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB; // Actual DB type
// Create a mock database (or connect to a real one)
let db: Arc<OurDB> = create_mock_db();
// Create the Rhai engine with all enabled modules registered
let engine = create_heromodels_engine(db);
// Run a Rhai script
let script = r#"
// Example: Assuming 'calendar' feature is enabled
let cal = new_calendar("My Test Calendar");
cal.set_description("This is a test.");
print(`Created calendar: ${cal.get_name()}`);
cal.get_id() // Return the ID
"#;
match eval_script(&engine, script) {
Ok(val) => println!("Script returned: {:?}", val),
Err(err) => eprintln!("Script error: {}", err),
}
```
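For scripts that run repeatedly, compile once and reuse the AST. A minimal sketch using `compile_script` and `run_ast` as described above (error handling kept in the same style as the previous example):
```rust
use rhai::Scope;
use engine::{create_heromodels_engine, compile_script, run_ast};
use engine::mock_db::create_mock_db;

// Engine with all enabled modules registered
let engine = create_heromodels_engine(create_mock_db());

// Compile once...
match compile_script(&engine, "40 + 2") {
    Ok(ast) => {
        // ...then run the AST as many times as needed with fresh scopes.
        let mut scope = Scope::new();
        match run_ast(&engine, &ast, &mut scope) {
            Ok(val) => println!("Script returned: {:?}", val),
            Err(err) => eprintln!("Script error: {}", err),
        }
    }
    Err(err) => eprintln!("Compile error: {}", err),
}
```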
### Using Specific Modules Manually
If you need more fine-grained control or only want specific modules (and prefer not to rely solely on feature flags at compile time for `create_heromodels_engine`), you can initialize an engine and register modules manually:
```rust
use std::sync::Arc;
use rhai::Engine;
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB;
// Import the specific module registration function
use heromodels::models::calendar::register_calendar_rhai_module;
// Create a mock database
let db: Arc<OurDB> = create_mock_db();
// Create a new Rhai engine
let mut engine = Engine::new();
// Register only the calendar module
register_calendar_rhai_module(&mut engine, db.clone());
// Now you can use calendar-related functions in your scripts
let result = engine.eval::<String>(r#" let c = new_calendar("Solo Cal"); c.get_name() "#);
match result {
Ok(name) => println!("Calendar name: {}", name),
Err(err) => eprintln!("Error: {}", err),
}
```
## Examples
This crate includes several examples demonstrating how to use different HeroModels modules with Rhai. Each example typically requires its corresponding feature to be enabled.
- `calendar_example`: Working with calendars, events, and attendees (requires `calendar` feature).
- `flow_example`: Working with flows, steps, and signature requirements (requires `flow` feature).
- `finance_example`: Working with financial models (requires `finance` feature).
- *(Additional examples for `legal`, `projects`, `biz` would follow the same pattern if present).*
To run an example (e.g., `calendar_example`):
```bash
cargo run --example calendar_example --features calendar
```
*(Note: Examples in `Cargo.toml` already specify `required-features`, so simply `cargo run --example calendar_example` might suffice if those features are part of the default set or already enabled.)*
## Features
The crate uses feature flags to control which HeroModels modules are compiled and registered:
- `calendar`: Enables the Calendar module.
- `finance`: Enables the Finance module.
- `flow`: Enables the Flow module.
- `legal`: Enables the Legal module.
- `projects`: Enables the Projects module.
- `biz`: Enables the Business module.
The `default` features are `["calendar", "finance"]`. You can enable other modules by specifying them during the build or in your project's `Cargo.toml` if this `engine` crate is a dependency.
## Dependencies
Key dependencies include:
- `rhai`: The Rhai scripting engine.
- `heromodels`: Provides the core data models and database interaction logic, including the Rhai registration functions for each module.
- `heromodels_core`: Core utilities for HeroModels.
- `chrono`: For date/time utilities.
- `heromodels-derive`: Procedural macros used by HeroModels.
## License
This crate is part of the HeroModels project and shares its license.

16
core/engine/build.rs Normal file
View File

@ -0,0 +1,16 @@
fn main() {
    // Re-run this build script whenever one of the heromodels rhai.rs
    // module files changes.
    for module in ["calendar", "flow", "legal", "projects", "biz"] {
        println!(
            "cargo:rerun-if-changed=../heromodels/src/models/{}/rhai.rs",
            module
        );
    }
}

View File

@ -0,0 +1,331 @@
# Architecture of the `rhailib_engine` Crate
The `rhailib_engine` crate serves as the central Rhai scripting engine for the heromodels ecosystem. It provides a unified interface for creating, configuring, and executing Rhai scripts with access to all business domain modules through a feature-based architecture.
## Core Architecture
The engine acts as an orchestration layer that brings together the DSL modules and provides execution utilities:
```mermaid
graph TD
A[rhailib_engine] --> B[Engine Creation]
A --> C[Script Execution]
A --> D[Mock Database]
A --> E[Feature Management]
B --> B1[create_heromodels_engine]
B --> B2[Engine Configuration]
B --> B3[DSL Registration]
C --> C1[eval_script]
C --> C2[eval_file]
C --> C3[compile_script]
C --> C4[run_ast]
D --> D1[create_mock_db]
D --> D2[seed_mock_db]
D --> D3[Domain Data Seeding]
E --> E1[calendar]
E --> E2[finance]
E --> E3[flow]
E --> E4[legal]
E --> E5[projects]
E --> E6[biz]
B3 --> F[rhailib_dsl]
F --> G[All Domain Modules]
```
## Core Components
### 1. Engine Factory (`create_heromodels_engine`)
The primary entry point for creating a fully configured Rhai engine:
```rust
pub fn create_heromodels_engine() -> Engine
```
**Responsibilities:**
- Creates a new Rhai engine instance
- Configures engine limits and settings
- Registers all available DSL modules
- Returns a ready-to-use engine
**Configuration Settings:**
- **Expression Depth**: 128 levels for both expressions and functions
- **String Size Limit**: 10 MB maximum string size
- **Array Size Limit**: 10,000 elements maximum
- **Map Size Limit**: 10,000 key-value pairs maximum
### 2. Script Execution Utilities
#### Direct Script Evaluation
```rust
pub fn eval_script(engine: &Engine, script: &str) -> Result<Dynamic, Box<EvalAltResult>>
```
Executes Rhai script strings directly with immediate results.
#### File-Based Script Execution
```rust
pub fn eval_file(engine: &Engine, file_path: &Path) -> Result<Dynamic, Box<EvalAltResult>>
```
Loads and executes Rhai scripts from filesystem with proper error handling.
#### Compiled Script Execution
```rust
pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<EvalAltResult>>
pub fn run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<Dynamic, Box<EvalAltResult>>
```
Provides compilation and execution of scripts for performance optimization.
### 3. Mock Database System
#### Database Creation
```rust
pub fn create_mock_db() -> Arc<OurDB>
```
Creates an in-memory database instance for testing and examples.
#### Data Seeding
```rust
pub fn seed_mock_db(db: Arc<OurDB>)
```
Populates the mock database with representative data across all domains.
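A hedged usage sketch, assuming both functions are exported from the `mock_db` module as shown above:
```rust
use engine::mock_db::{create_mock_db, seed_mock_db};

// In-memory database instance for tests and examples.
let db = create_mock_db();

// Populate it with sample data for every enabled domain feature.
seed_mock_db(db.clone());
```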
## Feature-Based Architecture
The engine uses Cargo features to control which domain modules are included:
### Available Features
- **`calendar`** (default): Calendar and event management
- **`finance`** (default): Financial accounts, assets, and marketplace
- **`flow`**: Workflow and approval processes
- **`legal`**: Contract and legal document management
- **`projects`**: Project and task management
- **`biz`**: Business operations and entities
### Feature Integration Pattern
```rust
#[cfg(feature = "calendar")]
use heromodels::models::calendar::*;
#[cfg(feature = "finance")]
use heromodels::models::finance::*;
```
This allows for (see the feature-declaration sketch after this list):
- **Selective Compilation**: Only include needed functionality
- **Reduced Binary Size**: Exclude unused domain modules
- **Modular Deployment**: Different configurations for different use cases
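As an illustration, these flags could be declared in the engine crate's `Cargo.toml` roughly as follows; the exact wiring to `rhailib_dsl` features is an assumption, not taken from the crate itself:
```toml
[features]
# Hypothetical layout: calendar and finance are documented as the defaults.
default = ["calendar", "finance"]
calendar = ["rhailib_dsl/calendar"]
finance = ["rhailib_dsl/finance"]
flow = ["rhailib_dsl/flow"]
legal = ["rhailib_dsl/legal"]
projects = ["rhailib_dsl/projects"]
biz = ["rhailib_dsl/biz"]
```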
## Mock Database Architecture
### Database Structure
The mock database provides a complete testing environment:
```mermaid
graph LR
A[Mock Database] --> B[Calendar Data]
A --> C[Finance Data]
A --> D[Flow Data]
A --> E[Legal Data]
A --> F[Projects Data]
B --> B1[Calendars]
B --> B2[Events]
B --> B3[Attendees]
C --> C1[Accounts]
C --> C2[Assets - ERC20/ERC721]
C --> C3[Marketplace Listings]
D --> D1[Flows]
D --> D2[Flow Steps]
D --> D3[Signature Requirements]
E --> E1[Contracts]
E --> E2[Contract Revisions]
E --> E3[Contract Signers]
F --> F1[Projects]
F --> F2[Project Members]
F --> F3[Project Tags]
```
### Seeding Strategy
Each domain has its own seeding function that creates realistic test data:
#### Calendar Seeding
- Creates work calendars with descriptions
- Adds team meetings with attendees
- Sets up recurring events
#### Finance Seeding
- Creates demo trading accounts
- Generates ERC20 tokens and ERC721 NFTs
- Sets up marketplace listings with metadata
#### Flow Seeding (Feature-Gated)
- Creates document approval workflows
- Defines multi-step approval processes
- Sets up signature requirements
#### Legal Seeding (Feature-Gated)
- Creates service agreements
- Adds contract revisions and versions
- Defines contract signers and roles
#### Projects Seeding (Feature-Gated)
- Creates project instances with status tracking
- Assigns team members and priorities
- Adds project tags and categorization
## Error Handling Architecture
### Comprehensive Error Propagation
```rust
Result<Dynamic, Box<EvalAltResult>>
```
All functions return proper Rhai error types that include (see the handling sketch after this list):
- **Script Compilation Errors**: Syntax and parsing issues
- **Runtime Errors**: Execution failures and exceptions
- **File System Errors**: File reading and path resolution issues
- **Database Errors**: Mock database operation failures
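A minimal handling sketch, assuming only the `eval_script` helper and Rhai's public `EvalAltResult` type; the malformed script is deliberate:
```rust
use rhai::EvalAltResult;
use rhailib_engine::{create_heromodels_engine, eval_script};

fn main() {
    let engine = create_heromodels_engine();
    // "1 +" is intentionally malformed and fails at compilation.
    match eval_script(&engine, "1 +") {
        Ok(value) => println!("Result: {value}"),
        Err(err) => match *err {
            // ErrorSystem wraps host-side failures such as an unreadable script file.
            EvalAltResult::ErrorSystem(msg, source) => eprintln!("System error: {msg}: {source}"),
            // Everything else is a compilation or runtime error from the script itself.
            other => eprintln!("Script error: {other}"),
        },
    }
}
```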
### Error Context Enhancement
File operations include enhanced error context:
```rust
Err(Box::new(EvalAltResult::ErrorSystem(
format!("Failed to read script file: {}", file_path.display()),
Box::new(io_err),
)))
```
## Performance Considerations
### Engine Configuration
Optimized settings for production use:
- **Memory Limits**: Prevent runaway script execution
- **Depth Limits**: Avoid stack overflow from deep recursion
- **Size Limits**: Control memory usage for large data structures
### Compilation Strategy
- **AST Caching**: Compile once, execute multiple times (see the sketch after this list)
- **Scope Management**: Efficient variable scope handling
- **Module Registration**: One-time registration at engine creation
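A minimal sketch of the compile-once, run-many pattern, assuming the `compile_script` and `run_ast` helpers described above:
```rust
use rhai::Scope;
use rhailib_engine::{compile_script, create_heromodels_engine, run_ast};

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let engine = create_heromodels_engine();
    // Compile once...
    let ast = compile_script(&engine, "x * x")?;
    // ...then run the same AST repeatedly with fresh scopes.
    for x in [2_i64, 3, 4] {
        let mut scope = Scope::new();
        scope.push("x", x);
        let result = run_ast(&engine, &ast, &mut scope)?;
        println!("{x} squared is {}", result.as_int().unwrap());
    }
    Ok(())
}
```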
### Mock Database Performance
- **In-Memory Storage**: Fast access for testing scenarios
- **Temporary Directories**: Automatic cleanup after use
- **Lazy Loading**: Data seeded only when needed
## Integration Patterns
### Script Development Workflow
```rust
// 1. Create engine with all modules
let engine = create_heromodels_engine();
// 2. Execute business logic scripts
let result = eval_script(&engine, r#"
let company = new_company()
.name("Tech Startup")
.business_type("startup");
save_company(company)
"#)?;
// 3. Handle results and errors
match result {
Ok(value) => println!("Success: {:?}", value),
Err(error) => eprintln!("Error: {}", error),
}
```
### Testing Integration
```rust
// 1. Create mock database
let db = create_mock_db();
seed_mock_db(db.clone());
// 2. Create engine
let engine = create_heromodels_engine();
// 3. Test scripts against seeded data
let script = r#"
let calendars = list_calendars();
calendars.len()
"#;
let count = eval_script(&engine, script)?;
```
### File-Based Script Execution
```rust
// Execute scripts from files
let result = eval_file(&engine, Path::new("scripts/business_logic.rhai"))?;
```
## Deployment Configurations
### Minimal Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["calendar"] }
```
### Full Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", features = ["calendar", "finance", "flow", "legal", "projects", "biz"] }
```
### Custom Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["finance", "biz"] }
```
## Security Considerations
### Script Execution Limits
- **Resource Limits**: Prevent resource exhaustion attacks
- **Execution Time**: Configurable timeouts for long-running scripts (see the sketch after this list)
- **Memory Bounds**: Controlled memory allocation
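Rhai has no built-in wall-clock timeout, so the execution-time limit has to come from the host. One possible approach, shown here as a sketch rather than as part of this crate's API, uses Rhai's `on_progress` hook:
```rust
use std::time::{Duration, Instant};
use rhailib_engine::create_heromodels_engine;

fn main() {
    let mut engine = create_heromodels_engine();
    let deadline = Instant::now() + Duration::from_secs(5);
    // The callback fires periodically during evaluation; returning Some(token)
    // aborts the script with EvalAltResult::ErrorTerminated.
    engine.on_progress(move |_operations| {
        if Instant::now() >= deadline {
            Some("script timed out".into())
        } else {
            None
        }
    });
    // An unbounded loop is now terminated once the deadline passes.
    assert!(engine.eval::<i64>("loop { }").is_err());
}
```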
### Database Access
- **Mock Environment**: Safe testing without production data exposure
- **Temporary Storage**: Automatic cleanup prevents data persistence
- **Isolated Execution**: Each test run gets fresh database state
## Extensibility
### Adding New Domains
1. Create new feature flag in `Cargo.toml`
2. Add conditional imports for new models
3. Implement seeding function for test data
4. Register with DSL module system
### Custom Engine Configuration
```rust
let mut engine = Engine::new();
// Custom configuration
engine.set_max_expr_depths(256, 256);
// Register specific modules
rhailib_dsl::register_dsl_modules(&mut engine);
```
This architecture provides a robust, feature-rich foundation for Rhai script execution while maintaining flexibility, performance, and security.

View File

@ -0,0 +1,101 @@
// calendar_script.rhai
// Example Rhai script for working with Calendar models
// Constants for AttendanceStatus
const NO_RESPONSE = "NoResponse";
const ACCEPTED = "Accepted";
const DECLINED = "Declined";
const TENTATIVE = "Tentative";
// Create a new calendar using builder pattern
let my_calendar = new_calendar()
.name("Team Calendar")
.description("Calendar for team events and meetings");
print(`Created calendar: ${my_calendar.name} (${my_calendar.id})`);
// Create attendees for the event
let alice = new_attendee()
.with_contact_id(1)
.with_status(NO_RESPONSE);
let bob = new_attendee()
.with_contact_id(2)
.with_status(ACCEPTED);
let charlie = new_attendee()
.with_contact_id(3)
.with_status(TENTATIVE);
// Create a new event using builder pattern
// Note: Timestamps are in seconds since epoch
let now = timestamp_now();
let one_hour = 60 * 60;
let meeting = new_event()
.title("Weekly Sync")
.reschedule(now, now + one_hour)
.location("Conference Room A")
.description("Regular team sync meeting")
.add_attendee(alice)
.add_attendee(bob)
.add_attendee(charlie)
.save_event();
print(`Created event: ${meeting.title}`);
// Print attendees info
let attendees = meeting.attendees;
print(`Event has ${attendees.len()} attendees`);
// Update Charlie's attendee status directly
meeting.update_attendee_status(3, ACCEPTED);
print(`Updated Charlie's status to: ${ACCEPTED}`);
// Add the event to the calendar
my_calendar.add_event_to_calendar(meeting);
// Print events info
print(`Added event to calendar`);
// Save the calendar to the database
let saved_calendar = my_calendar.save_calendar();
print(`Calendar saved to database with ID: ${saved_calendar.id}`);
// Retrieve the calendar from the database using the ID from the saved calendar
let retrieved_calendar = get_calendar_by_id(saved_calendar.id);
if retrieved_calendar != () {
print(`Retrieved calendar: ${retrieved_calendar.name}`);
print(`Retrieved calendar successfully`);
} else {
print("Failed to retrieve calendar from database");
}
// List all calendars in the database
let all_calendars = list_calendars();
print("\nListing all calendars in database:");
let calendar_count = 0;
for calendar in all_calendars {
print(` - Calendar: ${calendar.name} (ID: ${calendar.id})`);
calendar_count += 1;
}
print(`Total calendars: ${calendar_count}`);
// List all events in the database
let all_events = list_events();
print("\nListing all events in database:");
let event_count = 0;
for event in all_events {
print(` - Event: ${event.title} (ID: ${event.id})`);
event_count += 1;
}
print(`Total events: ${event_count}`);
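// Clean up - delete the event
meeting.delete_event();
print(`Deleted event: ${meeting.title}`);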
// Helper function to get current timestamp
fn timestamp_now() {
    // A native timestamp_now() is registered by the host, but this script-local
    // function takes precedence; a fixed timestamp keeps the example reproducible
1685620800 // June 1, 2023, 12:00 PM
}

View File

@ -0,0 +1,70 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
mod mock;
use mock::seed_calendar_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Calendar Rhai Example");
println!("=====================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_calendar_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine();
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let manifest_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let script_path = manifest_dir
.join("examples")
.join("calendar")
.join("calendar_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@ -0,0 +1,60 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with calendar data
pub fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let calendar = Calendar::new(None, "Work Calendar".to_string())
.description("My work schedule".to_string());
// Store the calendar in the database
let (calendar_id, mut saved_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
.build();
// Store the event in the database first to get its ID
let (event_id, saved_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
saved_calendar = saved_calendar.add_event(event_id as i64);
// Store the updated calendar in the database
let (_calendar_id, final_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&saved_calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
final_calendar.name,
final_calendar.get_id()
);
println!(
" - Added event: {} (ID: {})",
saved_event.title,
saved_event.get_id()
);
}

View File

@ -0,0 +1,70 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
use std::path::Path;
mod mock;
use mock::seed_finance_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Finance Rhai Example");
println!("===================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_finance_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine();
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("finance_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@ -0,0 +1,202 @@
// finance_script.rhai
// Example Rhai script for working with Finance models
// Constants for AssetType
const NATIVE = "Native";
const ERC20 = "Erc20";
const ERC721 = "Erc721";
const ERC1155 = "Erc1155";
// Constants for ListingStatus
const ACTIVE = "Active";
const SOLD = "Sold";
const CANCELLED = "Cancelled";
const EXPIRED = "Expired";
// Constants for ListingType
const FIXED_PRICE = "FixedPrice";
const AUCTION = "Auction";
const EXCHANGE = "Exchange";
// Constants for BidStatus
const BID_ACTIVE = "Active";
const BID_ACCEPTED = "Accepted";
const BID_REJECTED = "Rejected";
const BID_CANCELLED = "Cancelled";
// Create a new account using builder pattern
let alice_account = new_account()
.name("Alice's Account")
.user_id(101)
.description("Alice's primary trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
print(`Created account: ${alice_account.get_name()} (User ID: ${alice_account.get_user_id()})`);
// Save the account to the database
let saved_alice = set_account(alice_account);
print(`Account saved to database with ID: ${saved_alice.get_id()}`);
// Create a new asset using builder pattern
let token_asset = new_asset()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(ERC20)
.decimals(18);
print(`Created asset: ${token_asset.get_name()} (${token_asset.get_amount()} ${token_asset.get_asset_type()})`);
// Save the asset to the database
let saved_token = set_asset(token_asset);
print(`Asset saved to database with ID: ${saved_token.get_id()}`);
// Add the asset to Alice's account
saved_alice = saved_alice.add_asset(saved_token.get_id());
saved_alice = set_account(saved_alice);
print(`Added asset ${saved_token.get_name()} to ${saved_alice.get_name()}`);
// Create a new NFT asset
let nft_asset = new_asset()
.name("Herocode #42")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(ERC721)
.decimals(0);
// Save the NFT to the database
let saved_nft = set_asset(nft_asset);
print(`NFT saved to database with ID: ${saved_nft.get_id()}`);
// Create Bob's account
let bob_account = new_account()
.name("Bob's Account")
.user_id(102)
.description("Bob's trading account")
.ledger("ethereum")
.address("0xfedcba0987654321fedcba0987654321fedcba09")
.pubkey("0x654321fedcba0987654321fedcba0987654321fe");
// Save Bob's account
let saved_bob = set_account(bob_account);
print(`Created and saved Bob's account with ID: ${saved_bob.get_id()}`);
// Create a listing for the NFT
let nft_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_nft.get_id())
.price(0.5)
.currency("ETH")
.listing_type(AUCTION)
.title("Rare Herocode NFT")
.description("One of a kind digital collectible")
.image_url("https://example.com/nft/42.png")
.expires_at(timestamp_now() + 86400) // 24 hours from now
.add_tag("rare")
.add_tag("collectible")
.add_tag("digital art")
.set_listing();
// Save the listing
print(`Created listing: ${nft_listing.get_title()} (ID: ${nft_listing.get_id()})`);
print(`Listing status: ${nft_listing.get_status()}, Type: ${nft_listing.get_listing_type()}`);
print(`Listing price: ${nft_listing.get_price()} ${nft_listing.get_currency()}`);
// Create a bid from Bob
let bob_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_bob.get_id())
.amount(1.5)
.currency("ETH")
.set_bid();
// Save the bid
print(`Created bid from ${saved_bob.get_name()} for ${bob_bid.get_amount()} ${bob_bid.get_currency()}`);
// Add the bid to the listing
nft_listing.add_bid(bob_bid);
nft_listing.set_listing();
print(`Added bid to listing ${nft_listing.get_title()}`);
// Create another bid with higher amount
let charlie_account = new_account()
.name("Charlie's Account")
.user_id(103)
.description("Charlie's trading account")
.ledger("ethereum")
.address("0x1122334455667788991122334455667788990011")
.pubkey("0x8877665544332211887766554433221188776655");
let saved_charlie = set_account(charlie_account);
print(`Created and saved Charlie's account with ID: ${saved_charlie.get_id()}`);
let charlie_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_charlie.get_id())
.amount(2.5)
.currency("ETH")
.set_bid();
print(`Created higher bid from ${saved_charlie.get_name()} for ${charlie_bid.get_amount()} ${charlie_bid.get_currency()}`);
// Add the higher bid to the listing
nft_listing.add_bid(charlie_bid)
.set_listing();
print(`Added higher bid to listing ${nft_listing.get_title()}`);
nft_listing.sale_price(2.5)
.set_listing();
// Complete the sale to the highest bidder (Charlie)
nft_listing.complete_sale(saved_charlie.get_id())
.set_listing();
print(`Completed sale of ${nft_listing.get_title()} to ${saved_charlie.get_name()}`);
print(`New listing status: ${nft_listing.get_status()}`);
// Retrieve the listing from the database
let retrieved_listing = get_listing_by_id(nft_listing.get_id());
print(`Retrieved listing: ${retrieved_listing.get_title()} (Status: ${retrieved_listing.get_status()})`);
// Create a fixed price listing
let token_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_token.get_id())
.price(100.0)
.currency("USDC")
.listing_type(FIXED_PRICE)
.title("HERO Tokens for Sale")
.description("100 HERO tokens at fixed price")
.set_listing();
// Save the fixed price listing
print(`Created fixed price listing: ${token_listing.get_title()} (ID: ${token_listing.get_id()})`);
// Cancel the listing
token_listing.cancel();
token_listing.set_listing();
print(`Cancelled listing: ${token_listing.get_title()}`);
print(`Listing status: ${token_listing.get_status()}`);
// Print summary of all accounts
print("\nAccount Summary:");
print(`Alice (ID: ${saved_alice.get_id()}): ${saved_alice.get_assets().len()} assets`);
print(`Bob (ID: ${saved_bob.get_id()}): ${saved_bob.get_assets().len()} assets`);
print(`Charlie (ID: ${saved_charlie.get_id()}): ${saved_charlie.get_assets().len()} assets`);
// Print summary of all listings
print("\nListing Summary:");
print(`NFT Auction (ID: ${nft_listing.get_id()}): ${nft_listing.get_status()}`);
print(`Token Sale (ID: ${token_listing.get_id()}): ${token_listing.get_status()}`);
// Print summary of all bids
print("\nBid Summary:");
print(`Bob's bid: ${bob_bid.get_amount()} ${bob_bid.get_currency()} (Status: ${bob_bid.get_status()})`);
print(`Charlie's bid: ${charlie_bid.get_amount()} ${charlie_bid.get_currency()} (Status: ${charlie_bid.get_status()})`);

View File

@ -0,0 +1,111 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with finance data
pub fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, mut updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
updated_account = updated_account.add_asset(token_id);
updated_account = updated_account.add_asset(nft_id);
// Update the account in the database
let (_, final_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&updated_account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("https://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
final_account.name,
final_account.get_id()
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name,
updated_token.get_id()
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name,
updated_nft.get_id()
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title,
updated_listing.get_id()
);
}

View File

@ -0,0 +1,162 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use rhai::Scope;
use std::path::Path;
mod mock;
use mock::seed_flow_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Flow Rhai Example");
println!("=================");
// Create a mock database
let db = create_mock_db();
// Seed the database with initial data
seed_flow_data(db.clone());
// Create the Rhai engine with all modules registered
let engine = create_heromodels_engine();
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("flow_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
}
Err(err) => {
eprintln!("\nError running script: {}", err);
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)));
}
}
// Demonstrate direct Rust interaction with the Rhai-exposed flow functionality
println!("\nDirect Rust interaction with Rhai-exposed flow functionality");
println!("----------------------------------------------------------");
// Create a new scope
let mut scope = Scope::new();
// Create a new flow using the Rhai function
let result = engine.eval::<Flow>("new_flow(0, \"Direct Rust Flow\")");
match result {
Ok(mut flow) => {
println!(
"Created flow from Rust: {} (ID: {})",
flow.name,
flow.get_id()
);
// Set flow status using the builder pattern
flow = flow.status("active".to_string());
println!("Set flow status to: {}", flow.status);
// Create a new flow step using the Rhai function
let result = engine.eval::<FlowStep>("new_flow_step(0, 1)");
match result {
Ok(mut step) => {
println!(
"Created flow step from Rust: Step Order {} (ID: {})",
step.step_order,
step.get_id()
);
// Set step description
step = step.description("Direct Rust Step".to_string());
println!(
"Set step description to: {}",
step.description
.clone()
.unwrap_or_else(|| "None".to_string())
);
// Create a signature requirement using the Rhai function
let result = engine.eval::<SignatureRequirement>(
"new_signature_requirement(0, 1, \"Direct Rust Signer\", \"Please sign this document\")"
);
match result {
Ok(req) => {
println!(
"Created signature requirement from Rust: Public Key {} (ID: {})",
req.public_key,
req.get_id()
);
// Add the step to the flow using the builder pattern
flow = flow.add_step(step);
println!(
"Added step to flow. Flow now has {} steps",
flow.steps.len()
);
// Save the flow to the database using the Rhai function
let save_flow_script = "fn save_it(f) { return db::save_flow(f); }";
let save_flow_ast = engine.compile(save_flow_script).unwrap();
let result = engine.call_fn::<Flow>(
&mut scope,
&save_flow_ast,
"save_it",
(flow,),
);
match result {
Ok(saved_flow) => {
println!(
"Saved flow to database with ID: {}",
saved_flow.get_id()
);
}
Err(err) => eprintln!("Error saving flow: {}", err),
}
// Save the signature requirement to the database using the Rhai function
let save_req_script =
"fn save_it(r) { return db::save_signature_requirement(r); }";
let save_req_ast = engine.compile(save_req_script).unwrap();
let result = engine.call_fn::<SignatureRequirement>(
&mut scope,
&save_req_ast,
"save_it",
(req,),
);
match result {
Ok(saved_req) => {
println!(
"Saved signature requirement to database with ID: {}",
saved_req.get_id()
);
}
Err(err) => {
eprintln!("Error saving signature requirement: {}", err)
}
}
}
Err(err) => eprintln!("Error creating signature requirement: {}", err),
}
}
Err(err) => eprintln!("Error creating flow step: {}", err),
}
}
Err(err) => eprintln!("Error creating flow: {}", err),
}
Ok(())
}

View File

@ -0,0 +1,111 @@
// flow_script.rhai
// Example Rhai script for working with Flow models
// Constants for Flow status
const STATUS_DRAFT = "draft";
const STATUS_ACTIVE = "active";
const STATUS_COMPLETED = "completed";
const STATUS_CANCELLED = "cancelled";
// Create a new flow using builder pattern
let my_flow = new_flow(0, "flow-123");
name(my_flow, "Document Approval Flow");
status(my_flow, STATUS_DRAFT);
print(`Created flow: ${get_flow_name(my_flow)} (ID: ${get_flow_id(my_flow)})`);
print(`Status: ${get_flow_status(my_flow)}`);
// Create flow steps using builder pattern
let step1 = new_flow_step(0, 1);
description(step1, "Initial review by legal team");
status(step1, STATUS_DRAFT);
let step2 = new_flow_step(0, 2);
description(step2, "Approval by department head");
status(step2, STATUS_DRAFT);
let step3 = new_flow_step(0, 3);
description(step3, "Final signature by CEO");
status(step3, STATUS_DRAFT);
// Create signature requirements using builder pattern
let req1 = new_signature_requirement(0, get_flow_step_id(step1), "legal@example.com", "Please review this document");
signed_by(req1, "Legal Team");
status(req1, STATUS_DRAFT);
let req2 = new_signature_requirement(0, get_flow_step_id(step2), "dept@example.com", "Department approval needed");
signed_by(req2, "Department Head");
status(req2, STATUS_DRAFT);
let req3 = new_signature_requirement(0, get_flow_step_id(step3), "ceo@example.com", "Final approval required");
signed_by(req3, "CEO");
status(req3, STATUS_DRAFT);
print(`Created flow steps with signature requirements`);
// Add steps to the flow
let flow_with_steps = my_flow;
add_step(flow_with_steps, step1);
add_step(flow_with_steps, step2);
add_step(flow_with_steps, step3);
print(`Added steps to flow. Flow now has ${get_flow_steps(flow_with_steps).len()} steps`);
// Activate the flow
let active_flow = flow_with_steps;
status(active_flow, STATUS_ACTIVE);
print(`Updated flow status to: ${get_flow_status(active_flow)}`);
// Save the flow to the database
let saved_flow = db::save_flow(active_flow);
print(`Flow saved to database with ID: ${get_flow_id(saved_flow)}`);
// Save signature requirements to the database
let saved_req1 = db::save_signature_requirement(req1);
let saved_req2 = db::save_signature_requirement(req2);
let saved_req3 = db::save_signature_requirement(req3);
print(`Signature requirements saved to database with IDs: ${get_signature_requirement_id(saved_req1)}, ${get_signature_requirement_id(saved_req2)}, ${get_signature_requirement_id(saved_req3)}`);
// Retrieve the flow from the database
let retrieved_flow = db::get_flow_by_id(get_flow_id(saved_flow));
print(`Retrieved flow: ${get_flow_name(retrieved_flow)}`);
print(`It has ${get_flow_steps(retrieved_flow).len()} steps`);
// Complete the flow
let completed_flow = retrieved_flow;
status(completed_flow, STATUS_COMPLETED);
print(`Updated retrieved flow status to: ${get_flow_status(completed_flow)}`);
// Save the updated flow
db::save_flow(completed_flow);
print("Updated flow saved to database");
// List all flows in the database
let all_flows = db::list_flows();
print("\nListing all flows in database:");
let flow_count = 0;
for flow in all_flows {
print(` - Flow: ${get_flow_name(flow)} (ID: ${get_flow_id(flow)})`);
flow_count += 1;
}
print(`Total flows: ${flow_count}`);
// List all signature requirements
let all_reqs = db::list_signature_requirements();
print("\nListing all signature requirements in database:");
let req_count = 0;
for req in all_reqs {
print(` - Requirement for step ${get_signature_requirement_flow_step_id(req)} (ID: ${get_signature_requirement_id(req)})`);
req_count += 1;
}
print(`Total signature requirements: ${req_count}`);
// Clean up - delete the flow
db::delete_flow(get_flow_id(completed_flow));
print(`Deleted flow with ID: ${get_flow_id(completed_flow)}`);
// Clean up - delete signature requirements
db::delete_signature_requirement(get_signature_requirement_id(saved_req1));
db::delete_signature_requirement(get_signature_requirement_id(saved_req2));
db::delete_signature_requirement(get_signature_requirement_id(saved_req3));
print("Deleted all signature requirements");

View File

@ -0,0 +1,65 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
pub fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let flow = Flow::new(None, "Onboarding Flow".to_string())
.description("New employee onboarding process".to_string())
.status("active".to_string());
// Create a signature requirement first
let sig_req = SignatureRequirement::new(
None,
1,
"hr_manager_pubkey".to_string(),
"Please sign the employment contract".to_string(),
);
let (sig_req_id, saved_sig_req) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&sig_req)
.expect("Failed to store signature requirement");
// Create a flow step and add the signature requirement
let step = FlowStep::new(None, 1)
.description("Complete HR paperwork".to_string())
.add_signature_requirement(sig_req_id);
let (step_id, saved_step) = db
.collection::<FlowStep>()
.expect("Failed to get FlowStep collection")
.set(&step)
.expect("Failed to store flow step");
// Add the step to the flow
let flow_with_step = flow.add_step(step_id);
// Store the flow
let (_flow_id, saved_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow_with_step)
.expect("Failed to store flow");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
saved_flow.name,
saved_flow.get_id()
);
println!(
" - Added step with order: {} (ID: {})",
saved_step.step_order,
saved_step.get_id()
);
println!(
" - Added signature requirement for: {} (ID: {})",
saved_sig_req.public_key,
saved_sig_req.get_id()
);
}

305
core/engine/src/lib.rs Normal file
View File

@ -0,0 +1,305 @@
//! # Rhailib Engine
//!
//! The central Rhai scripting engine for the heromodels ecosystem. This crate provides
//! a unified interface for creating, configuring, and executing Rhai scripts with access
//! to all business domain modules.
//!
//! ## Features
//!
//! - **Unified Engine Creation**: Pre-configured Rhai engine with all DSL modules
//! - **Script Execution Utilities**: Direct evaluation, file-based execution, and AST compilation
//! - **Mock Database System**: Complete testing environment with seeded data
//! - **Feature-Based Architecture**: Modular compilation based on required domains
//!
//! ## Quick Start
//!
//! ```rust,no_run
//! use rhailib_engine::{create_heromodels_engine, eval_script};
//!
//! // Create a fully configured engine
//! let engine = create_heromodels_engine();
//!
//! // Execute a business logic script (`new_company` requires the `biz` feature)
//! let result = eval_script(&engine, r#"
//!     let company = new_company()
//!         .name("Acme Corp")
//!         .business_type("global");
//!     company.name
//! "#).expect("script should execute");
//!
//! println!("Company name: {}", result);
//! ```
//!
//! ## Available Features
//!
//! - `calendar` (default): Calendar and event management
//! - `finance` (default): Financial accounts, assets, and marketplace
//! - `flow`: Workflow and approval processes
//! - `legal`: Contract and legal document management
//! - `projects`: Project and task management
//! - `biz`: Business operations and entities
use rhai::{Engine, EvalAltResult, Scope, AST};
use rhailib_dsl;
use std::fs;
use std::path::Path;
/// Mock database module for testing and examples
pub mod mock_db;
/// Creates a fully configured Rhai engine with all available DSL modules.
///
/// This function creates a new Rhai engine instance, configures it with appropriate
/// limits and settings, and registers all available business domain modules based
/// on enabled features.
///
/// # Engine Configuration
///
/// The engine is configured with the following limits:
/// - **Expression Depth**: 128 levels for both expressions and functions
/// - **String Size**: 10 MB maximum
/// - **Array Size**: 10,000 elements maximum
/// - **Map Size**: 10,000 key-value pairs maximum
///
/// # Registered Modules
///
/// All enabled DSL modules are automatically registered, including:
/// - Business operations (companies, products, sales, shareholders)
/// - Financial models (accounts, assets, marketplace)
/// - Content management (collections, images, PDFs, books)
/// - Workflow management (flows, steps, signatures)
/// - And more based on enabled features
///
/// # Returns
///
/// A fully configured `Engine` instance ready for script execution.
///
/// # Example
///
/// ```rust,no_run
/// use rhailib_engine::create_heromodels_engine;
///
/// let engine = create_heromodels_engine();
///
/// // Engine is now ready to execute scripts with access to all DSL functions
/// let result = engine.eval::<String>(r#"
/// let company = new_company().name("Test Corp");
/// company.name
/// "#).unwrap();
/// assert_eq!(result, "Test Corp");
/// ```
pub fn create_heromodels_engine() -> Engine {
let mut engine = Engine::new();
// Configure engine settings
engine.set_max_expr_depths(128, 128);
engine.set_max_string_size(10 * 1024 * 1024); // 10 MB
engine.set_max_array_size(10 * 1024); // 10K elements
engine.set_max_map_size(10 * 1024); // 10K elements
// Register all heromodels Rhai modules
rhailib_dsl::register_dsl_modules(&mut engine);
engine
}
// /// Register all heromodels Rhai modules with the engine
// pub fn register_all_modules(engine: &mut Engine, db: Arc<OurDB>) {
// // Register the calendar module if the feature is enabled
// heromodels::models::access::register_access_rhai_module(engine, db.clone());
// #[cfg(feature = "calendar")]
// heromodels::models::calendar::register_calendar_rhai_module(engine, db.clone());
// heromodels::models::contact::register_contact_rhai_module(engine, db.clone());
// heromodels::models::library::register_library_rhai_module(engine, db.clone());
// heromodels::models::circle::register_circle_rhai_module(engine, db.clone());
// // Register the flow module if the feature is enabled
// #[cfg(feature = "flow")]
// heromodels::models::flow::register_flow_rhai_module(engine, db.clone());
// // // Register the finance module if the feature is enabled
// // #[cfg(feature = "finance")]
// // heromodels::models::finance::register_finance_rhai_module(engine, db.clone());
// // Register the legal module if the feature is enabled
// #[cfg(feature = "legal")]
// heromodels::models::legal::register_legal_rhai_module(engine, db.clone());
// // Register the projects module if the feature is enabled
// #[cfg(feature = "projects")]
// heromodels::models::projects::register_projects_rhai_module(engine, db.clone());
// // Register the biz module if the feature is enabled
// #[cfg(feature = "biz")]
// heromodels::models::biz::register_biz_rhai_module(engine, db.clone());
// println!("Heromodels Rhai modules registered successfully.");
// }
/// Evaluates a Rhai script string and returns the result.
///
/// This function provides a convenient way to execute Rhai script strings directly
/// using the provided engine. It's suitable for one-off script execution or when
/// the script content is dynamically generated.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for script execution
/// * `script` - The Rhai script content as a string
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - Script compilation or execution error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, eval_script};
///
/// let engine = create_heromodels_engine();
/// let result = eval_script(&engine, r#"
/// let x = 42;
/// let y = 8;
/// x + y
/// "#)?;
/// assert_eq!(result.as_int().unwrap(), 50);
/// ```
pub fn eval_script(
engine: &Engine,
script: &str,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
engine.eval::<rhai::Dynamic>(script)
}
/// Evaluates a Rhai script from a file and returns the result.
///
/// This function reads a Rhai script from the filesystem and executes it using
/// the provided engine. It handles file reading errors gracefully and provides
/// meaningful error messages.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for script execution
/// * `file_path` - Path to the Rhai script file
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - File reading, compilation, or execution error
///
/// # Example
///
/// ```rust,no_run
/// use rhailib_engine::{create_heromodels_engine, eval_file};
/// use std::path::Path;
///
/// let engine = create_heromodels_engine();
/// let result = eval_file(&engine, Path::new("scripts/business_logic.rhai"))
///     .expect("script file should load and run");
/// println!("Script result: {:?}", result);
/// ```
///
/// # Error Handling
///
/// File reading errors are converted to Rhai `ErrorSystem` variants with
/// descriptive messages including the file path that failed to load.
pub fn eval_file(
engine: &Engine,
file_path: &Path,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
match fs::read_to_string(file_path) {
Ok(script_content) => engine.eval::<rhai::Dynamic>(&script_content),
Err(io_err) => Err(Box::new(EvalAltResult::ErrorSystem(
format!("Failed to read script file: {}", file_path.display()),
Box::new(io_err),
))),
}
}
/// Compiles a Rhai script string into an Abstract Syntax Tree (AST).
///
/// This function compiles a Rhai script into an AST that can be executed multiple
/// times with different scopes. This is more efficient than re-parsing the script
/// for each execution when the same script needs to be run repeatedly.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for compilation
/// * `script` - The Rhai script content as a string
///
/// # Returns
///
/// * `Ok(AST)` - The compiled Abstract Syntax Tree
/// * `Err(Box<EvalAltResult>)` - Script compilation error
///
/// # Example
///
/// ```rust,no_run
/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();
/// let ast = compile_script(&engine, r#"
///     let company = new_company().name(company_name);
///     save_company(company)
/// "#).expect("script should compile");
///
/// // Execute the compiled script multiple times with different variables
/// let mut scope1 = Scope::new();
/// scope1.push("company_name", "Acme Corp".to_string());
/// let result1 = run_ast(&engine, &ast, &mut scope1).expect("script should run");
///
/// let mut scope2 = Scope::new();
/// scope2.push("company_name", "Tech Startup".to_string());
/// let result2 = run_ast(&engine, &ast, &mut scope2).expect("script should run");
/// ```
pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>> {
Ok(engine.compile(script)?)
}
/// Executes a compiled Rhai script AST with the provided scope.
///
/// This function runs a pre-compiled AST using the provided engine and scope.
/// The scope can contain variables and functions that will be available to
/// the script during execution.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for execution
/// * `ast` - The compiled Abstract Syntax Tree to execute
/// * `scope` - Mutable scope containing variables and functions for the script
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - Script execution error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();
/// let ast = compile_script(&engine, "x + y").unwrap();
///
/// let mut scope = Scope::new();
/// scope.push("x", 10_i64);
/// scope.push("y", 32_i64);
///
/// let result = run_ast(&engine, &ast, &mut scope).unwrap();
/// assert_eq!(result.as_int().unwrap(), 42);
/// ```
///
/// # Performance Notes
///
/// Using compiled ASTs is significantly more efficient than re-parsing scripts
/// for repeated execution, especially for complex scripts or when executing
/// the same logic with different input parameters.
pub fn run_ast(
engine: &Engine,
ast: &AST,
scope: &mut Scope,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
engine.eval_ast_with_scope(scope, ast)
}

374
core/engine/src/mock_db.rs Normal file
View File

@ -0,0 +1,374 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db}; // Import both Db and Collection traits
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model; // Import Model trait to use build method
use std::env;
use std::sync::Arc;
// Import finance models
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
// Conditionally import other modules based on features
#[cfg(feature = "flow")]
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
#[cfg(feature = "legal")]
use heromodels::models::legal::{
Contract, ContractRevision, ContractSigner, ContractStatus, SignerStatus,
};
#[cfg(feature = "projects")]
use heromodels::models::projects::{ItemType, Priority, Project, Status as ProjectStatus};
/// Create a mock in-memory database for examples
pub fn create_mock_db() -> Arc<OurDB> {
// Create a temporary directory for the database files
let temp_dir = env::temp_dir().join("engine_examples");
std::fs::create_dir_all(&temp_dir).expect("Failed to create temp directory");
// Create a new OurDB instance with reset=true to ensure it's clean
let db = OurDB::new(temp_dir, true).expect("Failed to create OurDB instance");
Arc::new(db)
}
/// Seed the mock database with some initial data for all modules
pub fn seed_mock_db(db: Arc<OurDB>) {
// Seed calendar data
seed_calendar_data(db.clone());
// Seed finance data
seed_finance_data(db.clone());
// Seed flow data if the feature is enabled
#[cfg(feature = "flow")]
seed_flow_data(db.clone());
// Seed legal data if the feature is enabled
#[cfg(feature = "legal")]
seed_legal_data(db.clone());
// Seed projects data if the feature is enabled
#[cfg(feature = "projects")]
seed_projects_data(db.clone());
println!("Mock database seeded with initial data for all enabled modules.");
}
/// Seed the mock database with calendar data
fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let mut calendar = Calendar::new(None, "Work Calendar".to_string());
calendar.description = Some("My work schedule".to_string());
// Store the calendar in the database
let (_calendar_id, mut calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
// Use the builder pattern for Event
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
// .add_attendee(Attendee::new(1))
// .add_attendee(Attendee::new(2))
.build();
// // Add attendees to the event using the builder pattern
// let attendee1 = Attendee::new(1);
// let attendee2 = Attendee::new(2);
// // Add attendees using the builder pattern
// event = event.add_attendee(attendee1);
// event = event.add_attendee(attendee2);
// Call build and capture the returned value
// let event = event.build();
// Store the event in the database first to get its ID
let (event_id, updated_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
calendar = calendar.add_event(event_id as i64);
// Store the calendar in the database
let (_calendar_id, updated_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
updated_calendar.name, updated_calendar.base_data.id
);
println!(
" - Added event: {} (ID: {})",
updated_event.title, updated_event.base_data.id
);
}
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let mut flow = Flow::new(0, "Document Approval".to_string());
// Set flow properties using the builder pattern
flow = flow.status("draft".to_string());
flow = flow.name("Document Approval Flow".to_string());
// Create flow steps
let mut step1 = FlowStep::new(0, 1);
step1 = step1.description("Initial review by legal team".to_string());
step1 = step1.status("pending".to_string());
let mut step2 = FlowStep::new(0, 2);
step2 = step2.description("Approval by department head".to_string());
step2 = step2.status("pending".to_string());
// Add signature requirements
let req1 = SignatureRequirement::new(
0,
1,
"Legal Team".to_string(),
"Please review this document".to_string(),
);
let req2 = SignatureRequirement::new(
0,
2,
"Department Head".to_string(),
"Please approve this document".to_string(),
);
// Add steps to flow
flow = flow.add_step(step1);
flow = flow.add_step(step2);
// Store in the database
let (_, updated_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow)
.expect("Failed to store flow");
// Store signature requirements in the database
let (_, updated_req1) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req1)
.expect("Failed to store signature requirement");
let (_, updated_req2) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req2)
.expect("Failed to store signature requirement");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
updated_flow.name, updated_flow.base_data.id
);
println!(" - Added {} steps", updated_flow.steps.len());
println!(
" - Added signature requirements with IDs: {} and {}",
updated_req1.base_data.id, updated_req2.base_data.id
);
}
/// Seed the mock database with legal data
#[cfg(feature = "legal")]
fn seed_legal_data(db: Arc<OurDB>) {
// Create a contract
let mut contract = Contract::new(None, "Service Agreement".to_string());
contract.description = Some("Agreement for software development services".to_string());
contract.status = ContractStatus::Draft;
// Create a revision
let revision = ContractRevision::new(
None,
"Initial draft".to_string(),
"https://example.com/contract/v1".to_string(),
);
// Create signers
let signer1 = ContractSigner::new(None, 1, "Client".to_string());
let signer2 = ContractSigner::new(None, 2, "Provider".to_string());
// Add revision and signers to contract
contract.add_revision(revision);
contract.add_signer(signer1);
contract.add_signer(signer2);
// Store in the database
let (_, updated_contract) = db
.collection::<Contract>()
.expect("Failed to get Contract collection")
.set(&contract)
.expect("Failed to store contract");
println!("Mock database seeded with legal data:");
println!(
" - Added contract: {} (ID: {})",
updated_contract.name, updated_contract.base_data.id
);
println!(
" - Added {} revisions and {} signers",
updated_contract.revisions.len(),
updated_contract.signers.len()
);
}
/// Seed the mock database with projects data
#[cfg(feature = "projects")]
fn seed_projects_data(db: Arc<OurDB>) {
// Create a project
let mut project = Project::new(None, "Website Redesign".to_string());
project.description = Some("Redesign the company website".to_string());
project.status = ProjectStatus::InProgress;
project.priority = Priority::High;
// Add members and tags
project.add_member_id(1);
project.add_member_id(2);
project.add_tag("design".to_string());
project.add_tag("web".to_string());
// Store in the database
let (_, updated_project) = db
.collection::<Project>()
.expect("Failed to get Project collection")
.set(&project)
.expect("Failed to store project");
println!("Mock database seeded with projects data:");
println!(
" - Added project: {} (ID: {})",
updated_project.name, updated_project.base_data.id
);
println!(
" - Status: {}, Priority: {}",
updated_project.status, updated_project.priority
);
println!(
" - Added {} members and {} tags",
updated_project.member_ids.len(),
updated_project.tags.len()
);
}
/// Seed the mock database with finance data
fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let mut account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
account = updated_account.add_asset(token_id);
account = account.add_asset(nft_id);
// Update the account in the database
let (_, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("hcttps://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
updated_account.name, updated_account.base_data.id
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name, updated_token.base_data.id
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name, updated_nft.base_data.id
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title, updated_listing.base_data.id
);
}

20
core/examples/Cargo.toml Normal file
View File

@ -0,0 +1,20 @@
[package]
name = "hero_examples"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "supervisor_worker_demo"
path = "supervisor_worker_demo.rs"
[dependencies]
hero_dispatcher = { path = "../dispatcher" }
hero_job = { path = "../job" }
tokio = { version = "1.0", features = ["full"] }
redis = { version = "0.25", features = ["tokio-comp"] }
serde_json = "1.0"
log = "0.4"
env_logger = "0.10"
colored = "2.0"
uuid = { version = "1.0", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }

View File

@ -0,0 +1,365 @@
use colored::*;
use hero_dispatcher::{DispatcherBuilder, ScriptType, JobStatus};
use log::warn;
use std::process::Stdio;
use std::time::Duration;
use tokio::process::{Child, Command as TokioCommand};
use tokio::time::sleep;
/// Supervisor manages worker lifecycle and job execution
pub struct Supervisor {
dispatcher: hero_dispatcher::Dispatcher,
worker_processes: Vec<WorkerProcess>,
redis_url: String,
}
/// Represents a managed worker process
pub struct WorkerProcess {
id: String,
script_type: ScriptType,
process: Option<Child>,
binary_path: String,
}
impl Supervisor {
/// Create a new supervisor with dispatcher configuration
pub async fn new(redis_url: String) -> Result<Self, Box<dyn std::error::Error>> {
let dispatcher = DispatcherBuilder::new()
.caller_id("supervisor")
.context_id("demo-context")
.redis_url(&redis_url)
.heroscript_workers(vec!["hero-worker-1".to_string()])
.rhai_sal_workers(vec!["rhai-sal-worker-1".to_string()])
.rhai_dsl_workers(vec!["rhai-dsl-worker-1".to_string()])
.build()?;
Ok(Self {
dispatcher,
worker_processes: Vec::new(),
redis_url,
})
}
/// Start a worker for a specific script type
pub async fn start_worker(&mut self, script_type: ScriptType, worker_binary_path: &str) -> Result<(), Box<dyn std::error::Error>> {
let worker_id = match script_type {
ScriptType::HeroScript => "hero-worker-1",
ScriptType::RhaiSAL => "rhai-sal-worker-1",
ScriptType::RhaiDSL => "rhai-dsl-worker-1",
};
println!("{}", format!("🚀 Starting {} worker: {}", script_type.as_str(), worker_id).green().bold());
// Check if worker binary exists
if !std::path::Path::new(worker_binary_path).exists() {
return Err(format!("Worker binary not found at: {}", worker_binary_path).into());
}
// Start the worker process
let mut cmd = TokioCommand::new(worker_binary_path);
cmd.arg("--worker-id").arg(worker_id)
.arg("--redis-url").arg(&self.redis_url)
.arg("--no-timestamp")
.stdout(Stdio::piped())
.stderr(Stdio::piped());
let process = cmd.spawn()?;
let worker_process = WorkerProcess {
id: worker_id.to_string(),
script_type,
process: Some(process),
binary_path: worker_binary_path.to_string(),
};
self.worker_processes.push(worker_process);
// Give worker time to start up
sleep(Duration::from_millis(500)).await;
println!("{}", format!("✅ Worker {} started successfully", worker_id).green());
Ok(())
}
/// Stop all workers
pub async fn stop_all_workers(&mut self) {
println!("{}", "🛑 Stopping all workers...".yellow().bold());
for worker in &mut self.worker_processes {
if let Some(mut process) = worker.process.take() {
println!("Stopping worker: {}", worker.id);
// Try graceful shutdown first
if let Err(e) = process.kill().await {
warn!("Failed to kill worker {}: {}", worker.id, e);
}
// Wait for process to exit
if let Ok(status) = process.wait().await {
println!("Worker {} exited with status: {:?}", worker.id, status);
} else {
warn!("Failed to wait for worker {} to exit", worker.id);
}
}
}
self.worker_processes.clear();
println!("{}", "✅ All workers stopped".green());
}
/// Submit a job and return the job ID
pub async fn submit_job(&self, script_type: ScriptType, script: &str) -> Result<String, Box<dyn std::error::Error>> {
let job = self.dispatcher
.new_job()
.script_type(script_type.clone())
.script(script)
.timeout(Duration::from_secs(30))
.build()?;
let job_id = job.id.clone();
self.dispatcher.create_job(&job).await?;
println!("{}", format!("📝 Job {} submitted for {}", job_id, script_type.as_str()).cyan());
Ok(job_id)
}
/// Wait for job completion and return result
pub async fn wait_for_job_completion(&self, job_id: &str, timeout_duration: Duration) -> Result<String, Box<dyn std::error::Error>> {
let start_time = std::time::Instant::now();
println!("{}", format!("⏳ Waiting for job {} to complete...", job_id).yellow());
loop {
if start_time.elapsed() > timeout_duration {
return Err("Job execution timeout".into());
}
// Check job status using dispatcher methods
match self.dispatcher.get_job_status(job_id).await {
Ok(status) => {
match status {
JobStatus::Finished => {
if let Ok(Some(result)) = self.dispatcher.get_job_output(job_id).await {
println!("{}", format!("✅ Job {} completed successfully", job_id).green());
return Ok(result);
}
}
JobStatus::Error => {
return Err("Job failed".into());
}
_ => {
// Job still running or waiting
}
}
}
Err(_) => {
// Job not found or error checking status
}
}
sleep(Duration::from_millis(100)).await;
}
}
/// List all jobs
pub async fn list_jobs(&self) -> Result<Vec<String>, Box<dyn std::error::Error>> {
self.dispatcher.list_jobs().await.map_err(|e| e.into())
}
/// Clear all jobs
pub async fn clear_all_jobs(&self) -> Result<usize, Box<dyn std::error::Error>> {
self.dispatcher.clear_all_jobs().await.map_err(|e| e.into())
}
/// Get worker status
pub fn get_worker_status(&self) -> Vec<(String, ScriptType, bool)> {
self.worker_processes.iter().map(|w| {
(w.id.clone(), w.script_type.clone(), w.process.is_some())
}).collect()
}
}
impl Drop for Supervisor {
fn drop(&mut self) {
// Warn if any workers are still tracked when the supervisor is dropped; cleanup is done via stop_all_workers()
if !self.worker_processes.is_empty() {
println!("{}", "⚠️ Supervisor dropping - stopping remaining workers".yellow());
}
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::Builder::from_default_env()
.filter_level(log::LevelFilter::Info)
.format_timestamp(None)
.init();
println!("{}", "🎯 Hero Supervisor-Worker End-to-End Demo".blue().bold());
println!("{}", "==========================================".blue());
println!();
// Configuration
let redis_url = "redis://localhost:6379".to_string();
let worker_binary_path = "../../target/debug/worker";
// Check if worker binary exists
if !std::path::Path::new(worker_binary_path).exists() {
println!("{}", "❌ Worker binary not found!".red().bold());
println!("Please build the worker first:");
println!(" cd ../worker && cargo build");
return Err("Worker binary not found".into());
}
// Create supervisor
println!("{}", "🏗️ Creating supervisor...".cyan());
let mut supervisor = Supervisor::new(redis_url).await?;
println!("{}", "✅ Supervisor created successfully".green());
println!();
// Clear any existing jobs
let cleared_count = supervisor.clear_all_jobs().await?;
if cleared_count > 0 {
println!("{}", format!("🧹 Cleared {} existing jobs", cleared_count).yellow());
}
// Demo 1: Start a HeroScript worker
println!("{}", "📋 Demo 1: Starting HeroScript Worker".blue().bold());
println!("{}", "------------------------------------".blue());
supervisor.start_worker(ScriptType::HeroScript, worker_binary_path).await?;
// Show worker status
let worker_status = supervisor.get_worker_status();
println!("Active workers:");
for (id, script_type, active) in worker_status {
let status = if active { "🟢 Running" } else { "🔴 Stopped" };
println!(" {} - {} ({})", id, script_type.as_str(), status);
}
println!();
// Demo 2: Submit and execute a simple job
println!("{}", "📋 Demo 2: Submit and Execute Job".blue().bold());
println!("{}", "---------------------------------".blue());
let script = r#"
print("Hello from HeroScript worker!");
let result = 42 + 8;
print("Calculation: 42 + 8 = " + result);
result
"#;
let job_id = supervisor.submit_job(ScriptType::HeroScript, script).await?;
// Wait for job completion
match supervisor.wait_for_job_completion(&job_id, Duration::from_secs(10)).await {
Ok(result) => {
println!("{}", format!("🎉 Job result: {}", result).green().bold());
}
Err(e) => {
println!("{}", format!("❌ Job failed: {}", e).red());
}
}
println!();
// Demo 3: Submit multiple jobs
println!("{}", "📋 Demo 3: Multiple Jobs".blue().bold());
println!("{}", "------------------------".blue());
let jobs = vec![
("Job 1", r#"print("Job 1 executing"); "job1_result""#),
("Job 2", r#"print("Job 2 executing"); 100 + 200"#),
("Job 3", r#"print("Job 3 executing"); "hello_world""#),
];
let mut job_ids = Vec::new();
for (name, script) in jobs {
let job_id = supervisor.submit_job(ScriptType::HeroScript, script).await?;
job_ids.push((name, job_id));
println!("{} submitted: {}", name, job_ids.last().unwrap().1);
}
// Wait for all jobs to complete
for (name, job_id) in job_ids {
match supervisor.wait_for_job_completion(&job_id, Duration::from_secs(5)).await {
Ok(result) => {
println!("{} completed: {}", name, result);
}
Err(e) => {
println!("{} failed: {}", name, e);
}
}
}
println!();
// Demo 4: Job management
println!("{}", "📋 Demo 4: Job Management".blue().bold());
println!("{}", "-------------------------".blue());
let all_jobs = supervisor.list_jobs().await?;
println!("Total jobs in system: {}", all_jobs.len());
if !all_jobs.is_empty() {
println!("Job IDs:");
for (i, job_id) in all_jobs.iter().enumerate() {
println!(" {}. {}", i + 1, job_id);
}
}
println!();
// Demo 5: Error handling
println!("{}", "📋 Demo 5: Error Handling".blue().bold());
println!("{}", "-------------------------".blue());
let error_script = r#"
print("This job will cause an error");
let x = undefined_variable; // This will cause an error
x
"#;
let error_job_id = supervisor.submit_job(ScriptType::HeroScript, error_script).await?;
match supervisor.wait_for_job_completion(&error_job_id, Duration::from_secs(5)).await {
Ok(result) => {
println!("Unexpected success: {}", result);
}
Err(e) => {
println!("{}", format!("Expected error handled: {}", e).yellow());
}
}
println!();
// Demo 6: Cleanup
println!("{}", "📋 Demo 6: Cleanup".blue().bold());
println!("{}", "-------------------".blue());
let final_job_count = supervisor.list_jobs().await?.len();
println!("Jobs before cleanup: {}", final_job_count);
let cleared = supervisor.clear_all_jobs().await?;
println!("Jobs cleared: {}", cleared);
let remaining_jobs = supervisor.list_jobs().await?.len();
println!("Jobs after cleanup: {}", remaining_jobs);
println!();
// Stop all workers
supervisor.stop_all_workers().await;
println!("{}", "🎉 Demo completed successfully!".green().bold());
println!();
println!("{}", "Key Features Demonstrated:".blue().bold());
println!(" ✅ Supervisor lifecycle management");
println!(" ✅ Worker process spawning and management");
println!(" ✅ Job submission and execution");
println!(" ✅ Real-time job monitoring");
println!(" ✅ Multiple job handling");
println!(" ✅ Error handling and recovery");
println!(" ✅ Resource cleanup");
println!();
println!("{}", "The supervisor successfully managed the complete worker lifecycle!".green());
Ok(())
}

1
core/job/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/target

14
core/job/Cargo.toml Normal file
View File

@ -0,0 +1,14 @@
[package]
name = "hero_job"
version = "0.1.0"
edition = "2021"
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
redis = { version = "0.25", features = ["tokio-comp"] }
tokio = { version = "1.0", features = ["full"] }
uuid = { version = "1.0", features = ["v4", "serde"] }
log = "0.4"
thiserror = "1.0"

381
core/job/src/lib.rs Normal file
View File

@ -0,0 +1,381 @@
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;
use redis::AsyncCommands;
use thiserror::Error;
/// Redis namespace prefix for all Hero job-related keys
pub const NAMESPACE_PREFIX: &str = "hero:job:";
/// Script type enumeration for different script engines
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ScriptType {
/// HeroScript - Hero's native scripting language
HeroScript,
/// Rhai SAL - Rhai Script Abstraction Layer
RhaiSAL,
/// Rhai DSL - Rhai Domain Specific Language
RhaiDSL,
}
impl ScriptType {
/// Get the worker queue suffix for this script type
pub fn worker_queue_suffix(&self) -> &'static str {
match self {
ScriptType::HeroScript => "heroscript",
ScriptType::RhaiSAL => "rhai_sal",
ScriptType::RhaiDSL => "rhai_dsl",
}
}
pub fn as_str(&self) -> &'static str {
match self {
ScriptType::HeroScript => "heroscript",
ScriptType::RhaiSAL => "rhai_sal",
ScriptType::RhaiDSL => "rhai_dsl",
}
}
}
/// Job status enumeration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum JobStatus {
Dispatched,
WaitingForPrerequisites,
Started,
Error,
Finished,
}
impl JobStatus {
pub fn as_str(&self) -> &'static str {
match self {
JobStatus::Dispatched => "dispatched",
JobStatus::WaitingForPrerequisites => "waiting_for_prerequisites",
JobStatus::Started => "started",
JobStatus::Error => "error",
JobStatus::Finished => "finished",
}
}
pub fn from_str(s: &str) -> Option<Self> {
match s {
"dispatched" => Some(JobStatus::Dispatched),
"waiting_for_prerequisites" => Some(JobStatus::WaitingForPrerequisites),
"started" => Some(JobStatus::Started),
"error" => Some(JobStatus::Error),
"finished" => Some(JobStatus::Finished),
_ => None,
}
}
}
/// Representation of a script execution request.
///
/// This structure contains all the information needed to execute a script
/// on a worker service, including the script content, dependencies, and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Job {
pub id: String,
pub caller_id: String,
pub context_id: String,
pub script: String,
pub script_type: ScriptType,
pub timeout: Duration,
pub retries: u8, // retries on script execution
pub concurrent: bool, // whether to execute script in separate thread
pub log_path: Option<String>, // path to write logs of script execution to
pub env_vars: HashMap<String, String>, // environment variables for script execution
pub prerequisites: Vec<String>, // job IDs that must complete before this job can run
pub dependents: Vec<String>, // job IDs that depend on this job completing
pub created_at: chrono::DateTime<chrono::Utc>,
pub updated_at: chrono::DateTime<chrono::Utc>,
}
/// Error types for job operations
#[derive(Error, Debug)]
pub enum JobError {
#[error("Redis error: {0}")]
RedisError(#[from] redis::RedisError),
#[error("Serialization error: {0}")]
SerializationError(#[from] serde_json::Error),
#[error("Job not found: {0}")]
JobNotFound(String),
#[error("Invalid job data: {0}")]
InvalidJobData(String),
#[error("Missing required field: {0}")]
MissingField(String),
}
impl Job {
/// Create a new job with the given parameters
pub fn new(
caller_id: String,
context_id: String,
script: String,
script_type: ScriptType,
) -> Self {
let now = Utc::now();
Self {
id: Uuid::new_v4().to_string(),
caller_id,
context_id,
script,
script_type,
timeout: Duration::from_secs(30),
retries: 0,
concurrent: false,
log_path: None,
env_vars: HashMap::new(),
prerequisites: Vec::new(),
dependents: Vec::new(),
created_at: now,
updated_at: now,
}
}
/// Store this job in Redis
pub async fn store_in_redis(&self, conn: &mut redis::aio::MultiplexedConnection) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, self.id);
let mut hset_args: Vec<(String, String)> = vec![
("jobId".to_string(), self.id.clone()),
("script".to_string(), self.script.clone()),
("script_type".to_string(), format!("{:?}", self.script_type)),
("callerId".to_string(), self.caller_id.clone()),
("contextId".to_string(), self.context_id.clone()),
("status".to_string(), "pending".to_string()),
("timeout".to_string(), self.timeout.as_secs().to_string()),
("retries".to_string(), self.retries.to_string()),
("concurrent".to_string(), self.concurrent.to_string()),
("createdAt".to_string(), self.created_at.to_rfc3339()),
("updatedAt".to_string(), self.updated_at.to_rfc3339()),
];
// Add optional log path
if let Some(log_path) = &self.log_path {
hset_args.push(("log_path".to_string(), log_path.clone()));
}
// Add environment variables as JSON string if any are provided
if !self.env_vars.is_empty() {
let env_vars_json = serde_json::to_string(&self.env_vars)?;
hset_args.push(("env_vars".to_string(), env_vars_json));
}
// Add prerequisites as JSON string if any are provided
if !self.prerequisites.is_empty() {
let prerequisites_json = serde_json::to_string(&self.prerequisites)?;
hset_args.push(("prerequisites".to_string(), prerequisites_json));
}
// Add dependents as JSON string if any are provided
if !self.dependents.is_empty() {
let dependents_json = serde_json::to_string(&self.dependents)?;
hset_args.push(("dependents".to_string(), dependents_json));
}
conn.hset_multiple::<_, _, _, ()>(&job_key, &hset_args).await?;
Ok(())
}
/// Load a job from Redis by ID
pub async fn load_from_redis(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<Self, JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let job_data: HashMap<String, String> = conn.hgetall(&job_key).await?;
if job_data.is_empty() {
return Err(JobError::JobNotFound(job_id.to_string()));
}
// Parse required fields
let id = job_data.get("jobId")
.ok_or_else(|| JobError::MissingField("jobId".to_string()))?
.clone();
let script = job_data.get("script")
.ok_or_else(|| JobError::MissingField("script".to_string()))?
.clone();
let script_type_str = job_data.get("script_type")
.ok_or_else(|| JobError::MissingField("script_type".to_string()))?;
let script_type = match script_type_str.as_str() {
"HeroScript" => ScriptType::HeroScript,
"RhaiSAL" => ScriptType::RhaiSAL,
"RhaiDSL" => ScriptType::RhaiDSL,
_ => return Err(JobError::InvalidJobData(format!("Unknown script type: {}", script_type_str))),
};
let caller_id = job_data.get("callerId")
.ok_or_else(|| JobError::MissingField("callerId".to_string()))?
.clone();
let context_id = job_data.get("contextId")
.ok_or_else(|| JobError::MissingField("contextId".to_string()))?
.clone();
let timeout_secs: u64 = job_data.get("timeout")
.ok_or_else(|| JobError::MissingField("timeout".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid timeout value".to_string()))?;
let retries: u8 = job_data.get("retries")
.unwrap_or(&"0".to_string())
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid retries value".to_string()))?;
let concurrent: bool = job_data.get("concurrent")
.unwrap_or(&"false".to_string())
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid concurrent value".to_string()))?;
let created_at = job_data.get("createdAt")
.ok_or_else(|| JobError::MissingField("createdAt".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid createdAt timestamp".to_string()))?;
let updated_at = job_data.get("updatedAt")
.ok_or_else(|| JobError::MissingField("updatedAt".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid updatedAt timestamp".to_string()))?;
// Parse optional fields
let log_path = job_data.get("log_path").cloned();
let env_vars = if let Some(env_vars_json) = job_data.get("env_vars") {
serde_json::from_str(env_vars_json)?
} else {
HashMap::new()
};
let prerequisites = if let Some(prerequisites_json) = job_data.get("prerequisites") {
serde_json::from_str(prerequisites_json)?
} else {
Vec::new()
};
let dependents = if let Some(dependents_json) = job_data.get("dependents") {
serde_json::from_str(dependents_json)?
} else {
Vec::new()
};
Ok(Self {
id,
caller_id,
context_id,
script,
script_type,
timeout: Duration::from_secs(timeout_secs),
retries,
concurrent,
log_path,
env_vars,
prerequisites,
dependents,
created_at,
updated_at,
})
}
/// Update job status in Redis
pub async fn update_status(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
status: JobStatus,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "status", status.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Get job status from Redis
pub async fn get_status(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<JobStatus, JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let status_str: String = conn.hget(&job_key, "status").await?;
JobStatus::from_str(&status_str)
.ok_or_else(|| JobError::InvalidJobData(format!("Unknown status: {}", status_str)))
}
/// Set job result in Redis
pub async fn set_result(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
result: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "output", result).await?;
conn.hset::<_, _, _, ()>(&job_key, "status", JobStatus::Finished.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Set job error in Redis
pub async fn set_error(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
error: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "error", error).await?;
conn.hset::<_, _, _, ()>(&job_key, "status", JobStatus::Error.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Delete job from Redis
pub async fn delete_from_redis(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
conn.del::<_, ()>(&job_key).await?;
Ok(())
}
/// List all job IDs from Redis
pub async fn list_all_job_ids(
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<Vec<String>, JobError> {
// Search specifically for job keys with the exact job pattern
let job_keys: Vec<String> = conn.keys(format!("{}*", NAMESPACE_PREFIX)).await?;
let job_ids: Vec<String> = job_keys
.iter()
.filter_map(|key| {
// Only include keys that exactly match the job key pattern hero:job:*
if key.starts_with(NAMESPACE_PREFIX) {
let potential_id = key.strip_prefix(NAMESPACE_PREFIX)?;
// Validate that this looks like a UUID (job IDs are UUIDs)
if potential_id.len() == 36 && potential_id.chars().filter(|&c| c == '-').count() == 4 {
Some(potential_id.to_string())
} else {
None
}
} else {
None
}
})
.collect();
Ok(job_ids)
}
}
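
For reference, here is a minimal usage sketch of the `hero_job` API above, assuming a Redis instance at `redis://localhost:6379` and the `tokio`/`redis` dependencies from this crate's `Cargo.toml`; it is an illustration, not part of the crate:

```rust
use hero_job::{Job, JobStatus, ScriptType};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = redis::Client::open("redis://localhost:6379")?;
    let mut conn = client.get_multiplexed_async_connection().await?;

    // Build a job and persist it as a hero:job:<uuid> hash.
    let job = Job::new(
        "caller-1".to_string(),
        "context-1".to_string(),
        "40 + 2".to_string(),
        ScriptType::HeroScript,
    );
    job.store_in_redis(&mut conn).await?;

    // Walk the job through its lifecycle.
    Job::update_status(&mut conn, &job.id, JobStatus::Started).await?;
    Job::set_result(&mut conn, &job.id, "42").await?;

    // Reload and inspect; set_result has already marked it Finished.
    let loaded = Job::load_from_redis(&mut conn, &job.id).await?;
    assert_eq!(Job::get_status(&mut conn, &loaded.id).await?, JobStatus::Finished);

    Job::delete_from_redis(&mut conn, &job.id).await?;
    Ok(())
}
```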

BIN
core/worker/.DS_Store vendored Normal file

Binary file not shown.

2
core/worker/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target
worker_rhai_temp_db

1423
core/worker/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

29
core/worker/Cargo.toml Normal file
View File

@ -0,0 +1,29 @@
[package]
name = "rhailib_worker"
version = "0.1.0"
edition = "2021"
[lib]
name = "rhailib_worker" # Can be different from package name, or same
path = "src/lib.rs"
[[bin]]
name = "worker"
path = "cmd/worker.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
redis = { version = "0.25.0", features = ["tokio-comp"] }
rhai = { version = "1.18.0", default-features = false, features = ["sync", "decimal", "std"] } # Added "decimal" for broader script support
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
log = "0.4"
env_logger = "0.10"
clap = { version = "4.4", features = ["derive"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # Though task_id is string, uuid might be useful
chrono = { version = "0.4", features = ["serde"] }
rhai_dispatcher = { path = "../../../rhailib/src/dispatcher" }
rhailib_engine = { path = "../engine" }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }

75
core/worker/README.md Normal file
View File

@ -0,0 +1,75 @@
# Rhai Worker
The `rhailib_worker` crate implements a standalone worker service that listens for Rhai script execution tasks from a Redis queue, executes them, and posts results back to Redis. It is designed to be spawned as a separate OS process by an orchestrator like the `launcher` crate.
## Features
- **Redis Queue Consumption**: Listens to a specific Redis list (acting as a task queue) for incoming task IDs. The queue is determined by the `--worker-id` argument.
- **Rhai Script Execution**: Executes Rhai scripts retrieved from Redis based on task IDs.
- **Task State Management**: Updates task status (`processing`, `completed`, `error`) and stores results in Redis hashes.
- **Script Scope Injection**: Automatically injects two important constants into the Rhai script's scope:
- `CONTEXT_ID`: The public key of the worker's own circle.
- `CALLER_ID`: The public key of the entity that requested the script execution.
- **Asynchronous Operations**: Built with `tokio` for non-blocking Redis communication.
- **Graceful Error Handling**: Captures errors during script execution and stores them for the client.
## Core Components
- **`rhailib_worker` (Library Crate)**:
- **`spawn_rhai_worker(worker_id, db_path, engine, redis_url, shutdown_rx, preserve_tasks)`**: The main asynchronous entry point, which spawns a worker loop that:
- Connects to Redis.
- Continuously polls the designated Redis queue (`rhailib:<worker_id>`) using `BLPOP`.
- Upon receiving a `task_id`, it fetches the task details from a Redis hash.
- It injects `CALLER_ID` and `CONTEXT_ID` into the script's scope.
- It executes the script and updates the task status in Redis with the output or error.
- **`worker` (Binary Crate - `cmd/worker.rs`)**:
- The main executable entry point. It parses command-line arguments (`--worker-id`, `--redis-url`, `--db-path`, `--preserve-tasks`, `--no-timestamp`), initializes a Rhai engine, and invokes `spawn_rhai_worker`.
## How It Works
1. The worker executable is launched by an external process (e.g., `launcher`), which passes the required command-line arguments.
```bash
# This is typically done programmatically by a parent process.
/path/to/worker --redis-url redis://127.0.0.1/ --worker-id worker_1
```
2. The worker loop connects to Redis and starts listening to its designated task queue (e.g., `rhailib:worker_1`).
3. A `rhai_dispatcher` submits a task by pushing a `task_id` to this queue and storing the script and other details in a Redis hash (see the sketch after this list).
4. The worker's `BLPOP` command picks up the `task_id`.
5. The worker retrieves the script from the corresponding `rhailib:<task_id>` hash.
6. It updates the task's status to "processing".
7. The Rhai script is executed within a scope that contains both `CONTEXT_ID` and `CALLER_ID`.
8. After execution, the status is updated to "completed" (with output) or "error" (with an error message).
9. The worker then goes back to listening for the next task.
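
To make the queue protocol concrete, here is a minimal dispatcher-side sketch of steps 3-4, using the `redis` crate and the key layout described above (`rhailib:<worker_id>` for the queue, `rhailib:<task_id>` for the details hash). It is not the actual `rhai_dispatcher` implementation, and the field values are placeholders:

```rust
use redis::AsyncCommands;

// Store the task details the worker will HGETALL, then push the task id
// onto the worker's queue to wake its BLPOP.
async fn submit_task(
    conn: &mut redis::aio::MultiplexedConnection,
    worker_id: &str,
    task_id: &str,
    script: &str,
) -> redis::RedisResult<()> {
    conn.hset_multiple::<_, _, _, ()>(
        &format!("rhailib:{}", task_id),
        &[
            ("script", script),
            ("status", "pending"),
            ("callerId", "02...caller"),   // hypothetical caller public key
            ("contextId", "02...context"), // hypothetical context public key
            ("createdAt", "2025-01-01T00:00:00+00:00"), // normally Utc::now().to_rfc3339()
        ],
    )
    .await?;
    conn.lpush::<_, _, ()>(&format!("rhailib:{}", worker_id), task_id)
        .await
}
```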
## Prerequisites
- A running Redis instance accessible by the worker.
- An orchestrator process (like `launcher`) to spawn the worker.
- A `rhai_dispatcher` (or another system) to populate the Redis queues.
## Building and Running
The worker is intended to be built as a dependency and run by another program.
1. **Build the worker:**
```bash
# From the root of the rhailib project
cargo build --package rhailib_worker
```
The binary will be located at `target/debug/worker`.
2. **Running the worker:**
The worker is not typically run manually. The `launcher` crate is responsible for spawning it with the correct arguments. If you need to run it manually for testing, you must provide the required arguments:
```bash
./target/debug/worker --redis-url redis://127.0.0.1/ --worker-id <worker_id>
```
## Dependencies
Key dependencies include:
- `redis`: For asynchronous Redis communication.
- `rhai`: The Rhai script engine.
- `clap`: For command-line argument parsing.
- `tokio`: For the asynchronous runtime.
- `log`, `env_logger`: For logging.

113
core/worker/cmd/README.md Normal file
View File

@ -0,0 +1,113 @@
# Rhai Worker Binary
A command-line worker for executing Rhai scripts from Redis task queues.
## Binary: `worker`
### Installation
Build the binary:
```bash
cargo build --bin worker --release
```
### Usage
```bash
# Basic usage - requires circle public key
worker --circle-public-key <CIRCLE_PUBLIC_KEY>
# Custom Redis URL
worker -c <CIRCLE_PUBLIC_KEY> --redis-url redis://localhost:6379/1
# Custom worker ID and database path
worker -c <CIRCLE_PUBLIC_KEY> --worker-id my_worker --db-path /tmp/worker_db
# Preserve tasks for debugging/benchmarking
worker -c <CIRCLE_PUBLIC_KEY> --preserve-tasks
# Remove timestamps from logs
worker -c <CIRCLE_PUBLIC_KEY> --no-timestamp
# Increase verbosity
worker -c <CIRCLE_PUBLIC_KEY> -v # Debug logging
worker -c <CIRCLE_PUBLIC_KEY> -vv # Full debug
worker -c <CIRCLE_PUBLIC_KEY> -vvv # Trace logging
```
### Command-Line Options
| Option | Short | Default | Description |
|--------|-------|---------|-------------|
| `--circle-public-key` | `-c` | **Required** | Circle public key to listen for tasks |
| `--redis-url` | `-r` | `redis://localhost:6379` | Redis connection URL |
| `--worker-id` | `-w` | `worker_1` | Unique worker identifier |
| `--preserve-tasks` | | `false` | Preserve task details after completion |
| `--db-path` | | `worker_rhai_temp_db` | Database path for Rhai engine |
| `--no-timestamp` | | `false` | Remove timestamps from log output |
| `--verbose` | `-v` | | Increase verbosity (stackable) |
### Features
- **Task Queue Processing**: Listens to Redis queues for Rhai script execution tasks
- **Performance Optimized**: Configured for maximum Rhai engine performance
- **Graceful Shutdown**: Supports shutdown signals for clean termination
- **Flexible Logging**: Configurable verbosity and timestamp control
- **Database Integration**: Uses heromodels for data persistence
- **Task Cleanup**: Optional task preservation for debugging/benchmarking
### How It Works
1. **Queue Listening**: Worker listens on Redis queue `rhailib:{circle_public_key}`
2. **Task Processing**: Receives task IDs, fetches task details from Redis
3. **Script Execution**: Executes Rhai scripts with configured engine
4. **Result Handling**: Updates task status and sends results to reply queues
5. **Cleanup**: Optionally cleans up task details after completion
### Configuration Examples
#### Development Worker
```bash
# Simple development worker
worker -c dev_circle_123
# Development with verbose logging (no timestamps)
worker -c dev_circle_123 -v --no-timestamp
```
#### Production Worker
```bash
# Production worker with custom configuration
worker \
--circle-public-key prod_circle_456 \
--redis-url redis://redis-server:6379/0 \
--worker-id prod_worker_1 \
--db-path /var/lib/worker/db \
--preserve-tasks
```
#### Benchmarking Worker
```bash
# Worker optimized for benchmarking
worker \
--circle-public-key bench_circle_789 \
--preserve-tasks \
--no-timestamp \
-vv
```
### Error Handling
The worker provides clear error messages for:
- Missing or invalid circle public key
- Redis connection failures
- Script execution errors
- Database access issues
### Dependencies
- `rhailib_engine`: Rhai engine with heromodels integration
- `redis`: Redis client for task queue management
- `rhai`: Script execution engine
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure

95
core/worker/cmd/worker.rs Normal file
View File

@ -0,0 +1,95 @@
use clap::Parser;
use rhailib_engine::create_heromodels_engine;
use rhailib_worker::spawn_rhai_worker;
use tokio::sync::mpsc;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Worker ID for identification
#[arg(short, long)]
worker_id: String,
/// Redis URL
#[arg(short, long, default_value = "redis://localhost:6379")]
redis_url: String,
/// Preserve task details after completion (for benchmarking)
#[arg(long, default_value = "false")]
preserve_tasks: bool,
/// Root directory for engine database
#[arg(long, default_value = "worker_rhai_temp_db")]
db_path: String,
/// Disable timestamps in log output
#[arg(long, help = "Remove timestamps from log output")]
no_timestamp: bool,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let args = Args::parse();
// Configure env_logger with or without timestamps
if args.no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
log::info!("Rhai Worker (binary) starting with performance-optimized engine.");
log::info!(
"Worker ID: {}, Redis: {}",
args.worker_id,
args.redis_url
);
let mut engine = create_heromodels_engine();
// Performance optimizations for benchmarking
engine.set_max_operations(0); // Unlimited operations for performance testing
engine.set_max_expr_depths(0, 0); // Unlimited expression depth
engine.set_max_string_size(0); // Unlimited string size
engine.set_max_array_size(0); // Unlimited array size
engine.set_max_map_size(0); // Unlimited map size
// Enable full optimization for maximum performance
engine.set_optimization_level(rhai::OptimizationLevel::Full);
log::info!("Engine configured for maximum performance");
// Create shutdown channel (for graceful shutdown, though not used in benchmarks)
let (_shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);
// Spawn the worker
let worker_handle = spawn_rhai_worker(
args.worker_id,
args.db_path,
engine,
args.redis_url,
shutdown_rx,
args.preserve_tasks,
);
// Wait for the worker to complete
match worker_handle.await {
Ok(result) => match result {
Ok(_) => {
log::info!("Worker completed successfully");
Ok(())
}
Err(e) => {
log::error!("Worker failed: {}", e);
Err(e)
}
},
Err(e) => {
log::error!("Worker task panicked: {}", e);
Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
}
}

View File

@ -0,0 +1,53 @@
# Architecture of the `rhailib_worker` Crate
The `rhailib_worker` crate implements a distributed task execution system for Rhai scripts, providing scalable, reliable script processing through Redis-based task queues. Workers are decoupled from contexts, allowing a single worker to process tasks for multiple contexts (circles).
## Core Architecture
```mermaid
graph TD
A[Worker Process] --> B[Task Queue Processing]
A --> C[Script Execution Engine]
A --> D[Result Management]
B --> B1[Redis Queue Monitoring]
B --> B2[Task Deserialization]
B --> B3[Priority Handling]
C --> C1[Rhai Engine Integration]
C --> C2[Context Management]
C --> C3[Error Handling]
D --> D1[Result Serialization]
D --> D2[Reply Queue Management]
D --> D3[Status Updates]
```
## Key Components
### Task Processing Pipeline
- **Queue Monitoring**: Continuous Redis queue polling for new tasks
- **Task Execution**: Secure Rhai script execution with proper context
- **Result Handling**: Comprehensive result and error management
### Engine Integration
- **Rhailib Engine**: Full integration with rhailib_engine for DSL access
- **Context Injection**: Proper authentication and database context setup
- **Security**: Isolated execution environment with access controls
### Scalability Features
- **Horizontal Scaling**: Multiple worker instances for load distribution
- **Queue-based Architecture**: Reliable task distribution via Redis
- **Fault Tolerance**: Robust error handling and recovery mechanisms
## Dependencies
- **Redis Integration**: Task queue management and communication
- **Rhai Engine**: Script execution with full DSL capabilities
- **Client Integration**: Shared data structures with rhai_dispatcher
- **Heromodels**: Database and business logic integration
- **Async Runtime**: Tokio for high-performance concurrent processing
## Deployment Patterns
Workers can be deployed as standalone processes, containerized services, or embedded components, providing flexibility for various deployment scenarios from development to production.

259
core/worker/src/lib.rs Normal file
View File

@ -0,0 +1,259 @@
use chrono::Utc;
use log::{debug, error, info};
use redis::AsyncCommands;
use rhai::{Dynamic, Engine};
use rhai_dispatcher::RhaiTaskDetails; // Import for constructing the reply message
use serde_json; // For serializing the reply message
use std::collections::HashMap;
use tokio::sync::mpsc; // For shutdown signal
use tokio::task::JoinHandle;
const NAMESPACE_PREFIX: &str = "rhailib:";
const BLPOP_TIMEOUT_SECONDS: usize = 5;
// This function updates specific fields in the Redis hash.
// It doesn't need to know the full RhaiTaskDetails struct, only the field names.
async fn update_task_status_in_redis(
conn: &mut redis::aio::MultiplexedConnection,
task_id: &str,
status: &str,
output: Option<String>,
error_msg: Option<String>,
) -> redis::RedisResult<()> {
let task_key = format!("{}{}", NAMESPACE_PREFIX, task_id);
let mut updates: Vec<(&str, String)> = vec![
("status", status.to_string()),
("updatedAt", Utc::now().timestamp().to_string()),
];
if let Some(out) = output {
updates.push(("output", out));
}
if let Some(err) = error_msg {
updates.push(("error", err));
}
debug!(
"Updating task {} in Redis with status: {}, updates: {:?}",
task_id, status, updates
);
conn.hset_multiple::<_, _, _, ()>(&task_key, &updates)
.await?;
Ok(())
}
pub fn spawn_rhai_worker(
worker_id: String,
db_path: String,
mut engine: Engine,
redis_url: String,
mut shutdown_rx: mpsc::Receiver<()>, // Add shutdown receiver
preserve_tasks: bool, // Flag to control task cleanup
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
tokio::spawn(async move {
let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id);
info!(
"Rhai Worker for Worker ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
worker_id, redis_url, queue_key
);
let redis_client = match redis::Client::open(redis_url.as_str()) {
Ok(client) => client,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to open Redis client: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
let mut redis_conn = match redis_client.get_multiplexed_async_connection().await {
Ok(conn) => conn,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to get Redis connection: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
info!(
"Worker for Worker ID '{}' successfully connected to Redis.",
worker_id
);
loop {
let blpop_keys = vec![queue_key.clone()];
tokio::select! {
// Listen for shutdown signal
_ = shutdown_rx.recv() => {
info!("Worker for Worker ID '{}': Shutdown signal received. Terminating loop.", worker_id.clone());
break;
}
// Listen for tasks from Redis
blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => {
debug!("Worker for Worker ID '{}': Attempting BLPOP on queue: {}", worker_id.clone(), queue_key);
let response: Option<(String, String)> = match blpop_result {
Ok(resp) => resp,
Err(e) => {
error!("Worker '{}': Redis BLPOP error on queue {}: {}. Worker for this circle might stop.", worker_id, queue_key, e);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
if let Some((_queue_name_recv, task_id)) = response {
info!("Worker '{}' received task_id: {} from queue: {}", worker_id, task_id, _queue_name_recv);
debug!("Worker '{}', Task {}: Processing started.", worker_id, task_id);
let task_details_key = format!("{}{}", NAMESPACE_PREFIX, task_id);
debug!("Worker '{}', Task {}: Attempting HGETALL from key: {}", worker_id, task_id, task_details_key);
let task_details_map_result: Result<HashMap<String, String>, _> =
redis_conn.hgetall(&task_details_key).await;
match task_details_map_result {
Ok(details_map) => {
debug!("Worker '{}', Task {}: HGETALL successful. Details: {:?}", worker_id, task_id, details_map);
let script_content_opt = details_map.get("script").cloned();
let created_at_str_opt = details_map.get("createdAt").cloned();
let caller_id = details_map.get("callerId").cloned().expect("callerId field missing from Redis hash");
let context_id = details_map.get("contextId").cloned().expect("contextId field missing from Redis hash");
if context_id.is_empty() {
error!("Worker '{}', Task {}: contextId field is empty in Redis hash", worker_id, task_id);
return Err("contextId field is empty in Redis hash".into());
}
if caller_id.is_empty() {
error!("Worker '{}', Task {}: callerId field is empty in Redis hash", worker_id, task_id);
return Err("callerId field is empty in Redis hash".into());
}
if let Some(script_content) = script_content_opt {
info!("Worker '{}' processing task_id: {}. Script: {:.50}...", context_id, task_id, script_content);
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to 'processing'.", context_id, task_id);
if let Err(e) = update_task_status_in_redis(&mut redis_conn, &task_id, "processing", None, None).await {
error!("Worker for Context ID '{}', Task {}: Failed to update status to 'processing': {}", context_id, task_id, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Status updated to 'processing'.", context_id, task_id);
}
let mut db_config = rhai::Map::new();
db_config.insert("DB_PATH".into(), db_path.clone().into());
db_config.insert("CALLER_ID".into(), caller_id.clone().into());
db_config.insert("CONTEXT_ID".into(), context_id.clone().into());
engine.set_default_tag(Dynamic::from(db_config)); // Or pass via CallFnOptions
debug!("Worker for Context ID '{}', Task {}: Evaluating script with Rhai engine.", context_id, task_id);
let mut final_status = "error".to_string(); // Default to error
let mut final_output: Option<String> = None;
let mut final_error_msg: Option<String> = None;
match engine.eval::<rhai::Dynamic>(&script_content) {
Ok(result) => {
let output_str = if result.is::<String>() {
// If the result is a string, we can unwrap it directly.
// This moves `result`, which is fine because it's the last time we use it in this branch.
result.into_string().unwrap()
} else {
result.to_string()
};
info!("Worker for Context ID '{}' task {} completed. Output: {}", context_id, task_id, output_str);
final_status = "completed".to_string();
final_output = Some(output_str);
}
Err(e) => {
let error_str = format!("{:?}", *e);
error!("Worker for Context ID '{}' task {} script evaluation failed. Error: {}", context_id, task_id, error_str);
final_error_msg = Some(error_str);
// final_status remains "error"
}
}
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to '{}'.", context_id, task_id, final_status);
if let Err(e) = update_task_status_in_redis(
&mut redis_conn,
&task_id,
&final_status,
final_output.clone(), // Clone for task hash update
final_error_msg.clone(), // Clone for task hash update
).await {
error!("Worker for Context ID '{}', Task {}: Failed to update final status to '{}': {}", context_id, task_id, final_status, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Final status updated to '{}'.", context_id, task_id, final_status);
}
// Send to reply queue if specified
let created_at = created_at_str_opt
.and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(Utc::now); // Fallback, though createdAt should exist
let reply_details = RhaiTaskDetails {
task_id: task_id.to_string(), // Add the task_id
script: script_content.clone(), // Include script for context in reply
status: final_status, // The final status
output: final_output, // The final output
error: final_error_msg, // The final error
created_at, // Original creation time
updated_at: Utc::now(), // Time of this final update/reply
caller_id: caller_id.clone(),
context_id: context_id.clone(),
worker_id: worker_id.clone(),
};
let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, task_id);
match serde_json::to_string(&reply_details) {
Ok(reply_json) => {
let lpush_result: redis::RedisResult<i64> = redis_conn.lpush(&reply_queue_key, &reply_json).await;
match lpush_result {
Ok(_) => debug!("Worker for Context ID '{}', Task {}: Successfully sent result to reply queue {}", context_id, task_id, reply_queue_key),
Err(e_lpush) => error!("Worker for Context ID '{}', Task {}: Failed to LPUSH result to reply queue {}: {}", context_id, task_id, reply_queue_key, e_lpush),
}
}
Err(e_json) => {
error!("Worker for Context ID '{}', Task {}: Failed to serialize reply details for queue {}: {}", context_id, task_id, reply_queue_key, e_json);
}
}
// Clean up task details based on preserve_tasks flag
if !preserve_tasks {
// The worker is responsible for cleaning up the task details hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete task details key '{}': {}", context_id, task_id, task_details_key, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Cleaned up task details key '{}'.", context_id, task_id, task_details_key);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving task details (preserve_tasks=true)", context_id, task_id);
}
} else { // Script content not found in hash
error!(
"Worker for Context ID '{}', Task {}: Script content not found in Redis hash. Details map: {:?}",
context_id, task_id, details_map
);
// Clean up invalid task details based on preserve_tasks flag
if !preserve_tasks {
// Even if the script is not found, the worker should clean up the invalid task hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete invalid task details key '{}': {}", context_id, task_id, task_details_key, e);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving invalid task details (preserve_tasks=true)", context_id, task_id);
}
}
}
Err(e) => {
error!(
"Worker '{}', Task {}: Failed to fetch details (HGETALL) from Redis for key {}. Error: {:?}",
worker_id, task_id, task_details_key, e
);
}
}
} else {
debug!("Worker '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", &worker_id, &queue_key);
}
} // End of blpop_result match
} // End of tokio::select!
} // End of loop
info!("Worker '{}' has shut down.", worker_id);
Ok(())
})
}

62
interfaces/openrpc.json Normal file
View File

@ -0,0 +1,62 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Circle WebSocket Server API",
"version": "0.1.0",
"description": "API for interacting with a Circle's WebSocket server, primarily for Rhai script execution."
},
"methods": [
{
"name": "play",
"summary": "Executes a Rhai script on the server.",
"params": [
{
"name": "script",
"description": "The Rhai script to execute.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "playResult",
"description": "The output from the executed Rhai script.",
"schema": {
"$ref": "#/components/schemas/PlayResult"
}
},
"examples": [
{
"name": "Simple Script Execution",
"params": [
{
"name": "script",
"value": "let x = 10; x * 2"
}
],
"result": {
"name": "playResult",
"value": {
"output": "20"
}
}
}
]
}
],
"components": {
"schemas": {
"PlayResult": {
"type": "object",
"properties": {
"output": {
"type": "string",
"description": "The string representation of the Rhai script's evaluation result."
}
},
"required": ["output"]
}
}
}
}
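
For reference, a sketch of the JSON-RPC 2.0 envelope a client might send for the `play` method above, built with `serde_json`. Whether the server expects by-name or positional params is an assumption here:

```rust
use serde_json::json;

/// Build a JSON-RPC 2.0 request for the `play` method.
fn play_request(id: u64, script: &str) -> serde_json::Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "method": "play",
        "params": { "script": script }
    })
}

// A successful response mirrors the PlayResult schema, e.g.:
// {"jsonrpc": "2.0", "id": 1, "result": {"output": "20"}}
```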

1
interfaces/unix/client/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/target

6
interfaces/unix/client/Cargo.toml Normal file
View File

@ -0,0 +1,6 @@
[package]
name = "hero-client-unix"
version = "0.1.0"
edition = "2024"
[dependencies]

3
interfaces/unix/client/src/main.rs Normal file
View File

@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}


1
interfaces/unix/server/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/target

6
interfaces/unix/server/Cargo.toml Normal file
View File

@ -0,0 +1,6 @@
[package]
name = "hero-server-unix"
version = "0.1.0"
edition = "2024"
[dependencies]

3
interfaces/unix/server/src/main.rs Normal file
View File

@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}

2
interfaces/websocket/client/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target
/dist

View File

@ -0,0 +1,94 @@
# `client_ws` Architecture
This document details the internal architecture of the `client_ws` crate, focusing on its cross-platform design, internal modules, and the mechanics of its authentication process.
## 1. Core Design Principles
The `client_ws` crate is built on the following principles:
- **Platform Abstraction**: The core client logic is written in a platform-agnostic way. Platform-specific details (like the WebSocket implementation) are abstracted behind a common interface.
- **Modularity**: The crate is divided into logical modules, with a clear separation of concerns between the main client logic, authentication procedures, and cryptographic utilities.
- **Asynchronous Operations**: All network I/O is asynchronous, using `async/await` to ensure the client is non-blocking and efficient.
- **Fluent Configuration**: A builder pattern (`CircleWsClientBuilder`) is used for clear and flexible client construction.
- **Self-Managing Clients**: Each `CircleWsClient` handles its own lifecycle including connection, authentication, keep-alive, and reconnection logic internally.
## 2. Cross-Platform Implementation
To support both native and WebAssembly (WASM) environments, `client_ws` uses conditional compilation (`#[cfg]`) to provide different implementations for the underlying WebSocket transport.
- **Native (`target_arch != "wasm32"`)**: The `tokio-tungstenite` crate is used for robust, `tokio`-based WebSocket communication.
- **WebAssembly (`target_arch = "wasm32"`)**: The `gloo-net` crate provides bindings to the browser's native `WebSocket` API.
This approach allows the `CircleWsClient` to expose a single, unified API while the underlying implementation details are handled transparently at compile time.
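A minimal sketch of this `#[cfg]` pattern (the module and function names here are illustrative, not the crate's actual internals):

```rust
// Select the WebSocket transport at compile time; callers see one API.
#[cfg(not(target_arch = "wasm32"))]
mod transport {
    // Native targets: tokio-tungstenite.
    pub async fn connect(url: &str) -> Result<(), Box<dyn std::error::Error>> {
        let (_stream, _response) = tokio_tungstenite::connect_async(url).await?;
        Ok(())
    }
}

#[cfg(target_arch = "wasm32")]
mod transport {
    // WASM targets: the browser's WebSocket via gloo-net.
    pub async fn connect(url: &str) -> Result<(), Box<dyn std::error::Error>> {
        let _ws = gloo_net::websocket::futures::WebSocket::open(url)
            .map_err(|e| format!("{e:?}"))?;
        Ok(())
    }
}

pub use transport::connect;
```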
## 3. Module Structure
The `client_ws` crate is organized into the following key modules:
- **`lib.rs`**: The main module that defines the `CircleWsClientBuilder` and `CircleWsClient` structs and their public APIs. It orchestrates the entire communication flow.
- **`auth/`**: This module contains all the logic related to the `secp256k1` authentication flow.
- **`types.rs`**: Defines the core data structures used in authentication, such as `AuthError` and `AuthCredentials`.
- **`crypto_utils.rs`**: A self-contained utility module for handling all `secp256k1` cryptographic operations, including key generation, public key derivation, and message signing.
## 4. Self-Managing Client Architecture
Each `CircleWsClient` is designed to be completely self-managing, handling its entire lifecycle internally. This includes:
- **Connection Management**: Establishing and maintaining WebSocket connections
- **Authentication**: Automatic secp256k1 authentication flow when private keys are provided
- **Keep-Alive**: Periodic health checks to ensure connection stability
- **Reconnection**: Automatic reconnection with exponential backoff on connection failures
- **Connection Status Tracking**: Internal state management for connection health
### Connection Flow
The `connect()` method orchestrates the complete connection and authentication process:
```mermaid
sequenceDiagram
participant User as User Code
participant Builder as CircleWsClientBuilder
participant Client as CircleWsClient
participant CryptoUtils as auth::crypto_utils
participant WsActor as Server WebSocket Actor
User->>+Builder: new(url)
User->>+Builder: with_keypair(private_key)
User->>+Builder: build()
Builder-->>-User: client
User->>+Client: connect()
Note over Client: Self-managing connection process
Client->>Client: Establish WebSocket connection
Client->>Client: Start keep-alive loop
Client->>Client: Start reconnection handler
alt Has Private Key
Client->>Client: Check for private_key
Client->>+CryptoUtils: derive_public_key(private_key)
CryptoUtils-->>-Client: public_key
Note over Client: Request nonce via WebSocket
Client->>+WsActor: JSON-RPC "fetch_nonce" (pubkey)
WsActor-->>-Client: JSON-RPC Response (nonce)
Client->>+CryptoUtils: sign_message(private_key, nonce)
CryptoUtils-->>-Client: signature
Note over Client: Send credentials via WebSocket
Client->>+WsActor: JSON-RPC "authenticate" (pubkey, signature)
WsActor-->>-Client: JSON-RPC Response (authenticated: true/false)
end
Client-->>-User: Connection established and authenticated
```
### Self-Management Features
- **Automatic Keep-Alive**: Each client runs its own keep-alive loop to detect connection issues
- **Transparent Reconnection**: Failed connections are automatically retried with exponential backoff
- **Status Monitoring**: Connection status is tracked internally and can be queried via `is_connected()`
- **Resource Cleanup**: Proper cleanup of resources when clients are dropped
This architecture ensures that the cryptographic operations are isolated, the platform-specific code is cleanly separated, and each client is completely autonomous in managing its connection lifecycle.
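For illustration, a generic exponential-backoff loop of the kind described above. This is a sketch under `tokio`, not the crate's actual implementation; the base delay and cap are assumptions:

```rust
use std::time::Duration;
use tokio::time::sleep;

// Retry `connect` until it succeeds, doubling the delay after each
// failure up to a fixed cap.
async fn reconnect_with_backoff<F, Fut, T, E>(mut connect: F) -> T
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut delay = Duration::from_millis(250); // assumed base delay
    loop {
        match connect().await {
            Ok(conn) => return conn,
            Err(_) => {
                sleep(delay).await;
                delay = (delay * 2).min(Duration::from_secs(30)); // assumed cap
            }
        }
    }
}
```

A client's internal reconnection handler could wrap its connect future with a helper like this while the keep-alive loop monitors connection health.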

2764
interfaces/websocket/client/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

56
interfaces/websocket/client/Cargo.toml Normal file
View File

@ -0,0 +1,56 @@
[package]
name = "hero_websocket_client"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "hero_websocket_client"
path = "cmd/main.rs"
[dependencies]
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
log = { workspace = true }
futures-channel = { workspace = true, features = ["sink"] }
futures-util = { workspace = true, features = ["sink"] }
thiserror = { workspace = true }
url = { workspace = true }
http = "0.2"
# Authentication dependencies
hex = { workspace = true }
rand = { workspace = true }
getrandom = { version = "0.2", features = ["js"] }
# Optional crypto dependencies (enabled by default)
k256 = { version = "0.13", features = ["ecdsa", "sha256"], optional = true }
sha3 = { workspace = true, optional = true }
# WASM-specific dependencies
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-net = { version = "0.4.0", features = ["websocket"] }
gloo-timers = { version = "0.3.0", features = ["futures"] }
wasm-bindgen-futures = "0.4"
gloo-console = "0.3.0"
wasm-bindgen = "0.2"
js-sys = "0.3"
web-sys = { version = "0.3", features = ["Request", "RequestInit", "RequestMode", "Response", "Window"] }
# Native-specific dependencies
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
tokio-tungstenite = { version = "0.23.1", features = ["native-tls"] }
tokio = { workspace = true, features = ["rt", "macros", "time"] }
native-tls = "0.2"
clap = { workspace = true }
env_logger = { workspace = true }
dotenv = "0.15"
[dev-dependencies]
tokio = { workspace = true }
# Features
[features]
default = ["crypto"]
crypto = ["k256", "sha3"]

141
interfaces/websocket/client/README.md Normal file
View File

@ -0,0 +1,141 @@
# Circle WebSocket Client
A Rust library for connecting to Circle WebSocket servers with authentication support and self-managing connection lifecycle.
## Features
- **Cross-platform WebSocket client** (native and WASM)
- **secp256k1 cryptographic authentication** with automatic challenge-response flow
- **JSON-RPC 2.0 protocol support** for server communication
- **Self-managing connections** with automatic keep-alive and reconnection
- **Async/await interface** with modern Rust async patterns
- **Built on tokio-tungstenite** for reliable WebSocket connections (native)
- **Built on gloo-net** for WASM browser compatibility
## Architecture
Each `CircleWsClient` is completely self-managing:
- **Automatic Connection Management**: Handles WebSocket connection establishment
- **Built-in Authentication**: Seamless secp256k1 authentication when private keys are provided
- **Keep-Alive Monitoring**: Periodic health checks to detect connection issues
- **Transparent Reconnection**: Automatic reconnection with exponential backoff on failures
- **Connection Status Tracking**: Real-time connection state monitoring
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
circle_client_ws = { path = "../client_ws" }
```
### Basic Example (Self-Managing Connection)
```rust
use circle_client_ws::CircleWsClientBuilder;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create client with private key
let private_key = "your_private_key_hex";
let mut client = CircleWsClientBuilder::new("ws://localhost:8080".to_string())
.with_keypair(private_key.to_string())
.build();
// Connect - this handles authentication, keep-alive, and reconnection automatically
client.connect().await?;
// Check connection status
println!("Connected: {}", client.is_connected());
// Execute scripts on the server
let result = client.play("\"Hello from client!\"".to_string()).await?;
println!("Script result: {:?}", result);
// Client automatically maintains connection in the background
// No manual keep-alive or reconnection logic needed
Ok(())
}
```
### Self-Managing Features
The client automatically handles:
1. **Connection Establishment**: WebSocket connection to the server
2. **Authentication Flow**: secp256k1 challenge-response authentication
3. **Keep-Alive Monitoring**: Periodic health checks to ensure connection stability
4. **Automatic Reconnection**: Transparent reconnection on connection failures
5. **Resource Management**: Proper cleanup when the client is dropped
### Connection Status Monitoring
```rust
// Check if the client is currently connected
if client.is_connected() {
println!("Client is connected and healthy");
} else {
println!("Client is disconnected or reconnecting");
}
// Get detailed connection status
let status = client.get_connection_status();
println!("Connection status: {}", status);
```
### WASM Usage
For WASM applications, the client works seamlessly in browsers:
```rust
use circle_client_ws::CircleWsClientBuilder;
use wasm_bindgen_futures::spawn_local;
// In a WASM context
spawn_local(async move {
let mut client = CircleWsClientBuilder::new("ws://localhost:8080".to_string())
.build();
// Self-managing connection works the same in WASM
if let Ok(_) = client.connect().await {
// Client automatically handles keep-alive and reconnection
let result = client.play("\"WASM client connected!\"".to_string()).await;
// Handle result...
}
});
```
## Binary Tool
A command-line binary is also available for interactive use and script execution. See [`cmd/README.md`](cmd/README.md) for details.
## Platform Support
- **Native**: Full support on all Rust-supported platforms with tokio-tungstenite
- **WASM**: Browser support with gloo-net WebSocket bindings
## Dependencies
### Core Dependencies
- `serde`: JSON serialization and deserialization
- `uuid`: Request ID generation for JSON-RPC
- `futures-util`: Async utilities for WebSocket handling
- `thiserror`: Error handling and propagation
### Platform-Specific Dependencies
#### Native (tokio-based)
- `tokio-tungstenite`: Robust WebSocket implementation
- `tokio`: Async runtime for connection management
#### WASM (browser-based)
- `gloo-net`: WebSocket bindings for browsers
- `gloo-timers`: Timer utilities for keep-alive functionality
- `wasm-bindgen-futures`: Async support in WASM
### Cryptographic Dependencies (optional)
- `secp256k1`: Elliptic curve cryptography for authentication
- `sha3`: Hashing for cryptographic operations

136
interfaces/websocket/client/cmd/README.md Normal file
View File

@ -0,0 +1,136 @@
# Circles WebSocket Client
A WebSocket client for connecting to Circles servers with authentication support. Available in both CLI and WebAssembly (WASM) versions.
## CLI Usage
### Installation
Build the CLI binary:
```bash
cargo build --bin circles_client --release
```
### Configuration
Create a `.env` file in the `cmd/` directory:
```bash
# cmd/.env
PRIVATE_KEY=your_actual_private_key_hex_here
```
Or set the environment variable directly:
```bash
export PRIVATE_KEY=your_actual_private_key_hex_here
```
### Usage
```bash
# Basic usage - connects and enters interactive mode
circles_client ws://localhost:8080
# Execute a single Rhai script
circles_client -s "print('Hello from Rhai!')" ws://localhost:8080
# Execute a script from file
circles_client -f script.rhai ws://localhost:8080
# Increase verbosity (can be used multiple times)
circles_client -v ws://localhost:8080
circles_client -vv ws://localhost:8080
```
### Features
- **Authentication**: Automatically loads private key and completes secp256k1 authentication flow
- **Script Execution**: Supports both inline scripts (`-s`) and script files (`-f`)
- **Interactive Mode**: When no script is provided, enters interactive REPL mode
- **Verbosity Control**: Use `-v` flags to increase logging detail
- **Cross-platform**: Works on all platforms supported by Rust and tokio-tungstenite
## WebAssembly (WASM) Usage
### Build and Serve
1. Install Trunk:
```bash
cargo install trunk
```
2. Build the WASM version:
```bash
trunk build --release
```
3. Serve the application:
```bash
trunk serve
```
The application will be available at `http://localhost:8080`
### Usage in Browser
1. Open the served page in your browser
2. Enter the WebSocket server URL
3. Choose either:
- Execute a Rhai script directly
- Enter interactive mode (type 'exit' or 'quit' to leave)
### Features
- **Browser Integration**: Uses browser's WebSocket implementation
- **Interactive Mode**: Browser-based input/output using prompts
- **Error Handling**: Browser console logging
- **Cross-browser**: Works in all modern browsers supporting WebAssembly
## Common Features
Both versions share the same core functionality:
- **WebSocket Connection**: Connects to Circles WebSocket server
- **Authentication**: Handles secp256k1 authentication
- **Script Execution**: Executes Rhai scripts
- **Interactive Mode**: Provides REPL-like interface
- **Error Handling**: Comprehensive error reporting
- **Logging**: Detailed logging at different verbosity levels
### Interactive Mode
When run without `-s` or `-f` flags, the client enters interactive mode where you can:
- Enter Rhai scripts line by line
- Type `exit` or `quit` to close the connection
- Use Ctrl+C to terminate
### Examples
```bash
# Connect to local development server
circles_client ws://localhost:8080
# Connect to secure WebSocket with verbose logging
circles_client -v wss://circles.example.com/ws
# Execute a simple calculation
circles_client -s "let result = 2 + 2; print(result);" ws://localhost:8080
# Load and execute a complex script
circles_client -f examples/complex_script.rhai ws://localhost:8080
```
### Error Handling
The client provides clear error messages for common issues:
- Missing or invalid private key
- Connection failures
- Authentication errors
- Script execution errors
### Dependencies
- `tokio-tungstenite`: WebSocket client implementation
- `secp256k1`: Cryptographic authentication
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure
- `dotenv`: Environment variable loading

View File

@ -0,0 +1,118 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Circles WebSocket Client</title>
<link data-trunk rel="rust" href="../Cargo.toml" data-wasm-opt="z" />
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
.container {
background: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.input-group {
margin-bottom: 20px;
}
input[type="text"] {
padding: 8px;
border: 1px solid #ddd;
border-radius: 4px;
width: 100%;
margin-top: 8px;
}
button {
background: #007bff;
color: white;
padding: 8px 16px;
border: none;
border-radius: 4px;
cursor: pointer;
}
button:hover {
background: #0056b3;
}
pre {
background: #f5f5f5;
padding: 15px;
border-radius: 4px;
overflow-x: auto;
}
</style>
</head>
<body>
<div class="container">
<h1>Circles WebSocket Client</h1>
<div class="input-group">
<label for="ws-url">WebSocket URL:</label>
<input type="text" id="ws-url" placeholder="ws://localhost:8080">
</div>
<div class="input-group">
<label for="script">Rhai Script:</label>
<input type="text" id="script" placeholder="Enter Rhai script here">
</div>
<button id="run-script">Run Script</button>
<button id="run-interactive">Interactive Mode</button>
<div id="output" style="margin-top: 20px;">
<h2>Output:</h2>
<pre id="output-content"></pre>
</div>
</div>
<script type="module">
// Trunk will inject the necessary JS to load the WASM module.
// The wasm_bindgen functions will be available on the `window` object.
async function main() {
// The `wasm_bindgen` object is exposed globally by the Trunk-injected script.
const { start_client } = wasm_bindgen;
document.getElementById('run-script').addEventListener('click', async () => {
const url = document.getElementById('ws-url').value;
const script = document.getElementById('script').value;
if (!url) {
alert('Please enter a WebSocket URL');
return;
}
try {
// The init function is called automatically by Trunk's setup.
const result = await start_client(url, script);
document.getElementById('output-content').textContent = result;
} catch (error) {
console.error('Error:', error);
document.getElementById('output-content').textContent = `Error: ${error}`;
}
});
document.getElementById('run-interactive').addEventListener('click', async () => {
const url = document.getElementById('ws-url').value;
if (!url) {
alert('Please enter a WebSocket URL');
return;
}
try {
// The init function is called automatically by Trunk's setup.
await start_client(url, null);
} catch (error) {
console.error('Error:', error);
alert(`Error: ${error}`);
}
});
}
// Calling `wasm_bindgen(...)` loads the WASM module and returns a promise that resolves once it is ready.
wasm_bindgen('./pkg/circle_client_ws_bg.wasm').then(main).catch(console.error);
</script>
</body>
</html>

View File

@ -0,0 +1,342 @@
#![cfg_attr(target_arch = "wasm32", no_main)]
use hero_websocket_client::CircleWsClientBuilder;
#[cfg(not(target_arch = "wasm32"))]
use std::env;
#[cfg(not(target_arch = "wasm32"))]
use std::path::Path;
#[cfg(not(target_arch = "wasm32"))]
use std::io::{self, Write};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
use web_sys::{console, window};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::spawn_local;
#[cfg(not(target_arch = "wasm32"))]
use clap::{Arg, ArgAction, Command};
#[cfg(not(target_arch = "wasm32"))]
use dotenv::dotenv;
#[cfg(not(target_arch = "wasm32"))]
use env_logger;
#[cfg(not(target_arch = "wasm32"))]
use tokio;
#[cfg(not(target_arch = "wasm32"))]
use log::{error, info};
#[cfg(not(target_arch = "wasm32"))]
#[derive(Debug)]
struct Args {
ws_url: String,
script: Option<String>,
script_path: Option<String>,
verbose: u8,
no_timestamp: bool,
}
#[cfg(not(target_arch = "wasm32"))]
fn parse_args() -> Args {
let matches = Command::new("circles_client")
.version("0.1.0")
.about("WebSocket client for Circles server")
.arg(
Arg::new("url")
.help("WebSocket server URL")
.required(true)
.index(1),
)
.arg(
Arg::new("script")
.short('s')
.long("script")
.value_name("SCRIPT")
.help("Rhai script to execute")
.conflicts_with("script_path"),
)
.arg(
Arg::new("script_path")
.short('f')
.long("file")
.value_name("FILE")
.help("Path to Rhai script file")
.conflicts_with("script"),
)
.arg(
Arg::new("verbose")
.short('v')
.long("verbose")
.help("Increase verbosity (can be used multiple times)")
.action(ArgAction::Count),
)
.arg(
Arg::new("no_timestamp")
.long("no-timestamp")
.help("Remove timestamps from log output")
.action(ArgAction::SetTrue),
)
.get_matches();
Args {
ws_url: matches.get_one::<String>("url").unwrap().clone(),
script: matches.get_one::<String>("script").cloned(),
script_path: matches.get_one::<String>("script_path").cloned(),
verbose: matches.get_count("verbose"),
no_timestamp: matches.get_flag("no_timestamp"),
}
}
#[cfg(not(target_arch = "wasm32"))]
fn setup_logging(verbose: u8, no_timestamp: bool) {
let log_level = match verbose {
0 => "warn,hero_websocket_client=info",
1 => "info,hero_websocket_client=debug",
2 => "debug",
_ => "trace",
};
std::env::set_var("RUST_LOG", log_level);
// Configure env_logger with or without timestamps
if no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
}
#[cfg(not(target_arch = "wasm32"))]
fn load_private_key() -> Result<String, Box<dyn std::error::Error>> {
// Try to load from .env file first
if let Ok(_) = dotenv() {
if let Ok(key) = env::var("PRIVATE_KEY") {
return Ok(key);
}
}
// Try to load from cmd/.env file
let cmd_env_path = Path::new("cmd/.env");
if cmd_env_path.exists() {
dotenv::from_path(cmd_env_path)?;
if let Ok(key) = env::var("PRIVATE_KEY") {
return Ok(key);
}
}
Err("PRIVATE_KEY not found in environment or .env files".into())
}
#[cfg(target_arch = "wasm32")]
async fn run_interactive_mode(client: hero_websocket_client::CircleWsClient) -> Result<(), Box<dyn std::error::Error>> {
console::log_1(&"Entering interactive mode. Type 'exit' or 'quit' to leave.".into());
console::log_1(&"🔄 Interactive mode - Enter Rhai scripts (type 'exit' or 'quit' to leave):\n".into());
// In wasm32, we need to use browser's console for input/output
let window = window().expect("Window not available");
let input = window.prompt_with_message("Enter Rhai script (or 'exit' to quit):")
.map_err(|e| format!("Failed to get input: {:#?}", e))? // Use debug formatting
.unwrap_or_default();
// Handle empty or exit cases
if input == "exit" || input == "quit" {
console::log_1(&"👋 Goodbye!".into());
return Ok(());
}
// Execute the script
match client.play(input).await {
Ok(result) => {
console::log_1(&format!("📤 Result: {}", result.output).into());
}
Err(e) => {
console::log_1(&format!("❌ Script execution failed: {}", e).into());
}
}
Ok(())
}
#[cfg(target_arch = "wasm32")]
async fn execute_script(client: hero_websocket_client::CircleWsClient, script: String) -> Result<(), Box<dyn std::error::Error>> {
console::log_1(&format!("Executing script: {}", script).into());
match client.play(script).await {
Ok(result) => {
console::log_1(&result.output.into());
Ok(())
}
Err(e) => {
console::log_1(&format!("Script execution failed: {}", e).into());
Err(e.into())
}
}
}
#[cfg(target_arch = "wasm32")]
pub async fn start_client(url: &str, script: Option<String>) -> Result<(), Box<dyn std::error::Error>> {
// Build client
let mut client = CircleWsClientBuilder::new(url.to_string())
.build();
// Connect to WebSocket server
console::log_1(&"🔌 Connecting to WebSocket server...".into());
if let Err(e) = client.connect().await {
console::log_1(&format!("❌ Failed to connect: {}", e).into());
return Err(e.into());
}
console::log_1(&"✅ Connected successfully".into());
    // Authenticate with server. authenticate() returns Ok(false) when the
    // server rejects the credentials, so both failure cases must be handled.
    match client.authenticate().await {
        Ok(true) => console::log_1(&"✅ Authentication successful".into()),
        Ok(false) => {
            console::log_1(&"❌ Authentication failed - server rejected credentials".into());
            return Err("authentication rejected by server".into());
        }
        Err(e) => {
            console::log_1(&format!("❌ Authentication failed: {}", e).into());
            return Err(e.into());
        }
    }
// Handle script execution
if let Some(script) = script {
execute_script(client, script).await
} else {
run_interactive_mode(client).await
}
}
#[cfg(not(target_arch = "wasm32"))]
async fn execute_script(client: hero_websocket_client::CircleWsClient, script: String) -> Result<(), Box<dyn std::error::Error>> {
info!("Executing script: {}", script);
match client.play(script).await {
Ok(result) => {
println!("{}", result.output);
Ok(())
}
Err(e) => {
error!("Script execution failed: {}", e);
Err(e.into())
}
}
}
#[cfg(not(target_arch = "wasm32"))]
async fn load_script_from_file(path: &str) -> Result<String, Box<dyn std::error::Error>> {
let script = tokio::fs::read_to_string(path).await?;
Ok(script)
}
#[cfg(not(target_arch = "wasm32"))]
async fn run_interactive_mode(client: hero_websocket_client::CircleWsClient) -> Result<(), Box<dyn std::error::Error>> {
println!("\n🔄 Interactive mode - Enter Rhai scripts (type 'exit' or 'quit' to leave):\n");
loop {
print!("Enter Rhai script (or 'exit' to quit): ");
io::stdout().flush()?;
let mut input = String::new();
io::stdin().read_line(&mut input)?;
let input = input.trim().to_string();
if input == "exit" || input == "quit" {
println!("\n👋 Goodbye!");
return Ok(());
}
match client.play(input).await {
Ok(result) => {
println!("\n📤 Result: {}", result.output);
}
Err(e) => {
error!("❌ Script execution failed: {}", e);
println!("\n❌ Script execution failed: {}", e);
}
}
println!();
}
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = parse_args();
setup_logging(args.verbose, args.no_timestamp);
info!("🚀 Starting Circles WebSocket client");
info!("📡 Connecting to: {}", args.ws_url);
// Load private key from environment
let private_key = match load_private_key() {
Ok(key) => {
info!("🔑 Private key loaded from environment");
key
}
Err(e) => {
error!("❌ Failed to load private key: {}", e);
eprintln!("Error: {}", e);
eprintln!("Please set PRIVATE_KEY in your environment or create a cmd/.env file with:");
eprintln!("PRIVATE_KEY=your_private_key_here");
std::process::exit(1);
}
};
// Build client with private key
let mut client = CircleWsClientBuilder::new(args.ws_url.clone())
.with_keypair(private_key)
.build();
// Connect to WebSocket server
info!("🔌 Connecting to WebSocket server...");
if let Err(e) = client.connect().await {
error!("❌ Failed to connect: {}", e);
eprintln!("Connection failed: {}", e);
std::process::exit(1);
}
info!("✅ Connected successfully");
// Authenticate with server
info!("🔐 Authenticating with server...");
match client.authenticate().await {
Ok(true) => {
info!("✅ Authentication successful");
println!("🔐 Authentication successful");
}
Ok(false) => {
error!("❌ Authentication failed");
eprintln!("Authentication failed");
std::process::exit(1);
}
Err(e) => {
error!("❌ Authentication error: {}", e);
eprintln!("Authentication error: {}", e);
std::process::exit(1);
}
}
// Determine execution mode
let result = if let Some(script) = args.script {
// Execute provided script and exit
execute_script(client, script).await
} else if let Some(script_path) = args.script_path {
// Load script from file and execute
match load_script_from_file(&script_path).await {
Ok(script) => execute_script(client, script).await,
Err(e) => {
error!("❌ Failed to load script from file '{}': {}", script_path, e);
eprintln!("Failed to load script file: {}", e);
std::process::exit(1);
}
}
} else {
// Enter interactive mode
run_interactive_mode(client).await
};
// Handle any errors from execution
if let Err(e) = result {
error!("❌ Execution failed: {}", e);
std::process::exit(1);
}
info!("🏁 Client finished successfully");
Ok(())
}

View File

@ -0,0 +1,273 @@
//! Cryptographic utilities for secp256k1 operations
//!
//! This module provides functions for:
//! - Private key validation and parsing
//! - Public key derivation
//! - Ethereum-style message signing
//! - Signature verification
use crate::auth::types::{AuthError, AuthResult};
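/// Generate a new random keypair, returned as `(public_key_hex, private_key_hex)`.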
pub fn generate_keypair() -> AuthResult<(String, String)> {
let private_key = generate_private_key()?;
let public_key = derive_public_key(&private_key)?;
Ok((public_key, private_key))
}
/// Generate a new random private key
pub fn generate_private_key() -> AuthResult<String> {
#[cfg(feature = "crypto")]
{
use rand::rngs::OsRng;
use k256::ecdsa::SigningKey;
let signing_key = SigningKey::random(&mut OsRng);
Ok(hex::encode(signing_key.to_bytes()))
}
#[cfg(not(feature = "crypto"))]
{
// Fallback implementation for when crypto features are not available
use rand::Rng;
let mut rng = rand::thread_rng();
let bytes: [u8; 32] = rng.gen();
Ok(hex::encode(bytes))
}
}
/// Parse a hex-encoded private key
pub fn parse_private_key(private_key_hex: &str) -> AuthResult<Vec<u8>> {
// Remove 0x prefix if present
let clean_hex = private_key_hex
.strip_prefix("0x")
.unwrap_or(private_key_hex);
// Decode hex
let bytes = hex::decode(clean_hex)
.map_err(|e| AuthError::InvalidPrivateKey(format!("Invalid hex: {}", e)))?;
// Validate length
if bytes.len() != 32 {
return Err(AuthError::InvalidPrivateKey(format!(
"Private key must be 32 bytes, got {}",
bytes.len()
)));
}
Ok(bytes)
}
/// Derive public key from private key
pub fn derive_public_key(private_key_hex: &str) -> AuthResult<String> {
#[cfg(feature = "crypto")]
{
use k256::ecdsa::SigningKey;
use k256::elliptic_curve::sec1::ToEncodedPoint;
let key_bytes = parse_private_key(private_key_hex)?;
let signing_key = SigningKey::from_slice(&key_bytes)
.map_err(|e| AuthError::InvalidPrivateKey(format!("Invalid key: {}", e)))?;
let verifying_key = signing_key.verifying_key();
let encoded_point = verifying_key.to_encoded_point(false); // false = uncompressed
// Return uncompressed public key (65 bytes with 0x04 prefix)
Ok(hex::encode(encoded_point.as_bytes()))
}
#[cfg(not(feature = "crypto"))]
{
// Fallback implementation - generate a mock public key
let key_bytes = parse_private_key(private_key_hex)?;
let mut public_key_bytes = vec![0x04u8]; // Uncompressed prefix
public_key_bytes.extend_from_slice(&key_bytes);
public_key_bytes.extend_from_slice(&key_bytes); // Double for 65 bytes total
public_key_bytes.truncate(65);
Ok(hex::encode(public_key_bytes))
}
}
/// Create Ethereum-style message hash
/// This follows the Ethereum standard: keccak256("\x19Ethereum Signed Message:\n" + len(message) + message)
fn create_eth_message_hash(message: &str) -> Vec<u8> {
let prefix = format!("\x19Ethereum Signed Message:\n{}", message.len());
let full_message = format!("{}{}", prefix, message);
#[cfg(feature = "crypto")]
{
use sha3::{Digest, Keccak256};
let mut hasher = Keccak256::new();
hasher.update(full_message.as_bytes());
hasher.finalize().to_vec()
}
    #[cfg(not(feature = "crypto"))]
    {
        // Fallback: derive a deterministic pseudo-hash with DefaultHasher,
        // repeated out to 32 bytes so callers (e.g. the fallback sign_message,
        // which slices `[..32]`) do not panic on an 8-byte digest.
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::new();
        full_message.hash(&mut hasher);
        let seed = hasher.finish().to_be_bytes();
        seed.iter().cycle().take(32).copied().collect()
    }
}
/// Sign a message using Ethereum-style signing
pub fn sign_message(private_key_hex: &str, message: &str) -> AuthResult<String> {
#[cfg(feature = "crypto")]
{
use k256::ecdsa::{SigningKey, signature::Signer};
let key_bytes = parse_private_key(private_key_hex)?;
let signing_key = SigningKey::from_slice(&key_bytes)
.map_err(|e| AuthError::InvalidPrivateKey(format!("Invalid private key: {}", e)))?;
// Create message hash
let message_hash = create_eth_message_hash(message);
// Sign the hash
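        // NOTE: k256's `Signer` impl hashes its input again (SHA-256 by default),
        // so this effectively signs sha256(eth_message_hash). verify_signature()
        // below uses the matching `Verifier`, so sign/verify are self-consistent
        // within this module, though not byte-compatible with external Ethereum tooling.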
let signature: k256::ecdsa::Signature = signing_key.sign(&message_hash);
// Convert to recoverable signature format (65 bytes with recovery ID)
let sig_bytes = signature.to_bytes();
let mut full_sig = [0u8; 65];
full_sig[..64].copy_from_slice(&sig_bytes);
// Calculate recovery ID (simplified - in production you'd want proper recovery)
full_sig[64] = 0; // Recovery ID placeholder
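        // verify_signature() below checks only the r,s components and ignores
        // this byte, so the placeholder round-trips within this module; a real
        // Ethereum-style ecrecover would require a proper recovery ID.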
Ok(hex::encode(full_sig))
}
#[cfg(not(feature = "crypto"))]
{
// Fallback implementation - generate a mock signature
let key_bytes = parse_private_key(private_key_hex)?;
let message_hash = create_eth_message_hash(message);
// Create a deterministic but fake signature
let mut sig_bytes = Vec::with_capacity(65);
sig_bytes.extend_from_slice(&key_bytes);
sig_bytes.extend_from_slice(&message_hash[..32]);
sig_bytes.push(27); // Recovery ID
sig_bytes.truncate(65);
Ok(hex::encode(sig_bytes))
}
}
/// Verify an Ethereum-style signature
pub fn verify_signature(
public_key_hex: &str,
message: &str,
signature_hex: &str,
) -> AuthResult<bool> {
#[cfg(feature = "crypto")]
{
use k256::ecdsa::{Signature, VerifyingKey, signature::Verifier};
use k256::EncodedPoint;
// Remove 0x prefix if present
let clean_pubkey = public_key_hex.strip_prefix("0x").unwrap_or(public_key_hex);
let clean_sig = signature_hex.strip_prefix("0x").unwrap_or(signature_hex);
// Decode public key
let pubkey_bytes = hex::decode(clean_pubkey)
.map_err(|e| AuthError::InvalidSignature(format!("Invalid public key hex: {}", e)))?;
let encoded_point = EncodedPoint::from_bytes(&pubkey_bytes)
.map_err(|e| AuthError::InvalidSignature(format!("Invalid public key format: {}", e)))?;
let verifying_key = VerifyingKey::from_encoded_point(&encoded_point)
.map_err(|e| AuthError::InvalidSignature(format!("Invalid public key: {}", e)))?;
// Decode signature
let sig_bytes = hex::decode(clean_sig)
.map_err(|e| AuthError::InvalidSignature(format!("Invalid signature hex: {}", e)))?;
if sig_bytes.len() != 65 {
return Err(AuthError::InvalidSignature(format!(
"Signature must be 65 bytes, got {}",
sig_bytes.len()
)));
}
// Extract r, s components (ignore recovery byte for verification)
let signature = Signature::from_slice(&sig_bytes[..64])
.map_err(|e| AuthError::InvalidSignature(format!("Invalid signature format: {}", e)))?;
// Create message hash
let message_hash = create_eth_message_hash(message);
// Verify signature
match verifying_key.verify(&message_hash, &signature) {
Ok(()) => Ok(true),
Err(_) => Ok(false),
}
}
#[cfg(not(feature = "crypto"))]
{
// Fallback implementation - basic validation
let clean_pubkey = public_key_hex.strip_prefix("0x").unwrap_or(public_key_hex);
let clean_sig = signature_hex.strip_prefix("0x").unwrap_or(signature_hex);
// Basic validation
if clean_pubkey.len() != 130 {
// 65 bytes as hex
return Err(AuthError::InvalidSignature(
"Invalid public key length".to_string(),
));
}
if clean_sig.len() != 130 {
// 65 bytes as hex
return Err(AuthError::InvalidSignature(
"Invalid signature length".to_string(),
));
}
        // In fallback (non-crypto) builds, accept any well-formed signature
Ok(true)
}
}
/// Validate that a private key is valid
pub fn validate_private_key(private_key_hex: &str) -> AuthResult<()> {
parse_private_key(private_key_hex)?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_key_generation_and_derivation() {
let private_key = generate_private_key().unwrap();
let public_key = derive_public_key(&private_key).unwrap();
assert_eq!(private_key.len(), 64); // 32 bytes as hex
assert_eq!(public_key.len(), 130); // 65 bytes as hex (uncompressed)
assert!(public_key.starts_with("04")); // Uncompressed public key prefix
}
#[test]
fn test_signing_and_verification() {
let private_key = generate_private_key().unwrap();
let public_key = derive_public_key(&private_key).unwrap();
let message = "Hello, World!";
let signature = sign_message(&private_key, message).unwrap();
let is_valid = verify_signature(&public_key, message, &signature).unwrap();
assert!(is_valid);
assert_eq!(signature.len(), 130); // 65 bytes as hex
}
#[test]
fn test_invalid_private_key() {
let result = validate_private_key("invalid_hex");
assert!(result.is_err());
let result = validate_private_key("0x1234"); // Too short
assert!(result.is_err());
}
}

View File

@ -0,0 +1,113 @@
//! Authentication module for Circle WebSocket client
//!
//! This module provides core cryptographic authentication support for WebSocket connections
//! using secp256k1 signatures. It includes:
//!
//! - **Cryptographic utilities**: Key generation, signing, and verification
//! - **Nonce management**: Fetching nonces from authentication servers
//! - **Basic types**: Core authentication data structures
//!
//! ## Features
//!
//! - **Cross-platform**: Works in both WASM and native environments
//! - **Ethereum-compatible**: Uses Ethereum-style message signing
//! - **Secure**: Implements proper nonce-based replay protection
//!
//! ## Usage
//!
//! ```rust
//! use circle_client_ws::auth::{generate_private_key, derive_public_key, sign_message};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Generate a private key
//! let private_key = generate_private_key()?;
//!
//! // Derive the public key from the private key
//! let public_key = derive_public_key(&private_key)?;
//!
//! // The nonce would typically be fetched from a server
//! let nonce = "some_nonce_from_server";
//!
//! // Sign the nonce
//! let signature = sign_message(&private_key, nonce)?;
//! # Ok(())
//! # }
//! ```
pub mod types;
pub use types::{AuthCredentials, AuthError, AuthResult, NonceResponse};
pub mod crypto_utils;
pub use crypto_utils::{
derive_public_key, generate_keypair, generate_private_key, parse_private_key, sign_message,
validate_private_key, verify_signature,
};
/// Check if the authentication feature is enabled
///
/// This function can be used to conditionally enable authentication features
/// based on compile-time feature flags.
///
/// # Returns
///
/// `true` if crypto features are available, `false` otherwise
pub fn is_auth_enabled() -> bool {
cfg!(feature = "crypto")
}
/// Get version information for the authentication module
///
/// # Returns
///
/// A string containing version and feature information
pub fn auth_version_info() -> String {
let crypto_status = if cfg!(feature = "crypto") {
"enabled"
} else {
"disabled (fallback mode)"
};
let platform = if cfg!(target_arch = "wasm32") {
"WASM"
} else {
"native"
};
format!(
"circles-client-ws auth module - crypto: {}, platform: {}",
crypto_status, platform
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_module_exports() {
// Test utility functions
assert!(auth_version_info().contains("circles-client-ws auth module"));
// Test feature detection
let _is_enabled = is_auth_enabled();
}
#[test]
fn test_version_info() {
let version = auth_version_info();
assert!(version.contains("circles-client-ws auth module"));
assert!(version.contains("crypto:"));
assert!(version.contains("platform:"));
}
}

View File

@ -0,0 +1,128 @@
//! Authentication types for Circle WebSocket client
//!
//! This module defines the core types used in the authentication system,
//! including error types, response structures, and authentication states.
use serde::{Deserialize, Serialize};
use thiserror::Error;
/// Result type for authentication operations
pub type AuthResult<T> = Result<T, AuthError>;
/// Authentication error types
#[derive(Error, Debug, Clone)]
pub enum AuthError {
#[error("Invalid private key: {0}")]
InvalidPrivateKey(String),
#[error("Invalid URL: {0}")]
InvalidUrl(String),
#[error("Nonce request failed: {0}")]
NonceRequestFailed(String),
#[error("Signing failed: {0}")]
SigningFailed(String),
#[error("Network error: {0}")]
NetworkError(String),
#[error("Invalid signature: {0}")]
InvalidSignature(String),
#[error("Invalid credentials: {0}")]
InvalidCredentials(String),
}
/// Response from nonce endpoint
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NonceResponse {
/// The cryptographic nonce
pub nonce: String,
/// Expiration timestamp (seconds since epoch)
pub expires_at: u64,
}
/// Authentication credentials for WebSocket connection
#[derive(Debug, Clone)]
pub struct AuthCredentials {
/// Public key in hex format
pub public_key: String,
/// Signature of the nonce
pub signature: String,
/// Nonce that was signed
pub nonce: String,
/// Expiration timestamp (seconds since epoch)
pub expires_at: u64,
}
impl AuthCredentials {
/// Create new authentication credentials
pub fn new(public_key: String, signature: String, nonce: String, expires_at: u64) -> Self {
Self {
public_key,
signature,
nonce,
expires_at,
}
}
/// Get the public key
pub fn public_key(&self) -> &str {
&self.public_key
}
/// Get the signature
pub fn signature(&self) -> &str {
&self.signature
}
/// Get the nonce
pub fn nonce(&self) -> &str {
&self.nonce
}
/// Check if credentials have expired
pub fn is_expired(&self) -> bool {
use std::time::{SystemTime, UNIX_EPOCH};
if let Ok(current_time) = SystemTime::now().duration_since(UNIX_EPOCH) {
let current_timestamp = current_time.as_secs();
current_timestamp >= self.expires_at
} else {
true // If we can't get current time, assume expired for safety
}
}
/// Check if credentials expire within the given number of seconds
pub fn expires_within(&self, seconds: u64) -> bool {
use std::time::{SystemTime, UNIX_EPOCH};
if let Ok(current_time) = SystemTime::now().duration_since(UNIX_EPOCH) {
let current_timestamp = current_time.as_secs();
self.expires_at <= current_timestamp + seconds
} else {
true // If we can't get current time, assume expiring soon for safety
}
}
}
/// Authentication state for tracking connection status
#[derive(Debug, Clone, PartialEq)]
pub enum AuthState {
/// Not authenticated
NotAuthenticated,
/// Currently authenticating
Authenticating,
/// Successfully authenticated
Authenticated { public_key: String },
/// Authentication failed
Failed(String),
}
/// Authentication method used
#[derive(Debug, Clone, PartialEq)]
pub enum AuthMethod {
/// Private key authentication
PrivateKey,
}
impl std::fmt::Display for AuthMethod {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AuthMethod::PrivateKey => write!(f, "Private Key"),
}
}
}
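// Illustrative test sketch for the expiry helpers above: a credential that
// expires one hour from now is not expired, and expires within two hours.
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::{SystemTime, UNIX_EPOCH};

    #[test]
    fn test_credential_expiry() {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        let creds = AuthCredentials::new(
            "pk".to_string(),
            "sig".to_string(),
            "nonce".to_string(),
            now + 3600,
        );
        assert!(!creds.is_expired());
        assert!(creds.expires_within(7200));
        assert!(!creds.expires_within(60));
    }
}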

View File

@ -0,0 +1,994 @@
use futures_channel::{mpsc, oneshot};
use futures_util::{FutureExt, SinkExt, StreamExt};
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use thiserror::Error;
use uuid::Uuid;
// Authentication module
pub mod auth;
pub use auth::{AuthCredentials, AuthError, AuthResult};
// Platform-specific WebSocket imports and spawn function
#[cfg(target_arch = "wasm32")]
use {
gloo_net::websocket::{futures::WebSocket, Message as GlooWsMessage},
wasm_bindgen_futures::spawn_local,
};
#[cfg(not(target_arch = "wasm32"))]
use {
tokio::spawn as spawn_local,
tokio_tungstenite::{
connect_async, connect_async_tls_with_config,
tungstenite::{
protocol::Message as TungsteniteWsMessage,
},
Connector,
},
};
// JSON-RPC Structures (client-side perspective)
#[derive(Serialize, Debug, Clone)]
pub struct JsonRpcRequestClient {
jsonrpc: String,
method: String,
params: Value,
id: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct JsonRpcResponseClient {
#[allow(dead_code)]
// Field is part of JSON-RPC spec, even if not directly used by client logic
jsonrpc: String,
pub result: Option<Value>,
pub error: Option<JsonRpcErrorClient>,
pub id: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct JsonRpcErrorClient {
pub code: i32,
pub message: String,
pub data: Option<Value>,
}
#[derive(Serialize, Debug, Clone)]
pub struct PlayParamsClient {
pub script: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct PlayResultClient {
pub output: String,
}
#[derive(Serialize, Debug, Clone)]
pub struct AuthCredentialsParams {
pub pubkey: String,
pub signature: String,
}
#[derive(Serialize, Debug, Clone)]
pub struct FetchNonceParams {
pub pubkey: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct FetchNonceResponse {
pub nonce: String,
}
#[derive(Error, Debug)]
pub enum CircleWsClientError {
#[error("WebSocket connection error: {0}")]
ConnectionError(String),
#[error("WebSocket send error: {0}")]
SendError(String),
#[error("WebSocket receive error: {0}")]
ReceiveError(String),
#[error("JSON serialization/deserialization error: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Request timed out for request ID: {0}")]
Timeout(String),
#[error("JSON-RPC error response: {code} - {message}")]
JsonRpcError {
code: i32,
message: String,
data: Option<Value>,
},
#[error("No response received for request ID: {0}")]
NoResponse(String),
#[error("Client is not connected")]
NotConnected,
#[error("Internal channel error: {0}")]
ChannelError(String),
#[error("Authentication error: {0}")]
Auth(#[from] auth::AuthError),
#[error("Authentication requires a keypair, but none was provided.")]
AuthNoKeyPair,
}
// Wrapper for messages sent to the WebSocket task
enum InternalWsMessage {
SendJsonRpc(
JsonRpcRequestClient,
oneshot::Sender<Result<JsonRpcResponseClient, CircleWsClientError>>,
),
SendPlaintext(
String,
oneshot::Sender<Result<String, CircleWsClientError>>,
),
Close,
}
pub struct CircleWsClientBuilder {
ws_url: String,
private_key: Option<String>,
}
impl CircleWsClientBuilder {
pub fn new(ws_url: String) -> Self {
Self {
ws_url,
private_key: None,
}
}
pub fn with_keypair(mut self, private_key: String) -> Self {
self.private_key = Some(private_key);
self
}
pub fn build(self) -> CircleWsClient {
CircleWsClient {
ws_url: self.ws_url,
internal_tx: None,
#[cfg(not(target_arch = "wasm32"))]
task_handle: None,
private_key: self.private_key,
is_connected: Arc::new(Mutex::new(false)),
}
}
}
pub struct CircleWsClient {
ws_url: String,
internal_tx: Option<mpsc::Sender<InternalWsMessage>>,
#[cfg(not(target_arch = "wasm32"))]
task_handle: Option<tokio::task::JoinHandle<()>>,
private_key: Option<String>,
is_connected: Arc<Mutex<bool>>,
}
impl CircleWsClient {
/// Get the connection status
pub fn get_connection_status(&self) -> String {
if *self.is_connected.lock().unwrap() {
"Connected".to_string()
} else {
"Disconnected".to_string()
}
}
/// Check if the client is connected
pub fn is_connected(&self) -> bool {
*self.is_connected.lock().unwrap()
}
}
impl CircleWsClient {
pub async fn authenticate(&mut self) -> Result<bool, CircleWsClientError> {
info!("🔐 [{}] Starting authentication process...", self.ws_url);
let private_key = self
.private_key
.as_ref()
.ok_or(CircleWsClientError::AuthNoKeyPair)?;
info!("🔑 [{}] Deriving public key from private key...", self.ws_url);
let public_key = auth::derive_public_key(private_key)?;
info!("✅ [{}] Public key derived: {}...", self.ws_url, &public_key[..8]);
info!("🎫 [{}] Fetching authentication nonce...", self.ws_url);
let nonce = self.fetch_nonce(&public_key).await?;
info!("✅ [{}] Nonce received: {}...", self.ws_url, &nonce[..8]);
info!("✍️ [{}] Signing nonce with private key...", self.ws_url);
let signature = auth::sign_message(private_key, &nonce)?;
info!("✅ [{}] Signature created: {}...", self.ws_url, &signature[..8]);
info!("🔒 [{}] Submitting authentication credentials...", self.ws_url);
let result = self.authenticate_with_signature(&public_key, &signature).await?;
if result {
info!("🎉 [{}] Authentication successful!", self.ws_url);
} else {
error!("❌ [{}] Authentication failed - server rejected credentials", self.ws_url);
}
Ok(result)
}
async fn fetch_nonce(&self, pubkey: &str) -> Result<String, CircleWsClientError> {
info!("📡 [{}] Sending fetch_nonce request for pubkey: {}...", self.ws_url, &pubkey[..8]);
let params = FetchNonceParams {
pubkey: pubkey.to_string(),
};
let req = self.create_request("fetch_nonce", params)?;
let res = self.send_request(req).await?;
if let Some(err) = res.error {
error!("❌ [{}] fetch_nonce failed: {} (code: {})", self.ws_url, err.message, err.code);
return Err(CircleWsClientError::JsonRpcError {
code: err.code,
message: err.message,
data: err.data,
});
}
let nonce_res: FetchNonceResponse = serde_json::from_value(res.result.unwrap_or_default())?;
info!("✅ [{}] fetch_nonce successful, nonce length: {}", self.ws_url, nonce_res.nonce.len());
Ok(nonce_res.nonce)
}
async fn authenticate_with_signature(
&self,
pubkey: &str,
signature: &str,
) -> Result<bool, CircleWsClientError> {
info!("📡 [{}] Sending authenticate request with signature...", self.ws_url);
let params = AuthCredentialsParams {
pubkey: pubkey.to_string(),
signature: signature.to_string(),
};
let req = self.create_request("authenticate", params)?;
let res = self.send_request(req).await?;
if let Some(err) = res.error {
error!("❌ [{}] authenticate failed: {} (code: {})", self.ws_url, err.message, err.code);
return Err(CircleWsClientError::JsonRpcError {
code: err.code,
message: err.message,
data: err.data,
});
}
let authenticated = res
.result
.and_then(|v| v.get("authenticated").and_then(|v| v.as_bool()))
.unwrap_or(false);
if authenticated {
info!("✅ [{}] authenticate request successful - server confirmed authentication", self.ws_url);
} else {
error!("❌ [{}] authenticate request failed - server returned false", self.ws_url);
}
Ok(authenticated)
}
/// Call the whoami method to get authentication status and user information
pub async fn whoami(&self) -> Result<Value, CircleWsClientError> {
let req = self.create_request("whoami", serde_json::json!({}))?;
let response = self.send_request(req).await?;
if let Some(result) = response.result {
Ok(result)
} else if let Some(error) = response.error {
Err(CircleWsClientError::JsonRpcError {
code: error.code,
message: error.message,
data: error.data,
})
} else {
Err(CircleWsClientError::NoResponse("whoami".to_string()))
}
}
fn create_request<T: Serialize>(
&self,
method: &str,
params: T,
) -> Result<JsonRpcRequestClient, CircleWsClientError> {
Ok(JsonRpcRequestClient {
jsonrpc: "2.0".to_string(),
method: method.to_string(),
params: serde_json::to_value(params)?,
id: Uuid::new_v4().to_string(),
})
}
async fn send_request(
&self,
req: JsonRpcRequestClient,
) -> Result<JsonRpcResponseClient, CircleWsClientError> {
let (response_tx, response_rx) = oneshot::channel();
if let Some(mut tx) = self.internal_tx.clone() {
tx.send(InternalWsMessage::SendJsonRpc(req.clone(), response_tx))
.await
.map_err(|e| {
CircleWsClientError::ChannelError(format!(
"Failed to send request to internal task: {}",
e
))
})?;
} else {
return Err(CircleWsClientError::NotConnected);
}
#[cfg(target_arch = "wasm32")]
{
match response_rx.await {
Ok(Ok(rpc_response)) => Ok(rpc_response),
Ok(Err(e)) => Err(e),
Err(_) => Err(CircleWsClientError::Timeout(req.id)),
}
}
#[cfg(not(target_arch = "wasm32"))]
{
use tokio::time::timeout as tokio_timeout;
match tokio_timeout(std::time::Duration::from_secs(30), response_rx).await {
Ok(Ok(Ok(rpc_response))) => Ok(rpc_response),
Ok(Ok(Err(e))) => Err(e),
Ok(Err(_)) => Err(CircleWsClientError::ChannelError(
"Response channel cancelled".to_string(),
)),
Err(_) => Err(CircleWsClientError::Timeout(req.id)),
}
}
}
pub async fn connect(&mut self) -> Result<(), CircleWsClientError> {
if self.internal_tx.is_some() {
info!("🔄 [{}] Client already connected or connecting", self.ws_url);
return Ok(());
}
info!("🚀 [{}] Starting self-managed WebSocket connection with keep-alive and reconnection...", self.ws_url);
let (internal_tx, internal_rx) = mpsc::channel::<InternalWsMessage>(32);
self.internal_tx = Some(internal_tx);
// Clone necessary data for the task
let connection_url = self.ws_url.clone();
let private_key = self.private_key.clone();
let is_connected = self.is_connected.clone();
info!("🔗 [{}] Will handle connection, authentication, keep-alive, and reconnection internally", connection_url);
// Pending requests: map request_id to a oneshot sender for the response
let pending_requests: Arc<
Mutex<
HashMap<
String,
oneshot::Sender<Result<JsonRpcResponseClient, CircleWsClientError>>,
>,
>,
> = Arc::new(Mutex::new(HashMap::new()));
let task_pending_requests = pending_requests.clone();
let log_url = connection_url.clone();
let task = async move {
// Main connection loop with reconnection logic
loop {
info!("🔄 [{}] Starting connection attempt...", log_url);
// Reset connection status
*is_connected.lock().unwrap() = false;
// Clone connection_url for this iteration to avoid move issues
let connection_url_clone = connection_url.clone();
// Establish WebSocket connection
#[cfg(target_arch = "wasm32")]
let ws_result = WebSocket::open(&connection_url_clone);
#[cfg(not(target_arch = "wasm32"))]
let connect_attempt = async {
// Check if this is a secure WebSocket connection
if connection_url_clone.starts_with("wss://") {
// For WSS connections, use a custom TLS connector that accepts self-signed certificates
// This is for development/demo purposes only
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
let request = connection_url_clone.as_str().into_client_request()
.map_err(|e| CircleWsClientError::ConnectionError(format!("Invalid URL: {}", e)))?;
// Create a native-tls connector that accepts invalid certificates (for development)
let tls_connector = native_tls::TlsConnector::builder()
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true)
.build()
.map_err(|e| CircleWsClientError::ConnectionError(format!("TLS connector creation failed: {}", e)))?;
let connector = Connector::NativeTls(tls_connector);
warn!("⚠️ DEVELOPMENT MODE: Accepting self-signed certificates (NOT for production!)");
connect_async_tls_with_config(request, None, false, Some(connector))
.await
.map_err(|e| CircleWsClientError::ConnectionError(format!("WSS connection failed: {}", e)))
} else {
// For regular WS connections, use the standard method
connect_async(&connection_url_clone)
.await
.map_err(|e| CircleWsClientError::ConnectionError(format!("WS connection failed: {}", e)))
}
};
#[cfg(not(target_arch = "wasm32"))]
let ws_result = connect_attempt.await;
match ws_result {
Ok(ws_conn_maybe_response) => {
#[cfg(target_arch = "wasm32")]
let ws_conn = ws_conn_maybe_response;
#[cfg(not(target_arch = "wasm32"))]
let (ws_conn, _) = ws_conn_maybe_response;
// For WASM, WebSocket::open() always succeeds even if server is down
// We'll start as "connecting" and detect failures through timeouts
#[cfg(target_arch = "wasm32")]
info!("🔄 [{}] WebSocket object created, testing actual connectivity...", log_url);
#[cfg(not(target_arch = "wasm32"))]
{
info!("✅ [{}] WebSocket connection established successfully", log_url);
*is_connected.lock().unwrap() = true;
}
// Handle authentication if private key is provided
let auth_success = if let Some(ref _pk) = private_key {
info!("🔐 [{}] Authentication will be handled by separate authenticate() call", log_url);
true // For now, assume auth will be handled separately
} else {
info!(" [{}] No private key provided, skipping authentication", log_url);
true
};
if auth_success {
// Start the main message handling loop with keep-alive
let disconnect_reason = Self::handle_connection_with_keepalive(
ws_conn,
internal_rx,
&task_pending_requests,
&log_url,
&is_connected
).await;
info!("🔌 [{}] Connection ended: {}", log_url, disconnect_reason);
// Check if this was a manual disconnect
if disconnect_reason == "Manual close requested" {
break; // Don't reconnect on manual close
}
// If we reach here, we need to recreate internal_rx for the next iteration
// But since internal_rx was moved, we need to break out of the loop
break;
}
}
Err(e) => {
error!("❌ [{}] WebSocket connection failed: {:?}", log_url, e);
}
}
// Reset connection status
*is_connected.lock().unwrap() = false;
// Wait before reconnecting
info!("⏳ [{}] Waiting 5 seconds before reconnection attempt...", log_url);
#[cfg(target_arch = "wasm32")]
{
use gloo_timers::future::TimeoutFuture;
TimeoutFuture::new(5_000).await;
}
#[cfg(not(target_arch = "wasm32"))]
{
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
}
}
// Cleanup pending requests on exit
task_pending_requests
.lock()
.unwrap()
.drain()
.for_each(|(_, sender)| {
let _ = sender.send(Err(CircleWsClientError::ConnectionError(
"WebSocket task terminated".to_string(),
)));
});
info!("🏁 [{}] WebSocket task finished", log_url);
};
#[cfg(target_arch = "wasm32")]
spawn_local(task);
#[cfg(not(target_arch = "wasm32"))]
{
self.task_handle = Some(spawn_local(task));
}
Ok(())
}
// Enhanced connection loop handler with keep-alive
#[cfg(target_arch = "wasm32")]
async fn handle_connection_with_keepalive(
ws_conn: WebSocket,
        internal_rx: mpsc::Receiver<InternalWsMessage>,
pending_requests: &Arc<Mutex<HashMap<String, oneshot::Sender<Result<JsonRpcResponseClient, CircleWsClientError>>>>>,
log_url: &str,
is_connected: &Arc<Mutex<bool>>,
) -> String {
let (mut ws_tx, mut ws_rx) = ws_conn.split();
let mut internal_rx_fused = internal_rx.fuse();
// Track plaintext requests (like ping)
let pending_plaintext: Arc<Mutex<HashMap<String, oneshot::Sender<Result<String, CircleWsClientError>>>>> = Arc::new(Mutex::new(HashMap::new()));
        // Connection validation for WASM - test if the connection actually works
        use gloo_timers::future::TimeoutFuture;
        let mut connection_test_timer = TimeoutFuture::new(2_000).fuse(); // 2 second timeout
        let mut connection_validated = false;
        // Keep-alive timer - send ping every 30 seconds
        let mut keep_alive_timer = TimeoutFuture::new(30_000).fuse();
// Send initial connection test ping
debug!("Sending initial connection test ping to {}", log_url);
let test_ping_res = ws_tx.send(GlooWsMessage::Text("ping".to_string())).await;
if let Err(e) = test_ping_res {
error!("❌ [{}] Initial connection test failed: {:?}", log_url, e);
*is_connected.lock().unwrap() = false;
return format!("Initial connection test failed: {}", e);
}
loop {
futures_util::select! {
// Connection test timeout - if no response in 2 seconds, connection failed
_ = connection_test_timer => {
if !connection_validated {
error!("❌ [{}] Connection test failed - no response within 2 seconds", log_url);
*is_connected.lock().unwrap() = false;
return "Connection test timeout - server not responding".to_string();
}
}
// Handle messages from the client's public methods (e.g., play)
internal_msg = internal_rx_fused.next().fuse() => {
match internal_msg {
Some(InternalWsMessage::SendJsonRpc(req, response_sender)) => {
let req_id = req.id.clone();
match serde_json::to_string(&req) {
Ok(req_str) => {
debug!("Sending JSON-RPC request (ID: {}): {}", req_id, req_str);
let send_res = ws_tx.send(GlooWsMessage::Text(req_str)).await;
if let Err(e) = send_res {
error!("WebSocket send error for request ID {}: {:?}", req_id, e);
// Connection failed - update status
*is_connected.lock().unwrap() = false;
let _ = response_sender.send(Err(CircleWsClientError::SendError(e.to_string())));
} else {
// Store the sender to await the response
pending_requests.lock().unwrap().insert(req_id, response_sender);
}
}
Err(e) => {
error!("Failed to serialize request ID {}: {}", req_id, e);
let _ = response_sender.send(Err(CircleWsClientError::JsonError(e)));
}
}
}
Some(InternalWsMessage::SendPlaintext(text, response_sender)) => {
debug!("Sending plaintext message: {}", text);
let send_res = ws_tx.send(GlooWsMessage::Text(text.clone())).await;
if let Err(e) = send_res {
error!("WebSocket send error for plaintext message: {:?}", e);
*is_connected.lock().unwrap() = false;
let _ = response_sender.send(Err(CircleWsClientError::SendError(e.to_string())));
} else {
// For plaintext messages like ping, we expect an immediate response
// Store the response sender to await the response (e.g., pong)
let request_id = format!("plaintext_{}", uuid::Uuid::new_v4());
pending_plaintext.lock().unwrap().insert(request_id, response_sender);
}
}
Some(InternalWsMessage::Close) => {
info!("Close message received internally, closing WebSocket.");
let _ = ws_tx.close().await;
return "Manual close requested".to_string();
}
None => {
info!("Internal MPSC channel closed, WebSocket task shutting down.");
let _ = ws_tx.close().await;
return "Internal channel closed".to_string();
}
}
},
// Handle messages received from the WebSocket server
ws_msg_res = ws_rx.next().fuse() => {
match ws_msg_res {
Some(Ok(msg)) => {
// Any successful message confirms the connection is working
if !connection_validated {
info!("✅ [{}] WebSocket connection validated - received message from server", log_url);
*is_connected.lock().unwrap() = true;
connection_validated = true;
}
match msg {
GlooWsMessage::Text(text) => {
debug!("Received WebSocket message: {}", text);
Self::handle_received_message(&text, pending_requests, &pending_plaintext);
}
GlooWsMessage::Bytes(_) => {
debug!("Received binary WebSocket message (WASM).");
}
}
}
Some(Err(e)) => {
error!("WebSocket receive error: {:?}", e);
*is_connected.lock().unwrap() = false;
return format!("Receive error: {}", e);
}
None => {
info!("WebSocket connection closed by server (stream ended).");
*is_connected.lock().unwrap() = false;
return "Server closed connection (stream ended)".to_string();
}
}
}
// Keep-alive timer - send ping every 30 seconds
_ = keep_alive_timer => {
// Only send ping if connection is validated
if connection_validated {
debug!("Sending keep-alive ping to {}", log_url);
let ping_str = "ping"; // Send simple plaintext ping
let send_res = ws_tx.send(GlooWsMessage::Text(ping_str.to_string())).await;
if let Err(e) = send_res {
warn!("Keep-alive ping failed for {}: {:?}", log_url, e);
*is_connected.lock().unwrap() = false;
return format!("Keep-alive failed: {}", e);
}
} else {
debug!("Skipping keep-alive ping - connection not yet validated for {}", log_url);
}
// Reset timer
keep_alive_timer = TimeoutFuture::new(30_000).fuse();
}
}
}
}
// Enhanced connection loop handler with keep-alive for native targets
#[cfg(not(target_arch = "wasm32"))]
async fn handle_connection_with_keepalive(
ws_conn: tokio_tungstenite::WebSocketStream<tokio_tungstenite::MaybeTlsStream<tokio::net::TcpStream>>,
        internal_rx: mpsc::Receiver<InternalWsMessage>,
pending_requests: &Arc<Mutex<HashMap<String, oneshot::Sender<Result<JsonRpcResponseClient, CircleWsClientError>>>>>,
log_url: &str,
_is_connected: &Arc<Mutex<bool>>,
) -> String {
let (mut ws_tx, mut ws_rx) = ws_conn.split();
let mut internal_rx_fused = internal_rx.fuse();
// Track plaintext requests (like ping)
let pending_plaintext: Arc<Mutex<HashMap<String, oneshot::Sender<Result<String, CircleWsClientError>>>>> = Arc::new(Mutex::new(HashMap::new()));
loop {
futures_util::select! {
// Handle messages from the client's public methods (e.g., play)
internal_msg = internal_rx_fused.next().fuse() => {
match internal_msg {
Some(InternalWsMessage::SendJsonRpc(req, response_sender)) => {
let req_id = req.id.clone();
match serde_json::to_string(&req) {
Ok(req_str) => {
debug!("Sending JSON-RPC request (ID: {}): {}", req_id, req_str);
let send_res = ws_tx.send(TungsteniteWsMessage::Text(req_str)).await;
if let Err(e) = send_res {
error!("WebSocket send error for request ID {}: {:?}", req_id, e);
let _ = response_sender.send(Err(CircleWsClientError::SendError(e.to_string())));
} else {
// Store the sender to await the response
pending_requests.lock().unwrap().insert(req_id, response_sender);
}
}
Err(e) => {
error!("Failed to serialize request ID {}: {}", req_id, e);
let _ = response_sender.send(Err(CircleWsClientError::JsonError(e)));
}
}
}
Some(InternalWsMessage::SendPlaintext(text, response_sender)) => {
debug!("Sending plaintext message: {}", text);
let send_res = ws_tx.send(TungsteniteWsMessage::Text(text.clone())).await;
if let Err(e) = send_res {
error!("WebSocket send error for plaintext message: {:?}", e);
let _ = response_sender.send(Err(CircleWsClientError::SendError(e.to_string())));
} else {
// For plaintext messages like ping, we expect an immediate response
// Store the response sender to await the response (e.g., pong)
let request_id = format!("plaintext_{}", uuid::Uuid::new_v4());
pending_plaintext.lock().unwrap().insert(request_id, response_sender);
}
}
Some(InternalWsMessage::Close) => {
info!("Close message received internally, closing WebSocket.");
let _ = ws_tx.close().await;
return "Manual close requested".to_string();
}
None => {
info!("Internal MPSC channel closed, WebSocket task shutting down.");
let _ = ws_tx.close().await;
return "Internal channel closed".to_string();
}
}
},
// Handle messages received from the WebSocket server
ws_msg_res = ws_rx.next().fuse() => {
match ws_msg_res {
Some(Ok(msg)) => {
match msg {
TungsteniteWsMessage::Text(text) => {
debug!("Received WebSocket message: {}", text);
Self::handle_received_message(&text, pending_requests, &pending_plaintext);
}
TungsteniteWsMessage::Binary(_) => {
debug!("Received binary WebSocket message (Native).");
}
TungsteniteWsMessage::Ping(_) | TungsteniteWsMessage::Pong(_) => {
debug!("Received Ping/Pong (Native).");
}
TungsteniteWsMessage::Close(_) => {
info!("WebSocket connection closed by server (Native).");
return "Server closed connection".to_string();
}
TungsteniteWsMessage::Frame(_) => {
debug!("Received Frame (Native) - not typically handled directly.");
}
}
}
Some(Err(e)) => {
error!("WebSocket receive error: {:?}", e);
return format!("Receive error: {}", e);
}
None => {
info!("WebSocket connection closed by server (stream ended).");
return "Server closed connection (stream ended)".to_string();
}
}
}
}
}
}
// Helper method to handle received messages
fn handle_received_message(
text: &str,
pending_requests: &Arc<Mutex<HashMap<String, oneshot::Sender<Result<JsonRpcResponseClient, CircleWsClientError>>>>>,
pending_plaintext: &Arc<Mutex<HashMap<String, oneshot::Sender<Result<String, CircleWsClientError>>>>>,
) {
// Handle ping/pong messages - these are not JSON-RPC
if text.trim() == "pong" {
debug!("Received pong response");
            // Respond to one pending plaintext ping request. Avoid drain():
            // dropping a partially consumed Drain iterator would clear the whole
            // map and silently cancel every other pending ping.
            let mut plaintext_map = pending_plaintext.lock().unwrap();
            if let Some(key) = plaintext_map.keys().next().cloned() {
                if let Some(sender) = plaintext_map.remove(&key) {
                    let _ = sender.send(Ok("pong".to_string()));
                }
            }
return;
}
match serde_json::from_str::<JsonRpcResponseClient>(text) {
Ok(response) => {
if let Some(sender) = pending_requests.lock().unwrap().remove(&response.id) {
if let Err(failed_send_val) = sender.send(Ok(response)) {
if let Ok(resp_for_log) = failed_send_val {
warn!("Failed to send response to waiting task for ID: {}", resp_for_log.id);
} else {
warn!("Failed to send response to waiting task, and also failed to get original response for logging.");
}
}
} else {
warn!("Received response for unknown request ID or unsolicited message: {:?}", response);
}
}
Err(e) => {
error!("Failed to parse JSON-RPC response: {}. Raw: {}", e, text);
}
}
}
pub fn play(
&self,
script: String,
) -> impl std::future::Future<Output = Result<PlayResultClient, CircleWsClientError>> + Send + 'static
{
let req_id_outer = Uuid::new_v4().to_string();
// Clone the sender option. The sender itself (mpsc::Sender) is also Clone.
let internal_tx_clone_opt = self.internal_tx.clone();
async move {
let req_id = req_id_outer; // Move req_id into the async block
let params = PlayParamsClient { script }; // script is moved in
let request = match serde_json::to_value(params) {
Ok(p_val) => JsonRpcRequestClient {
jsonrpc: "2.0".to_string(),
method: "play".to_string(),
params: p_val,
id: req_id.clone(),
},
Err(e) => return Err(CircleWsClientError::JsonError(e)),
};
let (response_tx, response_rx) = oneshot::channel();
if let Some(mut internal_tx) = internal_tx_clone_opt {
internal_tx
.send(InternalWsMessage::SendJsonRpc(request, response_tx))
.await
.map_err(|e| {
CircleWsClientError::ChannelError(format!(
"Failed to send request to internal task: {}",
e
))
})?;
} else {
return Err(CircleWsClientError::NotConnected);
}
// Add a timeout for waiting for the response
// For simplicity, using a fixed timeout here. Could be configurable.
#[cfg(target_arch = "wasm32")]
{
match response_rx.await {
Ok(Ok(rpc_response)) => {
if let Some(json_rpc_error) = rpc_response.error {
Err(CircleWsClientError::JsonRpcError {
code: json_rpc_error.code,
message: json_rpc_error.message,
data: json_rpc_error.data,
})
} else if let Some(result_value) = rpc_response.result {
serde_json::from_value(result_value)
.map_err(CircleWsClientError::JsonError)
} else {
Err(CircleWsClientError::NoResponse(req_id.clone()))
}
}
Ok(Err(e)) => Err(e), // Error propagated from the ws task
Err(_) => Err(CircleWsClientError::Timeout(req_id.clone())), // oneshot channel cancelled
}
}
#[cfg(not(target_arch = "wasm32"))]
{
use tokio::time::timeout as tokio_timeout;
match tokio_timeout(std::time::Duration::from_secs(10), response_rx).await {
Ok(Ok(Ok(rpc_response))) => {
// Timeout -> Result<ChannelRecvResult, Error>
if let Some(json_rpc_error) = rpc_response.error {
Err(CircleWsClientError::JsonRpcError {
code: json_rpc_error.code,
message: json_rpc_error.message,
data: json_rpc_error.data,
})
} else if let Some(result_value) = rpc_response.result {
serde_json::from_value(result_value)
.map_err(CircleWsClientError::JsonError)
} else {
Err(CircleWsClientError::NoResponse(req_id.clone()))
}
}
Ok(Ok(Err(e))) => Err(e), // Error propagated from the ws task
Ok(Err(_)) => Err(CircleWsClientError::ChannelError(
"Response channel cancelled".to_string(),
)), // oneshot cancelled
Err(_) => Err(CircleWsClientError::Timeout(req_id.clone())), // tokio_timeout expired
}
}
}
}
/// Send a plaintext ping message and wait for pong response
pub async fn ping(&mut self) -> Result<String, CircleWsClientError> {
if let Some(mut tx) = self.internal_tx.clone() {
let (response_tx, response_rx) = oneshot::channel();
// Send plaintext ping message
tx.send(InternalWsMessage::SendPlaintext("ping".to_string(), response_tx))
.await
.map_err(|e| {
CircleWsClientError::ChannelError(format!(
"Failed to send ping request to internal task: {}",
e
))
})?;
// Wait for pong response with timeout
#[cfg(target_arch = "wasm32")]
{
match response_rx.await {
Ok(Ok(response)) => Ok(response),
Ok(Err(e)) => Err(e),
Err(_) => Err(CircleWsClientError::ChannelError(
"Ping response channel cancelled".to_string(),
)),
}
}
#[cfg(not(target_arch = "wasm32"))]
{
use tokio::time::timeout as tokio_timeout;
match tokio_timeout(std::time::Duration::from_secs(10), response_rx).await {
Ok(Ok(Ok(response))) => Ok(response),
Ok(Ok(Err(e))) => Err(e),
Ok(Err(_)) => Err(CircleWsClientError::ChannelError(
"Ping response channel cancelled".to_string(),
)),
Err(_) => Err(CircleWsClientError::Timeout("ping".to_string())),
}
}
} else {
Err(CircleWsClientError::NotConnected)
}
}
pub async fn disconnect(&mut self) {
if let Some(mut tx) = self.internal_tx.take() {
info!("Sending close signal to internal WebSocket task.");
let _ = tx.send(InternalWsMessage::Close).await;
}
#[cfg(not(target_arch = "wasm32"))]
if let Some(handle) = self.task_handle.take() {
let _ = handle.await; // Wait for the task to finish
}
info!("Client disconnected.");
}
}
// Ensure client cleans up on drop for native targets
#[cfg(not(target_arch = "wasm32"))]
impl Drop for CircleWsClient {
fn drop(&mut self) {
if self.internal_tx.is_some() || self.task_handle.is_some() {
warn!("CircleWsClient dropped without explicit disconnect. Spawning task to send close signal.");
            // We can't call async disconnect directly in drop, so spawn tasks
            // on the tokio runtime to send the close signal and reap the join handle.
            if let Some(mut tx) = self.internal_tx.take() {
                tokio::spawn(async move {
                    info!("Drop: Sending close signal to internal WebSocket task.");
                    let _ = tx.send(InternalWsMessage::Close).await;
                });
            }
            if let Some(handle) = self.task_handle.take() {
                tokio::spawn(async move {
                    info!("Drop: Waiting for WebSocket task to finish.");
                    let _ = handle.await;
                    info!("Drop: WebSocket task finished.");
                });
}
}
}
}
#[cfg(test)]
mod tests {
// use super::*;
#[test]
fn it_compiles() {
assert_eq!(2 + 2, 4);
}
}

View File

@ -0,0 +1 @@
/target

View File

@ -0,0 +1,24 @@
[package]
name = "hero-websocket-examples"
version = "0.1.0"
edition = "2021"
[dependencies]
hero_websocket_client = { path = "../client" }
hero_websocket_server = { path = "../server" }
tokio = { version = "1.0", features = ["full"] }
k256 = { version = "0.13", features = ["ecdsa", "sha256"] }
rand = "0.8"
hex = "0.4"
[[bin]]
name = "ping"
path = "src/ping.rs"
[[bin]]
name = "auth"
path = "src/auth.rs"
[[bin]]
name = "play"
path = "src/play.rs"

View File

@ -0,0 +1,7 @@
## Hero Websocket Interface Examples
A set of end-to-end examples demonstrating the use of the Hero Websocket interface.
### Ping Example
Starts a server, connects a client, and performs a plaintext ping/pong round trip.
### Auth Example
Generates a secp256k1 keypair, authenticates against an auth-enabled server, and calls `whoami`.
### Play Example
Authenticates and then submits a Rhai script for execution via the `play` method.
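Each example is a standalone binary (see the `[[bin]]` entries in `Cargo.toml`). Assuming a Redis instance is reachable at `redis://localhost:6379`, they can be run with:
```bash
cargo run --bin ping
cargo run --bin auth
cargo run --bin play
```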

View File

@ -0,0 +1,104 @@
use hero_websocket_client::CircleWsClientBuilder;
use hero_websocket_server::ServerBuilder;
use tokio::signal;
use tokio::time::{sleep, Duration};
use k256::ecdsa::SigningKey;
use k256::elliptic_curve::sec1::ToEncodedPoint;
use rand::rngs::OsRng;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let (circle_public_key_hex, circle_private_key_hex) = {
let signing_key = SigningKey::random(&mut OsRng);
let verifying_key = signing_key.verifying_key();
let public_key_bytes = verifying_key.to_encoded_point(false).as_bytes().to_vec();
let private_key_bytes = signing_key.to_bytes().to_vec();
(hex::encode(public_key_bytes), hex::encode(private_key_bytes))
};
println!("🔗 Minimal WebSocket Ping Example");
// Build server
let server = match ServerBuilder::new()
.host("127.0.0.1")
.port(8443)
.redis_url("redis://localhost:6379")
.worker_id("test")
.with_auth()
.build() {
Ok(server) => {
println!("🚀 Built server...");
server
},
Err(e) => {
eprintln!("Failed to build server: {}", e);
return Err(e.into());
}
};
// Start server
println!("🚀 Starting server...");
    let (_server_task, server_handle) = server.spawn_circle_server().map_err(|e| {
eprintln!("Failed to start server: {}", e);
e
})?;
// Setup signal handling for clean shutdown
let server_handle_clone = server_handle.clone();
tokio::spawn(async move {
signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
println!("\n🔌 Shutting down...");
server_handle_clone.stop(true).await;
std::process::exit(0);
});
// Brief pause for server startup
sleep(Duration::from_millis(200)).await;
// Connect client
let mut client = CircleWsClientBuilder::new(format!("ws://localhost:8443/{}", circle_public_key_hex))
.with_keypair(circle_private_key_hex)
.build();
match client.connect().await {
Ok(_) => println!("✅ Client Connected"),
Err(e) => {
eprintln!("Failed to connect: {}", e);
return Err(e.into());
}
}
// Authenticate
print!("📤 Authenticating... ");
let response = client.authenticate().await;
match response {
Ok(response) => {
println!("📥 {}", response);
}
Err(e) => {
eprintln!("Failed to authenticate: {}", e);
return Err(e.into());
}
}
// Test whoami after authentication
print!("📤 Calling whoami... ");
match client.whoami().await {
Ok(response) => {
println!("📥 Whoami response: {}", response);
}
Err(e) => {
eprintln!("Failed to call whoami: {}", e);
return Err(e.into());
}
}
// Clean shutdown
client.disconnect().await;
server_handle.stop(true).await;
println!("✅ Done");
Ok(())
}

View File

@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}

View File

@ -0,0 +1,79 @@
use hero_websocket_client::CircleWsClientBuilder;
use hero_websocket_server::ServerBuilder;
use tokio::signal;
use tokio::time::{sleep, Duration};
const CIRCLE_PUBLIC_KEY: &str = "circle_public_key";
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("🔗 Minimal WebSocket Ping Example");
// Build server
let server = match ServerBuilder::new()
.host("127.0.0.1")
.port(8443)
.redis_url("redis://localhost:6379")
.worker_id("test")
.build() {
Ok(server) => {
println!("🚀 Built server...");
server
},
Err(e) => {
eprintln!("Failed to build server: {}", e);
return Err(e.into());
}
};
// Start server
println!("🚀 Starting server...");
    let (_server_task, server_handle) = server.spawn_circle_server().map_err(|e| {
eprintln!("Failed to start server: {}", e);
e
})?;
// Setup signal handling for clean shutdown
let server_handle_clone = server_handle.clone();
tokio::spawn(async move {
signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
println!("\n🔌 Shutting down...");
server_handle_clone.stop(true).await;
std::process::exit(0);
});
// Brief pause for server startup
sleep(Duration::from_millis(200)).await;
// Connect client
let mut client = CircleWsClientBuilder::new(format!("ws://localhost:8443/{}", CIRCLE_PUBLIC_KEY)).build();
match client.connect().await {
Ok(_) => println!("✅ Client Connected"),
Err(e) => {
eprintln!("Failed to connect: {}", e);
return Err(e.into());
}
}
// Send one ping
print!("📤 Ping... ");
let response = client.ping().await;
match response {
Ok(response) => {
println!("📥 {}", response);
}
Err(e) => {
eprintln!("Failed to ping: {}", e);
return Err(e.into());
}
}
// Clean shutdown
client.disconnect().await;
server_handle.stop(true).await;
println!("✅ Done");
Ok(())
}

View File

@ -0,0 +1,96 @@
use hero_websocket_client::CircleWsClientBuilder;
use hero_websocket_server::ServerBuilder;
use tokio::signal;
use tokio::time::{sleep, Duration};
use k256::ecdsa::SigningKey;
use k256::elliptic_curve::sec1::ToEncodedPoint;
use rand::rngs::OsRng;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let (circle_public_key_hex, circle_private_key_hex) = {
let signing_key = SigningKey::random(&mut OsRng);
let verifying_key = signing_key.verifying_key();
let public_key_bytes = verifying_key.to_encoded_point(false).as_bytes().to_vec();
let private_key_bytes = signing_key.to_bytes().to_vec();
(hex::encode(public_key_bytes), hex::encode(private_key_bytes))
};
println!("🔗 Minimal WebSocket Ping Example");
// Build server
let server = match ServerBuilder::new()
.host("127.0.0.1")
.port(8443)
.redis_url("redis://localhost:6379")
.worker_id("test")
.with_auth()
.build() {
Ok(server) => {
println!("🚀 Built server...");
server
},
Err(e) => {
eprintln!("Failed to build server: {}", e);
return Err(e.into());
}
};
// Start server
println!("🚀 Starting server...");
    let (_server_task, server_handle) = server.spawn_circle_server().map_err(|e| {
eprintln!("Failed to start server: {}", e);
e
})?;
// Setup signal handling for clean shutdown
let server_handle_clone = server_handle.clone();
tokio::spawn(async move {
signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
println!("\n🔌 Shutting down...");
server_handle_clone.stop(true).await;
std::process::exit(0);
});
// Brief pause for server startup
sleep(Duration::from_millis(200)).await;
// Connect client
let mut client = CircleWsClientBuilder::new(format!("ws://localhost:8443/{}", circle_public_key_hex))
.with_keypair(circle_private_key_hex)
.build();
match client.connect().await {
Ok(_) => println!("✅ Client Connected"),
Err(e) => {
eprintln!("Failed to connect: {}", e);
return Err(e.into());
}
}
// Authenticate
print!("📤 Authenticating... ");
let response = client.authenticate().await;
match response {
Ok(response) => {
println!("📥 {}", response);
}
Err(e) => {
eprintln!("Failed to authenticate: {}", e);
return Err(e.into());
}
}
    // Execute a script after authentication
    print!("📤 Playing script... ");
    match client.play("script".to_string()).await {
        Ok(result) => println!("📥 Play result: {}", result.output),
        Err(e) => {
            eprintln!("Failed to play: {}", e);
            return Err(e.into());
        }
    }
// Clean shutdown
client.disconnect().await;
server_handle.stop(true).await;
println!("✅ Done");
Ok(())
}

BIN
interfaces/websocket/server/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -0,0 +1,10 @@
# Webhook Configuration
# Copy this file to .env and set your actual webhook secrets
# Stripe webhook endpoint secret
# Get this from your Stripe dashboard under Webhooks
STRIPE_WEBHOOK_SECRET=whsec_your_stripe_webhook_secret_here
# iDenfy webhook endpoint secret
# Get this from your iDenfy dashboard under Webhooks
IDENFY_WEBHOOK_SECRET=your_idenfy_webhook_secret_here

View File

@ -0,0 +1,3 @@
/target
file:memdb_test_server*
*.pem

2775
interfaces/websocket/server/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,78 @@
[package]
name = "hero_websocket_server"
version = "0.1.0"
edition = "2021"
[lib]
name = "hero_websocket_server"
path = "src/lib.rs"
[[bin]]
name = "hero_websocket_server"
path = "cmd/main.rs"
[[example]]
name = "wss_basic_example"
path = "../../examples/wss_basic_example.rs"
[[example]]
name = "wss_auth_example"
path = "../../examples/wss_auth_example.rs"
required-features = ["auth"]
[[example]]
name = "wss_test_client"
path = "../../examples/wss_test_client.rs"
[[example]]
name = "wss_server"
path = "../../examples/wss_demo/wss_server.rs"
required-features = ["auth"]
[dependencies]
rustls = "0.23.5"
rustls-pemfile = "2.1.2"
actix-web = { workspace = true, features = ["rustls-0_23"] }
actix-web-actors = { workspace = true }
actix = { workspace = true }
env_logger = { workspace = true }
log = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
redis = { workspace = true }
uuid = { workspace = true }
tokio = { workspace = true }
chrono = { workspace = true }
rhai_dispatcher = { path = "../../../../rhailib/src/dispatcher" }
thiserror = { workspace = true }
heromodels = { path = "../../../../db/heromodels" }
# Webhook dependencies
hmac = "0.12"
sha2 = "0.10"
dotenv = "0.15"
bytes = "1.0"
hex = { workspace = true }
# Authentication dependencies (optional)
secp256k1 = { workspace = true, optional = true }
sha3 = { workspace = true, optional = true }
rand = { workspace = true, optional = true }
once_cell = { workspace = true }
clap = { workspace = true }
# Optional features for authentication
[features]
default = []
auth = ["secp256k1", "sha3", "rand"]
[dev-dependencies]
redis = { version = "0.23.0", features = ["tokio-comp"] }
uuid = { version = "1.2.2", features = ["v4"] }
tokio-tungstenite = { version = "0.19.0", features = ["native-tls"] }
futures-util = { workspace = true }
url = { workspace = true }
heromodels = { path = "../../../../db/heromodels" }
tokio = { workspace = true, features = ["full"] }
native-tls = "0.2"

View File

@ -0,0 +1,76 @@
# `server`: The Circles WebSocket Server
The `server` crate provides a secure, high-performance WebSocket server built with `Actix`. It is the core backend component of the `circles` ecosystem, responsible for handling client connections, processing JSON-RPC requests, and executing Rhai scripts in a secure manner.
## Features
- **`Actix` Framework**: Built on `Actix`, a powerful and efficient actor-based web framework.
- **WebSocket Management**: Uses `actix-web-actors` to manage each client connection in its own isolated actor (`CircleWs`), ensuring robust and concurrent session handling.
- **JSON-RPC 2.0 API**: Implements a JSON-RPC 2.0 API for all client-server communication. The API is formally defined in the root [openrpc.json](../../openrpc.json) file.
- **Secure Authentication**: Features a built-in `secp256k1` signature-based authentication system to protect sensitive endpoints.
- **Stateful Session Management**: The `CircleWs` actor maintains the authentication state for each client, granting or denying access to protected methods like `play`.
- **Webhook Integration**: Supports HTTP webhook endpoints for external services (Stripe, iDenfy) with signature verification and script execution capabilities.
## Core Components
### `spawn_circle_server`
This is the main entry point function for the server. It configures and starts the `Actix` HTTP server and sets up the WebSocket route with path-based routing (`/{circle_pk}`).
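As a sketch of the embedding API (based on the `ServerBuilder` usage in this repository's examples; the address, Redis URL, and worker id below are placeholders):
```rust
use hero_websocket_server::ServerBuilder;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let server = ServerBuilder::new()
        .host("127.0.0.1")
        .port(8443)
        .redis_url("redis://localhost:6379")
        .worker_id("example-worker")
        .build()
        .expect("valid server configuration");

    // Returns the running server task plus a handle for graceful shutdown.
    let (server_task, _server_handle) = server.spawn_circle_server()?;
    server_task.await?
}
```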
### `CircleWs` Actor
This `Actix` actor is the heart of the server's session management. A new instance of `CircleWs` is created for each client that connects. Its responsibilities include:
- Handling the WebSocket connection lifecycle.
- Parsing incoming JSON-RPC messages.
- Managing the authentication state of the session (i.e., whether the client is authenticated or not).
- Dispatching requests to the appropriate handlers (`fetch_nonce`, `authenticate`, and `play`).
## Authentication
The server provides a robust authentication mechanism to ensure that only authorized clients can execute scripts. The entire flow is handled over the WebSocket connection using two dedicated JSON-RPC methods:
1. **`fetch_nonce`**: The client requests a unique, single-use nonce (a challenge) from the server.
2. **`authenticate`**: The client sends back the nonce signed with its private key. The `CircleWs` actor verifies the signature to confirm the client's identity.
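As a sketch, the two requests look like this on the wire (field names follow this crate's `FetchNonceParams` and `AuthCredentials` types; the key and signature values are placeholders):
```rust
use serde_json::json;

fn main() {
    // Step 1: ask the server for a single-use nonce bound to our public key.
    let fetch_nonce = json!({
        "jsonrpc": "2.0",
        "method": "fetch_nonce",
        "params": { "pubkey": "04ab...placeholder" },
        "id": 1
    });
    // Step 2: return the nonce signed with the matching private key.
    let authenticate = json!({
        "jsonrpc": "2.0",
        "method": "authenticate",
        "params": { "pubkey": "04ab...placeholder", "signature": "30...placeholder" },
        "id": 2
    });
    println!("{fetch_nonce}\n{authenticate}");
}
```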
For a more detailed breakdown of the authentication architecture, please see the [ARCHITECTURE.md](docs/ARCHITECTURE.md) file.
## Webhook Integration
The server also provides HTTP webhook endpoints for external services alongside the WebSocket functionality:
- **Stripe Webhooks**: `POST /webhooks/stripe/{circle_pk}` - Handles Stripe payment events
- **iDenfy Webhooks**: `POST /webhooks/idenfy/{circle_pk}` - Handles iDenfy KYC verification events
### Webhook Features
- **Signature Verification**: All webhooks use HMAC signature verification for security
- **Script Execution**: Webhook events trigger Rhai script execution via the same Redis-based system
- **Type Safety**: Webhook payload types are defined in the `heromodels` library for reusability
- **Modular Architecture**: Separate handlers for each webhook provider with common utilities
For detailed webhook architecture and configuration, see [WEBHOOK_ARCHITECTURE.md](WEBHOOK_ARCHITECTURE.md).
## How to Run
### As a Library
The `server` is designed to be used as a library by the `launcher`, which is responsible for spawning a single multi-circle server instance that can handle multiple circles via path-based routing.
To run the server via the launcher with circle public keys:
```bash
cargo run --package launcher -- -k <circle_public_key1> -k <circle_public_key2> [options]
```
The launcher will start a single `server` instance that can handle multiple circles through path-based WebSocket connections at `/{circle_pk}`.
### Standalone Binary
A standalone binary is also available for development and testing purposes. See [`cmd/README.md`](cmd/README.md) for detailed usage instructions.
```bash
# Basic standalone server
cargo run
# With authentication and TLS
cargo run -- --auth --tls --cert cert.pem --key key.pem
```

View File

@ -0,0 +1,142 @@
# Circles WebSocket Server Binary
A command-line WebSocket server for hosting Circles with authentication and TLS support.
## Binary: Server
### Installation
Build the binary:
```bash
cargo build --release
```
### Usage
```bash
# Basic usage - starts server on localhost:8443
cargo run
# Custom host and port
cargo run -- --host 0.0.0.0 --port 9000
# Enable authentication
cargo run -- --auth
# Enable TLS/WSS with certificates
cargo run -- --tls --cert /path/to/cert.pem --key /path/to/key.pem
# Use separate TLS port
cargo run -- --tls --cert cert.pem --key key.pem --tls-port 8444
# Custom Redis URL
cargo run -- --redis-url redis://localhost:6379/1
# Increase verbosity
cargo run -- -v # Debug logging
cargo run -- -vv # Full debug logging
cargo run -- -vvv # Trace logging
```
### Command-Line Options
| Option | Short | Default | Description |
|--------|-------|---------|-------------|
| `--host` | `-H` | `127.0.0.1` | Server bind address |
| `--port` | `-p` | `8443` | Server port |
| `--redis-url` | | `redis://127.0.0.1/` | Redis connection URL |
| `--auth` | | `false` | Enable secp256k1 authentication |
| `--tls` | | `false` | Enable TLS/WSS support |
| `--cert` | | | Path to TLS certificate file (required with --tls) |
| `--key` | | | Path to TLS private key file (required with --tls) |
| `--tls-port` | | | Separate port for TLS connections |
| `--verbose` | `-v` | | Increase verbosity (stackable) |
### Configuration Examples
#### Development Server
```bash
# Simple development server
cargo run
# Development with authentication
cargo run -- --auth
```
#### Production Server
```bash
# Production with TLS and authentication
cargo run -- \
--host 0.0.0.0 \
--port 8080 \
--tls \
--tls-port 8443 \
--cert /etc/ssl/certs/circles.pem \
--key /etc/ssl/private/circles.key \
--auth \
--redis-url redis://redis-server:6379/0
```
#### Custom Redis Configuration
```bash
# Connect to remote Redis with authentication
cargo run -- --redis-url redis://username:password@redis.example.com:6379/2
```
### Logging Levels
The server supports multiple verbosity levels:
- **Default** (`cargo run`): Shows only warnings plus `hero_websocket_server` info
- **Debug** (`-v`): Shows debug for `hero_websocket_server`, info for actix
- **Full Debug** (`-vv`): Shows debug for all components
- **Trace** (`-vvv+`): Shows trace-level logging for everything
### TLS/SSL Configuration
When using `--tls`, you must provide both certificate and key files:
```bash
# Generate self-signed certificate for testing
openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
# Run server with TLS
cargo run -- --tls --cert cert.pem --key key.pem
```
### Authentication
When `--auth` is enabled, clients must complete secp256k1 authentication:
1. Client connects to the WebSocket endpoint
2. Client requests a single-use nonce via `fetch_nonce`
3. Client signs the nonce with its private key and submits it via `authenticate`
4. Server verifies the signature and grants access to protected methods
### Redis Integration
The server uses Redis for:
- Session management
- Message persistence
- Cross-instance communication (in clustered deployments)
Supported Redis URL formats:
- `redis://localhost/` - Local Redis, default database
- `redis://localhost:6379/1` - Local Redis, database 1
- `redis://user:pass@host:port/db` - Authenticated Redis
- `rediss://host:port/` - Redis with TLS
### Error Handling
The server provides clear error messages for common configuration issues:
- Missing TLS certificate or key files
- Invalid Redis connection URLs
- Port binding failures
- Authentication setup problems
### Dependencies
- `actix-web`: Web server framework
- `actix-web-actors`: WebSocket session handling
- `redis`: Redis client
- `rustls`: TLS implementation
- `clap`: Command-line argument parsing

View File

@ -0,0 +1,150 @@
use hero_websocket_server::ServerBuilder;
use clap::Parser;
use dotenv::dotenv;
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[clap(short = 'H', long, value_parser, default_value = "127.0.0.1")]
host: String,
#[clap(short, long, value_parser, default_value_t = 8443)]
port: u16,
#[clap(long, value_parser, default_value = "redis://127.0.0.1/")]
redis_url: String,
#[clap(long, help = "Enable authentication")]
auth: bool,
#[clap(long, help = "Enable TLS/WSS")]
tls: bool,
#[clap(long, value_parser, help = "Path to TLS certificate file")]
cert: Option<String>,
#[clap(long, value_parser, help = "Path to TLS private key file")]
key: Option<String>,
#[clap(long, value_parser, help = "Separate port for TLS connections")]
tls_port: Option<u16>,
#[clap(short, long, action = clap::ArgAction::Count, help = "Increase verbosity (-v for debug, -vv for trace)")]
verbose: u8,
#[clap(long, help = "Remove timestamps from log output")]
no_timestamp: bool,
#[clap(long, help = "Enable webhook handling")]
webhooks: bool,
#[clap(long, value_parser, help = "Worker ID for the server")]
worker_id: String,
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    dotenv().ok(); // Load .env (webhook secrets) before anything reads the environment.
    let args = Args::parse();
// Configure logging based on verbosity level
let log_config = match args.verbose {
0 => {
// Default: suppress actix server logs, show only hero_websocket_server info and above
"warn,hero_websocket_server=info"
}
1 => {
// -v: show debug for hero_websocket_server, info for actix
"info,hero_websocket_server=debug,actix_server=info"
}
2 => {
// -vv: show debug for everything
"debug"
}
_ => {
// -vvv and above: show trace for everything
"trace"
}
};
std::env::set_var("RUST_LOG", log_config);
// Configure env_logger with or without timestamps
if args.no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
// Validate TLS configuration
if args.tls && (args.cert.is_none() || args.key.is_none()) {
eprintln!("Error: TLS is enabled but certificate or key path is missing");
eprintln!("Use --cert and --key to specify certificate and key files");
std::process::exit(1);
}
let mut builder = ServerBuilder::new()
.host(args.host.clone())
.port(args.port)
.redis_url(args.redis_url.clone())
.worker_id(args.worker_id.clone());
if args.auth {
builder = builder.with_auth();
}
if args.tls {
if let (Some(cert), Some(key)) = (args.cert.clone(), args.key.clone()) {
builder = builder.with_tls(cert, key);
} else {
eprintln!("Error: TLS is enabled but --cert or --key is missing.");
std::process::exit(1);
}
}
if let Some(tls_port) = args.tls_port {
builder = builder.with_tls_port(tls_port);
}
if args.webhooks {
builder = builder.with_webhooks();
}
let server = match builder.build() {
Ok(server) => server,
Err(e) => {
eprintln!("Error building server: {}", e);
std::process::exit(1);
}
};
println!("🚀 Starting Circles WebSocket Server");
println!("📋 Configuration:");
println!(" Host: {}", args.host);
println!(" Port: {}", args.port);
if let Some(tls_port) = args.tls_port {
println!(" TLS Port: {}", tls_port);
}
println!(" Authentication: {}", if args.auth { "ENABLED" } else { "DISABLED" });
println!(" TLS/WSS: {}", if args.tls { "ENABLED" } else { "DISABLED" });
println!(" Webhooks: {}", if args.webhooks { "ENABLED" } else { "DISABLED" });
if args.tls {
if let (Some(cert), Some(key)) = (&args.cert, &args.key) {
println!(" Certificate: {}", cert);
println!(" Private Key: {}", key);
}
}
if args.webhooks {
println!(" Webhook secrets loaded from environment variables:");
println!(" - STRIPE_WEBHOOK_SECRET");
println!(" - IDENFY_WEBHOOK_SECRET");
}
println!();
let (server_task, _server_handle) = server.spawn_circle_server()?;
server_task.await?
}

View File

@ -0,0 +1,133 @@
# `server` Architecture
This document provides a detailed look into the internal architecture of the `server` crate, focusing on its `Actix`-based design, the structure of the authentication service, and the request lifecycle.
## 1. Core Design: The `Actix` Actor System
The `server` is built around the `Actix` actor framework, which allows for highly concurrent and stateful handling of network requests. The key components of this design are:
- **`HttpServer`**: The main `Actix` server instance that listens for incoming TCP connections.
- **`App`**: The application factory that defines the routes for the server.
- **`CircleWs` Actor**: A dedicated actor that is spawned for each individual WebSocket connection. This is the cornerstone of the server's design, as it allows each client session to be managed in an isolated, stateful manner.
When a client connects to the `/{circle_pk}` endpoint, the `HttpServer` upgrades the connection to a WebSocket and spawns a new `CircleWs` actor to handle it. The circle public key is extracted from the URL path to identify which circle the client wants to connect to. All further communication with that client, including the entire authentication flow, is then processed by this specific actor instance.
## 2. Module Structure
The `server` crate is organized into the following key modules:
- **`lib.rs`**: The main library file that contains the `spawn_circle_server` function, which sets up and runs the `Actix` server. It also defines the `CircleWs` actor and its message handling logic for all JSON-RPC methods.
- **`auth/`**: This module encapsulates all the logic related to the `secp256k1` authentication system.
- **`signature_verifier.rs`**: A self-contained utility module that provides the `verify_signature` function. This function performs the core cryptographic verification of the client's signed nonce.
- **`types.rs`**: Defines the data structures used within the authentication service.
- **`webhook/`**: This module provides HTTP webhook handling capabilities for external services.
- **`mod.rs`**: Main webhook module with route configuration and exports.
- **`handlers/`**: Contains individual webhook handlers for different providers (Stripe, iDenfy).
- **`verifiers.rs`**: Signature verification utilities for webhook authenticity.
- **`types.rs`**: Local webhook types (configuration, errors, verification results).
## 3. Request Lifecycle and Authentication Flow
The diagram below illustrates the flow of a typical client interaction. The entire process, from fetching a nonce to executing a protected command, occurs over the WebSocket connection and is handled by the `CircleWs` actor.
```mermaid
sequenceDiagram
participant Client
participant ActixHttpServer as HttpServer
participant CircleWsActor as CircleWs Actor
participant SignatureVerifier as auth::signature_verifier
Client->>+ActixHttpServer: Establishes WebSocket connection
ActixHttpServer->>ActixHttpServer: Spawns a new CircleWsActor
ActixHttpServer-->>-Client: WebSocket connection established
Note over CircleWsActor: Session created, authenticated = false
Client->>+CircleWsActor: Sends "fetch_nonce" JSON-RPC message
CircleWsActor->>CircleWsActor: Generate and store nonce for pubkey
CircleWsActor-->>-Client: Returns nonce in JSON-RPC response
Client->>Client: Signs nonce with private key
Client->>+CircleWsActor: Sends "authenticate" JSON-RPC message
CircleWsActor->>+SignatureVerifier: verify_signature(pubkey, nonce, signature)
SignatureVerifier-->>-CircleWsActor: Returns verification result
alt Signature is Valid
CircleWsActor->>CircleWsActor: Set session state: authenticated = true
CircleWsActor-->>-Client: Returns success response
else Signature is Invalid
CircleWsActor-->>-Client: Returns error response
end
Note over CircleWsActor: Client is now authenticated
Client->>+CircleWsActor: Sends "play" JSON-RPC message
CircleWsActor->>CircleWsActor: Check if authenticated
alt Is Authenticated
CircleWsActor->>CircleWsActor: Get public key from authenticated connections map
CircleWsActor->>CircleWsActor: Execute Rhai script with public key
CircleWsActor-->>-Client: Returns script result
else Is Not Authenticated
CircleWsActor-->>-Client: Returns "Authentication Required" error
end
```
This architecture ensures a clear separation of concerns and a unified communication protocol:
- The `HttpServer` handles connection management.
- The `CircleWs` actor manages the entire session lifecycle, including state and all API logic.
- The `auth` module provides a self-contained, reusable signature verification utility.
## 4. Webhook Integration Architecture
In addition to WebSocket connections, the server supports HTTP webhook endpoints for external services. This integration runs alongside the WebSocket functionality without interference.
### Webhook Request Flow
```mermaid
sequenceDiagram
participant WS as Webhook Service
participant HS as HttpServer
participant WH as Webhook Handler
participant WV as Webhook Verifier
participant RC as RhaiDispatcher
participant Redis as Redis
WS->>+HS: POST /webhooks/{provider}/{circle_pk}
HS->>+WH: Route to appropriate handler
WH->>WH: Extract circle_pk and signature
WH->>+WV: Verify webhook signature
WV->>WV: HMAC verification with provider secret
WV-->>-WH: Verification result + caller_id
alt Signature Valid
WH->>WH: Parse webhook payload (heromodels types)
WH->>+RC: Create RhaiDispatcher with caller_id
RC->>+Redis: Execute webhook script
Redis-->>-RC: Script result
RC-->>-WH: Execution result
WH-->>-HS: HTTP 200 OK
else Signature Invalid
WH-->>-HS: HTTP 401 Unauthorized
end
HS-->>-WS: HTTP Response
```
### Key Webhook Components
- **Modular Handlers**: Separate handlers for each webhook provider (Stripe, iDenfy)
- **Signature Verification**: HMAC-based verification using provider-specific secrets
- **Type Safety**: Webhook payload types defined in `heromodels` library for reusability
- **Script Integration**: Uses the same Redis-based Rhai execution system as WebSocket connections
- **Isolated Processing**: Webhook processing doesn't affect WebSocket connections
### Webhook vs WebSocket Comparison
| Aspect | WebSocket | Webhook |
|--------|-----------|---------|
| **Connection Type** | Persistent, bidirectional | HTTP request/response |
| **Authentication** | secp256k1 signature-based | HMAC signature verification |
| **State Management** | Stateful sessions via CircleWs actor | Stateless HTTP requests |
| **Script Execution** | Direct via authenticated session | Via RhaiDispatcher with provider caller_id |
| **Use Case** | Interactive client applications | External service notifications |
| **Data Types** | JSON-RPC messages | Provider-specific webhook payloads (heromodels) |

View File

@ -0,0 +1,214 @@
# WebSocket Server Authentication
This document describes the optional authentication features added to the Circle WebSocket server.
## Overview
The WebSocket server now supports optional secp256k1 signature-based authentication while maintaining full backward compatibility with existing clients. Authentication is completely opt-in and can be enabled per server instance.
## Features
### 1. Optional Authentication
- **Backward Compatible**: Existing clients continue to work without any changes
- **Opt-in**: Authentication can be enabled/disabled per server instance
- **Graceful Degradation**: Servers can accept both authenticated and unauthenticated connections
### 2. Nonce-based Security
- **Nonce Endpoints**: REST API for requesting cryptographic nonces
- **Replay Protection**: Each nonce can only be used once
- **Expiration**: Nonces expire after 5 minutes
- **Health Monitoring**: Health endpoint for monitoring nonce service
### 3. Signature Verification
- **secp256k1**: Uses the same cryptographic standard as Ethereum
- **Ethereum-style Signing**: Compatible with eth_sign message format
- **Public Key Recovery**: Verifies signatures against provided public keys
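To make the signing scheme concrete, here is a minimal sketch of the eth_sign-style message hash this scheme implies (assuming keccak256 via the `sha3` crate; the signature is then a secp256k1 signature over this digest):
```rust
use sha3::{Digest, Keccak256};

/// eth_sign-style hash: keccak256 over the length-prefixed message.
fn eth_message_hash(message: &str) -> [u8; 32] {
    let prefixed = format!("\x19Ethereum Signed Message:\n{}{}", message.len(), message);
    Keccak256::digest(prefixed.as_bytes()).into()
}
```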
## API Endpoints
These HTTP API endpoints are served by the WebSocket server instance itself, on the same host and port where the WebSocket service is running.
### Nonce Request
```
GET /auth/nonce?public_key=<optional_public_key>
```
**Response:**
```json
{
"nonce": "nonce_1234567890_abcdef",
"expires_at": 1234567890
}
```
### Health Check
```
GET /auth/health
```
**Response:**
```json
{
"status": "healthy",
"active_nonces": 42,
"timestamp": 1234567890
}
```
## WebSocket Authentication
### Query Parameters
Clients can authenticate by including these query parameters in the WebSocket URL:
- `pubkey`: The client's public key in hex format (130 characters, uncompressed)
- `sig`: The signature of the nonce in hex format (130 characters)
- `nonce`: The nonce that was signed (optional)
**Example:**
```
ws://localhost:8080/{circle_pk}?pubkey=04abc123...&sig=def456...&nonce=nonce_123_abc
```
### Authentication Flow
1. **Request Nonce**: Client requests a nonce from `/auth/nonce`
2. **Sign Nonce**: Client signs the nonce with their private key
3. **Connect**: Client connects to WebSocket with `pubkey` and `sig` parameters
4. **Verify**: Server verifies the signature and accepts/rejects the connection
## Server Configuration
### Basic Server (No Authentication)
```rust
use hero_websocket_server::ServerBuilder;
let server = ServerBuilder::new()
    .host("localhost")
    .port(8080)
    .redis_url("redis://localhost")
    .build()?;
let (server_task, server_handle) = server.spawn_circle_server()?;
```
### Server with Authentication
```rust
use hero_websocket_server::ServerBuilder;
let server = ServerBuilder::new()
    .host("localhost")
    .port(8080)
    .redis_url("redis://localhost")
    .with_auth()
    .build()?;
let (server_task, server_handle) = server.spawn_circle_server()?;
```
## Client Integration
### JavaScript/TypeScript Example
```javascript
// 1. Request nonce (from the WebSocket server's HTTP interface)
const nonceResponse = await fetch('http://localhost:8080/auth/nonce');
const { nonce } = await nonceResponse.json();
// 2. Sign nonce (using your preferred secp256k1 library)
const signature = signMessage(privateKey, nonce);
const publicKey = derivePublicKey(privateKey);
// 3. Connect with authentication (replace {circle_pk} with actual circle public key)
const ws = new WebSocket(
`ws://localhost:8080/${circle_pk}?pubkey=${publicKey}&sig=${signature}&nonce=${nonce}`
);
```
### Rust Client Example
```rust
use circle_ws_lib::auth::*;
// Request nonce. NonceClient will derive the HTTP API path from this WebSocket URL.
let nonce_client = NonceClient::from_ws_url("ws://localhost:8080/{circle_pk}")?;
let nonce_response = nonce_client.request_nonce(Some(public_key)).await?;
// Sign nonce
let signature = sign_message(&private_key, &nonce_response.nonce)?;
// Connect with authentication (replace {circle_pk} with actual circle public key)
let ws_url = format!(
"ws://localhost:8080/{}?pubkey={}&sig={}",
circle_pk, public_key, signature
);
```
## Security Considerations
### Nonce Management
- Nonces expire after 5 minutes
- Each nonce can only be used once
- Nonces are stored in memory (consider Redis for production)
### Signature Security
- Uses secp256k1 elliptic curve cryptography
- Ethereum-style message signing for compatibility
- Public key verification prevents impersonation
### Backward Compatibility
- Unauthenticated connections are allowed by default
- No breaking changes to existing APIs
- Optional authentication can be enabled gradually
## Error Handling
### Authentication Errors
- **401 Unauthorized**: Authentication required but not provided
- **403 Forbidden**: Authentication provided but invalid
- **400 Bad Request**: Malformed authentication parameters
### Nonce Errors
- **404 Not Found**: Nonce endpoint not available
- **410 Gone**: Nonce expired or already used
- **429 Too Many Requests**: Rate limiting (if implemented)
## Monitoring
### Metrics
- Active nonce count via `/auth/health`
- Authentication success/failure rates in logs
- Connection counts by authentication status
### Logging
```
INFO Incoming WebSocket connection for circle: 04abc123... (auth_enabled: true)
INFO Authentication successful for pubkey: 04abc123...
WARN Authentication failed: invalid signature
```
## Production Considerations
### Scalability
- Consider Redis-backed nonce storage for multiple server instances
- Implement rate limiting for nonce requests
- Monitor memory usage of in-memory nonce storage
### Security
- Use HTTPS/WSS in production
- Implement proper key management
- Consider certificate-based authentication for additional security
### Monitoring
- Set up alerts for authentication failure rates
- Monitor nonce service health
- Track connection patterns and anomalies
## Migration Guide
### Existing Deployments
1. **No Changes Required**: Existing clients continue to work
2. **Gradual Rollout**: Enable authentication on new servers first
3. **Client Updates**: Update clients to support authentication when ready
4. **Full Migration**: Eventually require authentication on all servers
### Testing
1. Test unauthenticated connections still work
2. Test authenticated connections with valid signatures
3. Test authentication failures are handled gracefully
4. Test nonce expiration and replay protection

View File

@ -0,0 +1,357 @@
# Webhook Integration Architecture
## Overview
This document outlines the architecture for adding webhook handling capabilities to the Circle WebSocket Server. The integration adds HTTP webhook endpoints alongside the existing WebSocket functionality without disrupting the current system.
## Architecture Diagram
```mermaid
graph TB
subgraph "External Services"
A[Stripe Webhooks]
B[iDenfy Webhooks]
end
subgraph "Circle Server"
C[HTTP Router]
D[WebSocket Handler]
E[Webhook Handler]
F[Stripe Verifier]
G[iDenfy Verifier]
H[Script Dispatcher]
I[RhaiDispatcherBuilder]
end
subgraph "Configuration"
J[.env File]
K[Environment Variables]
end
subgraph "Backend"
L[Redis]
M[Rhai Worker]
end
A --> |POST /webhooks/stripe/{circle_pk}| E
B --> |POST /webhooks/idenfy/{circle_pk}| E
C --> D
C --> E
E --> F
E --> G
F --> H
G --> H
H --> I
I --> L
L --> M
J --> K
K --> F
K --> G
D --> I
```
## URL Structure
### Webhook Endpoints
- **Stripe**: `POST /webhooks/stripe/{circle_pk}`
- **iDenfy**: `POST /webhooks/idenfy/{circle_pk}`
### Existing WebSocket Endpoints (Unchanged)
- **WebSocket**: `GET /{circle_pk}` (upgrades to WebSocket)
## Configuration
### Environment Variables (.env file)
```bash
# Webhook secrets for signature verification
STRIPE_WEBHOOK_SECRET=whsec_...
IDENFY_WEBHOOK_SECRET=your_idenfy_secret
# Existing configuration
REDIS_URL=redis://127.0.0.1/
```
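A minimal sketch of reading these variables at startup (matching the `dotenv` dependency already used by `cmd/main.rs`; variable names as above):
```rust
use std::env;

fn load_webhook_secrets() -> (Option<String>, Option<String>) {
    // Load .env into the process environment; ignore errors if the file is absent.
    let _ = dotenv::dotenv();
    (
        env::var("STRIPE_WEBHOOK_SECRET").ok(),
        env::var("IDENFY_WEBHOOK_SECRET").ok(),
    )
}
```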
### Server Configuration Updates
```rust
pub struct ServerConfig {
// ... existing fields
pub stripe_webhook_secret: Option<String>,
pub idenfy_webhook_secret: Option<String>,
}
```
## Webhook Processing Flow
### 1. Request Reception
```mermaid
sequenceDiagram
participant WS as Webhook Service
participant CS as Circle Server
participant WV as Webhook Verifier
participant SD as Script Dispatcher
participant RC as RhaiDispatcher
participant RW as Rhai Worker
WS->>CS: POST /webhooks/stripe/{circle_pk}
CS->>CS: Extract circle_pk from URL
CS->>CS: Read request body and headers
CS->>WV: Verify webhook signature
alt Stripe Webhook
WV->>WV: Verify Stripe signature using STRIPE_WEBHOOK_SECRET
WV->>WV: Deserialize to Stripe webhook payload
else iDenfy Webhook
WV->>WV: Verify iDenfy signature using IDENFY_WEBHOOK_SECRET
WV->>WV: Deserialize to iDenfy webhook payload
end
WV->>CS: Return verification result + parsed payload
alt Verification Success
CS->>SD: Dispatch appropriate script
SD->>RC: Create RhaiDispatcherBuilder
RC->>RC: Set caller_id="stripe" or "idenfy"
RC->>RC: Set recipient_id=circle_pk
RC->>RC: Set script="stripe_webhook_received" or "idenfy_webhook_received"
RC->>RW: Execute via Redis
RW->>RC: Return result
RC->>CS: Script execution result
CS->>WS: HTTP 200 OK
else Verification Failed
CS->>WS: HTTP 401 Unauthorized
end
```
### 2. Signature Verification
#### Stripe Verification
- Uses `Stripe-Signature` header
- HMAC-SHA256 verification with `STRIPE_WEBHOOK_SECRET`
- Follows Stripe's webhook signature verification protocol
#### iDenfy Verification
- Uses appropriate iDenfy signature header
- HMAC verification with `IDENFY_WEBHOOK_SECRET`
- Follows iDenfy's webhook signature verification protocol
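Both providers reduce to the same primitive: an HMAC over the raw request body, compared in constant time. A simplified sketch using the `hmac`, `sha2`, and `hex` dependencies (note that Stripe's production scheme additionally folds a timestamp into the signed payload, omitted here):
```rust
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

/// Verify an HMAC-SHA256 signature (hex-encoded) over the raw webhook body.
fn verify_hmac_sha256(secret: &str, body: &[u8], signature_hex: &str) -> bool {
    let Ok(expected) = hex::decode(signature_hex) else {
        return false;
    };
    let mut mac = HmacSha256::new_from_slice(secret.as_bytes())
        .expect("HMAC can take a key of any size");
    mac.update(body);
    // verify_slice compares in constant time, guarding against timing attacks.
    mac.verify_slice(&expected).is_ok()
}
```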
### 3. Payload Deserialization
#### Type Definitions in Heromodels Library
Webhook payload types are now defined in the `heromodels` library for better code organization and reusability:
- **Stripe Types**: Located in `heromodels::models::payment::stripe`
- **iDenfy Types**: Located in `heromodels::models::identity::kyc`
#### Stripe Payload Structure
```rust
// From heromodels::models::payment::StripeWebhookEvent
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct StripeWebhookEvent {
pub id: String,
pub object: String,
pub api_version: Option<String>,
pub created: i64,
pub data: StripeEventData,
pub livemode: bool,
pub pending_webhooks: i32,
pub request: Option<StripeEventRequest>,
#[serde(rename = "type")]
pub event_type: String,
}
```
#### iDenfy Payload Structure
```rust
// From heromodels::models::identity::IdenfyWebhookEvent
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct IdenfyWebhookEvent {
#[serde(rename = "clientId")]
pub client_id: String,
#[serde(rename = "scanRef")]
pub scan_ref: String,
pub status: String,
pub platform: String,
#[serde(rename = "startedAt")]
pub started_at: String,
#[serde(rename = "finishedAt")]
pub finished_at: Option<String>,
pub data: Option<IdenfyVerificationData>,
// ... additional fields
}
```
### 4. Script Execution
#### Script Names
- **Stripe**: `stripe_webhook_received`
- **iDenfy**: `idenfy_webhook_received`
#### Script Context
The Rhai scripts will receive structured data:
```javascript
// For Stripe webhooks
let webhook_data = {
"caller_id": "stripe",
"circle_id": "circle_public_key",
"event_type": "payment_intent.succeeded",
"event_id": "evt_...",
"created": 1234567890,
"livemode": false,
"data": { /* Stripe event data */ }
};
// For iDenfy webhooks
let webhook_data = {
"caller_id": "idenfy",
"circle_id": "circle_public_key",
"final_decision": "APPROVED",
"platform": "PC",
"status": { /* iDenfy status data */ },
"data": { /* iDenfy verification data */ }
};
```
## Implementation Structure
### Current File Structure
```
src/server/src/
├── webhook/
│ ├── mod.rs # Main webhook module with route configuration
│ ├── handlers/
│ │ ├── mod.rs # Handler module exports
│ │ ├── common.rs # Common utilities and app state
│ │ ├── stripe.rs # Stripe webhook handler
│ │ └── idenfy.rs # iDenfy webhook handler
│ ├── verifiers.rs # Signature verification for all providers
│ └── types.rs # Local webhook types (config, errors, etc.)
└── .env # Environment configuration
```
### Heromodels Library Structure
```
heromodels/src/models/
├── payment/
│ ├── mod.rs # Payment module exports
│ └── stripe.rs # Stripe webhook event types
└── identity/
├── mod.rs # Identity module exports
└── kyc.rs # iDenfy KYC webhook event types
```
### Key Architectural Changes
- **Type Organization**: Webhook payload types moved to `heromodels` library for reusability
- **Modular Handlers**: Separate handler files for each webhook provider
- **Simplified Architecture**: Removed unnecessary dispatcher complexity
- **Direct Script Execution**: Handlers directly use `RhaiDispatcher` for script execution
### Modified Files
- `src/lib.rs` - Add webhook routes and module imports
- `Cargo.toml` - Add heromodels dependency and webhook-related dependencies
- `cmd/main.rs` - Load .env file and configure webhook secrets
### Dependencies
```toml
[dependencies]
# Existing dependencies...
# Heromodels library for shared types
heromodels = { path = "../../../db/heromodels" }
# For webhook signature verification
hmac = "0.12"
sha2 = "0.10"
hex = { workspace = true }
# For environment variable loading
dotenv = "0.15"
# For HTTP request handling
bytes = "1.0"
thiserror = { workspace = true }
```
## Security Considerations
### Signature Verification
- **Mandatory**: All webhook requests must have valid signatures
- **Timing Attack Protection**: Use constant-time comparison for signatures
- **Secret Management**: Webhook secrets loaded from environment variables only
### Error Handling
- **No Information Leakage**: Generic error responses for invalid webhooks
- **Logging**: Detailed logging for debugging (same as existing WebSocket errors)
- **Graceful Degradation**: Webhook failures don't affect WebSocket functionality
### Request Validation
- **Content-Type**: Verify appropriate content types
- **Payload Size**: No explicit limits initially (as requested)
- **Rate Limiting**: Consider future implementation
## Backward Compatibility
### WebSocket Functionality
- **Zero Impact**: Existing WebSocket routes and functionality unchanged
- **Authentication**: WebSocket authentication system remains independent
- **Performance**: No performance impact on WebSocket connections
### Configuration
- **Optional**: Webhook functionality only enabled when secrets are configured
- **Graceful Fallback**: Server starts normally even without webhook configuration
## Testing Strategy
### Unit Tests
- Webhook signature verification for both providers
- Payload deserialization
- Error handling scenarios
### Integration Tests
- End-to-end webhook processing
- Script dispatch verification
- Configuration loading
### Mock Testing
- Simulated Stripe webhook calls
- Simulated iDenfy webhook calls
- Invalid signature scenarios
## Deployment Considerations
### Environment Setup
```bash
# .env file in src/server/
STRIPE_WEBHOOK_SECRET=whsec_1234567890abcdef...
IDENFY_WEBHOOK_SECRET=your_idenfy_webhook_secret
REDIS_URL=redis://127.0.0.1/
```
### Server Startup
- Load .env file before server initialization
- Validate webhook secrets if webhook endpoints are to be enabled
- Log webhook endpoint availability
### Monitoring
- Log webhook reception and processing
- Track script execution success/failure rates
- Monitor webhook signature verification failures
## Future Enhancements
### Potential Additions
- Additional webhook providers
- Webhook retry mechanisms
- Webhook event filtering
- Rate limiting implementation
- Webhook event queuing for high-volume scenarios
### Scalability Considerations
- Webhook processing can be made asynchronous if needed
- Multiple server instances can handle webhooks independently
- Redis-based script execution provides natural load distribution

View File

@ -0,0 +1,62 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Circle WebSocket Server API",
"version": "0.1.0",
"description": "API for interacting with a Circle's WebSocket server, primarily for Rhai script execution."
},
"methods": [
{
"name": "play",
"summary": "Executes a Rhai script on the server.",
"params": [
{
"name": "script",
"description": "The Rhai script to execute.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "playResult",
"description": "The output from the executed Rhai script.",
"schema": {
"$ref": "#/components/schemas/PlayResult"
}
},
"examples": [
{
"name": "Simple Script Execution",
"params": [
{
"name": "script",
"value": "let x = 10; x * 2"
}
],
"result": {
"name": "playResult",
"value": {
"output": "20"
}
}
}
]
}
],
"components": {
"schemas": {
"PlayResult": {
"type": "object",
"properties": {
"output": {
"type": "string",
"description": "The string representation of the Rhai script's evaluation result."
}
},
"required": ["output"]
}
}
}
}

View File

@ -0,0 +1,110 @@
//! Signature verification utilities for secp256k1 authentication
//!
//! This module provides functions to verify secp256k1 signatures in the
//! Ethereum style, allowing WebSocket servers to authenticate clients
//! using cryptographic signatures.
use serde::{Deserialize, Serialize};
use std::time::{SystemTime, UNIX_EPOCH};
/// Nonce response structure
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NonceResponse {
pub nonce: String,
pub expires_at: u64,
}
/// Verify a secp256k1 signature against a message and public key
///
/// This function is intended to implement Ethereum-style signature verification:
/// 1. Creates the Ethereum signed message hash
/// 2. Verifies the signature against the hash using the provided public key
///
/// Note: the current body is a format-validating placeholder that accepts any
/// well-formed signature; see the illustrative sketch below for a real path.
///
/// # Arguments
/// * `public_key_hex` - The public key in hex format (with or without 0x prefix)
/// * `message` - The original message that was signed
/// * `signature_hex` - The signature in hex format (65 bytes: r + s + v)
///
/// # Returns
/// * `Ok(true)` if signature is valid
/// * `Ok(false)` if signature is invalid
/// * `Err(String)` if there's an error in the verification process
pub fn verify_signature(
public_key_hex: &str,
message: &str,
signature_hex: &str,
) -> Result<bool, String> {
    // This is a placeholder implementation.
    // In a real implementation, you would use the secp256k1 crate.
    // For now, we only validate the input format and accept any
    // well-formed signature (development mode).
// Remove 0x prefix if present
let clean_pubkey = public_key_hex.strip_prefix("0x").unwrap_or(public_key_hex);
let clean_sig = signature_hex.strip_prefix("0x").unwrap_or(signature_hex);
// Basic validation
if clean_pubkey.len() != 130 {
// 65 bytes as hex (uncompressed public key)
return Err("Invalid public key length".to_string());
}
if clean_sig.len() != 130 {
// 65 bytes as hex (r + s + v)
return Err("Invalid signature length".to_string());
}
// Validate hex format
if !clean_pubkey.chars().all(|c| c.is_ascii_hexdigit()) {
return Err("Invalid public key format".to_string());
}
if !clean_sig.chars().all(|c| c.is_ascii_hexdigit()) {
return Err("Invalid signature format".to_string());
}
    // For development purposes, we accept any properly formatted signature.
    // In production, implement actual secp256k1 verification here.
    log::info!(
        "Signature verification (development mode): pubkey={}, message={}, sig={}",
&clean_pubkey[..20],
message,
&clean_sig[..20]
);
Ok(true)
}
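// Illustrative sketch only (not part of the original file): one possible real
// verification path using the optional `secp256k1` and `sha3` dependencies
// behind the `auth` feature. The eth_sign-style prefix hashing is an
// assumption taken from the module documentation above.
#[cfg(feature = "auth")]
#[allow(dead_code)]
fn verify_signature_secp256k1(
    public_key_hex: &str,
    message: &str,
    signature_hex: &str,
) -> Result<bool, String> {
    use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};
    use sha3::{Digest, Keccak256};

    let pubkey_bytes = hex::decode(public_key_hex.strip_prefix("0x").unwrap_or(public_key_hex))
        .map_err(|e| format!("Invalid public key hex: {}", e))?;
    let sig_bytes = hex::decode(signature_hex.strip_prefix("0x").unwrap_or(signature_hex))
        .map_err(|e| format!("Invalid signature hex: {}", e))?;
    if sig_bytes.len() != 65 {
        return Err("Expected a 65-byte r||s||v signature".to_string());
    }
    // eth_sign-style prefixed hash of the message.
    let prefixed = format!("\x19Ethereum Signed Message:\n{}{}", message.len(), message);
    let digest = Keccak256::digest(prefixed.as_bytes());
    let secp = Secp256k1::verification_only();
    let pubkey = PublicKey::from_slice(&pubkey_bytes).map_err(|e| e.to_string())?;
    let msg = Message::from_slice(digest.as_slice()).map_err(|e| e.to_string())?;
    // Drop the recovery byte (v) and verify the compact r||s signature.
    let sig = Signature::from_compact(&sig_bytes[..64]).map_err(|e| e.to_string())?;
    Ok(secp.verify_ecdsa(&msg, &sig, &pubkey).is_ok())
}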
/// Generate a nonce for authentication
///
/// Creates a time-based nonce that includes timestamp and random component
pub fn generate_nonce() -> NonceResponse {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
// Nonce expires in 5 minutes
let expires_at = now + 300;
// Create a simple time-based nonce
// In production, you might want to add more randomness
#[cfg(feature = "auth")]
let nonce = format!("nonce_{}_{}", now, rand::random::<u32>());
#[cfg(not(feature = "auth"))]
let nonce = format!("nonce_{}_{}", now, 12345u32);
NonceResponse { nonce, expires_at }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_nonce_generation() {
let nonce_response = generate_nonce();
assert!(nonce_response.nonce.starts_with("nonce_"));
assert!(nonce_response.expires_at > 0);
}
}

View File

@ -0,0 +1,100 @@
use std::collections::HashMap;
use crate::{Server, TlsConfigError};
/// ServerBuilder for constructing Server instances with a fluent API
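///
/// # Example
///
/// A minimal sketch (address, Redis URL, and worker id are placeholders):
///
/// ```no_run
/// use hero_websocket_server::ServerBuilder;
///
/// let server = ServerBuilder::new()
///     .host("127.0.0.1")
///     .port(8443)
///     .redis_url("redis://localhost:6379")
///     .worker_id("example-worker")
///     .build()
///     .expect("valid server configuration");
/// ```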
pub struct ServerBuilder {
host: String,
port: u16,
redis_url: String,
enable_tls: bool,
cert_path: Option<String>,
key_path: Option<String>,
tls_port: Option<u16>,
enable_auth: bool,
enable_webhooks: bool,
circle_worker_id: String,
}
impl ServerBuilder {
pub fn new() -> Self {
Self {
host: "127.0.0.1".to_string(),
port: 8443,
redis_url: "redis://localhost:6379".to_string(),
enable_tls: false,
cert_path: None,
key_path: None,
tls_port: None,
enable_auth: false,
enable_webhooks: false,
circle_worker_id: "default".to_string(),
}
}
pub fn host(mut self, host: impl Into<String>) -> Self {
self.host = host.into();
self
}
pub fn port(mut self, port: u16) -> Self {
self.port = port;
self
}
pub fn redis_url(mut self, redis_url: impl Into<String>) -> Self {
self.redis_url = redis_url.into();
self
}
pub fn worker_id(mut self, worker_id: impl Into<String>) -> Self {
self.circle_worker_id = worker_id.into();
self
}
pub fn with_tls(mut self, cert_path: String, key_path: String) -> Self {
self.enable_tls = true;
self.cert_path = Some(cert_path);
self.key_path = Some(key_path);
self
}
pub fn with_tls_port(mut self, tls_port: u16) -> Self {
self.tls_port = Some(tls_port);
self
}
pub fn with_auth(mut self) -> Self {
self.enable_auth = true;
self
}
pub fn with_webhooks(mut self) -> Self {
self.enable_webhooks = true;
self
}
pub fn build(self) -> Result<Server, TlsConfigError> {
Ok(Server {
host: self.host,
port: self.port,
redis_url: self.redis_url,
enable_tls: self.enable_tls,
cert_path: self.cert_path,
key_path: self.key_path,
tls_port: self.tls_port,
enable_auth: self.enable_auth,
enable_webhooks: self.enable_webhooks,
circle_worker_id: self.circle_worker_id,
circle_name: "default".to_string(),
circle_public_key: "default".to_string(),
nonce_store: HashMap::new(),
authenticated_pubkey: None,
})
}
}
impl Default for ServerBuilder {
fn default() -> Self {
Self::new()
}
}

View File

@ -0,0 +1,90 @@
use actix::prelude::*;
use actix_web_actors::ws;
use log::debug;
use serde_json::Value;
use crate::{Server, JsonRpcRequest, JsonRpcResponse, JsonRpcError};
impl actix::StreamHandler<Result<ws::Message, ws::ProtocolError>> for Server {
fn handle(&mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context) {
match msg {
Ok(ws::Message::Text(text)) => {
debug!("WS Text for {}: {}", self.circle_name, text);
// Handle plaintext ping messages for keep-alive
if text.trim() == "ping" {
debug!("Received keep-alive ping from {}, responding with pong", self.circle_name);
ctx.text("pong");
return;
}
match serde_json::from_str::<JsonRpcRequest>(&text) {
Ok(req) => {
let client_rpc_id = req.id.clone().unwrap_or(Value::Null);
match req.method.as_str() {
"fetch_nonce" => {
self.handle_fetch_nonce(req.params, client_rpc_id, ctx)
}
"authenticate" => {
self.handle_authenticate(req.params, client_rpc_id, ctx)
}
"whoami" => {
self.handle_whoami(req.params, client_rpc_id, ctx)
}
"play" => self.handle_play(req.params, client_rpc_id, ctx),
_ => {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32601,
message: format!("Method not found: {}", req.method),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
}
}
Err(e) => {
log::error!(
"WS Error: Failed to parse JSON: {}, original text: '{}'",
e,
text
);
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32700,
message: "Failed to parse JSON request".to_string(),
data: Some(Value::String(text.to_string())),
}),
id: Value::Null,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
}
}
Ok(ws::Message::Ping(msg)) => ctx.pong(&msg),
Ok(ws::Message::Close(reason)) => {
log::info!(
"WebSocket connection closing for server {}: {:?}",
self.circle_name,
reason
);
ctx.close(reason);
ctx.stop();
}
Err(e) => {
log::error!(
"WebSocket error for server {}: {}",
self.circle_name,
e
);
ctx.stop();
}
_ => (),
}
}
}

View File

@ -0,0 +1,637 @@
use actix::prelude::*;
use actix_web::{web, App, Error, HttpRequest, HttpResponse, HttpServer};
use actix_web_actors::ws;
use log::{error, info};
use once_cell::sync::Lazy;
use hero_dispatcher::{DispatcherBuilder, DispatcherError};
use rustls::pki_types::PrivateKeyDer;
use rustls::ServerConfig as RustlsServerConfig;
use rustls_pemfile::{certs, pkcs8_private_keys};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::task::JoinHandle;
use thiserror::Error;
// Global store for server handles, initialized with once_cell::sync::Lazy.
pub static SERVER_HANDLES: Lazy<Mutex<HashMap<String, ServerHandle>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));
// Maps each live connection actor to the public key it authenticated with.
static AUTHENTICATED_CONNECTIONS: Lazy<Mutex<HashMap<Addr<Server>, String>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));
mod auth;
mod builder;
mod handler;
use crate::auth::{generate_nonce, NonceResponse};
pub use crate::builder::ServerBuilder;
// Re-export server handle type for external use
pub type ServerHandle = actix_web::dev::ServerHandle;
const TASK_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10);
#[derive(Error, Debug)]
pub enum TlsConfigError {
#[error("Certificate file not found: {0}")]
CertificateNotFound(String),
#[error("Private key file not found: {0}")]
PrivateKeyNotFound(String),
#[error("Invalid certificate format: {0}")]
InvalidCertificate(String),
#[error("Invalid private key format: {0}")]
InvalidPrivateKey(String),
#[error("No private keys found in key file: {0}")]
NoPrivateKeys(String),
#[error("TLS configuration error: {0}")]
ConfigurationError(String),
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
}
#[derive(Debug, Serialize, Deserialize)]
struct JsonRpcRequest {
jsonrpc: String,
method: String,
params: Value,
id: Option<Value>,
}
#[derive(Debug, Serialize, Deserialize)]
struct JsonRpcResponse {
jsonrpc: String,
result: Option<Value>,
error: Option<JsonRpcError>,
id: Value,
}
#[derive(Debug, Serialize, Deserialize)]
struct JsonRpcError {
code: i32,
message: String,
data: Option<Value>,
}
#[derive(Debug, Serialize, Deserialize)]
struct PlayParams {
script: String,
}
#[derive(Debug, Serialize, Deserialize)]
struct PlayResult {
output: String,
}
#[derive(Debug, Serialize, Deserialize)]
struct AuthCredentials {
pubkey: String,
signature: String,
}
#[derive(Debug, Serialize, Deserialize)]
struct FetchNonceParams {
pubkey: String,
}
impl Actor for Server {
type Context = ws::WebsocketContext<Self>;
fn started(&mut self, _ctx: &mut Self::Context) {
if self.enable_auth {
info!(
"Circle '{}' WS: Connection started. Authentication is ENABLED. Waiting for auth challenge.",
self.circle_name
);
} else {
info!(
"Circle '{}' WS: Connection started. Authentication is DISABLED.",
self.circle_name
);
}
}
fn stopping(&mut self, ctx: &mut Self::Context) -> Running {
info!(
"Circle '{}' WS: Connection stopping.",
self.circle_name
);
AUTHENTICATED_CONNECTIONS
.lock()
.unwrap()
.remove(&ctx.address());
Running::Stop
}
}
#[derive(Clone)]
pub struct Server {
pub host: String,
pub port: u16,
pub redis_url: String,
pub enable_tls: bool,
pub cert_path: Option<String>,
pub key_path: Option<String>,
pub tls_port: Option<u16>,
pub enable_auth: bool,
pub enable_webhooks: bool,
pub circle_worker_id: String,
pub circle_name: String,
pub circle_public_key: String,
nonce_store: HashMap<String, NonceResponse>,
authenticated_pubkey: Option<String>,
}
impl Server {
/// Get the effective port for TLS connections
pub fn get_tls_port(&self) -> u16 {
self.tls_port.unwrap_or(self.port)
}
/// Check if TLS is properly configured
pub fn is_tls_configured(&self) -> bool {
self.cert_path.is_some() && self.key_path.is_some()
}
pub fn spawn_circle_server(&self) -> std::io::Result<(JoinHandle<std::io::Result<()>>, ServerHandle)> {
let host = self.host.clone();
let port = self.port;
// Validate TLS configuration if enabled
if self.enable_tls && !self.is_tls_configured() {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"TLS is enabled but certificate or key path is missing",
));
}
let server_config_data = web::Data::new(self.clone());
let http_server = HttpServer::new(move || {
            App::new()
                .app_data(server_config_data.clone())
                .route("/{circle_pk}", web::get().to(ws_handler))
});
let server = if self.enable_tls && self.is_tls_configured() {
let cert_path = self.cert_path.as_ref().unwrap();
let key_path = self.key_path.as_ref().unwrap();
let tls_port = self.get_tls_port();
info!("🔒 WSS (WebSocket Secure) is ENABLED for multi-circle server");
info!("📜 Certificate: {}", cert_path);
info!("🔑 Private key: {}", key_path);
info!("🌐 WSS URL pattern: wss://{}:{}/<circle_pk>", host, tls_port);
match load_rustls_config(cert_path, key_path) {
Ok(tls_config) => {
info!("✅ TLS configuration loaded successfully");
http_server.bind_rustls_0_23((host.as_str(), tls_port), tls_config)
.map_err(|e| std::io::Error::new(
std::io::ErrorKind::AddrInUse,
format!("Failed to bind WSS server to {}:{}: {}", host, tls_port, e)
))?
}
Err(e) => {
error!("❌ Failed to load TLS configuration: {}", e);
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("TLS configuration error: {}", e)
));
}
}
} else {
info!("🔓 WS (WebSocket) is ENABLED for multi-circle server (no TLS)");
info!("🌐 WS URL pattern: ws://{}:{}/<circle_pk>", host, port);
http_server.bind((host.as_str(), port))
.map_err(|e| std::io::Error::new(
std::io::ErrorKind::AddrInUse,
format!("Failed to bind WS server to {}:{}: {}", host, port, e)
))?
}
.run();
let handle = server.handle();
let server_task = tokio::spawn(server);
let protocol = if self.enable_tls { "WSS" } else { "WS" };
let effective_port = if self.enable_tls { self.get_tls_port() } else { port };
info!(
"🚀 Multi-circle {} server running on {}:{}",
protocol, host, effective_port
);
if self.enable_auth {
info!("🔐 Authentication is ENABLED");
} else {
info!("🔓 Authentication is DISABLED");
}
Ok((server_task, handle))
}
fn is_connection_authenticated(&self) -> bool {
self.authenticated_pubkey.is_some()
}
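    /// Issue a fresh nonce for the given pubkey. The client must sign this
    /// nonce and present the signature via `authenticate`.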
fn handle_fetch_nonce(
&mut self,
params: Value,
client_rpc_id: Value,
ctx: &mut ws::WebsocketContext<Self>,
) {
match serde_json::from_value::<FetchNonceParams>(params) {
Ok(params) => {
let nonce_response = generate_nonce();
self.nonce_store
.insert(params.pubkey, nonce_response.clone());
let resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: Some(serde_json::to_value(nonce_response).unwrap()),
error: None,
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&resp).unwrap());
}
Err(e) => {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32602,
message: format!("Invalid parameters for fetch_nonce: {}", e),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
}
}
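    /// Verify the client's signature over a previously issued nonce. On
    /// success the connection is marked authenticated; invalid credentials
    /// close the connection.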
fn handle_authenticate(
&mut self,
params: Value,
client_rpc_id: Value,
ctx: &mut ws::WebsocketContext<Self>,
) {
if !self.enable_auth {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32000,
message: "Authentication is disabled on this server.".to_string(),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
return;
}
match serde_json::from_value::<AuthCredentials>(params) {
Ok(auth_params) => {
let nonce_response = self.nonce_store.get(&auth_params.pubkey);
let is_valid = if let Some(nonce_resp) = nonce_response {
let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
if nonce_resp.expires_at < current_time {
log::warn!("Auth failed for {}: Nonce expired", self.circle_name);
false
} else {
match auth::verify_signature(
&auth_params.pubkey,
&nonce_resp.nonce,
&auth_params.signature,
) {
Ok(valid) => valid,
Err(_) => false,
}
}
} else {
false
};
if is_valid {
self.authenticated_pubkey = Some(auth_params.pubkey.clone());
AUTHENTICATED_CONNECTIONS
.lock()
.unwrap()
.insert(ctx.address(), auth_params.pubkey);
let resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: Some(serde_json::json!({ "authenticated": true })),
error: None,
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&resp).unwrap());
} else {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32002,
message: "Invalid Credentials".to_string(),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
ctx.stop();
}
}
Err(e) => {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32602,
message: format!("Invalid parameters for authenticate: {}", e),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
}
}
fn handle_whoami(
&mut self,
_params: Value,
client_rpc_id: Value,
ctx: &mut ws::WebsocketContext<Self>,
) {
// Check if authentication is enabled and if the connection is authenticated
if self.enable_auth {
if self.is_connection_authenticated() {
// Get the authenticated public key from the global store
let authenticated_pubkey = AUTHENTICATED_CONNECTIONS
.lock()
.unwrap()
.get(&ctx.address())
.cloned()
.unwrap_or_else(|| "unknown".to_string());
let response = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: Some(serde_json::json!({
"authenticated": true,
"public_key": authenticated_pubkey,
"circle_name": self.circle_name,
"auth_enabled": self.enable_auth
})),
error: None,
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&response).unwrap());
} else {
// Not authenticated
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32001,
message: "Authentication required. Please authenticate first.".to_string(),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
} else {
// Authentication is disabled, return basic info
let response = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: Some(serde_json::json!({
"authenticated": false,
"public_key": null,
"circle_name": self.circle_name,
"auth_enabled": self.enable_auth
})),
error: None,
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&response).unwrap());
}
}
fn handle_play(
&mut self,
params: Value,
client_rpc_id: Value,
ctx: &mut ws::WebsocketContext<Self>,
) {
if self.enable_auth && !self.is_connection_authenticated() {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32001,
message: "Authentication Required".to_string(),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
return;
}
match serde_json::from_value::<PlayParams>(params) {
Ok(play_params) => {
info!("Received play request from: {}", self.authenticated_pubkey.clone().unwrap_or_else(|| "anonymous".to_string()));
let script_content = play_params.script;
let circle_pk_clone = self.circle_public_key.clone();
let redis_url_clone = self.redis_url.clone();
let _rpc_id_clone = client_rpc_id.clone();
let public_key = self.authenticated_pubkey.clone();
let worker_id_clone = self.circle_worker_id.clone();
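                // Dispatch the script as a job over Redis and resolve it off
                // the actor thread; the reply is sent once the future completes.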
let fut = async move {
let caller_id = public_key.unwrap_or_else(|| "anonymous".to_string());
match DispatcherBuilder::new()
.redis_url(&redis_url_clone)
.caller_id(&caller_id)
.build() {
Ok(hero_dispatcher) => {
hero_dispatcher
.new_job()
.context_id(&circle_pk_clone)
.worker_id(&worker_id_clone)
.script(&script_content)
.timeout(TASK_TIMEOUT_DURATION)
.await_response()
.await
}
Err(e) => Err(e),
}
};
ctx.spawn(
fut.into_actor(self)
.map(move |res, _act, ctx_inner| match res {
Ok(task_details) => {
if task_details.status == "completed" {
let output = task_details
.output
.unwrap_or_else(|| "No output".to_string());
let result_value = PlayResult { output };
let resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: Some(serde_json::to_value(result_value).unwrap()),
error: None,
id: client_rpc_id,
};
ctx_inner.text(serde_json::to_string(&resp).unwrap());
} else {
let error_message = task_details.error.unwrap_or_else(|| {
"Rhai script execution failed".to_string()
});
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32000,
message: error_message,
data: None,
}),
id: client_rpc_id,
};
ctx_inner.text(serde_json::to_string(&err_resp).unwrap());
}
}
Err(e) => {
let (code, message) = match e {
DispatcherError::Timeout(task_id) => (
-32002,
format!(
"Timeout waiting for Rhai script (task: {})",
task_id
),
),
_ => (-32003, format!("Rhai infrastructure error: {}", e)),
};
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code,
message,
data: None,
}),
id: client_rpc_id,
};
ctx_inner.text(serde_json::to_string(&err_resp).unwrap());
}
}),
);
}
Err(e) => {
let err_resp = JsonRpcResponse {
jsonrpc: "2.0".to_string(),
result: None,
error: Some(JsonRpcError {
code: -32602,
message: format!("Invalid parameters for play: {}", e),
data: None,
}),
id: client_rpc_id,
};
ctx.text(serde_json::to_string(&err_resp).unwrap());
}
}
}
}
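/// Build a rustls `ServerConfig` from PEM-encoded certificate chain and
/// PKCS#8 private key files. The first parsed key is used.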
fn load_rustls_config(
cert_path: &str,
key_path: &str,
) -> Result<RustlsServerConfig, TlsConfigError> {
info!("Loading TLS configuration from cert: {}, key: {}", cert_path, key_path);
// Validate file existence
if !std::path::Path::new(cert_path).exists() {
return Err(TlsConfigError::CertificateNotFound(cert_path.to_string()));
}
if !std::path::Path::new(key_path).exists() {
return Err(TlsConfigError::PrivateKeyNotFound(key_path.to_string()));
}
let config = RustlsServerConfig::builder().with_no_client_auth();
// Load certificate file
let cert_file = &mut BufReader::new(File::open(cert_path)
.map_err(|e| TlsConfigError::ConfigurationError(format!("Failed to open certificate file: {}", e)))?);
// Load key file
let key_file = &mut BufReader::new(File::open(key_path)
.map_err(|e| TlsConfigError::ConfigurationError(format!("Failed to open key file: {}", e)))?);
// Parse certificates
let cert_chain: Vec<_> = certs(cert_file)
.collect::<Result<Vec<_>, _>>()
.map_err(|e| TlsConfigError::InvalidCertificate(format!("Failed to parse certificates: {}", e)))?;
if cert_chain.is_empty() {
return Err(TlsConfigError::InvalidCertificate("No certificates found in certificate file".to_string()));
}
info!("Loaded {} certificate(s)", cert_chain.len());
// Parse private keys
let mut keys: Vec<PrivateKeyDer> = pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.map_err(|e| TlsConfigError::InvalidPrivateKey(format!("Failed to parse private key: {}", e)))?
.into_iter()
.map(|k| k.into())
.collect();
if keys.is_empty() {
return Err(TlsConfigError::NoPrivateKeys(key_path.to_string()));
}
info!("Loaded {} private key(s)", keys.len());
// Create TLS configuration
config.with_single_cert(cert_chain, keys.remove(0))
.map_err(|e| TlsConfigError::ConfigurationError(format!("Failed to create TLS configuration: {}", e)))
}
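/// HTTP handler that upgrades the request to a WebSocket connection. The
/// `{circle_pk}` path segment scopes the connection to a circle.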
async fn ws_handler(
req: HttpRequest,
stream: web::Payload,
server: web::Data<Server>,
) -> Result<HttpResponse, Error> {
let server_circle_name = req.match_info().get("circle_pk").unwrap_or("unknown").to_string();
let circle_public_key = server_circle_name.clone(); // Assuming pk is the name for now
// Extract the Server from web::Data and clone it
let mut server_actor = server.as_ref().clone();
// Set the circle name for this WebSocket connection
server_actor.circle_name = server_circle_name;
server_actor.circle_public_key = circle_public_key;
// Create and start the WebSocket actor
ws::start(
server_actor,
&req,
stream,
)
}

View File

@ -0,0 +1,76 @@
use circle_ws_lib::{spawn_circle_server, ServerConfig};
use rhailib_engine::create_heromodels_engine;
use futures_util::{SinkExt, StreamExt};
use heromodels::db::hero::OurDB;
use rhailib_worker::spawn_rhai_worker;
use serde_json::json;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio_tungstenite::{connect_async, tungstenite::protocol::Message};
use uuid::Uuid;
#[tokio::test]
async fn test_server_startup_and_play() {
let circle_pk = Uuid::new_v4().to_string();
let redis_url = "redis://127.0.0.1/";
// --- Worker Setup ---
let (shutdown_tx, shutdown_rx) = mpsc::channel(1);
let db = Arc::new(OurDB::new("file:memdb_test_server?mode=memory&cache=shared", true).unwrap());
let engine = create_heromodels_engine();
let worker_id = Uuid::new_v4().to_string();
let worker_handle = spawn_rhai_worker(
worker_id,
circle_pk.to_string(),
engine,
redis_url.to_string(),
shutdown_rx,
false,
);
// --- Server Setup ---
let config = ServerConfig::new(
"127.0.0.1".to_string(),
9997, // Using a different port to avoid conflicts
redis_url.to_string(),
);
let (server_task, server_handle) = spawn_circle_server(config).unwrap();
let server_join_handle = tokio::spawn(server_task);
// Give server and worker a moment to start
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// --- Client Connection and Test ---
let ws_url = format!("ws://127.0.0.1:9997/{}", circle_pk);
let (mut ws_stream, _) = connect_async(ws_url).await.expect("Failed to connect");
let play_req = json!({
"jsonrpc": "2.0",
"method": "play",
"params": { "script": "40 + 2" },
"id": 1
});
ws_stream
.send(Message::Text(play_req.to_string()))
.await
.unwrap();
let response = ws_stream.next().await.unwrap().unwrap();
let response_text = response.to_text().unwrap();
let response_json: serde_json::Value = serde_json::from_str(response_text).unwrap();
assert_eq!(response_json["id"], 1);
assert!(
response_json["result"].is_object(),
"The result should be an object, but it was: {}",
response_text
);
assert_eq!(response_json["result"]["output"], "42");
// --- Cleanup ---
server_handle.stop(true).await;
let _ = server_join_handle.await;
let _ = shutdown_tx.send(()).await;
let _ = worker_handle.await;
}

View File

@ -0,0 +1,25 @@
use circle_ws_lib::{spawn_circle_server, ServerConfig};
use std::time::Duration;
use tokio_tungstenite::connect_async;
#[tokio::test]
async fn test_server_connection() {
let config = ServerConfig::new(
"127.0.0.1".to_string(),
9001,
"redis://127.0.0.1:6379".to_string(),
);
let (server_handle, _server_stop_handle) = spawn_circle_server(config).unwrap();
tokio::time::sleep(Duration::from_secs(1)).await;
    let url_str = "ws://127.0.0.1:9001/test_pub_key";
    let (ws_stream, _) = connect_async(url_str).await.expect("Failed to connect");
println!("WebSocket connection successful: {:?}", ws_stream);
server_handle.abort();
}

View File

@ -0,0 +1,119 @@
use circle_ws_lib::{spawn_circle_server, ServerConfig};
use futures_util::{sink::SinkExt, stream::StreamExt};
use std::time::Duration;
use tokio::time::sleep;
use tokio_tungstenite::{connect_async, tungstenite::protocol::Message};
// Define a simple JSON-RPC request structure for sending scripts
#[derive(serde::Serialize, Debug)]
struct JsonRpcRequest {
jsonrpc: String,
method: String,
params: ScriptParams,
id: u64,
}
#[derive(serde::Serialize, Debug)]
struct ScriptParams {
script: String,
}
// Define a simple JSON-RPC error response structure for assertion
#[derive(serde::Deserialize, Debug)]
#[allow(dead_code)]
struct JsonRpcErrorResponse {
jsonrpc: String,
error: JsonRpcErrorDetails,
id: Option<serde_json::Value>,
}
#[derive(serde::Deserialize, Debug)]
struct JsonRpcErrorDetails {
code: i32,
message: String,
}
const SERVER_ADDRESS: &str = "ws://127.0.0.1:8088/test_pub_key_timeout";
const TEST_CIRCLE_NAME: &str = "test_timeout_circle";
const RHAI_TIMEOUT_SECONDS: u64 = 30; // Generous upper bound on the server's task timeout
#[tokio::test]
async fn test_rhai_script_timeout() {
let server_config = ServerConfig::new(
"127.0.0.1".to_string(),
8088,
"redis://127.0.0.1:6379".to_string(),
);
let (server_handle, _server_stop_handle) = spawn_circle_server(server_config).unwrap();
sleep(Duration::from_secs(2)).await; // Give server time to start
let (mut ws_stream, _response) = connect_async(SERVER_ADDRESS)
.await
.expect("Failed to connect to WebSocket server");
let long_running_script = "
        let x = 0;
for i in 0..999999999 {
x = x + i;
}
print(x);
"
.to_string();
let request = JsonRpcRequest {
jsonrpc: "2.0".to_string(),
method: "play".to_string(),
params: ScriptParams {
script: long_running_script,
},
id: 1,
};
let request_json = serde_json::to_string(&request).expect("Failed to serialize request");
ws_stream
.send(Message::Text(request_json))
.await
.expect("Failed to send message");
match tokio::time::timeout(
Duration::from_secs(RHAI_TIMEOUT_SECONDS + 10),
ws_stream.next(),
)
.await
{
Ok(Some(Ok(Message::Text(text)))) => {
let response: Result<JsonRpcErrorResponse, _> = serde_json::from_str(&text);
match response {
Ok(err_resp) => {
assert_eq!(
err_resp.error.code, -32002,
"Error code should indicate timeout."
);
assert!(
err_resp.error.message.contains("Timeout"),
"Error message should indicate timeout."
);
}
Err(e) => {
panic!("Failed to deserialize error response: {}. Raw: {}", e, text);
}
}
}
Ok(Some(Ok(other_msg))) => {
panic!("Received unexpected message type: {:?}", other_msg);
}
Ok(Some(Err(e))) => {
panic!("WebSocket error: {}", e);
}
Ok(None) => {
panic!("WebSocket stream closed unexpectedly.");
}
Err(_) => {
panic!("Test timed out waiting for server response.");
}
}
ws_stream.close(None).await.ok();
server_handle.abort();
}

View File

@ -0,0 +1,85 @@
use circle_ws_lib::{spawn_circle_server, ServerConfig};
use std::time::Duration;
use tokio::time::sleep;
#[tokio::test]
async fn test_basic_ws_server_startup() {
    // Use try_init: both tests in this file may run in the same process.
    let _ = env_logger::try_init();
let config = ServerConfig::new(
"127.0.0.1".to_string(),
8091, // Use a different port to avoid conflicts
"redis://127.0.0.1:6379".to_string(),
);
let (server_task, server_handle) = spawn_circle_server(config)
.expect("Failed to spawn circle server");
// Let the server run for a short time
sleep(Duration::from_millis(100)).await;
// Stop the server
server_handle.stop(true).await;
// Wait for the server task to complete
let _ = server_task.await;
}
#[tokio::test]
async fn test_tls_server_configuration() {
    let _ = env_logger::try_init();
// Test TLS configuration validation
let config = ServerConfig::new(
"127.0.0.1".to_string(),
8092,
"redis://127.0.0.1:6379".to_string(),
)
.with_tls("nonexistent_cert.pem".to_string(), "nonexistent_key.pem".to_string())
.with_tls_port(8444);
// This should fail gracefully if cert files don't exist
match spawn_circle_server(config) {
Ok((server_task, server_handle)) => {
// If it succeeds (cert files exist), clean up
sleep(Duration::from_millis(100)).await;
server_handle.stop(true).await;
let _ = server_task.await;
println!("TLS server started successfully (cert files found)");
}
Err(e) => {
// Expected if cert files don't exist - this is fine for testing
println!("TLS server failed to start as expected: {}", e);
assert!(e.to_string().contains("Certificate") || e.to_string().contains("TLS"));
}
}
}
#[tokio::test]
async fn test_server_config_validation() {
// Test that ServerConfig properly validates TLS settings
let config = ServerConfig::new(
"127.0.0.1".to_string(),
8093,
"redis://127.0.0.1:6379".to_string(),
);
// Test basic configuration
assert_eq!(config.host, "127.0.0.1");
assert_eq!(config.port, 8093);
assert!(!config.enable_tls);
assert!(!config.enable_auth);
// Test TLS configuration
let tls_config = config
.with_tls("cert.pem".to_string(), "key.pem".to_string())
.with_tls_port(8445)
.with_auth();
assert!(tls_config.enable_tls);
assert!(tls_config.enable_auth);
assert_eq!(tls_config.get_tls_port(), 8445);
assert_eq!(tls_config.cert_path, Some("cert.pem".to_string()));
assert_eq!(tls_config.key_path, Some("key.pem".to_string()));
}

25
proxies/http/Cargo.toml Normal file
View File

@ -0,0 +1,25 @@
[package]
name = "hero-http-proxy"
version = "0.1.0"
edition = "2021"
[dependencies]
actix-web = "4.4"
tokio = { version = "1.0", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
log = "0.4"
env_logger = "0.10"
clap = { version = "4.0", features = ["derive"] }
bytes = "1.5"
reqwest = { version = "0.11", features = ["json"] }
tokio-tungstenite = { version = "0.20", features = ["native-tls"] }
futures-util = "0.3"
thiserror = "1.0"
url = "2.4"
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"
anyhow = "1.0"
uuid = { version = "1.0", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }

149
proxies/http/README.md Normal file
View File

@ -0,0 +1,149 @@
# Hero HTTP Proxy
An HTTP proxy that converts incoming webhook requests into JSON-RPC calls to the Hero WebSocket server.
## Overview
This proxy acts as a bridge between HTTP webhook providers (such as Stripe and iDenfy) and the Hero WebSocket server: it receives webhook requests, verifies their signatures, and forwards them as JSON-RPC calls over WebSocket.
## Features
- **Webhook Support**: Built-in support for Stripe and iDenfy webhooks
- **Signature Verification**: HMAC-SHA256 signature verification for security
- **Extensible Design**: Easy to add new webhook providers
- **WebSocket Connection Pooling**: Reuses WebSocket connections for efficiency
- **Configurable**: JSON-based configuration with environment variable support
- **Health Checks**: Built-in health check endpoint
## Configuration
The proxy can be configured via a JSON configuration file or environment variables:
### Environment Variables
- `STRIPE_WEBHOOK_SECRET`: Stripe webhook signing secret
- `IDENFY_WEBHOOK_SECRET`: iDenfy webhook signing secret
### Configuration File Example
```json
{
"webhooks": {
"stripe": {
"secret": "whsec_...",
"signature_header": "stripe-signature",
"verify_signature": true
},
"idenfy": {
"secret": "your_idenfy_secret",
"signature_header": "idenfy-signature",
"verify_signature": true
}
},
"websocket_timeout": 30,
"max_retries": 3
}
```
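For orientation, here is a minimal sketch of how this file might be consumed at startup. `Config::from_file` and `Config::default` are defined in `src/config.rs` (shown later); the `load_config` helper itself is hypothetical:
```rust
use anyhow::Result;
use crate::config::Config;

// Prefer an explicit config file; otherwise fall back to the env-var
// defaults baked into Config::default().
fn load_config(config_path: Option<&str>) -> Result<Config> {
    match config_path {
        Some(path) => Config::from_file(path), // parses the JSON shown above
        None => Ok(Config::default()),         // uses *_WEBHOOK_SECRET env vars
    }
}
```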
## Usage
### Basic Usage
```bash
cargo run -- --port 8080 --websocket-url ws://localhost:3030
```
### With Configuration File
```bash
cargo run -- --port 8080 --websocket-url ws://localhost:3030 --config config.json
```
### Command Line Options
- `--port, -p`: HTTP server port (default: 8080)
- `--websocket-url, -w`: WebSocket server URL (default: ws://localhost:3030)
- `--config, -c`: Configuration file path (optional)
## Endpoints
### Webhook Endpoints
- `POST /webhooks/stripe/{circle_pk}`: Stripe webhook handler
- `POST /webhooks/idenfy/{circle_pk}`: iDenfy webhook handler
### Health Check
- `GET /health`: Health check endpoint
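Assuming the default port of 8080, the endpoints can be exercised with `curl` (the payload, signature, and `circle_pk` below are placeholders):
```bash
# Health check
curl http://localhost:8080/health

# Simulate a Stripe webhook for a given circle public key
curl -X POST http://localhost:8080/webhooks/stripe/<circle_pk> \
  -H "Content-Type: application/json" \
  -H "stripe-signature: <signature>" \
  -d '{"type": "checkout.session.completed"}'
```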
## Adding New Webhook Providers
To add a new webhook provider:
1. **Add configuration** in `src/config.rs`:
```rust
webhooks.insert("newprovider".to_string(), WebhookConfig {
secret: std::env::var("NEWPROVIDER_WEBHOOK_SECRET").unwrap_or_default(),
signature_header: "newprovider-signature".to_string(),
verify_signature: true,
});
```
2. **Add signature verification** in `src/webhook/signature.rs` (a fuller sketch follows after this list):
```rust
pub fn verify_newprovider_signature(
payload: &[u8],
signature_header: &str,
secret: &str,
) -> Result<(), ProxyError> {
// Implementation specific to the provider
}
```
3. **Add handler** in `src/webhook/handlers.rs`:
```rust
pub async fn handle_newprovider_webhook(
req: HttpRequest,
path: web::Path<String>,
body: Bytes,
data: web::Data<Arc<ProxyState>>,
) -> ActixResult<HttpResponse> {
// Handler implementation
}
```
4. **Register route** in `src/main.rs`:
```rust
.route("/newprovider/{circle_pk}", web::post().to(webhook::handlers::handle_newprovider_webhook))
```
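As a concrete illustration of step 2, a minimal HMAC-SHA256 verifier could look like the sketch below. It uses the `hmac`, `sha2`, and `hex` crates already listed in `Cargo.toml`; the header format (a plain hex digest) and the `ProxyError` shape are assumptions for illustration:
```rust
use hmac::{Hmac, Mac};
use sha2::Sha256;

// Stand-in for the crate's ProxyError; the real type lives in the proxy crate.
#[derive(Debug)]
pub enum ProxyError {
    InvalidSignature,
}

type HmacSha256 = Hmac<Sha256>;

pub fn verify_newprovider_signature(
    payload: &[u8],
    signature_header: &str,
    secret: &str,
) -> Result<(), ProxyError> {
    // Assume the header carries a hex-encoded HMAC-SHA256 digest of the raw body.
    let expected = hex::decode(signature_header).map_err(|_| ProxyError::InvalidSignature)?;
    let mut mac = HmacSha256::new_from_slice(secret.as_bytes())
        .map_err(|_| ProxyError::InvalidSignature)?;
    mac.update(payload);
    // verify_slice compares in constant time.
    mac.verify_slice(&expected).map_err(|_| ProxyError::InvalidSignature)
}
```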
## Architecture
```
HTTP Webhook  →  Signature Verification  →  JSON-RPC     →  WebSocket Server
     ↓                    ↓                     ↓                  ↓
Stripe/iDenfy      HMAC-SHA256 check        play method       Hero Server
```
The proxy maintains persistent WebSocket connections to the Hero server and forwards webhook events as `play` method calls with appropriate scripts (e.g., `stripe_webhook_received`, `idenfy_webhook_received`).
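Concretely, a forwarded Stripe event might reach the WebSocket server as a JSON-RPC request shaped like the following (the exact script body is an assumption for illustration; the `params` shape matches the server's `play` handler):
```json
{
  "jsonrpc": "2.0",
  "method": "play",
  "params": { "script": "stripe_webhook_received(event)" },
  "id": 1
}
```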
## Dependencies
- **actix-web**: HTTP server framework
- **tokio-tungstenite**: WebSocket client
- **heromodels**: Hero project models (local dependency)
- **serde**: JSON serialization
- **hmac/sha2**: Signature verification
- **clap**: Command line argument parsing
## Development
```bash
# Build
cargo build
# Run tests
cargo test
# Run with debug logging
RUST_LOG=debug cargo run
# Format code
cargo fmt
# Check for issues
cargo clippy
```

View File

@ -0,0 +1,16 @@
{
"webhooks": {
"stripe": {
"secret": "whsec_test_secret_replace_with_actual",
"signature_header": "stripe-signature",
"verify_signature": true
},
"idenfy": {
"secret": "idenfy_test_secret_replace_with_actual",
"signature_header": "idenfy-signature",
"verify_signature": true
}
},
"websocket_timeout": 30,
"max_retries": 3
}

View File

@ -0,0 +1,62 @@
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use anyhow::Result;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// Webhook configurations for different providers
pub webhooks: HashMap<String, WebhookConfig>,
/// Default timeout for WebSocket requests (in seconds)
pub websocket_timeout: u64,
/// Maximum retries for WebSocket connections
pub max_retries: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebhookConfig {
/// Secret key for signature verification
pub secret: String,
/// Signature header name
pub signature_header: String,
/// Whether signature verification is enabled
pub verify_signature: bool,
}
impl Default for Config {
fn default() -> Self {
let mut webhooks = HashMap::new();
// Default Stripe configuration
webhooks.insert("stripe".to_string(), WebhookConfig {
secret: std::env::var("STRIPE_WEBHOOK_SECRET").unwrap_or_default(),
signature_header: "stripe-signature".to_string(),
verify_signature: true,
});
// Default iDenfy configuration
webhooks.insert("idenfy".to_string(), WebhookConfig {
secret: std::env::var("IDENFY_WEBHOOK_SECRET").unwrap_or_default(),
signature_header: "idenfy-signature".to_string(),
verify_signature: true,
});
Self {
webhooks,
websocket_timeout: 30,
max_retries: 3,
}
}
}
impl Config {
pub fn from_file(path: &str) -> Result<Self> {
let content = fs::read_to_string(path)?;
let config: Config = serde_json::from_str(&content)?;
Ok(config)
}
pub fn get_webhook_config(&self, provider: &str) -> Option<&WebhookConfig> {
self.webhooks.get(provider)
}
}

Some files were not shown because too many files have changed in this diff.