commit 4b23e5eb7f7042e4a9070e504b50b517eb3951af Author: Timur Gordon <31495328+timurgordon@users.noreply.github.com> Date: Thu Nov 13 20:44:00 2025 +0100 move repos into monorepo diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..af4b17d --- /dev/null +++ b/.gitignore @@ -0,0 +1,36 @@ +# Rust +/target +**/*.rs.bk +*.pdb +Cargo.lock + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ +.DS_Store + +# Environment +.env +.env.local + +# Logs +*.log + +# Testing +/test-data + +# Build artifacts +/dist +/pkg +*.wasm +wasm-pack.log + +# Documentation +/book + +# Temporary files +*.tmp +*.temp diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..a232f59 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,99 @@ +[workspace] +resolver = "2" +members = [ + "bin/coordinator", + "bin/osiris", + "bin/runners/osiris", + "bin/runners/sal", + "bin/supervisor", + "lib/clients/job", + "lib/clients/osiris", + "lib/clients/supervisor", + "lib/models/job", + "lib/osiris/core", + "lib/osiris/derive", + "lib/runner", +] + +[workspace.package] +version = "0.1.0" +edition = "2024" +authors = ["Hero Team"] +license = "MIT OR Apache-2.0" +repository = "https://git.ourworld.tf/herocode/horus" + +[workspace.dependencies] +# Async runtime +tokio = { version = "1.0", features = ["full"] } +async-trait = "0.1" + +# Serialization +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +# Error handling +thiserror = "1.0" +anyhow = "1.0" + +# Logging +log = "0.4" +env_logger = "0.11" + +# Time +chrono = { version = "0.4", features = ["serde"] } + +# UUID +uuid = { version = "1.6", features = ["v4", "serde"] } + +# Redis +redis = { version = "0.25", features = ["tokio-comp", "connection-manager"] } + +# JSON-RPC +jsonrpsee = { version = "0.26", features = ["server", "macros", "http-client"] } + +# HTTP/Web +tower = "0.5" +tower-http = { version = "0.5", features = ["cors", "trace"] } +hyper = { version = "1.0", features = ["full"] } +hyper-util = { version = "0.1", features = ["tokio"] } +http = "1.0" +http-body-util = "0.1" +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +# CLI +clap = { version = "4.4", features = ["derive", "env"] } +toml = "0.8" + +# WASM +wasm-bindgen = "0.2" +wasm-bindgen-futures = "0.4" +js-sys = "0.3" +web-sys = "0.3" +serde-wasm-bindgen = "0.6" +console_log = "1.0" +getrandom = { version = "0.2", features = ["js"] } + +# Crypto +secp256k1 = { version = "0.29", features = ["rand", "global-context"] } +sha2 = "0.10" +hex = "0.4" + +# Collections +indexmap = "2.0" +dashmap = "6.0" +lazy_static = "1.4" + +# Utilities +futures = "0.3" + +# Scripting +rhai = { version = "1.21.0", features = ["std", "sync", "serde"] } + +# Testing +tempfile = "3.8" + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/README.md b/README.md new file mode 100644 index 0000000..6abe1c7 --- /dev/null +++ b/README.md @@ -0,0 +1,102 @@ +# Horus + +Horus is a comprehensive workspace for Hero infrastructure components. 
+ +## Structure + +``` +horus/ +├── bin/ +│ └── supervisor/ # Hero Supervisor - job orchestration and runner management +└── lib/ + └── clients/ + └── supervisor/ # OpenRPC client for Hero Supervisor (native + WASM) +``` + +## Components + +### Hero Supervisor (`bin/supervisor`) + +The Hero Supervisor manages job execution across distributed runners with: +- Job lifecycle management (create, start, stop, delete) +- Runner registration and management +- Redis-based job queuing +- Osiris integration for persistent storage +- OpenRPC JSON-RPC API with authentication +- CORS-enabled HTTP server + +### Supervisor Client (`lib/clients/supervisor`) + +OpenRPC client library for Hero Supervisor with dual-target support: +- **Native**: Full async Rust client using `jsonrpsee` +- **WASM**: Browser-compatible client for web applications + +## Building + +### Build everything +```bash +cargo build --workspace +``` + +### Build supervisor binary +```bash +cargo build -p hero-supervisor +``` + +### Build client library +```bash +cargo build -p hero-supervisor-openrpc-client +``` + +### Build WASM client +```bash +cd lib/clients/supervisor +wasm-pack build --target web +``` + +## Running + +### Start the supervisor +```bash +cargo run -p hero-supervisor -- \ + --bind-address 127.0.0.1 \ + --port 3030 \ + --redis-url redis://127.0.0.1:6379 +``` + +### With configuration file +```bash +cargo run -p hero-supervisor -- --config config.toml +``` + +## Development + +### Run tests +```bash +cargo test --workspace +``` + +### Check all code +```bash +cargo check --workspace +``` + +### Format code +```bash +cargo fmt --all +``` + +### Lint +```bash +cargo clippy --workspace -- -D warnings +``` + +## Dependencies + +- **Rust**: 1.70+ +- **Redis**: Required for job queuing +- **Osiris**: Optional, for persistent storage + +## License + +MIT OR Apache-2.0 diff --git a/WORKSPACE_STRUCTURE.md b/WORKSPACE_STRUCTURE.md new file mode 100644 index 0000000..35211d1 --- /dev/null +++ b/WORKSPACE_STRUCTURE.md @@ -0,0 +1,164 @@ +# Horus Workspace Structure + +The Horus workspace consolidates all Hero ecosystem components into a single, well-organized monorepo. 
+ +## Workspace Members + +### Binaries (`bin/`) + +#### `bin/supervisor/` +- **Package**: `hero-supervisor` +- **Description**: Main supervisor for managing actor runners +- **Binaries**: `supervisor` +- **Library**: `hero_supervisor` + +#### `bin/osiris/` +- **Package**: `osiris-server` +- **Description**: Osiris HTTP server for object storage +- **Binaries**: `osiris` + +#### `bin/runners/sal/` +- **Package**: `runner-sal` +- **Description**: System Abstraction Layer (SAL) runner +- **Binaries**: `runner_sal` + +#### `bin/runners/osiris/` +- **Package**: `runner-osiris` +- **Description**: Osiris-backed runner with database support +- **Binaries**: `runner_osiris` + +### Libraries (`lib/`) + +#### Models (`lib/models/`) + +##### `lib/models/job/` +- **Package**: `hero-job` +- **Description**: Job model types and builders +- **Library**: `hero_job` + +#### Clients (`lib/clients/`) + +##### `lib/clients/job/` +- **Package**: `hero-job-client` +- **Description**: Redis-based job client +- **Library**: `hero_job_client` + +##### `lib/clients/supervisor/` +- **Package**: `hero-supervisor-openrpc-client` +- **Description**: OpenRPC client for supervisor (native + WASM) +- **Library**: `hero_supervisor_openrpc_client` + +##### `lib/clients/osiris/` +- **Package**: `osiris-client` +- **Description**: Client library for Osiris +- **Library**: `osiris_client` + +#### Core Libraries + +##### `lib/runner/` +- **Package**: `hero-runner` +- **Description**: Core runner library for executing jobs +- **Library**: `hero_runner` + +##### `lib/osiris/core/` +- **Package**: `osiris-core` +- **Description**: Osiris core - object storage and indexing +- **Library**: `osiris` + +##### `lib/osiris/derive/` +- **Package**: `osiris-derive` +- **Description**: Derive macros for Osiris +- **Type**: Procedural macro crate + +## Dependency Graph + +``` +bin/supervisor +├── lib/models/job +├── lib/clients/job +└── (jsonrpsee, redis, tokio, etc.) + +bin/osiris +└── lib/osiris/core + └── lib/osiris/derive + +bin/runners/sal +├── lib/runner +│ ├── lib/models/job +│ └── lib/clients/job +└── (SAL modules from herolib_rust) + +bin/runners/osiris +├── lib/runner +│ ├── lib/models/job +│ └── lib/clients/job +└── lib/osiris/core + +lib/clients/supervisor +├── lib/models/job +└── (jsonrpsee, WASM support) + +lib/clients/osiris +├── lib/models/job +└── lib/clients/supervisor + +lib/clients/job +└── lib/models/job +``` + +## Build Commands + +```bash +# Check entire workspace +cargo check --workspace + +# Build entire workspace +cargo build --workspace + +# Build specific package +cargo build -p hero-supervisor +cargo build -p osiris-core +cargo build -p runner-sal + +# Run binaries +cargo run -p hero-supervisor --bin supervisor +cargo run -p osiris-server --bin osiris +cargo run -p runner-sal --bin runner_sal +cargo run -p runner-osiris --bin runner_osiris +``` + +## Migration Notes + +### From External Repos + +The following components were migrated into this workspace: + +1. **Job** (`/herocode/job/rust`) → `lib/models/job` + `lib/clients/job` +2. **Runner** (`/herocode/runner/rust`) → `lib/runner` + `bin/runners/*` +3. **Osiris** (`/herocode/osiris`) → `lib/osiris/*` + `bin/osiris` + `lib/clients/osiris` +4. **Supervisor** (already in workspace) → `bin/supervisor` + `lib/clients/supervisor` + +### Path Dependencies + +All internal dependencies now use path-based references: +- `hero-job = { path = "../../lib/models/job" }` +- `osiris-core = { path = "../../lib/osiris/core" }` +- etc. 
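+
+As a hedged illustration (the real crate manifests may differ), a runner crate such as `bin/runners/osiris` could combine path references with workspace-inherited versions in its `Cargo.toml`:
+
+```toml
+[dependencies]
+# Internal crates resolved by path inside the monorepo (paths are illustrative)
+hero-runner = { path = "../../../lib/runner" }
+osiris-core = { path = "../../../lib/osiris/core" }
+
+# Shared versions inherited from the root [workspace.dependencies]
+tokio.workspace = true
+serde.workspace = true
+```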
+
+External dependencies (SAL modules, heromodels, etc.) remain as git dependencies.
+
+## Workspace Configuration
+
+Shared dependencies are defined in the root `Cargo.toml` under `[workspace.dependencies]`:
+- tokio, async-trait
+- serde, serde_json
+- redis, uuid, chrono
+- jsonrpsee, axum, tower
+- And more...
+
+Individual crates reference these with `.workspace = true`:
+```toml
+[dependencies]
+tokio.workspace = true
+serde.workspace = true
+```
diff --git a/bin/coordinator/Cargo.toml b/bin/coordinator/Cargo.toml
new file mode 100644
index 0000000..d474fac
--- /dev/null
+++ b/bin/coordinator/Cargo.toml
@@ -0,0 +1,42 @@
+[package]
+name = "hero-coordinator"
+version.workspace = true
+edition.workspace = true
+description = "Hero Coordinator - Manages job execution across runners"
+license = "MIT OR Apache-2.0"
+
+[lib]
+name = "hero_coordinator"
+path = "src/lib.rs"
+
+[[bin]]
+name = "coordinator"
+path = "src/main.rs"
+
+[dependencies]
+# Core dependencies
+tokio.workspace = true
+async-trait.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+clap.workspace = true
+
+# Redis
+redis.workspace = true
+
+# JSON-RPC
+jsonrpsee.workspace = true
+
+# HTTP client
+reqwest = { version = "0.12.7", features = ["json", "rustls-tls"] }
+
+# Base64 encoding
+base64 = "0.22.1"
+
+# Tracing
+tracing.workspace = true
+tracing-subscriber.workspace = true
+
+# Hero dependencies
+hero-job = { path = "../../lib/models/job" }
diff --git a/bin/coordinator/README.md b/bin/coordinator/README.md
new file mode 100644
index 0000000..a7862b5
--- /dev/null
+++ b/bin/coordinator/README.md
@@ -0,0 +1,28 @@
+# herocoordinator
+
+## Demo setup
+
+A Python script is provided in the [scripts directory](./scripts/supervisor_flow_demo.py). This script
+generates some demo jobs to be run by [a supervisor](https://git.ourworld.tf/herocode/supervisor).
+Communication happens over [mycelium](https://github.com/threefoldtech/mycelium). To run the demo, a
+supervisor must be running, which uses a mycelium instance to read and write messages. A __different__
+mycelium instance needs to run for the coordinator (the supervisor can run on a different host than
+the coordinator, as long as the two mycelium instances used can reach each other).
+
+An example of a local setup:
+
+```bash
+# Run a redis docker
+docker run -it -d -p 6379:6379 --name redis redis
+# Spawn mycelium node 1 with default settings. 
This also creates a TUN interface though that is not +# necessary for the messages +mycelium +# Spawn mycelium node 2, connect to the first node +mycelium --key-file key.bin --peers tcp://127.0.0.1:9651 --disable-quic --disable-peer-discovery --api-addr 127.0.0.1:9989 --jsonrpc-addr 127.0.0.1:9990 --no-tun -t 8651 +# Start the supervisor +supervisor --admin-secret admin123 --user-secret user123 --register-secret register123 --mycelium-url http://127.0.0.1:9990 --topic supervisor.rpc +# Start the coordinator +cargo run # (alternatively if a compiled binary is present that can be run) +# Finally, invoke the demo script +python3 scripts/supervisor_flow_demo.py +``` diff --git a/bin/coordinator/main.rs b/bin/coordinator/main.rs new file mode 100644 index 0000000..971c8ae --- /dev/null +++ b/bin/coordinator/main.rs @@ -0,0 +1,142 @@ +use clap::Parser; +use std::net::{IpAddr, SocketAddr}; +use std::sync::Arc; + +use tracing::{error, info}; +use tracing_subscriber::EnvFilter; +#[derive(Debug, Clone, Parser)] +#[command( + name = "herocoordinator", + version, + about = "Hero Coordinator CLI", + long_about = None +)] +struct Cli { + #[arg( + long = "mycelium-ip", + short = 'i', + env = "MYCELIUM_IP", + default_value = "127.0.0.1", + help = "IP address where Mycelium JSON-RPC is listening (default: 127.0.0.1)" + )] + mycelium_ip: IpAddr, + + #[arg( + long = "mycelium-port", + short = 'p', + env = "MYCELIUM_PORT", + default_value_t = 8990u16, + help = "Port for Mycelium JSON-RPC (default: 8990)" + )] + mycelium_port: u16, + + #[arg( + long = "redis-addr", + short = 'r', + env = "REDIS_ADDR", + default_value = "127.0.0.1:6379", + help = "Socket address of Redis instance (default: 127.0.0.1:6379)" + )] + redis_addr: SocketAddr, + + #[arg( + long = "api-http-ip", + env = "API_HTTP_IP", + default_value = "127.0.0.1", + help = "Bind IP for HTTP JSON-RPC server (default: 127.0.0.1)" + )] + api_http_ip: IpAddr, + + #[arg( + long = "api-http-port", + env = "API_HTTP_PORT", + default_value_t = 9652u16, + help = "Bind port for HTTP JSON-RPC server (default: 9652)" + )] + api_http_port: u16, + + #[arg( + long = "api-ws-ip", + env = "API_WS_IP", + default_value = "127.0.0.1", + help = "Bind IP for WebSocket JSON-RPC server (default: 127.0.0.1)" + )] + api_ws_ip: IpAddr, + + #[arg( + long = "api-ws-port", + env = "API_WS_PORT", + default_value_t = 9653u16, + help = "Bind port for WebSocket JSON-RPC server (default: 9653)" + )] + api_ws_port: u16, +} + +#[tokio::main] +async fn main() { + let cli = Cli::parse(); + // Initialize tracing subscriber (pretty formatter; controlled by RUST_LOG) + let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + tracing_subscriber::fmt() + .with_env_filter(filter) + .pretty() + .with_target(true) + .with_level(true) + .init(); + + let http_addr = SocketAddr::new(cli.api_http_ip, cli.api_http_port); + let ws_addr = SocketAddr::new(cli.api_ws_ip, cli.api_ws_port); + + // Initialize Redis driver + let redis = herocoordinator::storage::RedisDriver::new(cli.redis_addr.to_string()) + .await + .expect("Failed to connect to Redis"); + + // Initialize Service + let service = herocoordinator::service::AppService::new(redis); + let service_for_router = service.clone(); + + // Shared application state + let state = Arc::new(herocoordinator::rpc::AppState::new(service)); + + // Start router workers (auto-discovered contexts) using a single global SupervisorHub (no separate inbound listener) + { + let base_url = format!("http://{}:{}", cli.mycelium_ip, 
cli.mycelium_port);
+        let hub = herocoordinator::clients::SupervisorHub::new(
+            base_url.clone(),
+            "supervisor.rpc".to_string(),
+        )
+        .expect("Failed to initialize SupervisorHub");
+        let cfg = herocoordinator::router::RouterConfig {
+            context_ids: Vec::new(), // ignored by start_router_auto
+            concurrency: 32,
+            base_url,
+            topic: "supervisor.rpc".to_string(),
+            sup_hub: hub.clone(),
+            transport_poll_interval_secs: 2,
+            transport_poll_timeout_secs: 300,
+        };
+        // Per-context outbound delivery loops (replies handled by SupervisorHub)
+        let _auto_handle = herocoordinator::router::start_router_auto(service_for_router, cfg);
+    }
+
+    // Build RPC modules for both servers
+    let http_module = herocoordinator::rpc::build_module(state.clone());
+    let ws_module = herocoordinator::rpc::build_module(state.clone());
+
+    info!(%http_addr, %ws_addr, redis_addr=%cli.redis_addr, "Starting JSON-RPC servers");
+
+    // Start servers
+    let _http_handle = herocoordinator::rpc::start_http(http_addr, http_module)
+        .await
+        .expect("Failed to start HTTP server");
+    let _ws_handle = herocoordinator::rpc::start_ws(ws_addr, ws_module)
+        .await
+        .expect("Failed to start WS server");
+
+    // Wait for Ctrl+C to terminate
+    if let Err(e) = tokio::signal::ctrl_c().await {
+        error!(error=%e, "Failed to listen for shutdown signal");
+    }
+    info!("Shutdown signal received, exiting.");
+}
diff --git a/bin/coordinator/specs/architecture.md b/bin/coordinator/specs/architecture.md
new file mode 100644
index 0000000..5c1f6e0
--- /dev/null
+++ b/bin/coordinator/specs/architecture.md
@@ -0,0 +1,77 @@
+
+
+## per user
+
+runs in a container or VM, one per user
+
+- zinit
+- herocoordinator
+  - think of it as a DAG workflow manager
+  - manages jobs which are sent around to different nodes
+- mycelium address range (part of mycelium on host)
+- herodb
+  - state manager
+  - redis protocol / primitives
+  - fs backend (mem and always-append in future)
+  - encryption & decryption primitives
+  - key mgmt for encryption (creation, deletion)
+  - openrpc admin features: user management, role-based access control
+- postgresql + postgrest
+- AI Agent TBD
+
+```mermaid
+%%{init: {"theme":"dark"}}%%
+graph TD
+    subgraph Per Node System
+        N[Node] --> OS(Run on top of ZOS4 or Ubuntu or in a VM)
+
+        subgraph On Node
+            OS --> SV(Supervisors)
+            OS --> ZN(Zinit)
+            OS --> R(Runners)
+            OS --> PGN(Some Nodes: PostgreSQL + Postgrest)
+            OS --> HDN(Each Node: Herodb)
+
+            subgraph Supervisors Responsibilities
+                SV --> SV_MR(Manage runners & scheduling for the node)
+                SV --> SV_MJ(Monitor & schedule jobs)
+                SV --> SV_RU(Check resource usage)
+                SV --> SV_TO(Checks on timeout)
+            end
+
+            subgraph Runners Characteristics
+                R --> R_LV(V/Python & Rust)
+                R --> R_FORK(Uses fork per runner for scalability)
+                R --> R_COUNT(Some runners can only run 1, others more)
+                R --> R_CONTEXT(Some runners are per context)
+            end
+        end
+
+        SV -- "Manage" --> R
+        SV -- "Schedule jobs via" --> ZN
+        ZN -- "Starts" --> R
+        R -- "Interacts with" --> PGN
+        R -- "Interacts with" --> HDN
+    end
+```
+
+## per node
+
+- run on top of ZOS4 or Ubuntu or in a VM
+- supervisors
+  - manage runners and their scheduling for the node
+  - monitor & schedule jobs, check resource usage, check on timeouts
+- zinit
+- runners (are scheduled in zinit by supervisor)
+  - V/Python & Rust
+  - uses a fork per runner (process) for scalability
+  - some runners can only run 1, others more
+  - some runners are per context
+- some nodes will have postgresql + postgrest
+- each node has herodb
+
+REMARK
+
+- 
each rhai or heroscript running on a node can use herodb if needed (careful: its contents can and will be lost), but cannot communicate with anyone else outside of the node
+
+
diff --git a/bin/coordinator/specs/hercoordinator.md b/bin/coordinator/specs/hercoordinator.md
new file mode 100644
index 0000000..0b4549b
--- /dev/null
+++ b/bin/coordinator/specs/hercoordinator.md
@@ -0,0 +1,16 @@
+
+
+will have an OpenRPC interface
+
+- start, stop, delete, list a DAG
+- query the DAG and its status
+
+
+## remarks for supervisor
+
+- no retry
+- no dependencies
+
+## inspiration
+
+- DAGU
\ No newline at end of file
diff --git a/bin/coordinator/specs/model/actor.v b/bin/coordinator/specs/model/actor.v
new file mode 100644
index 0000000..368452a
--- /dev/null
+++ b/bin/coordinator/specs/model/actor.v
@@ -0,0 +1,18 @@
+module model
+
+// an actor is a participant in the new internet, the one who can ask for work
+// a user can have more than one actor operating for them; an actor always operates in a context which is hosted by the hero of the user
+// stored in the context db at actor:<id> (actor is hset)
+@[heap]
+pub struct Actor {
+pub mut:
+	id         u32
+	pubkey     string
+	address    []Address // address (to reach the actor back), normally mycelium but doesn't have to be
+	created_at u32 // epoch
+	updated_at u32 // epoch
+}
+
+pub fn (self Actor) redis_key() string {
+	return 'actor:${self.id}'
+}
diff --git a/bin/coordinator/specs/model/context.v b/bin/coordinator/specs/model/context.v
new file mode 100644
index 0000000..69db32c
--- /dev/null
+++ b/bin/coordinator/specs/model/context.v
@@ -0,0 +1,20 @@
+module model
+
+// each job is run in a context; this corresponds to a DB in redis and has specific rights to actors
+// a context is a redis db and also a location on a filesystem which can be used for e.g. logs, temporary files, etc.
+// actors create contexts for others to work in +// stored in the context db at context: (context is hset) +@[heap] +pub struct Context { +pub mut: + id u32 // corresponds with the redis db (in our ourdb or other redis) + admins []u32 // actors which have admin rights on this context (means can do everything) + readers []u32 // actors which can read the context info + executors []u32 // actors which can execute jobs in this context + created_at u32 // epoch + updated_at u32 // epoch +} + +pub fn (self Context) redis_key() string { + return 'context:${self.id}' +} diff --git a/bin/coordinator/specs/model/flow.v b/bin/coordinator/specs/model/flow.v new file mode 100644 index 0000000..abc7c46 --- /dev/null +++ b/bin/coordinator/specs/model/flow.v @@ -0,0 +1,41 @@ +module model + +// what get's executed by an actor and needs to be tracked as a whole, can be represented as a DAG graph +// this is the high level representation of a workflow to execute on work, its fully decentralized and distributed +// only the actor who created the flow can modify it and holds it in DB +// stored in the context db at flow: (flow is hset) +@[heap] +pub struct Flow { +pub mut: + id u32 // this job id is given by the actor who called for it + caller_id u32 // is the actor which called for this job + context_id u32 // each job is executed in a context + jobs []u32 // links to all jobs which make up this flow, this can be dynamically modified + env_vars map[string]string // they are copied to every job done + result map[string]string // the result of the flow + created_at u32 // epoch + updated_at u32 // epoch + status FlowStatus +} + +pub fn (self Flow) redis_key() string { + return 'flow:${self.id}' +} + +// FlowStatus represents the status of a flow +pub enum FlowStatus { + dispatched + started + error + finished +} + +// str returns the string representation of FlowStatus +pub fn (self FlowStatus) str() string { + return match self { + .dispatched { 'dispatched' } + .started { 'started' } + .error { 'error' } + .finished { 'finished' } + } +} diff --git a/bin/coordinator/specs/model/message.v b/bin/coordinator/specs/model/message.v new file mode 100644 index 0000000..f095076 --- /dev/null +++ b/bin/coordinator/specs/model/message.v @@ -0,0 +1,68 @@ +module model + +// Messages is what goes over mycelium (which is our messaging system), they can have a job inside +// stored in the context db at msg:: (msg is hset) +// there are 2 queues in the context db: queue: msg_out and msg_in these are generic queues which get all messages from mycelium (in) and the ones who need to be sent (out) are in the outqueue +@[heap] +pub struct Message { +pub mut: + id u32 // is unique id for the message, has been given by the caller + caller_id u32 // is the actor whos send this message + context_id u32 // each message is for a specific context + message string + message_type ScriptType + message_format_type MessageFormatType + timeout u32 // in sec, to arrive destination + timeout_ack u32 // in sec, to acknowledge receipt + timeout_result u32 // in sec, to process result and have it back + job []Job + logs []Log // e.g. 
for streaming logs back to originator + created_at u32 // epoch + updated_at u32 // epoch + status MessageStatus +} + +// MessageType represents the type of message +pub enum MessageType { + job + chat + mail +} + +// MessageFormatType represents the format of a message +pub enum MessageFormatType { + html + text + md +} + +pub fn (self Message) redis_key() string { + return 'message:${self.caller_id}:${self.id}' +} + +// queue_suffix returns the queue suffix for the message type +pub fn (mt MessageType) queue_suffix() string { + return match mt { + .job { 'job' } + .chat { 'chat' } + .mail { 'mail' } + } +} + +// MessageStatus represents the status of a message +pub enum MessageStatus { + dispatched + acknowledged + error + processed // e.g. can be something which comes back +} + +// str returns the string representation of MessageStatus +pub fn (ms MessageStatus) str() string { + return match ms { + .dispatched { 'dispatched' } + .acknowledged { 'acknowledged' } + .error { 'error' } + .processed { 'processed' } + } +} diff --git a/bin/coordinator/specs/model/runner.v b/bin/coordinator/specs/model/runner.v new file mode 100644 index 0000000..45cfe41 --- /dev/null +++ b/bin/coordinator/specs/model/runner.v @@ -0,0 +1,27 @@ +module model + +// a runner executes a job, this can be in VM, in a container or just some processes running somewhere +// the messages always come in over a topic +// stored in the context db at runner: (runner is hset) +@[heap] +pub struct Runner { +pub mut: + id u32 + pubkey string // from mycelium + address string // mycelium address + topic string // needs to be set by the runner but often runner e.g. runner20 + local bool // if local then goes on redis using the id + created_at u32 // epoch + updated_at u32 // epoch +} + +pub enum RunnerType { + v + python + osis + rust +} + +pub fn (self Runner) redis_key() string { + return 'runner:${self.id}' +} diff --git a/bin/coordinator/specs/model/runnerjob.v b/bin/coordinator/specs/model/runnerjob.v new file mode 100644 index 0000000..2ef789e --- /dev/null +++ b/bin/coordinator/specs/model/runnerjob.v @@ -0,0 +1,64 @@ +module model + +// Job represents a job, a job is only usable in the context of a runner (which is part of a hero) +// stored in the context db at job:: (job is hset) +@[heap] +pub struct RunnerJob { +pub mut: + id u32 // this job id is given by the actor who called for it + caller_id u32 // is the actor which called for this job + context_id u32 // each job is executed in a context + script string + script_type ScriptType + timeout u32 // in sec + retries u8 + env_vars map[string]string + result map[string]string + prerequisites []string + dependends []u32 + created_at u32 // epoch + updated_at u32 // epoch + status JobStatus +} + +// ScriptType represents the type of script +pub enum ScriptType { + osis + sal + v + python +} + +pub fn (self RunnerJob) redis_key() string { + return 'job:${self.caller_id}:${self.id}' +} + +// queue_suffix returns the queue suffix for the script type +pub fn (st ScriptType) queue_suffix() string { + return match st { + .osis { 'osis' } + .sal { 'sal' } + .v { 'v' } + .python { 'python' } + } +} + +// JobStatus represents the status of a job +pub enum JobStatus { + dispatched + waiting_for_prerequisites + started + error + finished +} + +// str returns the string representation of JobStatus +pub fn (js JobStatus) str() string { + return match js { + .dispatched { 'dispatched' } + .waiting_for_prerequisites { 'waiting_for_prerequisites' } + .started { 'started' } + .error { 
'error' } + .finished { 'finished' } + } +} diff --git a/bin/coordinator/specs/models.md b/bin/coordinator/specs/models.md new file mode 100644 index 0000000..4124ba1 --- /dev/null +++ b/bin/coordinator/specs/models.md @@ -0,0 +1,314 @@ +# Models Specification +*Freeflow Universe – mycojobs* + +This document gathers **all data‑models** that exist in the `lib/mycojobs/model/` package, together with a concise purpose description, field semantics, Redis storage layout and the role each model plays in the overall *decentralised workflow* architecture. + + +## Table of Contents +1. [Actor](#actor) +2. [Context](#context) +3. [Flow](#flow) +4. [Message](#message) +5. [Runner](#runner) +6. [RunnerJob](#runnerjob) +7. [Enums & Shared Types](#enums-shared-types) +8. [Key‑generation helpers](#key-generation-helpers) + +--- + +## 1️⃣ `Actor` – Identity & entry‑point + +| Field | Type | Description | +|------|------|-------------| +| `id` | `u32` | Sequential identifier **unique per tenant**. Used as part of the Redis key `actor:`. | +| `pubkey` | `string` | Public key (Mycelium‑compatible) that authenticates the actor when it sends/receives messages. | +| `address` | `[]Address` | One or more reachable addresses (normally Mycelium topics) that other participants can use to contact the actor. | +| `created_at` | `u32` | Unix‑epoch time when the record was created. | +| `updated_at` | `u32` | Unix‑epoch time of the last mutation. | + +### Purpose +* An **Actor** is the *human‑or‑service* that **requests work**, receives results and can be an administrator of a **Context**. +* It is the *security principal* – every operation in a context is authorised against the actor’s ID and its public key signature. + +### Redis representation + +| Key | Example | Storage type | Fields | +|-----|---------|--------------|--------| +| `actor:${id}` | `actor:12` | **hash** (`HSET`) | `id`, `pubkey`, `address` (list), `created_at`, `updated_at` | + +--- + +## 2️⃣ `Context` – Tenant & permission container + +| Field | Type | Description | +|------|------|-------------| +| `id` | `u32` | Identifier that also selects the underlying **Redis DB** for this tenant. | +| `admins` | `[]u32` | Actor IDs that have **full control** (create/delete any object, manage permissions). | +| `readers` | `[]u32` | Actor IDs that may **read** any object in the context but cannot modify. | +| `executors` | `[]u32` | Actor IDs allowed to **run** `RunnerJob`s and update their status. | +| `created_at` | `u32` | Unix‑epoch of creation. | +| `updated_at` | `u32` | Unix‑epoch of last modification. | + +### Purpose +* A **Context** isolates a *tenant* – each tenant gets its own Redis database and a dedicated filesystem area (for logs, temporary files, …). +* It stores **permission lists** that the system consults before any operation (e.g., creating a `Flow`, enqueuing a `RunnerJob`). + +### Redis representation + +| Key | Example | Storage type | Fields | +|-----|---------|--------------|--------| +| `context:${id}` | `context:7` | **hash** | `id`, `admins`, `readers`, `executors`, `created_at`, `updated_at` | + +--- + +## 3️⃣ `Flow` – High‑level workflow (DAG) + +| Field | Type | Description | +|------|------|-------------| +| `id` | `u32` | Flow identifier – *unique inside the creator’s actor space*. | +| `caller_id` | `u32` | Actor that **created** the flow (owner). | +| `context_id` | `u32` | Context in which the flow lives. 
|
+| `jobs` | `[]u32` | List of **RunnerJob** IDs that belong to this flow (the DAG edges are stored in each job's `dependends`). |
+| `env_vars` | `map[string]string` | Global environment variables injected into **every** job of the flow. |
+| `result` | `map[string]string` | Aggregated output produced by the flow (filled by the orchestrator when the flow finishes). |
+| `created_at` | `u32` | Creation timestamp. |
+| `updated_at` | `u32` | Last update timestamp. |
+| `status` | `FlowStatus` | Current lifecycle stage (`dispatched`, `started`, `error`, `finished`). |
+
+### Purpose
+* A **Flow** is the *public‑facing* representation of a **workflow**.
+* It groups many `RunnerJob`s, supplies common env‑vars, tracks overall status and collects the final result.
+* Only the *creator* (the `caller_id`) may mutate the flow definition.
+
+### Redis representation
+
+| Key | Example | Storage type | Fields |
+|-----|---------|--------------|--------|
+| `flow:${id}` | `flow:33` | **hash** | `id`, `caller_id`, `context_id`, `jobs`, `env_vars`, `result`, `created_at`, `updated_at`, `status` |
+
+### `FlowStatus` enum
+
+| Value | Meaning |
+|-------|---------|
+| `dispatched` | Flow has been stored but not yet started. |
+| `started` | At least one job is running. |
+| `error` | One or more jobs failed; flow aborted. |
+| `finished` | All jobs succeeded, `result` is final. |
+
+---
+
+## 4️⃣ `Message` – Transport unit (Mycelium)
+
+| Field | Type | Description |
+|------|------|-------------|
+| `id` | `u32` | Unique message id, given by the caller. |
+| `caller_id` | `u32` | Actor that sent this message. |
+| `context_id` | `u32` | Context this message is for. |
+| `message` | `string` | The message payload. |
+| `message_type` | `ScriptType` | *Kind* of the message – currently re‑used for job payloads (`osis`, `sal`, `v`, `python`). |
+| `message_format_type` | `MessageFormatType` | Formatting of `message` (`html`, `text`, `md`). |
+| `timeout` | `u32` | Seconds before the message is considered *lost* if not delivered. |
+| `timeout_ack` | `u32` | Seconds allowed for the receiver to acknowledge. |
+| `timeout_result` | `u32` | Seconds allowed for the receiver to send back a result. |
+| `job` | `[]Job` | Embedded **RunnerJob** objects (normally a single job). |
+| `logs` | `[]Log` | Optional streaming logs attached to the message. |
+| `created_at` | `u32` | Timestamp of creation. |
+| `updated_at` | `u32` | Timestamp of latest update. |
+| `status` | `MessageStatus` | Current lifecycle (`dispatched`, `acknowledged`, `error`, `processed`). |
+
+### Purpose
+* `Message` is the **payload carrier** that travels over **Mycelium** (the pub/sub system).
+* It can be a **job request**, a **chat line**, an **email**, or any generic data that needs to be routed between actors, runners, or services.
+* Every message is persisted as a Redis hash; the system also maintains two *generic* queues:
+
+  * `msg_out` – outbound messages waiting to be handed to Mycelium.
+  * `msg_in` – inbound messages that have already arrived and are awaiting local processing.
+
+### Redis representation
+
+| Key | Example | Storage type | Fields |
+|-----|---------|--------------|--------|
+| `message:${caller_id}:${id}` | `message:12:101` | **hash** | All fields above (`id`, `caller_id`, `context_id`, …, `status`). |
+
+### `MessageType` enum (legacy – not used in current code but documented)
+
+| Value | Meaning |
+|-------|---------|
+| `job` | Payload carries a `RunnerJob`. |
+| `chat` | Human‑to‑human communication. |
+| `mail` | Email‑like message. |
+
+### `MessageFormatType` enum
+
+| Value | Meaning |
+|-------|---------|
+| `html` | HTML formatted body. |
+| `text` | Plain‑text. |
+| `md` | Markdown. |
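+
+As a minimal sketch of the dispatch flow described above (key names follow this spec; the field values and the helper name are invented for illustration), the write side could look like this with the Rust `redis` crate:
+
+```rust
+use redis::Commands;
+
+fn dispatch_message(conn: &mut redis::Connection) -> redis::RedisResult<()> {
+    let key = "message:12:101"; // message:${caller_id}:${id}
+    // Persist the message as a hash in the context DB
+    let _: () = conn.hset_multiple(
+        key,
+        &[("id", "101"), ("caller_id", "12"), ("context_id", "7"), ("status", "dispatched")],
+    )?;
+    // Push the key onto the generic outbound queue; the transport side pops
+    // from msg_out and hands the payload to Mycelium for delivery
+    let _: () = conn.lpush("msg_out", key)?;
+    Ok(())
+}
+```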
+
+### `MessageStatus` enum
+
+| Value | Meaning |
+|-------|---------|
+| `dispatched` | Stored, not yet processed. |
+| `acknowledged` | Receiver has confirmed receipt. |
+| `error` | Delivery or processing failed. |
+| `processed` | Message handled (e.g., job result returned). |
+
+---
+
+## 5️⃣ `Runner` – Worker that executes jobs
+
+| Field | Type | Description |
+|------|------|-------------|
+| `id` | `u32` | Unique runner identifier. |
+| `pubkey` | `string` | Public key of the runner (used by Mycelium for auth). |
+| `address` | `string` | Mycelium address (e.g., `mycelium://…`). |
+| `topic` | `string` | Pub/Sub topic the runner subscribes to; defaults to `runner${id}`. |
+| `local` | `bool` | If `true`, the runner also consumes jobs directly from **Redis queues** (e.g., `queue:v`). |
+| `created_at` | `u32` | Creation timestamp. |
+| `updated_at` | `u32` | Last modification timestamp. |
+
+### Purpose
+* A **Runner** is the *execution engine* – it could be a VM, a container, or a process that knows how to run a specific script type (`v`, `python`, `osis`, `rust`).
+* It **subscribes** to a Mycelium topic to receive job‑related messages, and, when `local==true`, it also **polls** a Redis list named after the script‑type (`queue:<script_type>`).
+
+### Redis representation
+
+| Key | Example | Storage type |
+|-----|---------|--------------|
+| `runner:${id}` | `runner:20` | **hash** *(all fields above)* |
+
+### `RunnerType` enum
+
+| Value | Intended runtime |
+|-------|------------------|
+| `v` | V language VM |
+| `python` | CPython / PyPy |
+| `osis` | OSIS‑specific runtime |
+| `rust` | Native Rust binary |
+
+---
+
+## 6️⃣ `RunnerJob` – Executable unit
+
+| Field | Type | Description |
+|------|------|-------------|
+| `id` | `u32` | Job identifier **provided by the caller**. |
+| `caller_id` | `u32` | Actor that created the job. |
+| `context_id` | `u32` | Context in which the job will run. |
+| `script` | `string` | Source code / command to be executed. |
+| `script_type` | `ScriptType` | Language or runtime of the script (`osis`, `sal`, `v`, `python`). |
+| `timeout` | `u32` | Maximum execution time (seconds). |
+| `retries` | `u8` | Number of automatic retries on failure. |
+| `env_vars` | `map[string]string` | Job‑specific environment variables (merged with `Flow.env_vars`). |
+| `result` | `map[string]string` | Key‑value map that the job writes back upon completion. |
+| `prerequisites` | `[]string` | Human‑readable IDs of **external** prerequisites (e.g., files, other services). |
+| `dependends` | `[]u32` | IDs of **other RunnerJob** objects that must finish before this job can start. |
+| `created_at` | `u32` | Creation timestamp. |
+| `updated_at` | `u32` | Last update timestamp. |
+| `status` | `JobStatus` | Lifecycle status (`dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished`). |
+
+### Purpose
+* A **RunnerJob** is the *atomic piece of work* that a `Runner` executes.
+* It lives inside a **Context**, is queued according to its `script_type`, and moves through a well‑defined **state machine**.
+* The `dependends` field enables the *DAG* behaviour that the `Flow` model represents at a higher level; a minimal dependency check is sketched below.
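+
+A minimal sketch of that dependency check (assuming `finished` holds the ids of jobs already completed in the flow; the function name is illustrative):
+
+```rust
+use std::collections::HashSet;
+
+/// A job may leave `waiting_for_prerequisites` once every job it depends on has finished.
+fn can_start(dependends: &[u32], finished: &HashSet<u32>) -> bool {
+    dependends.iter().all(|id| finished.contains(id))
+}
+```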
+ +### Redis representation + +| Key | Example | Storage type | +|-----|---------|--------------| +| `job:${caller_id}:${id}` | `job:12:2001` | **hash** *(all fields above)* | + +### `ScriptType` enum + +| Value | Runtime | +|-------|---------| +| `osis` | OSIS interpreter | +| `sal` | SAL DSL (custom) | +| `v` | V language | +| `python`| CPython / PyPy | + +*The enum provides a **`queue_suffix()`** helper that maps a script type to the name of the Redis list used for local job dispatch (`queue:python`, `queue:v`, …).* + +### `JobStatus` enum + +| Value | Meaning | +|-------|---------| +| `dispatched` | Stored, waiting to be examined for prerequisites. | +| `waiting_for_prerequisites` | Has `dependends` that are not yet finished. | +| `started` | Currently executing on a runner. | +| `error` | Execution failed (or exceeded retries). | +| `finished` | Successfully completed, `result` populated. | + +--- + +## 7️⃣ Other Enums & Shared Types + +| Enum | Location | Values | Note | +|------|----------|--------|------| +| `MessageType` | `message.v` | `job`, `chat`, `mail` | Determines how a `Message` is interpreted. | +| `MessageFormatType` | `message.v` | `html`, `text`, `md` | UI‑layer rendering hint. | +| `MessageStatus` | `message.v` | `dispatched`, `acknowledged`, `error`, `processed` | Life‑cycle of a `Message`. | +| `FlowStatus` | `flow.v` | `dispatched`, `started`, `error`, `finished` | High‑level flow progress. | +| `RunnerType` | `runner.v` | `v`, `python`, `osis`, `rust` | Not currently stored; used by the orchestration layer to pick a runner implementation. | +| `ScriptType` | `runnerjob.v` | `osis`, `sal`, `v`, `python` | Determines queue suffix & runtime. | +| `JobStatus` | `runnerjob.v` | `dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished` | Per‑job state machine. | + +--- + +## 8️⃣ Key‑generation helpers (methods) + +| Model | Method | Returns | Example | +|-------|--------|---------|---------| +| `Actor` | `redis_key()` | `"actor:${self.id}"` | `actor:12` | +| `Context` | `redis_key()` | `"context:${self.id}"` | `context:7` | +| `Flow` | `redis_key()` | `"flow:${self.id}"` | `flow:33` | +| `Message` | `redis_key()` | `"message:${self.caller_id}:${self.id}"` | `message:12:101` | +| `Runner` | `redis_key()` | `"runner:${self.id}"` | `runner:20` | +| `RunnerJob` | `redis_key()` | `"job:${self.caller_id}:${self.id}"` | `job:12:2001` | +| `MessageType` | `queue_suffix()` | `"job"` / `"chat"` / `"mail"` | `MessageType.job.queue_suffix() → "job"` | +| `ScriptType` | `queue_suffix()` | `"osis"` / `"sal"` / `"v"` / `"python"` | `ScriptType.python.queue_suffix() → "python"` | + +These helpers guarantee **canonical key naming** throughout the code base and simplify Redis interactions. 
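+
+For example, the same canonical scheme expressed in Rust (the V helpers above are the reference; these free functions are only an illustrative sketch):
+
+```rust
+fn actor_key(id: u32) -> String {
+    format!("actor:{id}")
+}
+
+fn job_key(caller_id: u32, id: u32) -> String {
+    format!("job:{caller_id}:{id}")
+}
+
+fn main() {
+    assert_eq!(actor_key(12), "actor:12"); // matches the table above
+    assert_eq!(job_key(12, 2001), "job:12:2001");
+}
+```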
+ +--- + +## 📌 Summary Diagram (quick reference) + +```mermaid +%%{init: {"theme":"dark"}}%% +graph TD + %% Actors and what they can create + A[Actor] -->|creates| Ctx[Context] + A -->|creates| Fl[Flow] + A -->|creates| Msg[Message] + A -->|creates| Rnr[Runner] + A -->|creates| Job[RunnerJob] + + %% All objects live inside one Redis DB that belongs to a Context + subgraph "Redis DB (per Context)" + Ctx + A + Fl + Msg + Rnr + Job + end + + %% Messaging queues (global, outside the Context DB) + Msg -->|pushes key onto| OutQ[msg_out] + OutQ -->|transport via Mycelium| InQ[msg_in] + InQ -->|pulled by| Rnr + + %% Local runner queues (only when runner.local == true) + Rnr -->|BRPOP from| QueueV["queue:v"] + Rnr -->|BRPOP from| QueuePy["queue:python"] + Rnr -->|BRPOP from| QueueOSIS["queue:osis"] + +``` + +## context based + +* Inside a Context, an **Actor** can create a **Flow** that references many **RunnerJob** IDs (the DAG). +* To *initiate* execution, the Actor packages a **RunnerJob** (or a full Flow) inside a **Message**, pushes it onto `msg_out`, and the system routes it via **Mycelium** to the target Context. +* The remote **Runner** receives the Message, materialises the **RunnerJob**, queues it on a script‑type list, executes it, writes back `result` and status, and optionally sends a *result Message* back to the originator. + +All state is persisted as **Redis hashes**, guaranteeing durability and enabling *idempotent* retries. The uniform naming conventions (`actor:`, `job::`, …) make it trivial to locate any object given its identifiers. + diff --git a/bin/coordinator/specs/openrpc.json b/bin/coordinator/specs/openrpc.json new file mode 100644 index 0000000..67425bc --- /dev/null +++ b/bin/coordinator/specs/openrpc.json @@ -0,0 +1,1399 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "HeroCoordinator JSON-RPC API", + "version": "0.1.0", + "description": "JSON-RPC API over HTTP and WebSocket for creating and loading domain models. Host and ports are configurable via CLI flags." 
+ }, + "servers": [ + { + "name": "http", + "url": "http://127.0.0.1:9652", + "summary": "Default HTTP server (configurable via --api-http-ip/--api-http-port)" + }, + { + "name": "ws", + "url": "ws://127.0.0.1:9653", + "summary": "Default WS server (configurable via --api-ws-ip/--api-ws-port)" + } + ], + "methods": [ + { + "name": "actor.create", + "summary": "Create/Upsert Actor", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/ActorCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Actor" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "actor.load", + "summary": "Load an Actor by id", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/ActorLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Actor" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "context.create", + "summary": "Create/Upsert Context (stored in its own DB index)", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/ContextCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Context" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "context.load", + "summary": "Load a Context by id", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/ContextLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Context" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "runner.create", + "summary": "Create/Upsert Runner in a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/RunnerCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Runner" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "runner.load", + "summary": "Load Runner by id from a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/RunnerLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Runner" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "flow.create", + "summary": "Create/Upsert Flow in a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/FlowCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Flow" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "flow.load", + "summary": "Load Flow by id from a context", + "params": [ + { + 
"name": "params", + "schema": { + "$ref": "#/components/schemas/FlowLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Flow" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "flow.dag", + "summary": "Compute and return the execution DAG for a Flow", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/FlowLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/FlowDag" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + }, + { + "$ref": "#/components/errors/DagMissingDependency" + }, + { + "$ref": "#/components/errors/DagCycleDetected" + } + ] + }, + { + "name": "job.create", + "summary": "Create/Upsert Job in a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/JobCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Job" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "job.load", + "summary": "Load Job by ids from a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/JobLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Job" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "message.create", + "summary": "Create/Upsert Message in a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/MessageCreateParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Message" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + }, + { + "name": "message.load", + "summary": "Load Message by ids from a context", + "params": [ + { + "name": "params", + "schema": { + "$ref": "#/components/schemas/MessageLoadParams" + } + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/Message" + } + }, + "errors": [ + { + "$ref": "#/components/errors/InvalidParams" + }, + { + "$ref": "#/components/errors/NotFound" + }, + { + "$ref": "#/components/errors/StorageError" + } + ] + } + ], + "components": { + "schemas": { + "IpAddr": { + "type": "string", + "description": "IPv4 or IPv6 textual address" + }, + "ScriptType": { + "type": "string", + "enum": [ + "Osis", + "Sal", + "V", + "Python" + ] + }, + "FlowStatus": { + "type": "string", + "enum": [ + "Dispatched", + "Started", + "Error", + "Finished" + ] + }, + "JobStatus": { + "type": "string", + "enum": [ + "Dispatched", + "WaitingForPrerequisites", + "Started", + "Error", + "Finished" + ] + }, + "MessageFormatType": { + "type": "string", + "enum": [ + "Html", + "Text", + "Md" + ] + }, + "MessageStatus": { + "type": "string", + "enum": [ + "Dispatched", + "Acknowledged", + "Error", + "Processed" + ] + }, + "TransportStatus": { + "type": "string", + "enum": [ + "Queued", + "Sent", + "Delivered", + "Read", + "Failed" + ] 
+ }, + "MessageType": { + "type": "string", + "enum": [ + "Job", + "Chat", + "Mail" + ] + }, + "Actor": { + "type": "object", + "required": [ + "id", + "pubkey", + "address", + "created_at", + "updated_at" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "pubkey": { + "type": "string" + }, + "address": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpAddr" + } + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + } + } + }, + "Context": { + "type": "object", + "required": [ + "id", + "admins", + "readers", + "executors", + "created_at", + "updated_at" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "admins": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "readers": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "executors": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + } + } + }, + "Runner": { + "type": "object", + "required": [ + "id", + "pubkey", + "address", + "topic", + "local", + "created_at", + "updated_at" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "pubkey": { + "type": "string" + }, + "address": { + "$ref": "#/components/schemas/IpAddr" + }, + "topic": { + "type": "string" + }, + "local": { + "type": "boolean" + }, + "secret": { + "type": "string" + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + } + } + }, + "Flow": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "jobs", + "env_vars", + "result", + "created_at", + "updated_at", + "status" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "jobs": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "env_vars": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "result": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + }, + "status": { + "$ref": "#/components/schemas/FlowStatus" + } + } + }, + "Job": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "script", + "script_type", + "timeout", + "retries", + "env_vars", + "result", + "prerequisites", + "depends", + "created_at", + "updated_at", + "status" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "script": { + "type": "string" + }, + "script_type": { + "$ref": "#/components/schemas/ScriptType" + }, + "timeout": { + "type": "integer", + "format": "uint32" + }, + "retries": { + "type": "integer", + "minimum": 0, + "maximum": 255 + }, + "env_vars": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "result": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "prerequisites": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "depends": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + } + } + }, + "Message": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "message", + "message_type", + "message_format_type", + "timeout", + "timeout_ack", + "timeout_result", + "job", + "logs", + "created_at", + "updated_at", + "status" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "message": { + "type": "string" + }, + "message_type": { + "$ref": "#/components/schemas/ScriptType" + }, + "message_format_type": { + "$ref": "#/components/schemas/MessageFormatType" + }, + "timeout": { + "type": "integer", + "format": "uint32" + }, + "timeout_ack": { + "type": "integer", + "format": "uint32" + }, + "timeout_result": { + "type": "integer", + "format": "uint32" + }, + "job": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Job" + } + }, + "logs": { + "type": "array", + "items": { + "type": "string" + } + }, + "created_at": { + "type": "integer", + "format": "int64" + }, + "updated_at": { + "type": "integer", + "format": "int64" + }, + "status": { + "$ref": "#/components/schemas/MessageStatus" + }, + "transport_id": { + "type": "string" + }, + "transport_status": { + "$ref": "#/components/schemas/TransportStatus" + } + } + }, + "JobSummary": { + "type": "object", + "required": [ + "id", + "depends", + "prerequisites", + "script_type" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "depends": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "prerequisites": { + "type": "array", + "items": { + "type": "string" + } + }, + "script_type": { + "$ref": "#/components/schemas/ScriptType" + } + } + }, + "EdgeTuple": { + "type": "array", + "items": [ + { + "type": "integer", + "format": "uint32" + }, + { + "type": "integer", + "format": "uint32" + } + ], + "minItems": 2, + "maxItems": 2, + "description": "Tuple [from, to] representing a directed edge" + }, + "FlowDag": { + "type": "object", + "required": [ + "flow_id", + "caller_id", + "context_id", + "nodes", + "edges", + "reverse_edges", + "roots", + "leaves", + "levels" + ], + "properties": { + "flow_id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/JobSummary" + }, + "description": "Map keyed by job id (serialized as string in JSON)" + }, + "edges": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EdgeTuple" + } + }, + "reverse_edges": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EdgeTuple" + } + }, + "roots": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "leaves": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "levels": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "description": "Topological execution layers (parallelizable batches)" + } + } + }, + "ActorCreate": 
{ + "type": "object", + "required": [ + "id", + "pubkey", + "address" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "pubkey": { + "type": "string" + }, + "address": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpAddr" + } + } + } + }, + "ContextCreate": { + "type": "object", + "required": [ + "id", + "admins", + "readers", + "executors" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "admins": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "readers": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "executors": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + } + } + }, + "RunnerCreate": { + "type": "object", + "required": [ + "id", + "pubkey", + "address", + "topic", + "local" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "pubkey": { + "type": "string" + }, + "address": { + "$ref": "#/components/schemas/IpAddr" + }, + "topic": { + "type": "string" + }, + "local": { + "type": "boolean" + }, + "secret": { + "type": "string" + } + } + }, + "FlowCreate": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "jobs", + "env_vars" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "jobs": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + }, + "env_vars": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "result": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "JobCreate": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "script", + "script_type", + "timeout", + "retries", + "env_vars", + "prerequisites", + "depends" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "script": { + "type": "string" + }, + "script_type": { + "$ref": "#/components/schemas/ScriptType" + }, + "timeout": { + "type": "integer", + "format": "uint32" + }, + "retries": { + "type": "integer", + "minimum": 0, + "maximum": 255 + }, + "env_vars": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "result": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "prerequisites": { + "type": "array", + "items": { + "type": "string" + } + }, + "depends": { + "type": "array", + "items": { + "type": "integer", + "format": "uint32" + } + } + } + }, + "MessageCreate": { + "type": "object", + "required": [ + "id", + "caller_id", + "context_id", + "message", + "message_type", + "message_format_type", + "timeout", + "timeout_ack", + "timeout_result", + "job" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "context_id": { + "type": "integer", + "format": "uint32" + }, + "message": { + "type": "string" + }, + "message_type": { + "$ref": "#/components/schemas/ScriptType" + }, + "message_format_type": { + "$ref": "#/components/schemas/MessageFormatType" + }, + "timeout": { + "type": "integer", + "format": "uint32" + }, + "timeout_ack": { + "type": "integer", + "format": "uint32" + }, + 
"timeout_result": { + "type": "integer", + "format": "uint32" + }, + "job": { + "type": "array", + "items": { + "$ref": "#/components/schemas/JobCreate" + } + }, + "logs": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ActorCreateParams": { + "type": "object", + "required": [ + "actor" + ], + "properties": { + "actor": { + "$ref": "#/components/schemas/ActorCreate" + } + } + }, + "ActorLoadParams": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + } + } + }, + "ContextCreateParams": { + "type": "object", + "required": [ + "context" + ], + "properties": { + "context": { + "$ref": "#/components/schemas/ContextCreate" + } + } + }, + "ContextLoadParams": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "integer", + "format": "uint32" + } + } + }, + "RunnerCreateParams": { + "type": "object", + "required": [ + "context_id", + "runner" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "runner": { + "$ref": "#/components/schemas/RunnerCreate" + } + } + }, + "RunnerLoadParams": { + "type": "object", + "required": [ + "context_id", + "id" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "id": { + "type": "integer", + "format": "uint32" + } + } + }, + "FlowCreateParams": { + "type": "object", + "required": [ + "context_id", + "flow" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "flow": { + "$ref": "#/components/schemas/FlowCreate" + } + } + }, + "FlowLoadParams": { + "type": "object", + "required": [ + "context_id", + "id" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "id": { + "type": "integer", + "format": "uint32" + } + } + }, + "JobCreateParams": { + "type": "object", + "required": [ + "context_id", + "job" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "job": { + "$ref": "#/components/schemas/JobCreate" + } + } + }, + "JobLoadParams": { + "type": "object", + "required": [ + "context_id", + "caller_id", + "id" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "id": { + "type": "integer", + "format": "uint32" + } + } + }, + "MessageCreateParams": { + "type": "object", + "required": [ + "context_id", + "message" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "message": { + "$ref": "#/components/schemas/MessageCreate" + } + } + }, + "MessageLoadParams": { + "type": "object", + "required": [ + "context_id", + "caller_id", + "id" + ], + "properties": { + "context_id": { + "type": "integer", + "format": "uint32" + }, + "caller_id": { + "type": "integer", + "format": "uint32" + }, + "id": { + "type": "integer", + "format": "uint32" + } + } + } + }, + "errors": { + "InvalidParams": { + "code": -32602, + "message": "Invalid params" + }, + "NotFound": { + "code": -32001, + "message": "Not Found" + }, + "StorageError": { + "code": -32010, + "message": "Storage Error" + }, + "DagMissingDependency": { + "code": -32020, + "message": "DAG Missing Dependency" + }, + "DagCycleDetected": { + "code": -32021, + "message": "DAG Cycle Detected" + } + } + } +} diff --git a/bin/coordinator/specs/specs.md b/bin/coordinator/specs/specs.md new file mode 100644 index 0000000..9de6942 --- /dev/null +++ b/bin/coordinator/specs/specs.md @@ 
-0,0 +1,263 @@
+
+## Objects Used
+
+| Component | What it **stores** | Where it lives (Redis key) | Main responsibilities |
+|-----------|--------------------|----------------------------|-----------------------|
+| **Actor** | Public key, reachable addresses, timestamps | `actor:<id>` (hash) | An identity that can request work, receive results and act as an administrator of a *Context*. |
+| **Context** | Permission lists (`admins`, `readers`, `executors`), timestamps | `context:<id>` (hash) | An isolated “tenant” – a separate Redis DB and filesystem area. All objects (flows, messages, jobs, runners) belonging to a given workflow are stored under this context. The permission lists control who may read, execute or administer the context. |
+| **Flow** | DAG of job IDs, env‑vars, result map, status, timestamps | `flow:<id>` (hash) | A high‑level workflow created by a single **Actor**. It groups many **RunnerJob** objects, records their execution order, supplies common environment variables and aggregates the final result. |
+| **Message** | Payload, type (`job\|chat\|mail`), format (`html\|text\|md`), time‑outs, embedded **Job** objects, log stream, status, timestamps | `message:<caller_id>:<id>` (hash) | The transport unit that travels over **Mycelium** (the pub/sub/message bus). A message can contain a **RunnerJob** (or a list of jobs) and is queued in two generic Redis lists: `msg_out` (to be sent) and `msg_in` (already received). |
+| **Runner** | Public key, Mycelium address, topic name, type (`v\|python\|osis\|rust`), local flag, timestamps | `runner:<id>` (hash) | The *worker* that actually executes **RunnerJob** scripts. It subscribes to a Mycelium topic (normally `runner`). If `local == true` the runner also consumes jobs directly from a Redis queue that is named after the script‑type suffix (`v`, `python`, …). |
+| **RunnerJob** | Script source, type (`osis\|sal\|v\|python`), env‑vars, prerequisites, dependencies, status, timestamps, result map | `job:<caller_id>:<id>` (hash) | A single executable unit. It lives inside a **Context**, belongs to a **Runner**, and is queued according to its `script_type` (e.g. `queue:python`). Its status moves through the lifecycle `dispatched → waiting_for_prerequisites → started → finished\|error`. |
+
+> **Key idea:** All objects are persisted as *hashes*. Context‑scoped objects (**Context**, **Flow**, **Message**, **Runner**, **RunnerJob**) live in a **Redis** database dedicated to that context. **Actors are global** and are stored in Redis DB 0 under `actor:<id>`. The system is completely **decentralised** – each actor owns its own context and can spin up as many runners as needed.
+> Communication between actors, runners and the rest of the system happens over **Mycelium**, a message‑bus that uses Redis lists as queues.
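+
+As a quick, concrete reading of the table above, the key construction can be sketched in a few lines of Rust (helper names are illustrative, not part of the codebase):
+
+```rust
+/// Illustrative helpers mirroring the key catalog above.
+fn actor_key(id: u32) -> String { format!("actor:{id}") }
+fn context_key(id: u32) -> String { format!("context:{id}") }
+fn flow_key(id: u32) -> String { format!("flow:{id}") }
+fn message_key(caller_id: u32, id: u32) -> String { format!("message:{caller_id}:{id}") }
+fn job_key(caller_id: u32, id: u32) -> String { format!("job:{caller_id}:{id}") }
+fn script_queue(suffix: &str) -> String { format!("queue:{suffix}") }
+
+fn main() {
+    assert_eq!(actor_key(12), "actor:12");
+    assert_eq!(context_key(7), "context:7");
+    assert_eq!(flow_key(33), "flow:33");
+    assert_eq!(message_key(12, 101), "message:12:101");
+    assert_eq!(job_key(12, 2001), "job:12:2001");
+    assert_eq!(script_queue("python"), "queue:python");
+}
+```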
+
+---
+
+## 2️⃣ Interaction diagram (who talks to who)
+
+### 2.1 Sequence diagram – “Submit a flow and run it”
+
+```mermaid
+%%{init: {"theme":"dark"}}%%
+sequenceDiagram
+    participant A as Actor
+    participant L as Local‑Context (Redis)
+    participant M as Mycelium (msg_out / msg_in)
+    participant R as Remote‑Context (Redis)
+    participant W as Runner (worker)
+
+    %% 1. Actor creates everything locally
+    A->>L: create Flow + RunnerJob (J)
+    A->>L: LPUSH msg_out Message{type=job, payload=J, target=Remote}
+
+    %% 2. Mycelium transports the message
+    M->>R: LPUSH msg_in (Message key)
+
+    %% 3. Remote context materialises the job
+    R->>R: HSET Message hash
+    R->>R: HSET RunnerJob (J') // copy of payload
+    R->>R: LPUSH queue:v (job key)
+
+    %% 4. Runner consumes and executes
+    W->>R: BRPOP queue:v (job key)
+    W->>R: HSET job status = started
+    W->>W: execute script
+    W->>R: HSET job result + status = finished
+
+    %% 5. Result is sent back
+    W->>M: LPUSH msg_out Message{type=result, payload=result, target=Local}
+    M->>L: LPUSH msg_in (result Message key)
+
+    %% 6. Actor receives the result
+    A->>L: RPOP msg_in → read result
+```
+
+### 2.2 Component diagram – “Static view of objects & links”
+
+```mermaid
+%%{init: {"theme":"dark"}}%%
+graph LR
+    subgraph Redis["Redis (per Context)"]
+        A[Actor] -->|stores| Ctx[Context]
+        Ctx -->|stores| Fl[Flow]
+        Ctx -->|stores| Msg[Message]
+        Ctx -->|stores| Rnr[Runner]
+        Ctx -->|stores| Job[RunnerJob]
+    end
+
+    subgraph Mycelium["Mycelium (Pub/Sub)"]
+        MsgOut["queue:msg_out"] -->|outgoing| Mcel[Mycelium Bus]
+        Mcel -->|incoming| MsgIn["queue:msg_in"]
+        RnrTopic["topic:runnerX"] -->|subscribed by| Rnr
+        queueV["queue:v"] -->|local jobs| Rnr
+        queuePython["queue:python"] -->|local jobs| Rnr
+    end
+
+    A -->|creates / reads| Fl
+    A -->|creates / reads| Msg
+    A -->|creates / reads| Rnr
+    A -->|creates / reads| Job
+    Fl -->|references| Job
+    Msg -->|may embed| Job
+    Rnr -->|executes| Job
+    Job -->|updates| Fl
+    Msg -->|carries result back to| A
+```
+
+### 2.3 RunnerJob‑status life‑cycle (state diagram)
+
+```mermaid
+%%{init: {"theme":"dark"}}%%
+stateDiagram-v2
+    [*] --> dispatched
+    dispatched --> waiting_for_prerequisites : has prereqs
+    waiting_for_prerequisites --> started : prereqs met
+    dispatched --> started : no prereqs
+    started --> finished : success
+    started --> error : failure
+    waiting_for_prerequisites --> error : timeout / impossible
+    error --> [*]
+    finished --> [*]
+```
+
+---
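+
+The life‑cycle in 2.3 can be pinned down as a tiny transition check – a sketch only; the enum mirrors the statuses above, not the coordinator's actual types:
+
+```rust
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+enum JobStatus { Dispatched, WaitingForPrerequisites, Started, Finished, Error }
+
+/// True when the edge exists in the state diagram above.
+fn can_transition(from: JobStatus, to: JobStatus) -> bool {
+    use JobStatus::*;
+    matches!(
+        (from, to),
+        (Dispatched, WaitingForPrerequisites)
+            | (Dispatched, Started)
+            | (WaitingForPrerequisites, Started)
+            | (WaitingForPrerequisites, Error)
+            | (Started, Finished)
+            | (Started, Error)
+    )
+}
+
+fn main() {
+    assert!(can_transition(JobStatus::Dispatched, JobStatus::Started));
+    assert!(!can_transition(JobStatus::Finished, JobStatus::Started)); // terminal state
+}
+```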
+
+## 3️⃣ Redis objects – concrete key & data layout
+
+All objects are stored as **hashes** (`HSET`). Below is a concise catalog that can be copied into a design doc.
+
+| Key pattern | Example | Fields (type) | Comments |
+|-------------|---------|---------------|----------|
+| `actor:${id}` | `actor:12` | `id` u32, `pubkey` str, `address` list\<IpAddr\>, `created_at` u32, `updated_at` u32 | One hash per actor. |
+| `context:${id}` | `context:7` | `id` u32, `admins` list\<u32\>, `readers` list\<u32\>, `executors` list\<u32\>, `created_at` u32, `updated_at` u32 | Holds permission lists for a tenant. |
+| `flow:${id}` | `flow:33` | `id` u32, `caller_id` u32, `context_id` u32, `jobs` list\<u32\>, `env_vars` map\<str,str\>, `result` map\<str,str\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|started\|error\|finished`) | |
+| `message:${caller_id}:${id}` | `message:12:101` | `id` u32, `caller_id` u32, `context_id` u32, `message` str, `message_type` str (`job\|chat\|mail`), `message_format_type` str (`html\|text\|md`), `timeout` u32, `timeout_ack` u32, `timeout_result` u32, `job` list\<Job\> (serialized), `logs` list\<str\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|acknowledged\|error\|processed`) | |
+| `runner:${id}` | `runner:20` | `id` u32, `pubkey` str, `address` str, `topic` str, `local` bool, `created_at` u32, `updated_at` u32 | |
+| `job:${caller_id}:${id}` | `job:12:2001` | `id` u32, `caller_id` u32, `context_id` u32, `script` str, `script_type` str (`osis\|sal\|v\|python`), `timeout` u32, `retries` u8, `env_vars` map\<str,str\>, `result` map\<str,str\>, `prerequisites` list\<str\>, `depends` list\<u32\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|waiting_for_prerequisites\|started\|error\|finished`) | |
+
+#### Queue objects (lists)
+
+| Queue name | Purpose |
+|------------|---------|
+| `msg_out` | **Outbound** generic queue – every `Message` that an actor wants to send is pushed here. |
+| `msg_in` | **Inbound** generic queue – every message received from Mycelium is placed here for the local consumer to process. |
+| `queue:${suffix}` (e.g. `queue:v`, `queue:python`) | Local job queues used by a **Runner** when `local == true`. The suffix comes from `ScriptType.queue_suffix()`. |
+
+---
+
+## 4️⃣ System specification (as a concise “specs” section)
+
+### 4.1 Naming conventions
+* All Redis **hashes** are prefixed with the object name (`actor:`, `context:`, …).
+* All **queues** are simple Redis lists (`LPUSH` / `RPOP`).
+* **Message** keys embed both the *caller* and a locally unique *message id* – this guarantees global uniqueness across contexts.
+
+### 4.2 Permissions & security
+* Only IDs present in `Context.admins` may **create** or **delete** any object inside that context.
+* `Context.readers` can **GET** any hash but not modify it.
+* `Context.executors` are allowed to **update** `RunnerJob.status`, `result` and to **pop** from local job queues.
+* Every `Actor` must present a `pubkey` that can be verified by the receiving side (Mycelium uses asymmetric crypto).
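+
+A permission gate that follows these rules could look like the sketch below (admins are also granted read/execute here; the real coordinator may scope this differently):
+
+```rust
+struct Context {
+    admins: Vec<u32>,
+    readers: Vec<u32>,
+    executors: Vec<u32>,
+}
+
+enum Action { Create, Read, Execute }
+
+/// Illustrative check implementing the rules of 4.2.
+fn is_allowed(ctx: &Context, actor_id: u32, action: Action) -> bool {
+    let admin = ctx.admins.contains(&actor_id);
+    match action {
+        Action::Create => admin,
+        Action::Read => admin || ctx.readers.contains(&actor_id),
+        Action::Execute => admin || ctx.executors.contains(&actor_id),
+    }
+}
+
+fn main() {
+    let ctx = Context { admins: vec![1], readers: vec![2], executors: vec![3] };
+    assert!(is_allowed(&ctx, 1, Action::Create));
+    assert!(is_allowed(&ctx, 2, Action::Read));
+    assert!(!is_allowed(&ctx, 2, Action::Execute));
+}
+```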
+
+### 4.3 Message flow (publish / consume)
+
+The flow below reflects the real runtime components:
+
+* **Supervisor daemon** – runs on the node that owns the **Flow** (the *actor’s* side).
+  It is the only process that ever **RPOP**s from the global `msg_out` queue, adds the proper routing information and hands the message to **Mycelium**.
+
+* **Mycelium** – the pure pub/sub message‑bus. It never touches Redis directly; it only receives a *payload key* from the Supervisor daemon and delivers that key to the remote tenant’s `msg_in` list.
+
+* **Remote‑side runner / service** – consumes from its own `msg_in`, materialises the job and executes it.
+
+The table uses the exact component names and notes the permission check that the Supervisor daemon performs before it releases a message; a publish‑side sketch follows the table.
+
+| # | Action (what the system does) | Component that performs it | Redis interaction (exact commands) |
+|---|-------------------------------|----------------------------|------------------------------------|
+| **1️⃣ Publish** | Actor creates a `Message` hash and **LPUSH**es its key onto the *outbound* queue. | **Actor** (client code) | `HSET message:12:101 …`<br>`LPUSH msg_out message:12:101` |
+| **2️⃣ Coordinate & route** | The **Supervisor daemon** (running at source) **RPOP**s the key, checks the actor’s permissions, adds the *target‑context* and *topic* fields, then forwards the key to Mycelium. | **Supervisor daemon** (per‑actor) | `RPOP msg_out` → (in‑process) → `LPUSH msg_out_coordinator <key>` (internal buffer) |
+| **3️⃣ Transport** | Mycelium receives the key, looks at `Message.message_type` (or the explicit `topic`) and pushes the key onto the *inbound* queue of the **remote** tenant. | **Mycelium bus** (network layer) | `LPUSH msg_in:<context> <key>` |
+| **4️⃣ Consume** | The **Remote side** (runner or service) **RPOP**s from its `msg_in`, loads the full hash, verifies the actor’s signature and decides what to do based on `message_type`. | **Remote consumer** (runner / service) | `RPOP msg_in:<context>` → `HGETALL message:<caller_id>:<id>` |
+| **5️⃣ Job materialisation** | If `message_type == "job"` the consumer creates a **RunnerJob** entry inside the **remote** context, adds the job **key** to the proper *script‑type* queue (`queue:v`, `queue:python`, …). | **Remote consumer** | `HSET job:<caller_id>:<id> …`<br>`LPUSH queue:<suffix> job:<caller_id>:<id>` |
+| **6️⃣ Runner execution loop** | A **Runner** attached to that remote context **BRPOP**s from its script‑type queue, sets `status = started`, runs the script, writes `result` and final `status`. | **Runner** | `BRPOP queue:<suffix>` → `HSET job:<…> status started` → … → `HSET job:<…> result … status finished` |
+| **7️⃣ Result notification** | The runner builds a new `Message` (type `chat`, `result`, …) and pushes it onto **msg_out** again. The **Supervisor daemon** on the *originating* side will later pick it up and route it back to the original actor. | **Runner** → **Supervisor (remote side)** → **Mycelium** → **Supervisor (origin side)** → **Actor** | `HSET message:<caller_id>:<id> …`<br>`LPUSH msg_out message:<caller_id>:<id>` (steps 2‑3 repeat in reverse direction) |
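+
+Step 1️⃣ from the table above, as an actor-side client might issue it with the `redis` crate (field set trimmed to the essentials; a sketch, not the coordinator's code):
+
+```rust
+use redis::Commands;
+
+fn main() -> redis::RedisResult<()> {
+    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
+    let mut con = client.get_connection()?;
+
+    // 1️⃣ Publish: create the Message hash, then enqueue its key on msg_out.
+    let key = "message:12:101";
+    let _: () = con.hset_multiple(
+        key,
+        &[
+            ("id", "101"),
+            ("caller_id", "12"),
+            ("message_type", "job"),
+            ("status", "dispatched"),
+        ],
+    )?;
+    let _: () = con.lpush("msg_out", key)?;
+    Ok(())
+}
+```
+
+From here the Supervisor daemon takes over with `RPOP msg_out`, exactly as in step 2️⃣.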
+
+---
+
+## Tiny end‑to‑end sequence (still simple enough to render)
+
+```mermaid
+%%{init: {"theme":"dark"}}%%
+sequenceDiagram
+    participant A as Actor
+    participant L as Local‑Redis (Flow ctx)
+    participant C as Supervisor daemon (local)
+    participant M as Mycelium bus
+    participant R as Remote‑Redis (target ctx)
+    participant W as Runner (remote)
+
+    %% 1️⃣ publish
+    A->>L: HSET message:12:101 …
+    A->>L: LPUSH msg_out message:12:101
+
+    %% 2️⃣ coordinate
+    C->>L: RPOP msg_out
+    C->>C: check permissions / add routing info
+    C->>M: push key to Mycelium (msg_out_coordinator)
+
+    %% 3️⃣ transport
+    M->>R: LPUSH msg_in message:12:101
+
+    %% 4️⃣ consume
+    R->>W: RPOP msg_in
+    R->>R: HGETALL message:12:101
+    R->>R: verify signature
+    alt message_type == job
+        R->>R: HSET job:12:2001 …
+        R->>R: LPUSH queue:v job:12:2001
+    end
+
+    %% 5️⃣ runner loop
+    W->>R: BRPOP queue:v (job:12:2001)
+    W->>R: HSET job:12:2001 status started
+    W->>W: execute script
+    W->>R: HSET job:12:2001 result … status finished
+
+    %% 6️⃣ result back
+    W->>R: HSET message:12:900 result …
+    W->>R: LPUSH msg_out message:12:900
+    C->>M: (coordinator on remote side) routes back
+    M->>L: LPUSH msg_in message:12:900
+    A->>L: RPOP msg_in → read result
+```
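+
+The runner side of this sequence (step 5️⃣) reduces to a small blocking loop – sketched here with the `redis` crate; the real runner also handles retries, prerequisites and result maps:
+
+```rust
+use redis::Commands;
+
+fn main() -> redis::RedisResult<()> {
+    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
+    let mut con = client.get_connection()?;
+
+    loop {
+        // Block until a job key arrives on this runner's script-type queue.
+        let (_queue, job_key): (String, String) =
+            redis::cmd("BRPOP").arg("queue:v").arg(0).query(&mut con)?;
+
+        let _: () = con.hset(&job_key, "status", "started")?;
+        // ... execute the script referenced by the job hash ...
+        let _: () = con.hset(&job_key, "status", "finished")?;
+    }
+}
+```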
+
+
+## 5️⃣ What the **system** is trying to achieve
+
+| Goal | How it is realized |
+|------|--------------------|
+| **Decentralised execution** | Every *actor* owns a **Context**; any number of **Runners** can be attached to that context, possibly on different machines, and they all talk over the same Mycelium/Redis backend. |
+| **Fine‑grained permissions** | `Context.admins/readers/executors` enforce who can create, view or run jobs. |
+| **Loose coupling via messages** | All actions (job submission, result propagation, chat, mail …) use the generic `Message` object; the same transport pipeline handles all of them. |
+| **Workflow orchestration** | The **Flow** object models a DAG of jobs, tracks collective status and aggregates results, without needing a central scheduler. |
+| **Pluggable runtimes** | `ScriptType` and `RunnerType` let a runner choose the proper execution environment (V, Python, OSIS, Rust, …) – adding a new language only means adding a new `ScriptType` and a corresponding worker (see the sketch after this table). |
+| **Observability** | `Log` arrays attached to a `Message` and the timestamps on every hash give a complete audit trail. |
+| **Resilience** | Jobs are idempotent hash entries; queues are persisted in Redis, and status changes are atomic (`HSET`). Retries and time‑outs push the system toward eventual consistency. |
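+
+The “pluggable runtimes” row boils down to one small mapping. A sketch of what `ScriptType.queue_suffix()` (mentioned in section 3) plausibly looks like – the variant list follows the `script_type` values above, not the actual source:
+
+```rust
+#[derive(Clone, Copy, Debug)]
+enum ScriptType { Osis, Sal, V, Python }
+
+impl ScriptType {
+    /// Suffix selecting the local job queue, e.g. "queue:python".
+    fn queue_suffix(self) -> &'static str {
+        match self {
+            ScriptType::Osis => "osis",
+            ScriptType::Sal => "sal",
+            ScriptType::V => "v",
+            ScriptType::Python => "python",
+        }
+    }
+}
+
+fn main() {
+    assert_eq!(ScriptType::Osis.queue_suffix(), "osis");
+    assert_eq!(ScriptType::Sal.queue_suffix(), "sal");
+    assert_eq!(ScriptType::V.queue_suffix(), "v");
+    assert_eq!(format!("queue:{}", ScriptType::Python.queue_suffix()), "queue:python");
+}
+```
+
+Adding a new language then means adding a variant plus a worker that consumes its queue.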
| + +--- + +## 6️⃣ Diagram summary (quick visual cheat‑sheet) + +```mermaid +%%{init: {"theme":"dark"}}%% +graph TD + A[Actor] -->|creates| Ctx[Context] + A -->|creates| Flow + A -->|creates| Msg + A -->|creates| Rnr[Runner] + A -->|creates| Job[RunnerJob] + + subgraph Redis["Redis (per Context)"] + Ctx --> A + Ctx --> Flow + Ctx --> Msg + Ctx --> Rnr + Ctx --> Job + end + + Msg -->|push to| OutQ[msg_out] + OutQ --> Myc[Mycelium Bus] + Myc -->|deliver| InQ[msg_in] + InQ --> Rnr + Rnr -->|pop from| Qv["queue:v"] + Rnr -->|pop from| Qpy["queue:python"] + + Rnr -->|updates| Job + Job -->|updates| Flow + Flow -->|result Message| Msg +``` + diff --git a/bin/coordinator/src/clients/mod.rs b/bin/coordinator/src/clients/mod.rs new file mode 100644 index 0000000..e33ff23 --- /dev/null +++ b/bin/coordinator/src/clients/mod.rs @@ -0,0 +1,9 @@ +pub mod mycelium_client; +pub mod supervisor_client; +pub mod supervisor_hub; +pub mod types; + +pub use mycelium_client::{MyceliumClient, MyceliumClientError}; +pub use supervisor_client::{SupervisorClient, SupervisorClientError}; +pub use supervisor_hub::SupervisorHub; +pub use types::Destination; diff --git a/bin/coordinator/src/clients/mycelium_client.rs b/bin/coordinator/src/clients/mycelium_client.rs new file mode 100644 index 0000000..5417c8d --- /dev/null +++ b/bin/coordinator/src/clients/mycelium_client.rs @@ -0,0 +1,319 @@ +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; + +use reqwest::Client as HttpClient; + +use base64::Engine; +use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use serde_json::{Value, json}; +use thiserror::Error; + +use crate::clients::Destination; +use crate::models::TransportStatus; + +/// Lightweight client for Mycelium JSON-RPC (send + query status) +#[derive(Clone)] +pub struct MyceliumClient { + base_url: String, // e.g. 
diff --git a/bin/coordinator/src/clients/mod.rs b/bin/coordinator/src/clients/mod.rs
new file mode 100644
index 0000000..e33ff23
--- /dev/null
+++ b/bin/coordinator/src/clients/mod.rs
@@ -0,0 +1,9 @@
+pub mod mycelium_client;
+pub mod supervisor_client;
+pub mod supervisor_hub;
+pub mod types;
+
+pub use mycelium_client::{MyceliumClient, MyceliumClientError};
+pub use supervisor_client::{SupervisorClient, SupervisorClientError};
+pub use supervisor_hub::SupervisorHub;
+pub use types::Destination;
diff --git a/bin/coordinator/src/clients/mycelium_client.rs b/bin/coordinator/src/clients/mycelium_client.rs
new file mode 100644
index 0000000..5417c8d
--- /dev/null
+++ b/bin/coordinator/src/clients/mycelium_client.rs
@@ -0,0 +1,319 @@
+use std::sync::Arc;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use reqwest::Client as HttpClient;
+
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
+use serde_json::{Value, json};
+use thiserror::Error;
+
+use crate::clients::Destination;
+use crate::models::TransportStatus;
+
+/// Lightweight client for Mycelium JSON-RPC (send + query status)
+#[derive(Clone)]
+pub struct MyceliumClient {
+    base_url: String, // e.g. http://127.0.0.1:8990
+    http: HttpClient,
+    id_counter: Arc<AtomicU64>,
+}
+
+#[derive(Debug, Error)]
+pub enum MyceliumClientError {
+    #[error("HTTP error: {0}")]
+    Http(#[from] reqwest::Error),
+    #[error("JSON error: {0}")]
+    Json(#[from] serde_json::Error),
+    #[error("Transport timed out waiting for a reply (408)")]
+    TransportTimeout,
+    #[error("JSON-RPC error: {0}")]
+    RpcError(String),
+    #[error("Invalid response: {0}")]
+    InvalidResponse(String),
+}
+
+impl MyceliumClient {
+    pub fn new(base_url: impl Into<String>) -> Result<Self, MyceliumClientError> {
+        let url = base_url.into();
+        let http = HttpClient::builder().build()?;
+        Ok(Self {
+            base_url: url,
+            http,
+            id_counter: Arc::new(AtomicU64::new(1)),
+        })
+    }
+
+    fn next_id(&self) -> u64 {
+        self.id_counter.fetch_add(1, Ordering::Relaxed)
+    }
+
+    async fn jsonrpc(&self, method: &str, params: Value) -> Result<Value, MyceliumClientError> {
+        let req = json!({
+            "jsonrpc": "2.0",
+            "id": self.next_id(),
+            "method": method,
+            "params": [ params ]
+        });
+
+        tracing::info!(%req, "jsonrpc");
+        let resp = self.http.post(&self.base_url).json(&req).send().await?;
+        let status = resp.status();
+        let body: Value = resp.json().await?;
+        if let Some(err) = body.get("error") {
+            let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0);
+            let msg = err
+                .get("message")
+                .and_then(|v| v.as_str())
+                .unwrap_or("unknown error");
+            if code == 408 {
+                return Err(MyceliumClientError::TransportTimeout);
+            }
+            return Err(MyceliumClientError::RpcError(format!(
+                "code={code} msg={msg}"
+            )));
+        }
+        if !status.is_success() {
+            return Err(MyceliumClientError::RpcError(format!(
+                "HTTP {status}, body {body}"
+            )));
+        }
+        Ok(body)
+    }
+
+    /// Query the transport status of an outbound message (hex id) via getMessageInfo.
+    pub async fn message_status(
+        &self,
+        id_hex: &str,
+    ) -> Result<TransportStatus, MyceliumClientError> {
+        let params = json!(id_hex);
+        let body = self.jsonrpc("getMessageInfo", params).await?;
+        let result = body.get("result").ok_or_else(|| {
+            MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
+        })?;
+        // Accept both { state: "..."} and bare "..."
+        let status_str = if let Some(s) = result.get("state").and_then(|v| v.as_str()) {
+            s.to_string()
+        } else if let Some(s) = result.as_str() {
+            s.to_string()
+        } else {
+            return Err(MyceliumClientError::InvalidResponse(format!(
+                "unexpected result shape: {result}"
+            )));
+        };
+        let status = Self::map_status(&status_str).ok_or_else(|| {
+            MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}"))
+        })?;
+        tracing::info!(%id_hex, %status, "queried message status");
+        Ok(status)
+    }
+
+    fn map_status(s: &str) -> Option<TransportStatus> {
+        match s {
+            "pending" => Some(TransportStatus::Queued),
+            "received" => Some(TransportStatus::Delivered),
+            "read" => Some(TransportStatus::Read),
+            "aborted" => Some(TransportStatus::Failed),
+            _ => None,
+        }
+    }
+
+    /// Build params object for pushMessage without performing any network call.
+    /// Exposed for serializer-only tests and reuse.
+    pub(crate) fn build_push_params(
+        dst: &Destination,
+        topic: &str,
+        payload_b64: &str,
+        reply_timeout: Option<u64>,
+    ) -> Value {
+        let dst_v = match dst {
+            Destination::Ip(ip) => json!({ "ip": ip.to_string() }),
+            Destination::Pk(pk) => json!({ "pk": pk }),
+        };
+        // The reply timeout sits next to the "message" envelope, as the tests below assert.
+        let mut params = json!({
+            "message": {
+                "dst": dst_v,
+                "topic": topic,
+                "payload": payload_b64,
+            }
+        });
+        if let Some(rt) = reply_timeout {
+            params["reply_timeout"] = json!(rt);
+        }
+        params
+    }
+
+    /// pushMessage: send a message with dst/topic/payload. Optional reply_timeout for sync replies.
+ pub async fn push_message( + &self, + dst: &Destination, + topic: &str, + payload_b64: &str, + reply_timeout: Option, + ) -> Result { + let params = Self::build_push_params(dst, topic, payload_b64, reply_timeout); + let body = self.jsonrpc("pushMessage", params).await?; + let result = body.get("result").ok_or_else(|| { + MyceliumClientError::InvalidResponse(format!("missing result in response: {body}")) + })?; + Ok(result.clone()) + } + + /// Helper to extract outbound message id from pushMessage result (InboundMessage or PushMessageResponseId) + pub fn extract_message_id_from_result(result: &Value) -> Option { + result + .get("id") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + } + /// popMessage: retrieve an inbound message if available (optionally filtered by topic). + /// - peek: if true, do not remove the message from the queue + /// - timeout_secs: seconds to wait for a message (0 returns immediately) + /// - topic_plain: optional plain-text topic which will be base64-encoded per Mycelium spec + /// Returns: + /// - Ok(Some(result_json)) on success, where result_json matches InboundMessage schema + /// - Ok(None) when there is no message ready (Mycelium returns error code 204) + pub async fn pop_message( + &self, + peek: Option, + timeout_secs: Option, + topic_plain: Option<&str>, + ) -> Result, MyceliumClientError> { + // Build params array + let mut params_array = vec![]; + if let Some(p) = peek { + params_array.push(serde_json::Value::Bool(p)); + } else { + params_array.push(serde_json::Value::Null) + } + if let Some(t) = timeout_secs { + params_array.push(serde_json::Value::Number(t.into())); + } else { + params_array.push(serde_json::Value::Null) + } + if let Some(tp) = topic_plain { + let topic_b64 = BASE64_STANDARD.encode(tp.as_bytes()); + params_array.push(serde_json::Value::String(topic_b64)); + } else { + params_array.push(serde_json::Value::Null) + } + + let req = json!({ + "jsonrpc": "2.0", + "id": self.next_id(), + "method": "popMessage", + "params": serde_json::Value::Array(params_array), + }); + + tracing::info!(%req, "calling popMessage"); + + let resp = self.http.post(&self.base_url).json(&req).send().await?; + let status = resp.status(); + let body: Value = resp.json().await?; + + // Handle JSON-RPC error envelope specially for code 204 (no message ready) + if let Some(err) = body.get("error") { + let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0); + let msg = err + .get("message") + .and_then(|v| v.as_str()) + .unwrap_or("unknown error"); + + if code == 204 { + // No message ready + return Ok(None); + } + if code == 408 { + // Align with other transport timeout mapping + return Err(MyceliumClientError::TransportTimeout); + } + return Err(MyceliumClientError::RpcError(format!( + "code={code} msg={msg}" + ))); + } + + if !status.is_success() { + return Err(MyceliumClientError::RpcError(format!( + "HTTP {status}, body {body}" + ))); + } + + let result = body.get("result").ok_or_else(|| { + MyceliumClientError::InvalidResponse(format!("missing result in response: {body}")) + })?; + Ok(Some(result.clone())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::clients::Destination; + + #[test] + fn build_push_params_shapes_ip_pk_and_timeout() { + // IP destination + let p1 = MyceliumClient::build_push_params( + &Destination::Ip("2001:db8::1".parse().unwrap()), + "supervisor.rpc", + "Zm9vYmFy", // "foobar" + Some(10), + ); + let msg1 = p1.get("message").unwrap(); + assert_eq!( + msg1.get("topic").unwrap().as_str().unwrap(), + 
"supervisor.rpc" + ); + assert_eq!(msg1.get("payload").unwrap().as_str().unwrap(), "Zm9vYmFy"); + assert_eq!( + msg1.get("dst") + .unwrap() + .get("ip") + .unwrap() + .as_str() + .unwrap(), + "2001:db8::1" + ); + assert_eq!(p1.get("reply_timeout").unwrap().as_u64().unwrap(), 10); + + // PK destination without timeout + let p2 = MyceliumClient::build_push_params( + &Destination::Pk( + "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".into(), + ), + "supervisor.rpc", + "YmF6", // "baz" + None, + ); + let msg2 = p2.get("message").unwrap(); + assert_eq!( + msg2.get("dst") + .unwrap() + .get("pk") + .unwrap() + .as_str() + .unwrap(), + "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32" + ); + assert!(p2.get("reply_timeout").is_none()); + } + + #[test] + fn extract_message_id_variants() { + // PushMessageResponseId + let r1 = json!({"id":"0123456789abcdef"}); + assert_eq!( + MyceliumClient::extract_message_id_from_result(&r1).unwrap(), + "0123456789abcdef" + ); + + // InboundMessage-like + let r2 = json!({ + "id":"fedcba9876543210", + "srcIp":"449:abcd:0123:defa::1", + "payload":"hpV+" + }); + assert_eq!( + MyceliumClient::extract_message_id_from_result(&r2).unwrap(), + "fedcba9876543210" + ); + } +} diff --git a/bin/coordinator/src/clients/supervisor_client.rs b/bin/coordinator/src/clients/supervisor_client.rs new file mode 100644 index 0000000..1cb3f04 --- /dev/null +++ b/bin/coordinator/src/clients/supervisor_client.rs @@ -0,0 +1,588 @@ +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::Duration; + +use base64::Engine; +use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use serde_json::{Value, json}; +use thiserror::Error; +use tokio::time::timeout; + +use crate::clients::{Destination, MyceliumClient, MyceliumClientError, SupervisorHub}; + +#[derive(Clone)] +pub struct SupervisorClient { + hub: Arc, // Global hub with background pop loop and shared id generator + destination: Destination, // ip or pk + secret: Option, // optional, required by several supervisor methods +} + +#[derive(Debug, Error)] +pub enum SupervisorClientError { + #[error("HTTP error: {0}")] + Http(#[from] reqwest::Error), + #[error("JSON error: {0}")] + Json(#[from] serde_json::Error), + #[error("Transport timed out waiting for a reply (408)")] + TransportTimeout, + #[error("JSON-RPC error: {0}")] + RpcError(String), + #[error("Invalid response: {0}")] + InvalidResponse(String), + #[error("Missing secret for method requiring authentication")] + MissingSecret, +} + +impl From for SupervisorClientError { + fn from(e: MyceliumClientError) -> Self { + match e { + MyceliumClientError::TransportTimeout => SupervisorClientError::TransportTimeout, + MyceliumClientError::RpcError(m) => SupervisorClientError::RpcError(m), + MyceliumClientError::InvalidResponse(m) => SupervisorClientError::InvalidResponse(m), + MyceliumClientError::Http(err) => SupervisorClientError::Http(err), + MyceliumClientError::Json(err) => SupervisorClientError::Json(err), + } + } +} + +impl SupervisorClient { + /// Preferred constructor using a shared SupervisorHub (single global listener). + pub fn new_with_hub( + hub: Arc, + destination: Destination, + secret: Option, + ) -> Self { + Self { + hub, + destination, + secret, + } + } + + /// Backward-compatible constructor that builds a new Hub from base_url/topic. + /// NOTE: This spawns a background popMessage listener for the given topic. + /// Prefer `new_with_hub` so the process has a single global hub. 
+    pub fn new(
+        base_url: impl Into<String>,
+        destination: Destination,
+        topic: impl Into<String>,
+        secret: Option<String>,
+    ) -> Result<Self, SupervisorClientError> {
+        let mut url = base_url.into();
+        if url.is_empty() {
+            url = "http://127.0.0.1:8990".to_string();
+        }
+        let mycelium = Arc::new(MyceliumClient::new(url)?);
+        Ok(Self::new_with_client(mycelium, destination, topic, secret))
+    }
+
+    /// Backward-compatible constructor that reuses an existing Mycelium client.
+    /// NOTE: This creates a new hub and its own background listener. Prefer `new_with_hub`.
+    pub fn new_with_client(
+        mycelium: Arc<MyceliumClient>,
+        destination: Destination,
+        topic: impl Into<String>,
+        secret: Option<String>,
+    ) -> Self {
+        let hub = SupervisorHub::new_with_client(mycelium, topic);
+        Self::new_with_hub(hub, destination, secret)
+    }
+
+    /// Internal helper used by tests to inspect dst JSON shape.
+    fn build_dst(&self) -> Value {
+        match &self.destination {
+            Destination::Ip(ip) => json!({ "ip": ip.to_string() }),
+            Destination::Pk(pk) => json!({ "pk": pk }),
+        }
+    }
+
+    fn build_supervisor_payload(&self, method: &str, params: Value) -> Value {
+        json!({
+            "jsonrpc": "2.0",
+            "id": self.hub.next_id(),
+            "method": method,
+            "params": params,
+        })
+    }
+
+    /// Build a supervisor JSON-RPC payload but force a specific id (used for correlation).
+    fn build_supervisor_payload_with_id(&self, method: &str, params: Value, id: u64) -> Value {
+        json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "method": method,
+            "params": params,
+        })
+    }
+
+    fn encode_payload(payload: &Value) -> Result<String, SupervisorClientError> {
+        let s = serde_json::to_string(payload)?;
+        Ok(BASE64_STANDARD.encode(s.as_bytes()))
+    }
+
+    fn encode_topic(topic: &[u8]) -> String {
+        BASE64_STANDARD.encode(topic)
+    }
+
+    fn extract_message_id_from_result(result: &Value) -> Option<String> {
+        // Two possibilities per Mycelium spec oneOf:
+        // - PushMessageResponseId: { "id": "0123456789abcdef" }
+        // - InboundMessage: object containing "id" plus srcIp, ...; we still return id.
+        result
+            .get("id")
+            .and_then(|v| v.as_str())
+            .map(|s| s.to_string())
+    }
+
+    fn need_secret(&self) -> Result<&str, SupervisorClientError> {
+        self.secret
+            .as_deref()
+            .ok_or(SupervisorClientError::MissingSecret)
+    }
+
+    // -----------------------------
+    // Core: request-reply call via Hub with a default 60s timeout
+    // -----------------------------
+
+    /// Send a supervisor JSON-RPC request and await its reply via the Hub.
+    /// Returns (outbound_message_id, reply_envelope_json).
+    pub async fn call_with_reply_timeout(
+        &self,
+        method: &str,
+        params: Value,
+        timeout_secs: u64,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let inner_id = self.hub.next_id();
+        // Register waiter before sending to avoid race
+        let rx = self.hub.register_waiter(inner_id).await;
+
+        let inner = self.build_supervisor_payload_with_id(method, params, inner_id);
+        let payload_b64 = Self::encode_payload(&inner)?;
+
+        let result = self
+            .hub
+            .mycelium()
+            .push_message(
+                &self.destination,
+                &Self::encode_topic(self.hub.topic().as_bytes()),
+                &payload_b64,
+                None,
+            )
+            .await?;
+
+        let out_id = if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
+            id
+        } else if let Some(arr) = result.as_array()
+            && arr.len() == 1
+            && let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
+        {
+            id
+        } else {
+            // Clean pending entry to avoid leak
+            let _ = self.hub.remove_waiter(inner_id).await;
+            return Err(SupervisorClientError::InvalidResponse(format!(
+                "result did not contain message id: {result}"
+            )));
+        };
+
+        let d = Duration::from_secs(timeout_secs);
+        match timeout(d, rx).await {
+            Ok(Ok(reply)) => Ok((out_id, reply)),
+            Ok(Err(_canceled)) => Err(SupervisorClientError::InvalidResponse(
+                "oneshot canceled before receiving reply".into(),
+            )),
+            Err(_elapsed) => {
+                // Cleanup on timeout
+                let _ = self.hub.remove_waiter(inner_id).await;
+                Err(SupervisorClientError::TransportTimeout)
+            }
+        }
+    }
+
+    /// Send and await with the default 60s timeout.
+    pub async fn call_with_reply(
+        &self,
+        method: &str,
+        params: Value,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply_timeout(method, params, 60).await
+    }
+
+    /// Back-compat: Send and await a reply but return only the outbound id (discard reply).
+    /// This keeps existing call sites working while the system migrates to reply-aware paths.
+ pub async fn call(&self, method: &str, params: Value) -> Result { + let (out_id, _reply) = self.call_with_reply(method, params).await?; + Ok(out_id) + } + + // ----------------------------- + // Typed wrappers for Supervisor API (await replies) + // ----------------------------- + + // Runners + pub async fn list_runners_wait(&self) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("list_runners", json!([])).await + } + + pub async fn register_runner_wait( + &self, + name: impl Into, + queue: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "name": name.into(), + "queue": queue.into() + }]); + self.call_with_reply("register_runner", params).await + } + + pub async fn remove_runner_wait( + &self, + actor_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("remove_runner", json!([actor_id.into()])) + .await + } + + pub async fn start_runner_wait( + &self, + actor_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("start_runner", json!([actor_id.into()])) + .await + } + + pub async fn stop_runner_wait( + &self, + actor_id: impl Into, + force: bool, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("stop_runner", json!([actor_id.into(), force])) + .await + } + + pub async fn get_runner_status_wait( + &self, + actor_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("get_runner_status", json!([actor_id.into()])) + .await + } + + pub async fn get_all_runner_status_wait( + &self, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("get_all_runner_status", json!([])) + .await + } + + pub async fn start_all_wait(&self) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("start_all", json!([])).await + } + + pub async fn stop_all_wait( + &self, + force: bool, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("stop_all", json!([force])).await + } + + pub async fn get_all_status_wait(&self) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("get_all_status", json!([])).await + } + + // Jobs (await) + pub async fn jobs_create_wait( + &self, + job: Value, + ) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "job": job + }]); + self.call_with_reply("jobs.create", params).await + } + + pub async fn jobs_list_wait(&self) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("jobs.list", json!([])).await + } + + pub async fn job_run_wait(&self, job: Value) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "job": job + }]); + self.call_with_reply("job.run", params).await + } + + pub async fn job_start_wait( + &self, + job_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "job_id": job_id.into() + }]); + self.call_with_reply("job.start", params).await + } + + pub async fn job_status_wait( + &self, + job_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("job.status", json!([job_id.into()])) + .await + } + + pub async fn job_result_wait( + &self, + job_id: impl Into, + ) -> Result<(String, Value), 
SupervisorClientError> { + self.call_with_reply("job.result", json!([job_id.into()])) + .await + } + + pub async fn job_stop_wait( + &self, + job_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "job_id": job_id.into() + }]); + self.call_with_reply("job.stop", params).await + } + + pub async fn job_delete_wait( + &self, + job_id: impl Into, + ) -> Result<(String, Value), SupervisorClientError> { + let secret = self.need_secret()?; + let params = json!([{ + "secret": secret, + "job_id": job_id.into() + }]); + self.call_with_reply("job.delete", params).await + } + + pub async fn rpc_discover_wait(&self) -> Result<(String, Value), SupervisorClientError> { + self.call_with_reply("rpc.discover", json!([])).await + } + + // ----------------------------- + // Backward-compatible variants returning only outbound id (discarding reply) + // ----------------------------- + + pub async fn list_runners(&self) -> Result { + let (id, _) = self.list_runners_wait().await?; + Ok(id) + } + + pub async fn register_runner( + &self, + name: impl Into, + queue: impl Into, + ) -> Result { + let (id, _) = self.register_runner_wait(name, queue).await?; + Ok(id) + } + + pub async fn remove_runner( + &self, + actor_id: impl Into, + ) -> Result { + let (id, _) = self.remove_runner_wait(actor_id).await?; + Ok(id) + } + + pub async fn start_runner( + &self, + actor_id: impl Into, + ) -> Result { + let (id, _) = self.start_runner_wait(actor_id).await?; + Ok(id) + } + + pub async fn stop_runner( + &self, + actor_id: impl Into, + force: bool, + ) -> Result { + let (id, _) = self.stop_runner_wait(actor_id, force).await?; + Ok(id) + } + + pub async fn get_runner_status( + &self, + actor_id: impl Into, + ) -> Result { + let (id, _) = self.get_runner_status_wait(actor_id).await?; + Ok(id) + } + + pub async fn get_all_runner_status(&self) -> Result { + let (id, _) = self.get_all_runner_status_wait().await?; + Ok(id) + } + + pub async fn start_all(&self) -> Result { + let (id, _) = self.start_all_wait().await?; + Ok(id) + } + + pub async fn stop_all(&self, force: bool) -> Result { + let (id, _) = self.stop_all_wait(force).await?; + Ok(id) + } + + pub async fn get_all_status(&self) -> Result { + let (id, _) = self.get_all_status_wait().await?; + Ok(id) + } + + pub async fn jobs_create(&self, job: Value) -> Result { + let (id, _) = self.jobs_create_wait(job).await?; + Ok(id) + } + + pub async fn jobs_list(&self) -> Result { + let (id, _) = self.jobs_list_wait().await?; + Ok(id) + } + + pub async fn job_run(&self, job: Value) -> Result { + let (id, _) = self.job_run_wait(job).await?; + Ok(id) + } + + pub async fn job_start( + &self, + job_id: impl Into, + ) -> Result { + let (id, _) = self.job_start_wait(job_id).await?; + Ok(id) + } + + pub async fn job_status( + &self, + job_id: impl Into, + ) -> Result { + let (id, _) = self.job_status_wait(job_id).await?; + Ok(id) + } + + pub async fn job_result( + &self, + job_id: impl Into, + ) -> Result { + let (id, _) = self.job_result_wait(job_id).await?; + Ok(id) + } + + pub async fn job_stop( + &self, + job_id: impl Into, + ) -> Result { + let (id, _) = self.job_stop_wait(job_id).await?; + Ok(id) + } + + pub async fn job_delete( + &self, + job_id: impl Into, + ) -> Result { + let (id, _) = self.job_delete_wait(job_id).await?; + Ok(id) + } + + pub async fn rpc_discover(&self) -> Result { + let (id, _) = self.rpc_discover_wait().await?; + Ok(id) + } +} + +// ----------------------------- 
+// Tests (serialization-only) +// ----------------------------- +#[cfg(test)] +mod tests { + use super::*; + use std::net::IpAddr; + + fn mk_client() -> SupervisorClient { + // Build a hub but it won't issue real network calls in these serializer-only tests. + let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap()); + let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc"); + SupervisorClient::new_with_hub( + hub, + Destination::Pk( + "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(), + ), + Some("secret".to_string()), + ) + } + + #[test] + fn builds_dst_ip_and_pk() { + let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap()); + let hub_ip = SupervisorHub::new_with_client(mycelium.clone(), "supervisor.rpc"); + let c_ip = SupervisorClient::new_with_hub( + hub_ip, + Destination::Ip("2001:db8::1".parse().unwrap()), + None, + ); + let v_ip = c_ip.build_dst(); + assert_eq!(v_ip.get("ip").unwrap().as_str().unwrap(), "2001:db8::1"); + + let c_pk = mk_client(); + let v_pk = c_pk.build_dst(); + assert_eq!( + v_pk.get("pk").unwrap().as_str().unwrap(), + "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32" + ); + } + + #[test] + fn encodes_supervisor_payload_b64() { + let c = mk_client(); + let payload = c.build_supervisor_payload("list_runners", json!([])); + let b64 = SupervisorClient::encode_payload(&payload).unwrap(); + + // decode and compare round-trip JSON + let raw = base64::engine::general_purpose::STANDARD + .decode(b64.as_bytes()) + .unwrap(); + let decoded: Value = serde_json::from_slice(&raw).unwrap(); + assert_eq!( + decoded.get("method").unwrap().as_str().unwrap(), + "list_runners" + ); + assert_eq!(decoded.get("jsonrpc").unwrap().as_str().unwrap(), "2.0"); + } + + #[test] + fn extract_message_id_works_for_both_variants() { + // PushMessageResponseId + let r1 = json!({"id":"0123456789abcdef"}); + assert_eq!( + SupervisorClient::extract_message_id_from_result(&r1).unwrap(), + "0123456789abcdef" + ); + // InboundMessage-like + let r2 = json!({ + "id":"fedcba9876543210", + "srcIp":"449:abcd:0123:defa::1", + "payload":"hpV+" + }); + assert_eq!( + SupervisorClient::extract_message_id_from_result(&r2).unwrap(), + "fedcba9876543210" + ); + } +} diff --git a/bin/coordinator/src/clients/supervisor_hub.rs b/bin/coordinator/src/clients/supervisor_hub.rs new file mode 100644 index 0000000..3737803 --- /dev/null +++ b/bin/coordinator/src/clients/supervisor_hub.rs @@ -0,0 +1,143 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; + +use base64::Engine; +use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use serde_json::Value; +use tokio::sync::{Mutex, oneshot}; + +use crate::clients::mycelium_client::MyceliumClient; + +/// Global hub that: +/// - Owns a single MyceliumClient +/// - Spawns a background popMessage loop filtered by topic +/// - Correlates supervisor JSON-RPC replies by inner id to waiting callers via oneshot channels +#[derive(Clone)] +pub struct SupervisorHub { + mycelium: Arc, + topic: String, + pending: Arc>>>, + id_counter: Arc, +} + +impl SupervisorHub { + /// Create a new hub and start the background popMessage task. + /// - base_url: Mycelium JSON-RPC endpoint, e.g. 
"http://127.0.0.1:8990" + /// - topic: plain-text topic (e.g., "supervisor.rpc") + pub fn new( + base_url: impl Into, + topic: impl Into, + ) -> Result, crate::clients::MyceliumClientError> { + let myc = Arc::new(MyceliumClient::new(base_url)?); + Ok(Self::new_with_client(myc, topic)) + } + + /// Variant that reuses an existing Mycelium client. + pub fn new_with_client(mycelium: Arc, topic: impl Into) -> Arc { + let hub = Arc::new(Self { + mycelium, + topic: topic.into(), + pending: Arc::new(Mutex::new(HashMap::new())), + id_counter: Arc::new(AtomicU64::new(1)), + }); + Self::spawn_pop_loop(hub.clone()); + hub + } + + fn spawn_pop_loop(hub: Arc) { + tokio::spawn(async move { + loop { + match hub.mycelium.pop_message(Some(false), Some(20), None).await { + Ok(Some(inb)) => { + // Extract and decode payload + let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else { + // Not a payload-bearing message; ignore + continue; + }; + let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else { + tracing::warn!(target: "supervisor_hub", "Failed to decode inbound payload base64"); + continue; + }; + let Ok(rpc): Result = serde_json::from_slice(&raw) else { + tracing::warn!(target: "supervisor_hub", "Failed to parse inbound payload JSON"); + continue; + }; + + // Extract inner JSON-RPC id + let inner_id_u64 = match rpc.get("id") { + Some(Value::Number(n)) => n.as_u64(), + Some(Value::String(s)) => s.parse::().ok(), + _ => None, + }; + + if let Some(inner_id) = inner_id_u64 { + // Try to deliver to a pending waiter + let sender_opt = { + let mut guard = hub.pending.lock().await; + guard.remove(&inner_id) + }; + if let Some(tx) = sender_opt { + let _ = tx.send(rpc); + } else { + tracing::warn!( + target: "supervisor_hub", + inner_id, + payload = %String::from_utf8_lossy(&raw), + "Unmatched supervisor reply; no waiter registered" + ); + } + } else { + tracing::warn!(target: "supervisor_hub", "Inbound supervisor reply missing id; dropping"); + } + } + Ok(None) => { + // No message; continue polling + continue; + } + Err(e) => { + tracing::warn!(target: "supervisor_hub", error = %e, "popMessage error; backing off"); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + } + } + } + }); + } + + /// Allocate a new inner supervisor JSON-RPC id. + pub fn next_id(&self) -> u64 { + self.id_counter.fetch_add(1, Ordering::Relaxed) + } + + /// Register a oneshot sender for the given inner id and return the receiver side. + pub async fn register_waiter(&self, inner_id: u64) -> oneshot::Receiver { + let (tx, rx) = oneshot::channel(); + let mut guard = self.pending.lock().await; + guard.insert(inner_id, tx); + rx + } + + /// Remove a pending waiter for a given id (used to cleanup on timeout). + pub async fn remove_waiter(&self, inner_id: u64) -> Option> { + let mut guard = self.pending.lock().await; + guard.remove(&inner_id) + } + + /// Access to underlying Mycelium client (for pushMessage). + pub fn mycelium(&self) -> Arc { + self.mycelium.clone() + } + + /// Access configured topic. 
+ pub fn topic(&self) -> &str { + &self.topic + } +} + +impl std::fmt::Debug for SupervisorHub { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SupervisorHub") + .field("topic", &self.topic) + .finish() + } +} diff --git a/bin/coordinator/src/clients/types.rs b/bin/coordinator/src/clients/types.rs new file mode 100644 index 0000000..c83180b --- /dev/null +++ b/bin/coordinator/src/clients/types.rs @@ -0,0 +1,9 @@ +use std::net::IpAddr; + +/// Destination for Mycelium messages (shared by clients) +#[derive(Clone, Debug)] +pub enum Destination { + Ip(IpAddr), + /// 64-hex public key of the receiver node + Pk(String), +} diff --git a/bin/coordinator/src/dag.rs b/bin/coordinator/src/dag.rs new file mode 100644 index 0000000..82745b5 --- /dev/null +++ b/bin/coordinator/src/dag.rs @@ -0,0 +1,381 @@ +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::fmt; + +use crate::{ + models::{Flow, Job, JobStatus, ScriptType}, + storage::RedisDriver, +}; + +pub type DagResult = Result; + +#[derive(Debug)] +pub enum DagError { + Storage(Box), + MissingDependency { job: u32, depends_on: u32 }, + CycleDetected { remaining: Vec }, + UnknownJob { job: u32 }, + DependenciesIncomplete { job: u32, missing: Vec }, + FlowFailed { failed_job: u32 }, + JobNotStarted { job: u32 }, +} + +impl fmt::Display for DagError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DagError::Storage(e) => write!(f, "Storage error: {}", e), + DagError::MissingDependency { job, depends_on } => write!( + f, + "Job {} depends on {}, which is not part of the flow.jobs list", + job, depends_on + ), + DagError::CycleDetected { remaining } => { + write!(f, "Cycle detected; unresolved nodes: {:?}", remaining) + } + DagError::UnknownJob { job } => write!(f, "Unknown job id: {}", job), + DagError::DependenciesIncomplete { job, missing } => write!( + f, + "Job {} cannot start; missing completed deps: {:?}", + job, missing + ), + DagError::FlowFailed { failed_job } => { + write!(f, "Flow failed due to job {}", failed_job) + } + DagError::JobNotStarted { job } => write!( + f, + "Job {} cannot be completed because it is not marked as started", + job + ), + } + } +} + +impl std::error::Error for DagError {} + +impl From> for DagError { + fn from(e: Box) -> Self { + DagError::Storage(e) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobSummary { + pub id: u32, + pub depends: Vec, + pub prerequisites: Vec, + pub script_type: ScriptType, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FlowDag { + pub flow_id: u32, + pub caller_id: u32, + pub context_id: u32, + pub nodes: HashMap, + pub edges: Vec<(u32, u32)>, // (from prerequisite, to job) + pub reverse_edges: Vec<(u32, u32)>, // (from job, to prerequisite) + pub roots: Vec, // in_degree == 0 + pub leaves: Vec, // out_degree == 0 + pub levels: Vec>, // topological layers for parallel execution + // Runtime execution state + pub started: HashSet, + pub completed: HashSet, + pub failed_job: Option, +} + +pub async fn build_flow_dag( + redis: &RedisDriver, + context_id: u32, + flow_id: u32, +) -> DagResult { + // Load flow + let flow: Flow = redis + .load_flow(context_id, flow_id) + .await + .map_err(DagError::from)?; + let caller_id = flow.caller_id(); + let flow_job_ids = flow.jobs(); + + // Build a set for faster membership tests + let job_id_set: HashSet = flow_job_ids.iter().copied().collect(); + + // Load all jobs + let mut jobs: HashMap = 
HashMap::with_capacity(flow_job_ids.len()); + for jid in flow_job_ids { + let job = redis + .load_job(context_id, caller_id, *jid) + .await + .map_err(DagError::from)?; + jobs.insert(*jid, job); + } + + // Validate dependencies and construct adjacency + let mut edges: Vec<(u32, u32)> = Vec::new(); + let mut reverse_edges: Vec<(u32, u32)> = Vec::new(); + let mut adj: HashMap> = HashMap::with_capacity(jobs.len()); + let mut rev_adj: HashMap> = HashMap::with_capacity(jobs.len()); + let mut in_degree: HashMap = HashMap::with_capacity(jobs.len()); + + for &jid in flow_job_ids { + adj.entry(jid).or_default(); + rev_adj.entry(jid).or_default(); + in_degree.entry(jid).or_insert(0); + } + + for (&jid, job) in &jobs { + for &dep in job.depends() { + if !job_id_set.contains(&dep) { + return Err(DagError::MissingDependency { + job: jid, + depends_on: dep, + }); + } + // edge: dep -> jid + edges.push((dep, jid)); + reverse_edges.push((jid, dep)); + adj.get_mut(&dep).unwrap().push(jid); + rev_adj.get_mut(&jid).unwrap().push(dep); + *in_degree.get_mut(&jid).unwrap() += 1; + } + } + + // Kahn's algorithm for topological sorting, with level construction + let mut zero_in: VecDeque = in_degree + .iter() + .filter_map(|(k, v)| if *v == 0 { Some(*k) } else { None }) + .collect(); + + let mut processed_count = 0usize; + let mut levels: Vec> = Vec::new(); + + // To make deterministic, sort initial zero_in + { + let mut tmp: Vec = zero_in.iter().copied().collect(); + tmp.sort_unstable(); + zero_in = tmp.into_iter().collect(); + } + + while !zero_in.is_empty() { + let mut level: Vec = Vec::new(); + // drain current frontier + let mut next_zero: Vec = Vec::new(); + let mut current_frontier: Vec = zero_in.drain(..).collect(); + current_frontier.sort_unstable(); + for u in current_frontier { + level.push(u); + processed_count += 1; + if let Some(children) = adj.get(&u) { + let mut sorted_children = children.clone(); + sorted_children.sort_unstable(); + for &v in &sorted_children { + let d = in_degree.get_mut(&v).unwrap(); + *d -= 1; + if *d == 0 { + next_zero.push(v); + } + } + } + } + next_zero.sort_unstable(); + zero_in = next_zero.into_iter().collect(); + levels.push(level); + } + + if processed_count != jobs.len() { + let remaining: Vec = in_degree + .into_iter() + .filter_map(|(k, v)| if v > 0 { Some(k) } else { None }) + .collect(); + return Err(DagError::CycleDetected { remaining }); + } + + // Roots and leaves + let roots: Vec = levels.first().cloned().unwrap_or_default(); + let leaves: Vec = adj + .iter() + .filter_map(|(k, v)| if v.is_empty() { Some(*k) } else { None }) + .collect(); + + // Nodes map (JobSummary) + let mut nodes: HashMap = HashMap::with_capacity(jobs.len()); + for (&jid, job) in &jobs { + let summary = JobSummary { + id: jid, + depends: job.depends().to_vec(), + prerequisites: job.prerequisites().to_vec(), + script_type: job.script_type(), + }; + nodes.insert(jid, summary); + } + + // Sort edges deterministically + edges.sort_unstable(); + reverse_edges.sort_unstable(); + + // Populate runtime execution state from persisted Job.status() + let mut started_set: HashSet = HashSet::new(); + let mut completed_set: HashSet = HashSet::new(); + let mut error_ids: Vec = Vec::new(); + + for (&jid, job) in &jobs { + match job.status() { + JobStatus::Finished => { + completed_set.insert(jid); + } + JobStatus::Started => { + started_set.insert(jid); + } + JobStatus::Dispatched => { + // Consider Dispatched as "in-flight" for DAG runtime started set, + // so queued/running work is visible in periodic 
snapshots. + started_set.insert(jid); + } + JobStatus::Error => { + error_ids.push(jid); + } + JobStatus::WaitingForPrerequisites => { + // Neither started nor completed + } + } + } + + // Choose a deterministic failed job if any errors exist (smallest job id) + let failed_job = if error_ids.is_empty() { + None + } else { + error_ids.sort_unstable(); + Some(error_ids[0]) + }; + + let dag = FlowDag { + flow_id, + caller_id, + context_id, + nodes, + edges, + reverse_edges, + roots, + leaves, + levels, + started: started_set, + completed: completed_set, + failed_job, + }; + + Ok(dag) +} + +impl FlowDag { + /// Return all jobs that are ready to be processed. + /// A job is ready if: + /// - it exists in the DAG + /// - it is not already started or completed + /// - it has no dependencies, or all dependencies are completed + /// + /// If any job has failed, the entire flow is considered failed and an error is returned. + pub fn ready_jobs(&self) -> DagResult> { + if let Some(failed_job) = self.failed_job { + return Err(DagError::FlowFailed { failed_job }); + } + + let mut ready: Vec = Vec::new(); + for (&jid, summary) in &self.nodes { + if self.completed.contains(&jid) || self.started.contains(&jid) { + continue; + } + let mut deps_ok = true; + for dep in &summary.depends { + if !self.completed.contains(dep) { + deps_ok = false; + break; + } + } + if deps_ok { + ready.push(jid); + } + } + ready.sort_unstable(); + Ok(ready) + } + + /// Mark a job as started. + /// Strict validation rules: + /// - Unknown jobs are rejected with UnknownJob + /// - If the flow has already failed, return FlowFailed + /// - If the job is already started or completed, this is a no-op (idempotent) + /// - If any dependency is not completed, return DependenciesIncomplete with the missing deps + pub fn mark_job_started(&mut self, job: u32) -> DagResult<()> { + if !self.nodes.contains_key(&job) { + return Err(DagError::UnknownJob { job }); + } + if self.completed.contains(&job) || self.started.contains(&job) { + return Ok(()); + } + if let Some(failed_job) = self.failed_job { + return Err(DagError::FlowFailed { failed_job }); + } + + let summary = self.nodes.get(&job).expect("checked contains_key"); + let missing: Vec = summary + .depends + .iter() + .copied() + .filter(|d| !self.completed.contains(d)) + .collect(); + + if !missing.is_empty() { + return Err(DagError::DependenciesIncomplete { job, missing }); + } + + self.started.insert(job); + Ok(()) + } + + /// Mark a job as completed. + /// Strict validation rules: + /// - Unknown jobs are rejected with UnknownJob + /// - If the job is already completed, this is a no-op (idempotent) + /// - If the flow has already failed, return FlowFailed + /// - If the job was not previously started, return JobNotStarted + pub fn mark_job_completed(&mut self, job: u32) -> DagResult<()> { + if !self.nodes.contains_key(&job) { + return Err(DagError::UnknownJob { job }); + } + if self.completed.contains(&job) { + return Ok(()); + } + if let Some(failed_job) = self.failed_job { + return Err(DagError::FlowFailed { failed_job }); + } + if !self.started.contains(&job) { + return Err(DagError::JobNotStarted { job }); + } + + self.started.remove(&job); + self.completed.insert(job); + Ok(()) + } + + /// Mark a job as failed. 
+ /// Behavior: + /// - Unknown jobs are rejected with UnknownJob + /// - If a failure is already recorded: + /// - If it is the same job, no-op (idempotent) + /// - If it is a different job, return FlowFailed with the already-failed job + /// - Otherwise record this job as the failed job + pub fn mark_job_failed(&mut self, job: u32) -> DagResult<()> { + if !self.nodes.contains_key(&job) { + return Err(DagError::UnknownJob { job }); + } + match self.failed_job { + Some(existing) if existing == job => Ok(()), + Some(existing) => Err(DagError::FlowFailed { + failed_job: existing, + }), + None => { + self.failed_job = Some(job); + Ok(()) + } + } + } +} diff --git a/bin/coordinator/src/lib.rs b/bin/coordinator/src/lib.rs new file mode 100644 index 0000000..3f689fb --- /dev/null +++ b/bin/coordinator/src/lib.rs @@ -0,0 +1,8 @@ +pub mod clients; +pub mod dag; +pub mod models; +pub mod router; +pub mod rpc; +pub mod service; +pub mod storage; +mod time; diff --git a/bin/coordinator/src/main.rs b/bin/coordinator/src/main.rs new file mode 100644 index 0000000..c562c36 --- /dev/null +++ b/bin/coordinator/src/main.rs @@ -0,0 +1,142 @@ +use clap::Parser; +use std::net::{IpAddr, SocketAddr}; +use std::sync::Arc; + +use tracing::{error, info}; +use tracing_subscriber::EnvFilter; +#[derive(Debug, Clone, Parser)] +#[command( + name = "herocoordinator", + version, + about = "Hero Coordinator CLI", + long_about = None +)] +struct Cli { + #[arg( + long = "mycelium-ip", + short = 'i', + env = "MYCELIUM_IP", + default_value = "127.0.0.1", + help = "IP address where Mycelium JSON-RPC is listening (default: 127.0.0.1)" + )] + mycelium_ip: IpAddr, + + #[arg( + long = "mycelium-port", + short = 'p', + env = "MYCELIUM_PORT", + default_value_t = 8990u16, + help = "Port for Mycelium JSON-RPC (default: 8990)" + )] + mycelium_port: u16, + + #[arg( + long = "redis-addr", + short = 'r', + env = "REDIS_ADDR", + default_value = "127.0.0.1:6379", + help = "Socket address of Redis instance (default: 127.0.0.1:6379)" + )] + redis_addr: SocketAddr, + + #[arg( + long = "api-http-ip", + env = "API_HTTP_IP", + default_value = "127.0.0.1", + help = "Bind IP for HTTP JSON-RPC server (default: 127.0.0.1)" + )] + api_http_ip: IpAddr, + + #[arg( + long = "api-http-port", + env = "API_HTTP_PORT", + default_value_t = 9652u16, + help = "Bind port for HTTP JSON-RPC server (default: 9652)" + )] + api_http_port: u16, + + #[arg( + long = "api-ws-ip", + env = "API_WS_IP", + default_value = "127.0.0.1", + help = "Bind IP for WebSocket JSON-RPC server (default: 127.0.0.1)" + )] + api_ws_ip: IpAddr, + + #[arg( + long = "api-ws-port", + env = "API_WS_PORT", + default_value_t = 9653u16, + help = "Bind port for WebSocket JSON-RPC server (default: 9653)" + )] + api_ws_port: u16, +} + +#[tokio::main] +async fn main() { + let cli = Cli::parse(); + // Initialize tracing subscriber (pretty formatter; controlled by RUST_LOG) + let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + tracing_subscriber::fmt() + .with_env_filter(filter) + .pretty() + .with_target(true) + .with_level(true) + .init(); + + let http_addr = SocketAddr::new(cli.api_http_ip, cli.api_http_port); + let ws_addr = SocketAddr::new(cli.api_ws_ip, cli.api_ws_port); + + // Initialize Redis driver + let redis = hero_coordinator::storage::RedisDriver::new(cli.redis_addr.to_string()) + .await + .expect("Failed to connect to Redis"); + + // Initialize Service + let service = hero_coordinator::service::AppService::new(redis); + let service_for_router = 
service.clone();
+
+    // Shared application state
+    let state = Arc::new(hero_coordinator::rpc::AppState::new(service));
+
+    // Start router workers (auto-discovered contexts) using a single global SupervisorHub (no separate inbound listener)
+    {
+        let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
+        let hub = hero_coordinator::clients::SupervisorHub::new(
+            base_url.clone(),
+            "supervisor.rpc".to_string(),
+        )
+        .expect("Failed to initialize SupervisorHub");
+        let cfg = hero_coordinator::router::RouterConfig {
+            context_ids: Vec::new(), // ignored by start_router_auto
+            concurrency: 32,
+            base_url,
+            topic: "supervisor.rpc".to_string(),
+            sup_hub: hub.clone(),
+            transport_poll_interval_secs: 2,
+            transport_poll_timeout_secs: 300,
+        };
+        // Per-context outbound delivery loops (replies handled by SupervisorHub)
+        let _auto_handle = hero_coordinator::router::start_router_auto(service_for_router, cfg);
+    }
+
+    // Build RPC modules for both servers
+    let http_module = hero_coordinator::rpc::build_module(state.clone());
+    let ws_module = hero_coordinator::rpc::build_module(state.clone());
+
+    info!(%http_addr, %ws_addr, redis_addr=%cli.redis_addr, "Starting JSON-RPC servers");
+
+    // Start servers
+    let _http_handle = hero_coordinator::rpc::start_http(http_addr, http_module)
+        .await
+        .expect("Failed to start HTTP server");
+    let _ws_handle = hero_coordinator::rpc::start_ws(ws_addr, ws_module)
+        .await
+        .expect("Failed to start WS server");
+
+    // Wait for Ctrl+C to terminate
+    if let Err(e) = tokio::signal::ctrl_c().await {
+        error!(error=%e, "Failed to listen for shutdown signal");
+    }
+    info!("Shutdown signal received, exiting.");
+}
diff --git a/bin/coordinator/src/models.rs b/bin/coordinator/src/models.rs
new file mode 100644
index 0000000..467df98
--- /dev/null
+++ b/bin/coordinator/src/models.rs
@@ -0,0 +1,15 @@
+mod actor;
+mod context;
+mod flow;
+mod job;
+mod message;
+mod runner;
+mod script_type;
+
+pub use actor::Actor;
+pub use context::Context;
+pub use flow::{Flow, FlowStatus};
+pub use job::{Job, JobStatus};
+pub use message::{Message, MessageFormatType, MessageStatus, MessageType, TransportStatus};
+pub use runner::Runner;
+pub use script_type::ScriptType;
diff --git a/bin/coordinator/src/models/actor.rs b/bin/coordinator/src/models/actor.rs
new file mode 100644
index 0000000..9237ee2
--- /dev/null
+++ b/bin/coordinator/src/models/actor.rs
@@ -0,0 +1,15 @@
+use std::net::IpAddr;
+
+use serde::{Deserialize, Serialize};
+
+use crate::time::Timestamp;
+
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Actor {
+    id: u32,
+    pubkey: String,
+    /// IP where the actor is reachable, can be mycelium but that is not mandatory
+    address: Vec<IpAddr>,
+    created_at: Timestamp,
+    updated_at: Timestamp,
+}
diff --git a/bin/coordinator/src/models/context.rs b/bin/coordinator/src/models/context.rs
new file mode 100644
index 0000000..cb7e9d8
--- /dev/null
+++ b/bin/coordinator/src/models/context.rs
@@ -0,0 +1,17 @@
+use serde::{Deserialize, Serialize};
+
+use crate::time::Timestamp;
+
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Context {
+    /// Redis DB to use
+    pub id: u32,
+    /// Actor ids which have admin rights on this context
+    pub admins: Vec<u32>,
+    /// Actor ids which can read the context info
+    pub readers: Vec<u32>,
+    /// Actor ids which can execute jobs in this context
+    pub executors: Vec<u32>,
+    pub created_at: Timestamp,
+    pub updated_at: Timestamp,
+}
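For reference, a hypothetical local invocation of the binary defined above (package name assumed to be hero-coordinator, matching the hero_coordinator crate paths used in main.rs; every flag can equally be supplied through the env var named in the Cli struct):

```bash
# All values shown are the compiled-in defaults, so this is equivalent to
# running with no arguments; RUST_LOG drives the tracing EnvFilter.
RUST_LOG=info cargo run -p hero-coordinator -- \
  --mycelium-ip 127.0.0.1 --mycelium-port 8990 \
  --redis-addr 127.0.0.1:6379 \
  --api-http-ip 127.0.0.1 --api-http-port 9652 \
  --api-ws-ip 127.0.0.1 --api-ws-port 9653
```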
diff --git a/bin/coordinator/src/models/flow.rs b/bin/coordinator/src/models/flow.rs
new file mode 100644
index 0000000..c0d98d2
--- /dev/null
+++ b/bin/coordinator/src/models/flow.rs
@@ -0,0 +1,49 @@
+use std::collections::HashMap;
+
+use serde::{Deserialize, Serialize};
+
+use crate::time::Timestamp;
+
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Flow {
+    /// Flow Id, set by the actor which created it
+    pub id: u32,
+    /// Actor Id who created this flow
+    pub caller_id: u32,
+    /// The context in which this flow is executed
+    pub context_id: u32,
+    /// List of jobs which make up the flow
+    pub jobs: Vec<u32>,
+    /// Environment variables, passed to every job when executed
+    pub env_vars: HashMap<String, String>,
+    /// The result of the flow
+    pub result: HashMap<String, String>,
+    pub created_at: Timestamp,
+    pub updated_at: Timestamp,
+    pub status: FlowStatus,
+}
+
+/// The status of a flow
+#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)]
+pub enum FlowStatus {
+    Created,
+    Dispatched,
+    Started,
+    Error,
+    Finished,
+}
+
+impl Flow {
+    pub fn id(&self) -> u32 {
+        self.id
+    }
+    pub fn caller_id(&self) -> u32 {
+        self.caller_id
+    }
+    pub fn context_id(&self) -> u32 {
+        self.context_id
+    }
+    pub fn jobs(&self) -> &[u32] {
+        &self.jobs
+    }
+}
diff --git a/bin/coordinator/src/models/job.rs b/bin/coordinator/src/models/job.rs
new file mode 100644
index 0000000..a43659d
--- /dev/null
+++ b/bin/coordinator/src/models/job.rs
@@ -0,0 +1,62 @@
+use std::collections::HashMap;
+
+use serde::{Deserialize, Serialize};
+
+use crate::{models::ScriptType, time::Timestamp};
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct Job {
+    /// Job Id, this is given by the actor who created the job
+    pub id: u32,
+    /// Actor ID which created this job
+    pub caller_id: u32,
+    /// Context in which the job is executed
+    pub context_id: u32,
+    pub script: String,
+    pub script_type: ScriptType,
+    /// Timeout in seconds for this job
+    pub timeout: u32,
+    /// Max amount of times to retry this job
+    pub retries: u8,
+    pub env_vars: HashMap<String, String>,
+    pub result: HashMap<String, String>,
+    pub prerequisites: Vec<String>,
+    /// Ids of jobs this job depends on, i.e. this job can't start until those have finished
+    pub depends: Vec<u32>,
+    pub created_at: Timestamp,
+    pub updated_at: Timestamp,
+    pub status: JobStatus,
+}
+
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug)]
+pub enum JobStatus {
+    Dispatched,
+    WaitingForPrerequisites,
+    Started,
+    Error,
+    Finished,
+}
+
+impl Job {
+    pub fn id(&self) -> u32 {
+        self.id
+    }
+    pub fn caller_id(&self) -> u32 {
+        self.caller_id
+    }
+    pub fn context_id(&self) -> u32 {
+        self.context_id
+    }
+    pub fn depends(&self) -> &[u32] {
+        &self.depends
+    }
+    pub fn prerequisites(&self) -> &[String] {
+        &self.prerequisites
+    }
+    pub fn script_type(&self) -> ScriptType {
+        self.script_type.clone()
+    }
+    pub fn status(&self) -> JobStatus {
+        self.status.clone()
+    }
+}
diff --git a/bin/coordinator/src/models/message.rs b/bin/coordinator/src/models/message.rs
new file mode 100644
index 0000000..15338ce
--- /dev/null
+++ b/bin/coordinator/src/models/message.rs
@@ -0,0 +1,81 @@
+use serde::{Deserialize, Serialize};
+
+use crate::{
+    models::{Job, ScriptType},
+    time::Timestamp,
+};
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct Message {
+    /// Unique ID for the message, set by the caller
+    pub id: u32,
+    /// Id of the actor who sent this message
+    pub caller_id: u32,
+    /// Id of the context in which this message was sent
+    pub context_id: u32,
+    pub message: String,
+    pub message_type: ScriptType,
+    pub message_format_type: MessageFormatType,
+    /// Seconds for the message to arrive at the destination
+    pub timeout: u32,
+    /// Seconds for the receiver to acknowledge receipt of the message
+    pub timeout_ack: u32,
+    /// Seconds for the receiver to send us a reply
+    pub timeout_result: u32,
+
+    /// Outbound transport id returned by Mycelium on push
+    pub transport_id: Option<String>,
+    /// Latest transport status as reported by Mycelium
+    pub transport_status: Option<TransportStatus>,
+
+    pub job: Vec<Job>,
+    pub logs: Vec<Log>,
+    pub created_at: Timestamp,
+    pub updated_at: Timestamp,
+    pub status: MessageStatus,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum MessageType {
+    Job,
+    Chat,
+    Mail,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub enum MessageStatus {
+    Dispatched,
+    Acknowledged,
+    Error,
+    Processed,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub enum TransportStatus {
+    Queued,
+    Sent,
+    Delivered,
+    Read,
+    Failed,
+}
+
+impl std::fmt::Display for TransportStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TransportStatus::Queued => f.write_str("queued"),
+            TransportStatus::Sent => f.write_str("sent"),
+            TransportStatus::Delivered => f.write_str("delivered"),
+            TransportStatus::Read => f.write_str("read"),
+            TransportStatus::Failed => f.write_str("failed"),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum MessageFormatType {
+    Html,
+    Text,
+    Md,
+}
+
+type Log = String;
diff --git a/bin/coordinator/src/models/runner.rs b/bin/coordinator/src/models/runner.rs
new file mode 100644
index 0000000..8022545
--- /dev/null
+++ b/bin/coordinator/src/models/runner.rs
@@ -0,0 +1,25 @@
+use std::net::IpAddr;
+
+use serde::{Deserialize, Serialize};
+
+use crate::models::ScriptType;
+use crate::time::Timestamp;
+
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Runner {
+    pub id: u32,
+    /// Mycelium public key
+    pub pubkey: String,
+    /// Mycelium address
+    pub address: IpAddr,
+    /// Needs to be set by the runner, usually `runner
+    pub topic: String,
+    /// The script type this runner executes (used for routing)
+    pub script_type: ScriptType,
+    pub local: bool,
+    /// Optional secret used for authenticated supervisor calls (if required)
+    pub secret: Option<String>,
+    pub created_at: Timestamp,
+    pub updated_at: Timestamp,
+}
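Every model above derives Serialize/Deserialize with serde's default (externally tagged) enum representation, so unit variants such as the status enums serialize as bare strings. A small round-trip sketch, assuming the coordinator's lib target exposes models as the paths in main.rs suggest:

```rust
use hero_coordinator::models::JobStatus;

fn main() {
    // A unit variant serializes as a plain JSON string.
    let s = serde_json::to_string(&JobStatus::WaitingForPrerequisites).unwrap();
    assert_eq!(s, r#""WaitingForPrerequisites""#);

    // And deserializes back into the enum.
    let status: JobStatus = serde_json::from_str(&s).unwrap();
    assert_eq!(status, JobStatus::WaitingForPrerequisites);
}
```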
diff --git a/bin/coordinator/src/models/script_type.rs b/bin/coordinator/src/models/script_type.rs
new file mode 100644
index 0000000..a55ccca
--- /dev/null
+++ b/bin/coordinator/src/models/script_type.rs
@@ -0,0 +1,9 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub enum ScriptType {
+    Osis,
+    Sal,
+    V,
+    Python,
+}
diff --git a/bin/coordinator/src/router.rs b/bin/coordinator/src/router.rs
new file mode 100644
index 0000000..645a4ba
--- /dev/null
+++ b/bin/coordinator/src/router.rs
@@ -0,0 +1,972 @@
+use std::{
+    collections::{HashMap, HashSet},
+    sync::Arc,
+};
+
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
+use serde_json::{Value, json};
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use tokio::sync::{Mutex, Semaphore};
+
+use crate::{
+    clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
+    models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
+    service::AppService,
+};
+use tracing::{error, info};
+
+#[derive(Clone, Debug)]
+pub struct RouterConfig {
+    pub context_ids: Vec<u32>,
+    pub concurrency: usize,
+    pub base_url: String, // e.g. http://127.0.0.1:8990
+    pub topic: String,    // e.g. "supervisor.rpc"
+    pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
+    // Transport status polling configuration
+    pub transport_poll_interval_secs: u64, // e.g. 2
+    pub transport_poll_timeout_secs: u64,  // e.g. 300 (5 minutes)
+}
+
+/*
+SupervisorClient reuse cache (Router-local):
+
+Rationale:
+- SupervisorClient maintains an internal JSON-RPC id_counter per instance.
+- Rebuilding a client for each message resets this counter, causing inner JSON-RPC ids to restart at 1.
+- We reuse one SupervisorClient per (destination, topic, secret) to preserve monotonically increasing ids.
+
+Scope:
+- Cache is per Router loop (and a separate one for the inbound listener).
+- If cross-loop/process reuse becomes necessary later, promote to a process-global cache.
+
+Keying:
+- Key: destination + topic + secret-presence (secret content hashed; not stored in plaintext).
+
+Concurrency:
+- tokio::Mutex protects a HashMap<String, Arc<SupervisorClient>>.
+- Values are Arc<SupervisorClient> so call sites clone cheaply and share the same id_counter.
+*/
+#[derive(Clone)]
+struct SupervisorClientCache {
+    map: Arc<Mutex<HashMap<String, Arc<SupervisorClient>>>>,
+}
+
+impl SupervisorClientCache {
+    fn new() -> Self {
+        Self {
+            map: Arc::new(Mutex::new(HashMap::new())),
+        }
+    }
+
+    fn make_key(dest: &Destination, topic: &str, secret: &Option<String>) -> String {
+        let dst = match dest {
+            Destination::Ip(ip) => format!("ip:{ip}"),
+            Destination::Pk(pk) => format!("pk:{pk}"),
+        };
+        // Hash the secret to avoid storing plaintext in keys while still differentiating values
+        let sec_hash = match secret {
+            Some(s) if !s.is_empty() => {
+                let mut hasher = DefaultHasher::new();
+                s.hash(&mut hasher);
+                format!("s:{}", hasher.finish())
+            }
+            _ => "s:none".to_string(),
+        };
+        format!("{dst}|t:{topic}|{sec_hash}")
+    }
+
+    async fn get_or_create(
+        &self,
+        hub: Arc<SupervisorHub>,
+        dest: Destination,
+        topic: String,
+        secret: Option<String>,
+    ) -> Arc<SupervisorClient> {
+        let key = Self::make_key(&dest, &topic, &secret);
+
+        {
+            let guard = self.map.lock().await;
+            if let Some(existing) = guard.get(&key) {
+                tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup");
+                return existing.clone();
+            }
+        }
+
+        let mut guard = self.map.lock().await;
+        if let Some(existing) = guard.get(&key) {
+            tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
+            return existing.clone();
+        }
+        let client = Arc::new(SupervisorClient::new_with_hub(hub, dest, secret.clone()));
+        guard.insert(key, client.clone());
+        tracing::debug!(target: "router", cache="supervisor", hit=false, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache insert");
+        client
+    }
+}
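Illustrative only (make_key is private to this module, and all values here are invented), the key shape for a public-key destination with a secret set looks like this:

```rust
// Hypothetical call inside this module; the hash suffix is whatever
// DefaultHasher yields for the secret, never the secret itself.
let key = SupervisorClientCache::make_key(
    &Destination::Pk("02abc...".to_string()),
    "supervisor.rpc",
    &Some("runner-secret".to_string()),
);
// key == "pk:02abc...|t:supervisor.rpc|s:<DefaultHasher output>"
// Without a secret, the final segment is the literal "s:none".
```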
+/// Start background router loops, one per context.
+/// Each loop:
+/// - BRPOP msg_out with 1s timeout
+/// - Loads the Message by key, selects a Runner by script_type
+/// - Sends supervisor JSON-RPC via Mycelium
+/// - On success: Message.status = Acknowledged
+/// - On error: Message.status = Error and append a log
+pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::JoinHandle<()>> {
+    let mut handles = Vec::new();
+    for ctx_id in cfg.context_ids.clone() {
+        let service_cloned = service.clone();
+        let cfg_cloned = cfg.clone();
+        let handle = tokio::spawn(async move {
+            let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));
+
+            // Use the global SupervisorHub and its Mycelium client
+            let sup_hub = cfg_cloned.sup_hub.clone();
+            let mycelium = sup_hub.mycelium();
+
+            let cache = Arc::new(SupervisorClientCache::new());
+
+            loop {
+                // Pop next message key (blocking with timeout)
+                match service_cloned.brpop_msg_out(ctx_id, 1).await {
+                    Ok(Some(key)) => {
+                        let permit = {
+                            // acquire a concurrency permit (non-fair is fine)
+                            let sem = sem.clone();
+                            // if semaphore is exhausted, await until a slot becomes available
+                            match sem.acquire_owned().await {
+                                Ok(p) => p,
+                                Err(_) => {
+                                    // Semaphore closed; exit loop
+                                    break;
+                                }
+                            }
+                        };
+                        let service_task = service_cloned.clone();
+                        let cfg_task = cfg_cloned.clone();
+                        tokio::spawn({
+                            let mycelium = mycelium.clone();
+                            let cache = cache.clone();
+                            let sup_hub = sup_hub.clone();
+                            async move {
+                                // Ensure permit is dropped at end of task
+                                let _permit = permit;
+                                if let Err(e) = deliver_one(
+                                    &service_task,
+                                    &cfg_task,
+                                    ctx_id,
+                                    &key,
+                                    mycelium,
+                                    sup_hub,
+                                    cache.clone(),
+                                )
+                                .await
+                                {
+                                    error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
+                                }
+                            }
+                        });
+                    }
+                    Ok(None) => {
+                        // timeout: just tick
+                        continue;
+                    }
+                    Err(e) => {
+                        error!(context_id=ctx_id, error=%e, "BRPOP error");
+                        // small backoff to avoid busy-loop on persistent errors
+                        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
+                    }
+                }
+            }
+        });
+        handles.push(handle);
+    }
+    handles
+}
+
+async fn deliver_one(
+    service: &AppService,
+    cfg: &RouterConfig,
+    context_id: u32,
+    msg_key: &str,
+    mycelium: Arc<MyceliumClient>,
+    sup_hub: Arc<SupervisorHub>,
+    cache: Arc<SupervisorClientCache>,
+) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
+    // Parse "message:{caller_id}:{id}"
+    let (caller_id, id) = parse_message_key(msg_key)
+        .ok_or_else(|| format!("invalid message key format: {}", msg_key))?;
+
+    // Load message
+    let msg: Message = service.load_message(context_id, caller_id, id).await?;
+    // Embedded job id (if any)
+    let job_id_opt: Option<u32> = msg.job.first().map(|j| j.id);
+
+    // Determine routing script_type
+    let desired: ScriptType = determine_script_type(&msg);
+
+    // Discover runners and select a matching one
+    let runners = service.scan_runners(context_id).await?;
+    let Some(runner) = runners.into_iter().find(|r| r.script_type == desired) else {
+        let log = format!(
+            "No runner with script_type {:?} available in context {} for message {}",
+            desired, context_id, msg_key
+        );
+        let _ = service
+            .append_message_logs(context_id, caller_id, id, vec![log.clone()])
+            .await;
+        let _ = service
+            .update_message_status(context_id, caller_id, id, MessageStatus::Error)
+            .await;
+        return Err(log.into());
+    };
+
+    // Build SupervisorClient
+    let dest = if !runner.pubkey.trim().is_empty() {
+        Destination::Pk(runner.pubkey.clone())
+    } else {
+        Destination::Ip(runner.address)
+    };
+    // Keep clones for poller usage
+    let dest_for_poller = dest.clone();
+    let topic_for_poller = cfg.topic.clone();
+    let secret_for_poller = runner.secret.clone();
+    let client = cache
+        .get_or_create(
+            sup_hub.clone(),
+            dest.clone(),
+            cfg.topic.clone(),
+            runner.secret.clone(),
+        )
+        .await;
+
+    // Build supervisor method and params from Message
+    let method = msg.message.clone();
+    let params = build_params(&msg)?;
+
+    // Send
+    // If this is a job.run and we have a secret configured on the client,
+    // prefer the typed wrapper that injects the secret into inner supervisor params,
+    // and await the reply to capture job_queued immediately.
+    let (out_id, reply_opt) = if method == "job.run" {
+        if let Some(j) = msg.job.first() {
+            let jv = job_to_json(j)?;
+            // Returns (outbound message id, reply envelope)
+            let (out, reply) = client.job_run_wait(jv).await?;
+            (out, Some(reply))
+        } else {
+            // Fallback: no embedded job, use the generic call (await reply, discard)
+            let out = client.call(&method, params).await?;
+            (out, None)
+        }
+    } else {
+        let out = client.call(&method, params).await?;
+        (out, None)
+    };
+
+    // Store transport id and initial Sent status
+    let _ = service
+        .update_message_transport(
+            context_id,
+            caller_id,
+            id,
+            Some(out_id.clone()),
+            Some(TransportStatus::Sent),
+        )
+        .await;
+
+    // Mark as acknowledged on success
+    service
+        .update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
+        .await?;
+
+    // If we got a job.run reply, interpret job_queued immediately
+    if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
+        let result_opt = reply.get("result");
+        let error_opt = reply.get("error");
+
+        // Handle job.run success (job_queued)
+        let is_job_queued = result_opt
+            .and_then(|res| {
+                if res.get("job_queued").is_some() {
+                    Some(true)
+                } else if let Some(s) = res.as_str() {
+                    Some(s == "job_queued")
+                } else {
+                    None
+                }
+            })
+            .unwrap_or(false);
+
+        if is_job_queued {
+            let _ = service
+                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
+                .await;
+            let _ = service
+                .append_message_logs(
+                    context_id,
+                    caller_id,
+                    id,
+                    vec![format!(
+                        "Supervisor reply for job {}: job_queued (processed synchronously)",
+                        job_id
+                    )],
+                )
+                .await;
+        } else if let Some(err_obj) = error_opt {
+            let _ = service
+                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
+                .await;
+            let _ = service
+                .append_message_logs(
+                    context_id,
+                    caller_id,
+                    id,
+                    vec![format!(
+                        "Supervisor error for job {}: {} (processed synchronously)",
+                        job_id, err_obj
+                    )],
+                )
+                .await;
+        }
+    }
+
+    // No correlation map needed; replies are handled synchronously via SupervisorHub
+
+    // Spawn transport-status poller
+    {
+        let service_poll = service.clone();
+        let poll_interval = std::time::Duration::from_secs(cfg.transport_poll_interval_secs);
+        let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
+        let out_id_cloned = out_id.clone();
+        let mycelium = mycelium.clone();
+
+        tokio::spawn(async move {
+            let start = std::time::Instant::now();
+            let client = mycelium;
+
+            // Supervisor call context captured for sync status checks
+            let sup_dest = dest_for_poller;
+            let sup_topic = topic_for_poller;
+            let job_id_opt = job_id_opt;
+
+            let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
+            // Ensure we only request supervisor job.status or job.result once per outbound message
+            let mut requested_job_check: bool = false;
+
+            loop {
+                if start.elapsed() >= poll_timeout {
+                    let _ = service_poll
+                        .append_message_logs(
+                            context_id,
+                            caller_id,
+                            id,
+                            vec!["Transport-status polling timed out".to_string()],
+                        )
+                        .await;
+                    // leave last known
status; do not override + break; + } + + match client.message_status(&out_id_cloned).await { + Ok(s) => { + if last_status.as_ref() != Some(&s) { + let _ = service_poll + .update_message_transport( + context_id, + caller_id, + id, + None, + Some(s.clone()), + ) + .await; + last_status = Some(s.clone()); + } + + // Stop on terminal states + if matches!(s, TransportStatus::Delivered | TransportStatus::Read) { + if let Some(job_id) = job_id_opt { + // First consult Redis for the latest job state in case we already have a terminal update + match service_poll.load_job(context_id, caller_id, job_id).await { + Ok(job) => { + // Promote to Started as soon as transport is delivered/read, + // if currently Dispatched or WaitingForPrerequisites. + // This makes DAG.started reflect "in-flight" work even when jobs + // complete too quickly to observe an intermediate supervisor "running" status. + if matches!( + job.status(), + JobStatus::Dispatched + | JobStatus::WaitingForPrerequisites + ) { + let _ = service_poll + .update_job_status_unchecked( + context_id, + caller_id, + job_id, + JobStatus::Started, + ) + .await; + } + match job.status() { + JobStatus::Finished | JobStatus::Error => { + // Local job is already terminal; skip supervisor job.status + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Local job {} status is terminal ({:?}); skipping supervisor job.status", + job_id, + job.status() + )], + ) + .await; + + // If result is still empty, immediately request supervisor job.result + if job.result.is_empty() { + let sup = cache + .get_or_create( + sup_hub.clone(), + sup_dest.clone(), + sup_topic.clone(), + secret_for_poller.clone(), + ) + .await; + match sup + .job_result_wait(job_id.to_string()) + .await + { + Ok((_out2, reply2)) => { + // Interpret reply synchronously: success/error/bare string + let res = reply2.get("result"); + if let Some(obj) = + res.and_then(|v| v.as_object()) + { + if let Some(s) = obj + .get("success") + .and_then(|v| v.as_str()) + { + let mut patch = std::collections::HashMap::new(); + patch.insert( + "success".to_string(), + s.to_string(), + ); + let _ = service_poll + .update_job_result_merge_unchecked( + context_id, caller_id, job_id, patch, + ) + .await; + let _ = service_poll + .update_message_status( + context_id, + caller_id, + id, + MessageStatus::Processed, + ) + .await; + // Also mark job as Finished so the flow can progress (ignore invalid transitions) + let _ = service_poll + .update_job_status_unchecked( + context_id, caller_id, job_id, JobStatus::Finished, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Updated job {} status to Finished (sync)", job_id + )], + ) + .await; + // Existing log about storing result + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Stored supervisor job.result for job {} (success, sync)", + job_id + )], + ) + .await; + } else if let Some(s) = obj + .get("error") + .and_then(|v| v.as_str()) + { + let mut patch = std::collections::HashMap::new(); + patch.insert( + "error".to_string(), + s.to_string(), + ); + let _ = service_poll + .update_job_result_merge_unchecked( + context_id, caller_id, job_id, patch, + ) + .await; + let _ = service_poll + .update_message_status( + context_id, + caller_id, + id, + MessageStatus::Processed, + ) + .await; + // Also mark job as Error so the flow can handle failure (ignore invalid transitions) + let _ = service_poll + 
.update_job_status_unchecked( + context_id, caller_id, job_id, JobStatus::Error, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Updated job {} status to Error (sync)", job_id + )], + ) + .await; + // Existing log about storing result + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Stored supervisor job.result for job {} (error, sync)", + job_id + )], + ) + .await; + } + } else if let Some(s) = + res.and_then(|v| v.as_str()) + { + let mut patch = + std::collections::HashMap::new( + ); + patch.insert( + "success".to_string(), + s.to_string(), + ); + let _ = service_poll + .update_job_result_merge_unchecked( + context_id, caller_id, job_id, patch, + ) + .await; + let _ = service_poll + .update_message_status( + context_id, + caller_id, + id, + MessageStatus::Processed, + ) + .await; + // Also mark job as Finished so the flow can progress (ignore invalid transitions) + let _ = service_poll + .update_job_status_unchecked( + context_id, + caller_id, + job_id, + JobStatus::Finished, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Updated job {} status to Finished (sync)", job_id + )], + ) + .await; + // Existing log about storing result + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Stored supervisor job.result for job {} (success, sync)", + job_id + )], + ) + .await; + } else { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec!["Supervisor job.result reply missing recognizable fields".to_string()], + ) + .await; + } + } + Err(e) => { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "job.result request error for job {}: {}", + job_id, e + )], + ) + .await; + } + } + } else { + // Result already present; nothing to fetch + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Job {} already has result; no supervisor calls needed", + job_id + )], + ) + .await; + } + + // Mark processed and stop polling for this message + let _ = service_poll + .update_message_status( + context_id, + caller_id, + id, + MessageStatus::Processed, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Terminal job {} detected; stopping transport polling", + job_id + )], + ) + .await; + break; + } + // Not terminal yet -> request supervisor job.status as before + _ => { + let sup = cache + .get_or_create( + sup_hub.clone(), + sup_dest.clone(), + sup_topic.clone(), + secret_for_poller.clone(), + ) + .await; + match sup.job_status_wait(job_id.to_string()).await + { + Ok((_out_id, reply_status)) => { + // Interpret status reply synchronously + let result_opt = reply_status.get("result"); + let error_opt = reply_status.get("error"); + if let Some(err_obj) = error_opt { + let _ = service_poll + .update_job_status_unchecked( + context_id, + caller_id, + job_id, + JobStatus::Error, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, caller_id, id, + vec![format!( + "Supervisor error for job {}: {} (sync)", + job_id, err_obj + )], + ) + .await; + } else if let Some(res) = result_opt { + let status_candidate = res + .get("status") + .and_then(|v| v.as_str()) + .or_else(|| res.as_str()); + if let Some(remote_status) = + status_candidate + { + if let Some((mapped, terminal)) = + 
map_supervisor_job_status( + remote_status, + ) + { + let _ = service_poll + .update_job_status_unchecked( + context_id, caller_id, job_id, mapped.clone(), + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, caller_id, id, + vec![format!( + "Supervisor job.status for job {} -> {} (mapped to {:?}, sync)", + job_id, remote_status, mapped + )], + ) + .await; + + // If terminal, request job.result now (handled above for local terminal case) + if terminal { + // trigger job.result only if result empty to avoid spam + if let Ok(j_after) = + service_poll + .load_job( + context_id, + caller_id, + job_id, + ) + .await + { + if j_after + .result + .is_empty() + { + let sup2 = cache + .get_or_create( + sup_hub.clone(), + sup_dest.clone(), + sup_topic.clone(), + secret_for_poller.clone(), + ) + .await; + let _ = sup2.job_result_wait(job_id.to_string()).await + .and_then(|(_oid, reply2)| { + // Minimal parse and store + let res2 = reply2.get("result"); + if let Some(obj) = res2.and_then(|v| v.as_object()) { + if let Some(s) = obj.get("success").and_then(|v| v.as_str()) { + let mut patch = std::collections::HashMap::new(); + patch.insert("success".to_string(), s.to_string()); + tokio::spawn({ + let service_poll = service_poll.clone(); + async move { + let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await; + } + }); + } + } + Ok((String::new(), Value::Null)) + }); + } + } + + // Mark processed and stop polling for this message + let _ = service_poll + .update_message_status( + context_id, + caller_id, + id, + MessageStatus::Processed, + ) + .await; + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Terminal job {} detected from supervisor status; stopping transport polling", + job_id + )], + ) + .await; + break; + } + } + } + } + } + Err(e) => { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "job.status request error: {}", + e + )], + ) + .await; + } + } + } + } + } + // If we cannot load the job, fall back to requesting job.status + Err(_) => { + let sup = cache + .get_or_create( + sup_hub.clone(), + sup_dest.clone(), + sup_topic.clone(), + secret_for_poller.clone(), + ) + .await; + match sup.job_status_wait(job_id.to_string()).await { + Ok((_out_id, _reply_status)) => { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Requested supervisor job.status for job {} (fallback; load_job failed, sync)", + job_id + )], + ) + .await; + } + Err(e) => { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "job.status request error: {}", + e + )], + ) + .await; + } + } + } + } + // Ensure we only do this once + requested_job_check = true; + } + // break; + } + if matches!(s, TransportStatus::Failed) { + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!( + "Transport failed for outbound id {out_id_cloned}" + )], + ) + .await; + break; + } + } + Err(e) => { + // Log and continue polling + let _ = service_poll + .append_message_logs( + context_id, + caller_id, + id, + vec![format!("messageStatus query error: {e}")], + ) + .await; + } + } + + tokio::time::sleep(poll_interval).await; + } + }); + } + + Ok(()) +} + +fn determine_script_type(msg: &Message) -> ScriptType { + // Prefer embedded job's script_type if available, else fallback to message.message_type + match msg.job.first() { + Some(j) => j.script_type.clone(), + 
None => msg.message_type.clone(),
+    }
+}
+
+fn build_params(msg: &Message) -> Result<Value, Box<dyn std::error::Error + Send + Sync>> {
+    // Minimal mapping:
+    // - "job.run" with exactly one embedded job: [{ "job": <job> }]
+    // - otherwise: []
+    if msg.message == "job.run"
+        && let Some(j) = msg.job.first()
+    {
+        let jv = job_to_json(j)?;
+        return Ok(json!([ { "job": jv } ]));
+    }
+
+    Ok(json!([]))
+}
+
+fn job_to_json(job: &Job) -> Result<Value, Box<dyn std::error::Error + Send + Sync>> {
+    Ok(serde_json::to_value(job)?)
+}
+
+fn parse_message_key(s: &str) -> Option<(u32, u32)> {
+    // Expect "message:{caller_id}:{id}"
+    let mut it = s.split(':');
+    match (it.next(), it.next(), it.next(), it.next()) {
+        (Some("message"), Some(caller), Some(id), None) => {
+            let caller_id = caller.parse::<u32>().ok()?;
+            let msg_id = id.parse::<u32>().ok()?;
+            Some((caller_id, msg_id))
+        }
+        _ => None,
+    }
+}
+
+/// Map supervisor job.status -> (local JobStatus, terminal)
+fn map_supervisor_job_status(s: &str) -> Option<(JobStatus, bool)> {
+    match s {
+        "created" | "queued" => Some((JobStatus::Dispatched, false)),
+        "running" => Some((JobStatus::Started, false)),
+        "completed" => Some((JobStatus::Finished, true)),
+        "failed" | "timeout" => Some((JobStatus::Error, true)),
+        _ => None,
+    }
+}
+
+/// Auto-discover contexts periodically and ensure a router loop exists for each.
+/// Returns a JoinHandle of the discovery task (router loops are detached).
+pub fn start_router_auto(service: AppService, cfg: RouterConfig) -> tokio::task::JoinHandle<()> {
+    tokio::spawn(async move {
+        let mut active: HashSet<u32> = HashSet::new();
+        loop {
+            match service.list_context_ids().await {
+                Ok(ids) => {
+                    for ctx_id in ids {
+                        if !active.contains(&ctx_id) {
+                            // Spawn a loop for this new context
+                            let cfg_ctx = RouterConfig {
+                                context_ids: vec![ctx_id],
+                                ..cfg.clone()
+                            };
+                            let _ = start_router(service.clone(), cfg_ctx);
+                            active.insert(ctx_id);
+                            info!(context_id = ctx_id, "Started loop for context");
+                        }
+                    }
+                }
+                Err(e) => {
+                    error!(error=%e, "list_context_ids error");
+                }
+            }
+            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
+        }
+    })
+}
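The two pure helpers above (parse_message_key and map_supervisor_job_status) are easy to pin down with table-style tests; a sketch that would live in this module:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn message_key_parsing() {
        assert_eq!(parse_message_key("message:7:42"), Some((7, 42)));
        assert_eq!(parse_message_key("message:7"), None); // too few segments
        assert_eq!(parse_message_key("message:7:42:x"), None); // too many
        assert_eq!(parse_message_key("job:7:42"), None); // wrong prefix
    }

    #[test]
    fn supervisor_status_mapping() {
        assert_eq!(map_supervisor_job_status("queued"), Some((JobStatus::Dispatched, false)));
        assert_eq!(map_supervisor_job_status("running"), Some((JobStatus::Started, false)));
        assert_eq!(map_supervisor_job_status("failed"), Some((JobStatus::Error, true)));
        assert_eq!(map_supervisor_job_status("weird"), None);
    }
}
```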
diff --git a/bin/coordinator/src/rpc.rs b/bin/coordinator/src/rpc.rs
new file mode 100644
index 0000000..9ea4dfe
--- /dev/null
+++ b/bin/coordinator/src/rpc.rs
@@ -0,0 +1,676 @@
+use std::{
+    collections::HashMap,
+    net::{IpAddr, SocketAddr},
+    sync::Arc,
+};
+
+use jsonrpsee::{
+    RpcModule,
+    server::{ServerBuilder, ServerHandle},
+    types::error::ErrorObjectOwned,
+};
+use serde::Deserialize;
+use serde_json::{Value, json};
+
+use crate::{
+    dag::{DagError, FlowDag},
+    models::{
+        Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageFormatType,
+        MessageStatus, Runner, ScriptType,
+    },
+    service::AppService,
+    time::current_timestamp,
+};
+
+/// The OpenRPC specification for the HeroCoordinator JSON-RPC API
+const OPENRPC_SPEC: &str = include_str!("../specs/openrpc.json");
+
+pub struct AppState {
+    pub service: AppService,
+}
+
+impl AppState {
+    pub fn new(service: AppService) -> Self {
+        Self { service }
+    }
+}
+
+// -----------------------------
+// Error helpers
+// -----------------------------
+
+fn invalid_params_err<E: ToString>(e: E) -> ErrorObjectOwned {
+    ErrorObjectOwned::owned(-32602, "Invalid params", Some(Value::String(e.to_string())))
+}
+
+fn storage_err(e: Box<dyn std::error::Error + Send + Sync>) -> ErrorObjectOwned {
+    let msg = e.to_string();
+    if msg.contains("Key not found") {
+        ErrorObjectOwned::owned(-32001, "Not Found", Some(Value::String(msg)))
+    } else {
+        ErrorObjectOwned::owned(-32010, "Storage Error", Some(Value::String(msg)))
+    }
+}
+
+fn dag_err(e: DagError) -> ErrorObjectOwned {
+    match e {
+        DagError::Storage(inner) => storage_err(inner),
+        DagError::MissingDependency { .. } => ErrorObjectOwned::owned(
+            -32020,
+            "DAG Missing Dependency",
+            Some(Value::String(e.to_string())),
+        ),
+        DagError::CycleDetected { .. } => ErrorObjectOwned::owned(
+            -32021,
+            "DAG Cycle Detected",
+            Some(Value::String(e.to_string())),
+        ),
+        DagError::UnknownJob { .. } => ErrorObjectOwned::owned(
+            -32022,
+            "DAG Unknown Job",
+            Some(Value::String(e.to_string())),
+        ),
+        DagError::DependenciesIncomplete { .. } => ErrorObjectOwned::owned(
+            -32023,
+            "DAG Dependencies Incomplete",
+            Some(Value::String(e.to_string())),
+        ),
+        DagError::FlowFailed { .. } => ErrorObjectOwned::owned(
+            -32024,
+            "DAG Flow Failed",
+            Some(Value::String(e.to_string())),
+        ),
+        DagError::JobNotStarted { .. } => ErrorObjectOwned::owned(
+            -32025,
+            "DAG Job Not Started",
+            Some(Value::String(e.to_string())),
+        ),
+    }
+}
+
+// -----------------------------
+// Create DTOs and Param wrappers
+// -----------------------------
+
+#[derive(Debug, Deserialize)]
+pub struct ActorCreate {
+    pub id: u32,
+    pub pubkey: String,
+    pub address: Vec<IpAddr>,
+}
+impl ActorCreate {
+    pub fn into_domain(self) -> Result<Actor, String> {
+        let ts = current_timestamp();
+        let v = json!({
+            "id": self.id,
+            "pubkey": self.pubkey,
+            "address": self.address,
+            "created_at": ts,
+            "updated_at": ts,
+        });
+        serde_json::from_value(v).map_err(|e| e.to_string())
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ContextCreate {
+    pub id: u32,
+    pub admins: Vec<u32>,
+    pub readers: Vec<u32>,
+    pub executors: Vec<u32>,
+}
+impl ContextCreate {
+    pub fn into_domain(self) -> Context {
+        let ts = current_timestamp();
+
+        let ContextCreate {
+            id,
+            admins,
+            readers,
+            executors,
+        } = self;
+
+        Context {
+            id,
+            admins,
+            readers,
+            executors,
+            created_at: ts,
+            updated_at: ts,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct RunnerCreate {
+    pub id: u32,
+    pub pubkey: String,
+    pub address: IpAddr,
+    pub topic: String,
+    /// The script type this runner executes (used for routing)
+    pub script_type: ScriptType,
+    pub local: bool,
+    /// Optional secret used for authenticated supervisor calls (if required)
+    pub secret: Option<String>,
+}
+impl RunnerCreate {
+    pub fn into_domain(self) -> Runner {
+        let ts = current_timestamp();
+
+        let RunnerCreate {
+            id,
+            pubkey,
+            address,
+            topic,
+            script_type,
+            local,
+            secret,
+        } = self;
+
+        Runner {
+            id,
+            pubkey,
+            address,
+            topic,
+            script_type,
+            local,
+            secret,
+            created_at: ts,
+            updated_at: ts,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct FlowCreate {
+    pub id: u32,
+    pub caller_id: u32,
+    pub context_id: u32,
+    pub jobs: Vec<u32>,
+    pub env_vars: HashMap<String, String>,
+}
+
+impl FlowCreate {
+    pub fn into_domain(self) -> Flow {
+        let ts = current_timestamp();
+
+        let FlowCreate {
+            id,
+            caller_id,
+            context_id,
+            jobs,
+            env_vars,
+        } = self;
+
+        Flow {
+            id,
+            caller_id,
+            context_id,
+            jobs,
+            env_vars,
+            result: HashMap::new(),
+            created_at: ts,
+            updated_at: ts,
+            status: FlowStatus::Created,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct JobCreate {
+    pub id: u32,
+    pub caller_id: u32,
+    pub context_id: u32,
+    pub script: String,
+    pub script_type: ScriptType,
+    pub timeout: u32,
+    pub retries: u8,
+    pub env_vars: HashMap<String, String>,
+    pub prerequisites: Vec<String>,
+    pub depends: Vec<u32>,
+}
+
+impl JobCreate {
+    pub fn into_domain(self) -> Job {
+        let ts = current_timestamp();
+
+        let JobCreate {
+            id,
+            caller_id,
+            context_id,
+            script,
+            script_type,
+            timeout,
+            retries,
+            env_vars,
+            prerequisites,
+            depends,
+        } = self;
+
+        Job {
+            id,
+            caller_id,
+            context_id,
+            script,
+            script_type,
+            timeout,
+            retries,
+            env_vars,
+            result: HashMap::new(),
+            prerequisites,
+            depends,
+            created_at: ts,
+            updated_at: ts,
+            status: JobStatus::WaitingForPrerequisites,
+        }
+    }
+}
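As a concrete illustration, a hypothetical job.create request against the HTTP listener, using named (object) params so jsonrpsee can deserialize straight into the JobCreateParams wrapper defined further down (port and field values invented; status and timestamps are filled in server-side by into_domain):

```bash
curl -s -X POST http://127.0.0.1:9652 -H 'content-type: application/json' -d '{
  "jsonrpc": "2.0", "id": 1, "method": "job.create",
  "params": {
    "context_id": 1,
    "job": {
      "id": 10, "caller_id": 1, "context_id": 1,
      "script": "print(\"hello\")", "script_type": "Python",
      "timeout": 60, "retries": 0,
      "env_vars": {}, "prerequisites": [], "depends": []
    }
  }
}'
```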
+
+#[derive(Debug, Deserialize)]
+pub struct MessageCreate {
+    pub id: u32,
+    pub caller_id: u32,
+    pub context_id: u32,
+    pub message: String,
+    pub message_type: ScriptType,
+    pub message_format_type: MessageFormatType,
+    pub timeout: u32,
+    pub timeout_ack: u32,
+    pub timeout_result: u32,
+    pub job: Vec<JobCreate>,
+}
+impl MessageCreate {
+    pub fn into_domain(self) -> Message {
+        let ts = current_timestamp();
+
+        let MessageCreate {
+            id,
+            caller_id,
+            context_id,
+            message,
+            message_type,
+            message_format_type,
+            timeout,
+            timeout_ack,
+            timeout_result,
+            job,
+        } = self;
+
+        Message {
+            id,
+            caller_id,
+            context_id,
+            message,
+            message_type,
+            message_format_type,
+            timeout,
+            timeout_ack,
+            timeout_result,
+            transport_id: None,
+            transport_status: None,
+            job: job.into_iter().map(JobCreate::into_domain).collect(),
+            logs: Vec::new(),
+            created_at: ts,
+            updated_at: ts,
+            status: MessageStatus::Dispatched,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ActorCreateParams {
+    pub actor: ActorCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct ActorLoadParams {
+    pub id: u32,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ContextCreateParams {
+    pub context: ContextCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct ContextLoadParams {
+    pub id: u32,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct RunnerCreateParams {
+    pub context_id: u32,
+    pub runner: RunnerCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct RunnerLoadParams {
+    pub context_id: u32,
+    pub id: u32,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct FlowCreateParams {
+    pub context_id: u32,
+    pub flow: FlowCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct FlowLoadParams {
+    pub context_id: u32,
+    pub id: u32,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct JobCreateParams {
+    pub context_id: u32,
+    pub job: JobCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct JobLoadParams {
+    pub context_id: u32,
+    pub caller_id: u32,
+    pub id: u32,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct MessageCreateParams {
+    pub context_id: u32,
+    pub message: MessageCreate,
+}
+#[derive(Debug, Deserialize)]
+pub struct MessageLoadParams {
+    pub context_id: u32,
+    pub caller_id: u32,
+    pub id: u32,
+}
+
+// -----------------------------
+// Rpc module builder (manual registration)
+// -----------------------------
+
+pub fn build_module(state: Arc<AppState>) -> RpcModule<()> {
+    let mut module: RpcModule<()> = RpcModule::new(());
+
+    // Actor
+    {
+        let state = state.clone();
+        module
+            .register_async_method("actor.create", move |params, _caller, _ctx| {
+                let state = state.clone();
+                async move {
+                    let p: ActorCreateParams = params.parse().map_err(invalid_params_err)?;
+                    let actor = p.actor.into_domain().map_err(invalid_params_err)?;
+                    let actor = state
+                        .service
+                        .create_actor(actor)
+                        .await
+                        .map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(actor)
+                }
+            })
+            .expect("register actor.create");
+    }
+    {
+        let state = state.clone();
+        module
+            .register_async_method("actor.load", move |params, _caller, _ctx| {
+                let state = state.clone();
+                async move {
+                    let p: ActorLoadParams = params.parse().map_err(invalid_params_err)?;
+                    let actor = state.service.load_actor(p.id).await.map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(actor)
+                }
+            })
+            .expect("register actor.load");
+    }
+
+    // Context
+    {
let state = state.clone(); + module + .register_async_method("context.create", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: ContextCreateParams = params.parse().map_err(invalid_params_err)?; + let ctx = p.context.into_domain(); + let ctx = state + .service + .create_context(ctx) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(ctx) + } + }) + .expect("register context.create"); + } + { + let state = state.clone(); + module + .register_async_method("context.load", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: ContextLoadParams = params.parse().map_err(invalid_params_err)?; + let ctx = state + .service + .load_context(p.id) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(ctx) + } + }) + .expect("register context.load"); + } + + // Runner + { + let state = state.clone(); + module + .register_async_method("runner.create", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: RunnerCreateParams = params.parse().map_err(invalid_params_err)?; + let runner = p.runner.into_domain(); + let runner = state + .service + .create_runner(p.context_id, runner) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(runner) + } + }) + .expect("register runner.create"); + } + { + let state = state.clone(); + module + .register_async_method("runner.load", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: RunnerLoadParams = params.parse().map_err(invalid_params_err)?; + let runner = state + .service + .load_runner(p.context_id, p.id) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(runner) + } + }) + .expect("register runner.load"); + } + + // Flow + { + let state = state.clone(); + module + .register_async_method("flow.create", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: FlowCreateParams = params.parse().map_err(invalid_params_err)?; + let flow = p.flow.into_domain(); + let flow = state + .service + .create_flow(p.context_id, flow) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(flow) + } + }) + .expect("register flow.create"); + } + { + let state = state.clone(); + module + .register_async_method("flow.load", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?; + let flow = state + .service + .load_flow(p.context_id, p.id) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(flow) + } + }) + .expect("register flow.load"); + } + { + let state = state.clone(); + module + .register_async_method("flow.dag", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?; + let dag: FlowDag = state + .service + .flow_dag(p.context_id, p.id) + .await + .map_err(dag_err)?; + Ok::<_, ErrorObjectOwned>(dag) + } + }) + .expect("register flow.dag"); + } + { + let state = state.clone(); + module + .register_async_method("flow.start", move |params, _caller, _ctx| { + let state = state.clone(); + async move { + let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?; + let started: bool = state + .service + .flow_start(p.context_id, p.id) + .await + .map_err(storage_err)?; + Ok::<_, ErrorObjectOwned>(started) + } + }) + .expect("register flow.start"); + } + + // Job + { + let state = state.clone(); + module + .register_async_method("job.create", move |params, _caller, _ctx| { + let state 
= state.clone();
+                async move {
+                    let p: JobCreateParams = params.parse().map_err(invalid_params_err)?;
+                    let job = p.job.into_domain();
+                    let job = state
+                        .service
+                        .create_job(p.context_id, job)
+                        .await
+                        .map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(job)
+                }
+            })
+            .expect("register job.create");
+    }
+    {
+        let state = state.clone();
+        module
+            .register_async_method("job.load", move |params, _caller, _ctx| {
+                let state = state.clone();
+                async move {
+                    let p: JobLoadParams = params.parse().map_err(invalid_params_err)?;
+                    let job = state
+                        .service
+                        .load_job(p.context_id, p.caller_id, p.id)
+                        .await
+                        .map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(job)
+                }
+            })
+            .expect("register job.load");
+    }
+
+    // Message
+    {
+        let state = state.clone();
+        module
+            .register_async_method("message.create", move |params, _caller, _ctx| {
+                let state = state.clone();
+                async move {
+                    let p: MessageCreateParams = params.parse().map_err(invalid_params_err)?;
+                    let message = p.message.into_domain();
+                    let message = state
+                        .service
+                        .create_message(p.context_id, message)
+                        .await
+                        .map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(message)
+                }
+            })
+            .expect("register message.create");
+    }
+    {
+        let state = state;
+        module
+            .register_async_method("message.load", move |params, _caller, _ctx| {
+                let state = state.clone();
+                async move {
+                    let p: MessageLoadParams = params.parse().map_err(invalid_params_err)?;
+                    let msg = state
+                        .service
+                        .load_message(p.context_id, p.caller_id, p.id)
+                        .await
+                        .map_err(storage_err)?;
+                    Ok::<_, ErrorObjectOwned>(msg)
+                }
+            })
+            .expect("register message.load");
+    }
+    {
+        module
+            .register_async_method("rpc.discover", move |_params, _caller, _ctx| async move {
+                let spec = serde_json::from_str::<Value>(OPENRPC_SPEC)
+                    .expect("Failed to parse OpenRPC spec");
+                Ok::<_, ErrorObjectOwned>(spec)
+            })
+            .expect("register rpc.discover");
+    }
+
+    module
+}
+
+// -----------------------------
+// Server runners (HTTP/WS on separate listeners)
+// -----------------------------
+
+pub async fn start_http(
+    addr: SocketAddr,
+    module: RpcModule<()>,
+) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
+    let server = ServerBuilder::default().build(addr).await?;
+    let handle = server.start(module);
+    Ok(handle)
+}
+
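Because the same RpcModule is built twice in main.rs and served on both listeners, the method set is identical on either port; e.g. (addresses are the defaults from the Cli struct):

```bash
# HTTP listener
curl -s -X POST http://127.0.0.1:9652 -H 'content-type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"rpc.discover","params":[]}'
# The WS listener at ws://127.0.0.1:9653 serves the same module.
```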
+pub async fn start_ws(
+    addr: SocketAddr,
+    module: RpcModule<()>,
+) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
+    // jsonrpsee server supports both HTTP and WS; using a second listener gives us a dedicated WS port.
+    let server = ServerBuilder::default().build(addr).await?;
+    let handle = server.start(module);
+    Ok(handle)
+}
diff --git a/bin/coordinator/src/service.rs b/bin/coordinator/src/service.rs
new file mode 100644
index 0000000..c2043a8
--- /dev/null
+++ b/bin/coordinator/src/service.rs
@@ -0,0 +1,1211 @@
+use crate::dag::{DagError, DagResult, FlowDag, build_flow_dag};
+use crate::models::{
+    Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageFormatType, MessageStatus,
+    Runner, TransportStatus,
+};
+use crate::storage::RedisDriver;
+
+use serde::Serialize;
+use serde_json::Value;
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
+use tokio::sync::Mutex;
+use tokio::time::{Duration, sleep};
+
+pub type BoxError = Box<dyn std::error::Error + Send + Sync>;
+
+#[derive(Debug)]
+struct InvalidJobStatusTransition {
+    from: JobStatus,
+    to: JobStatus,
+}
+
+impl std::fmt::Display for InvalidJobStatusTransition {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Invalid job status transition: {:?} -> {:?}",
+            self.from, self.to
+        )
+    }
+}
+impl std::error::Error for InvalidJobStatusTransition {}
+
+#[derive(Debug)]
+struct ValidationError {
+    msg: String,
+}
+impl ValidationError {
+    fn new(msg: impl Into<String>) -> Self {
+        Self { msg: msg.into() }
+    }
+}
+impl std::fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Validation error: {}", self.msg)
+    }
+}
+impl std::error::Error for ValidationError {}
+
+#[derive(Debug)]
+struct PermissionDeniedError {
+    actor_id: u32,
+    context_id: u32,
+    action: String,
+}
+impl std::fmt::Display for PermissionDeniedError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Permission denied: actor {} cannot {} in context {}",
+            self.actor_id, self.action, self.context_id
+        )
+    }
+}
+impl std::error::Error for PermissionDeniedError {}
+
+#[derive(Debug)]
+struct AlreadyExistsError {
+    key: String,
+}
+impl std::fmt::Display for AlreadyExistsError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Already exists: {}", self.key)
+    }
+}
+impl std::error::Error for AlreadyExistsError {}
+
+// -----------------------------
+// Internal helpers
+// -----------------------------
+
+fn as_json(model: &impl Serialize) -> Result<Value, BoxError> {
+    Ok(serde_json::to_value(model)?)
+}
+} + +fn json_get_u32(v: &Value, key: &str) -> Result { + v.get(key) + .and_then(|v| v.as_u64()) + .map(|x| x as u32) + .ok_or_else(|| { + ValidationError::new(format!("missing or invalid u32 field '{}'", key)).into() + }) +} + +fn json_get_str(v: &Value, key: &str) -> Result { + v.get(key) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .ok_or_else(|| { + ValidationError::new(format!("missing or invalid string field '{}'", key)).into() + }) +} + +fn json_get_array(v: &Value, key: &str) -> Result, BoxError> { + let arr = v + .get(key) + .and_then(|v| v.as_array()) + .ok_or_else(|| ValidationError::new(format!("missing or invalid array field '{}'", key)))?; + Ok(arr.clone()) +} + +fn contains_key_not_found(e: &BoxError) -> bool { + e.to_string().contains("Key not found") +} + +fn has_duplicate_u32s(list: &Vec) -> bool { + let mut seen = std::collections::HashSet::new(); + for it in list { + if let Some(x) = it.as_u64() { + if !seen.insert(x) { + return true; + } + } + } + false +} + +fn vec_u32_contains(list: &[Value], val: u32) -> bool { + list.iter().any(|v| v.as_u64() == Some(val as u64)) +} + +// role = "admins" | "executors" | "readers" +fn context_has_role(ctx: &Context, role: &str, actor_id: u32) -> Result { + let v = as_json(ctx)?; + let arr = v + .get(role) + .and_then(|r| r.as_array()) + .ok_or_else(|| ValidationError::new(format!("Context.{} missing or invalid", role)))?; + Ok(arr.iter().any(|x| x.as_u64() == Some(actor_id as u64))) +} + +// ----------------------------- +// Validation helpers (minimal, spec-aligned) +// ----------------------------- + +fn validate_context(ctx: &Context) -> Result<(), BoxError> { + let v = as_json(ctx)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Context.id must be > 0").into()); + } + // admins required + let admins = json_get_array(&v, "admins")?; + if admins.is_empty() { + return Err(ValidationError::new("Context.admins must not be empty").into()); + } + Ok(()) +} + +fn validate_actor(actor: &Actor) -> Result<(), BoxError> { + let v = as_json(actor)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Actor.id must be > 0").into()); + } + let pubkey = json_get_str(&v, "pubkey")?; + if pubkey.trim().is_empty() { + return Err(ValidationError::new("Actor.pubkey must not be empty").into()); + } + let addr = json_get_array(&v, "address")?; + if addr.is_empty() { + return Err(ValidationError::new("Actor.address must not be empty").into()); + } + Ok(()) +} + +fn validate_runner(_context_id: u32, runner: &Runner) -> Result<(), BoxError> { + let v = as_json(runner)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Runner.id must be > 0").into()); + } + let pubkey = json_get_str(&v, "pubkey")?; + if pubkey.trim().is_empty() { + return Err(ValidationError::new("Runner.pubkey must not be empty").into()); + } + let topic = json_get_str(&v, "topic")?; + if topic.trim().is_empty() { + return Err(ValidationError::new("Runner.topic must not be empty").into()); + } + // address presence is ensured by serde typing; no additional validation here + Ok(()) +} + +fn validate_flow(context_id: u32, flow: &Flow) -> Result<(), BoxError> { + let v = as_json(flow)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Flow.id must be > 0").into()); + } + let ctx = json_get_u32(&v, "context_id")?; + if ctx != context_id { + return Err(ValidationError::new(format!( + "Flow.context_id ({}) does not match path context_id 
({})", + ctx, context_id + )) + .into()); + } + let jobs = json_get_array(&v, "jobs")?; + if has_duplicate_u32s(&jobs) { + return Err(ValidationError::new("Flow.jobs must not contain duplicates").into()); + } + Ok(()) +} + +fn validate_job(context_id: u32, job: &Job) -> Result<(), BoxError> { + let v = as_json(job)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Job.id must be > 0").into()); + } + let ctx = json_get_u32(&v, "context_id")?; + if ctx != context_id { + return Err(ValidationError::new(format!( + "Job.context_id ({}) does not match path context_id ({})", + ctx, context_id + )) + .into()); + } + let script = json_get_str(&v, "script")?; + if script.trim().is_empty() { + return Err(ValidationError::new("Job.script must not be empty").into()); + } + let timeout = json_get_u32(&v, "timeout")?; + if timeout == 0 { + return Err(ValidationError::new("Job.timeout must be > 0").into()); + } + let depends = json_get_array(&v, "depends")?; + if has_duplicate_u32s(&depends) { + return Err(ValidationError::new("Job.depends must not contain duplicates").into()); + } + if vec_u32_contains(&depends, id) { + return Err(ValidationError::new("Job.depends must not include the job's own id").into()); + } + Ok(()) +} + +fn validate_message(context_id: u32, msg: &Message) -> Result<(), BoxError> { + let v = as_json(msg)?; + let id = json_get_u32(&v, "id")?; + if id == 0 { + return Err(ValidationError::new("Message.id must be > 0").into()); + } + let ctx = json_get_u32(&v, "context_id")?; + if ctx != context_id { + return Err(ValidationError::new(format!( + "Message.context_id ({}) does not match path context_id ({})", + ctx, context_id + )) + .into()); + } + let body = json_get_str(&v, "message")?; + if body.trim().is_empty() { + return Err(ValidationError::new("Message.message must not be empty").into()); + } + let t = json_get_u32(&v, "timeout")?; + let ta = json_get_u32(&v, "timeout_ack")?; + let tr = json_get_u32(&v, "timeout_result")?; + if t == 0 || ta == 0 || tr == 0 { + return Err(ValidationError::new( + "Message timeouts (timeout|timeout_ack|timeout_result) must be > 0", + ) + .into()); + } + // Validate embedded jobs minimal consistency (caller/context match) + let jobs = json_get_array(&v, "job")?; + let msg_caller = json_get_u32(&v, "caller_id")?; + for jv in jobs { + if let Some(obj) = jv.as_object() { + let mut jid = 0u32; + if let Some(u) = obj.get("id").and_then(|x| x.as_u64()) { + jid = u as u32; + } + if let (Some(jctx), Some(jcaller)) = ( + obj.get("context_id").and_then(|x| x.as_u64()), + obj.get("caller_id").and_then(|x| x.as_u64()), + ) { + if jctx as u32 != context_id { + return Err(ValidationError::new(format!( + "Embedded Job {} context_id mismatch ({} != {})", + jid, jctx as u32, context_id + )) + .into()); + } + if jcaller as u32 != msg_caller { + return Err(ValidationError::new(format!( + "Embedded Job {} caller_id mismatch ({} != {})", + jid, jcaller as u32, msg_caller + )) + .into()); + } + } + } + } + Ok(()) +} + +// ----------------------------- +// Service API +// ----------------------------- + +#[derive(Clone)] +pub struct AppService { + redis: Arc, + schedulers: Arc>>, +} + +impl AppService { + pub fn new(redis: RedisDriver) -> Self { + Self { + redis: Arc::new(redis), + schedulers: Arc::new(Mutex::new(HashSet::new())), + } + } + + // ----------------------------- + // Context + // ----------------------------- + pub async fn create_context(&self, ctx: Context) -> Result { + validate_context(&ctx)?; + // id + let v = 
+
+    // -----------------------------
+    // Context
+    // -----------------------------
+    pub async fn create_context(&self, ctx: Context) -> Result<Context, BoxError> {
+        validate_context(&ctx)?;
+        // id
+        let v = as_json(&ctx)?;
+        let context_id = json_get_u32(&v, "id")?;
+        self.ensure_context_not_exists(context_id).await?;
+        self.redis.save_context(&ctx).await?;
+        Ok(ctx)
+    }
+
+    pub async fn load_context(&self, id: u32) -> Result<Context, BoxError> {
+        let ctx = self.redis.load_context(id).await?;
+        Ok(ctx)
+    }
+
+    // -----------------------------
+    // Actor
+    // -----------------------------
+    pub async fn create_actor(&self, actor: Actor) -> Result<Actor, BoxError> {
+        validate_actor(&actor)?;
+        let v = as_json(&actor)?;
+        let id = json_get_u32(&v, "id")?;
+        self.ensure_actor_not_exists_global(id).await?;
+        self.redis.save_actor_global(&actor).await?;
+        Ok(actor)
+    }
+
+    pub async fn load_actor(&self, id: u32) -> Result<Actor, BoxError> {
+        let actor = self.redis.load_actor_global(id).await?;
+        Ok(actor)
+    }
+
+    // -----------------------------
+    // Runner
+    // -----------------------------
+    pub async fn create_runner(&self, context_id: u32, runner: Runner) -> Result<Runner, BoxError> {
+        validate_runner(context_id, &runner)?;
+        let v = as_json(&runner)?;
+        let id = json_get_u32(&v, "id")?;
+        self.ensure_runner_not_exists(context_id, id).await?;
+        self.redis.save_runner(context_id, &runner).await?;
+        Ok(runner)
+    }
+
+    pub async fn load_runner(&self, context_id: u32, id: u32) -> Result<Runner, BoxError> {
+        let runner = self.redis.load_runner(context_id, id).await?;
+        Ok(runner)
+    }
+
+    // -----------------------------
+    // Flow
+    // -----------------------------
+    pub async fn create_flow(&self, context_id: u32, flow: Flow) -> Result<Flow, BoxError> {
+        validate_flow(context_id, &flow)?;
+
+        // Permission: require that flow.caller_id is admin in the context
+        let v = as_json(&flow)?;
+        let fid = json_get_u32(&v, "id")?;
+        let caller_id = json_get_u32(&v, "caller_id")?;
+        self.require_admin(context_id, caller_id, "create flow")
+            .await?;
+
+        self.ensure_flow_not_exists(context_id, fid).await?;
+        // Require that the context exists
+        let _ = self.redis.load_context(context_id).await?;
+        self.redis.save_flow(context_id, &flow).await?;
+        Ok(flow)
+    }
+
+    pub async fn load_flow(&self, context_id: u32, id: u32) -> Result<Flow, BoxError> {
+        let flow = self.redis.load_flow(context_id, id).await?;
+        Ok(flow)
+    }
+
+    pub async fn flow_dag(&self, context_id: u32, flow_id: u32) -> DagResult<FlowDag> {
+        build_flow_dag(&self.redis, context_id, flow_id).await
+    }
+
+    /// Start a background scheduler for a flow.
+    /// - Ticks every 1 second.
+    /// - Sets Flow status to Started immediately.
+    /// - Dispatches jobs whose dependencies are Finished: creates a Message and LPUSHes its key into msg_out,
+    ///   and marks the job as Dispatched.
+    /// - When all jobs are Finished sets Flow to Finished; if any job is Error sets Flow to Error.
+    /// Returns:
+    /// - Ok(true) if a scheduler was started
+    /// - Ok(false) if a scheduler was already running for this (context_id, flow_id)
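+    ///
+    /// # Example (editor's illustrative sketch; `context_id`/`flow_id` are assumed to exist)
+    /// ```ignore
+    /// if svc.flow_start(context_id, flow_id).await? {
+    ///     // a background scheduler is now ticking for this flow
+    /// } else {
+    ///     // another scheduler already owns (context_id, flow_id)
+    /// }
+    /// ```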
+    pub async fn flow_start(&self, context_id: u32, flow_id: u32) -> Result<bool, BoxError> {
+        // Ensure flow exists (and load caller_id)
+        let flow = self.redis.load_flow(context_id, flow_id).await?;
+        let caller_id = flow.caller_id();
+
+        // Try to register this flow in the active scheduler set
+        {
+            let mut guard = self.schedulers.lock().await;
+            if !guard.insert((context_id, flow_id)) {
+                // Already running
+                return Ok(false);
+            }
+        }
+
+        // Clone resources for background task
+        let redis = self.redis.clone();
+        let schedulers = self.schedulers.clone();
+
+        // Set Flow status to Started
+        let _ = redis
+            .update_flow_status(context_id, flow_id, FlowStatus::Started)
+            .await;
+
+        tokio::spawn(async move {
+            // Background loop
+            loop {
+                // Load current flow; stop if missing
+                let flow = match redis.load_flow(context_id, flow_id).await {
+                    Ok(f) => f,
+                    Err(_) => break,
+                };
+
+                // Track aggregate state
+                let mut all_finished = true;
+                let mut any_error = false;
+
+                // Iterate jobs declared in the flow
+                for jid in flow.jobs() {
+                    // Load job
+                    let job = match redis.load_job(context_id, caller_id, *jid).await {
+                        Ok(j) => j,
+                        Err(_) => {
+                            // If job is missing treat as error state for the flow and stop
+                            any_error = true;
+                            all_finished = false;
+                            break;
+                        }
+                    };
+
+                    match job.status() {
+                        JobStatus::Finished => {
+                            // done
+                        }
+                        JobStatus::Error => {
+                            any_error = true;
+                            all_finished = false;
+                        }
+                        JobStatus::Dispatched | JobStatus::Started => {
+                            all_finished = false;
+                        }
+                        JobStatus::WaitingForPrerequisites => {
+                            all_finished = false;
+
+                            // Check dependencies complete
+                            let mut deps_ok = true;
+                            for dep in job.depends() {
+                                match redis.load_job(context_id, caller_id, *dep).await {
+                                    Ok(dj) => {
+                                        if dj.status() != JobStatus::Finished {
+                                            deps_ok = false;
+                                            break;
+                                        }
+                                    }
+                                    Err(_) => {
+                                        deps_ok = false;
+                                        break;
+                                    }
+                                }
+                            }
+
+                            if deps_ok {
+                                // Build Message embedding this job
+                                let ts = crate::time::current_timestamp();
+                                let msg_id: u32 = job.id(); // deterministic message id per job for now
+
+                                let message = Message {
+                                    id: msg_id,
+                                    caller_id: job.caller_id(),
+                                    context_id,
+                                    message: "job.run".to_string(),
+                                    message_type: job.script_type(),
+                                    message_format_type: MessageFormatType::Text,
+                                    timeout: job.timeout,
+                                    timeout_ack: 10,
+                                    timeout_result: job.timeout,
+                                    transport_id: None,
+                                    transport_status: None,
+                                    job: vec![job.clone()],
+                                    logs: Vec::new(),
+                                    created_at: ts,
+                                    updated_at: ts,
+                                    status: MessageStatus::Dispatched,
+                                };
+
+                                // Persist the message and enqueue it
+                                if redis.save_message(context_id, &message).await.is_ok() {
+                                    let _ = redis
+                                        .enqueue_msg_out(context_id, job.caller_id(), msg_id)
+                                        .await;
+                                    // Mark job as Dispatched
+                                    let _ = redis
+                                        .update_job_status(
+                                            context_id,
+                                            job.caller_id(),
+                                            job.id(),
+                                            JobStatus::Dispatched,
+                                        )
+                                        .await;
+                                }
+                            }
+                        }
+                    }
+                }
+
+                if any_error {
+                    let _ = redis
+                        .update_flow_status(context_id, flow_id, FlowStatus::Error)
+                        .await;
+                    break;
+                }
+                if all_finished {
+                    let _ = redis
+                        .update_flow_status(context_id, flow_id, FlowStatus::Finished)
+                        .await;
+                    break;
+                }
+
+                sleep(Duration::from_secs(1)).await;
+            }
+
+            // Remove from active schedulers set
+            let mut guard = schedulers.lock().await;
+            guard.remove(&(context_id, flow_id));
+        });
+
+        Ok(true)
+    }
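+
+    // NOTE (editor): `flow_start` runs a background scheduler until the flow reaches a
+    // terminal status, while `flow_execute` below performs a single dispatch pass over
+    // the DAG's currently-ready jobs and returns the enqueued message keys.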
+
+    /// Execute a flow: compute DAG, create Message entries for ready jobs, and enqueue their keys to msg_out.
+    /// Returns the list of enqueued message keys ("message:{caller_id}:{id}") in deterministic order (by job id).
+    pub async fn flow_execute(&self, context_id: u32, flow_id: u32) -> DagResult<Vec<String>> {
+        let dag = build_flow_dag(&self.redis, context_id, flow_id).await?;
+        let mut ready = dag.ready_jobs()?;
+        ready.sort_unstable();
+
+        let mut queued: Vec<String> = Vec::with_capacity(ready.len());
+        for jid in ready {
+            // Load the concrete Job
+            let job = self
+                .redis
+                .load_job(context_id, dag.caller_id, jid)
+                .await
+                .map_err(DagError::from)?;
+
+            // Build a Message that embeds this job
+            let ts = crate::time::current_timestamp();
+            let msg_id: u32 = job.id(); // deterministic; adjust strategy later if needed
+
+            let message = Message {
+                id: msg_id,
+                caller_id: job.caller_id(),
+                context_id,
+                message: "job.run".to_string(),
+                message_type: job.script_type(), // uses ScriptType (matches model)
+                message_format_type: MessageFormatType::Text,
+                timeout: job.timeout,
+                timeout_ack: 10,
+                timeout_result: job.timeout,
+                transport_id: None,
+                transport_status: None,
+                job: vec![job.clone()],
+                logs: Vec::new(),
+                created_at: ts,
+                updated_at: ts,
+                status: MessageStatus::Dispatched,
+            };
+
+            // Persist the Message and enqueue its key to the outbound queue
+            let _ = self
+                .create_message(context_id, message)
+                .await
+                .map_err(DagError::from)?;
+
+            self.redis
+                .enqueue_msg_out(context_id, job.caller_id(), msg_id)
+                .await
+                .map_err(DagError::from)?;
+
+            let key = format!("message:{}:{}", job.caller_id(), msg_id);
+            queued.push(key);
+        }
+
+        Ok(queued)
+    }
+
+    // -----------------------------
+    // Job
+    // -----------------------------
+    pub async fn create_job(&self, context_id: u32, job: Job) -> Result<Job, BoxError> {
+        validate_job(context_id, &job)?;
+        let v = as_json(&job)?;
+        let id = json_get_u32(&v, "id")?;
+        let caller_id = json_get_u32(&v, "caller_id")?;
+        self.ensure_job_not_exists(context_id, caller_id, id)
+            .await?;
+        self.redis.save_job(context_id, &job).await?;
+        Ok(job)
+    }
+
+    pub async fn load_job(
+        &self,
+        context_id: u32,
+        caller_id: u32,
+        id: u32,
+    ) -> Result<Job, BoxError> {
+        let job = self.redis.load_job(context_id, caller_id, id).await?;
+        Ok(job)
+    }
+
+    /// Update a job status with transition validation.
+    ///
+    /// Allowed transitions:
+    /// - Dispatched -> WaitingForPrerequisites | Started | Finished | Error
+    /// - WaitingForPrerequisites -> Started | Finished | Error
+    /// - Started -> Finished | Error
+    /// - Finished, Error -> terminal (no transitions)
+    ///
+    /// If the new status equals the current status, this is a no-op.
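+    ///
+    /// # Example (editor's illustrative sketch; ids are assumed)
+    /// ```ignore
+    /// // executor 7 marks job 42 (created by caller 3) as Started
+    /// svc.update_job_status(context_id, 7, 3, 42, JobStatus::Started).await?;
+    /// ```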
+    pub async fn update_job_status(
+        &self,
+        context_id: u32,
+        executor_id: u32,
+        caller_id: u32,
+        id: u32,
+        new_status: JobStatus,
+    ) -> Result<(), BoxError> {
+        self.require_executor(context_id, executor_id, "update job status")
+            .await?;
+        let job = self.redis.load_job(context_id, caller_id, id).await?;
+        let current = job.status();
+
+        if new_status == current {
+            // Idempotent: don't touch storage if no change
+            return Ok(());
+        }
+
+        let allowed = match current {
+            JobStatus::Dispatched => matches!(
+                new_status,
+                JobStatus::WaitingForPrerequisites
+                    | JobStatus::Started
+                    | JobStatus::Finished
+                    | JobStatus::Error
+            ),
+            JobStatus::WaitingForPrerequisites => {
+                matches!(
+                    new_status,
+                    JobStatus::Started | JobStatus::Finished | JobStatus::Error
+                )
+            }
+            JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
+            JobStatus::Finished | JobStatus::Error => false,
+        };
+
+        if !allowed {
+            return Err(Box::new(InvalidJobStatusTransition {
+                from: current,
+                to: new_status,
+            }));
+        }
+
+        self.redis
+            .update_job_status(context_id, caller_id, id, new_status)
+            .await?;
+
+        Ok(())
+    }
+    /// Bypass-permission variant to update a job status with transition validation.
+    /// This skips the executor permission check but enforces the same state transition rules.
+    pub async fn update_job_status_unchecked(
+        &self,
+        context_id: u32,
+        caller_id: u32,
+        id: u32,
+        new_status: JobStatus,
+    ) -> Result<(), BoxError> {
+        let job = self.redis.load_job(context_id, caller_id, id).await?;
+        let current = job.status();
+
+        if new_status == current {
+            // Idempotent: don't touch storage if no change
+            return Ok(());
+        }
+
+        let allowed = match current {
+            JobStatus::Dispatched => matches!(
+                new_status,
+                JobStatus::WaitingForPrerequisites
+                    | JobStatus::Started
+                    | JobStatus::Finished
+                    | JobStatus::Error
+            ),
+            JobStatus::WaitingForPrerequisites => {
+                matches!(
+                    new_status,
+                    JobStatus::Started | JobStatus::Finished | JobStatus::Error
+                )
+            }
+            JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
+            JobStatus::Finished | JobStatus::Error => false,
+        };
+
+        if !allowed {
+            return Err(Box::new(InvalidJobStatusTransition {
+                from: current,
+                to: new_status,
+            }));
+        }
+
+        self.redis
+            .update_job_status(context_id, caller_id, id, new_status)
+            .await?;
+
+        Ok(())
+    }
+
+    // -----------------------------
+    // Message
+    // -----------------------------
+    pub async fn create_message(
+        &self,
+        context_id: u32,
+        message: Message,
+    ) -> Result<Message, BoxError> {
+        validate_message(context_id, &message)?;
+        let v = as_json(&message)?;
+        let id = json_get_u32(&v, "id")?;
+        let caller_id = json_get_u32(&v, "caller_id")?;
+        self.ensure_message_not_exists(context_id, caller_id, id)
+            .await?;
+        self.redis.save_message(context_id, &message).await?;
+        Ok(message)
+    }
+
+    pub async fn load_message(
+        &self,
+        context_id: u32,
+        caller_id: u32,
+        id: u32,
+    ) -> Result<Message, BoxError> {
+        let msg = self.redis.load_message(context_id, caller_id, id).await?;
+        Ok(msg)
+    }
Value::String(s) if s == "Started" => FlowStatus::Started, + Value::String(s) if s == "Error" => FlowStatus::Error, + Value::String(s) if s == "Finished" => FlowStatus::Finished, + _ => FlowStatus::Dispatched, + }; + + if cur == new_status { + return Ok(()); + } + + let allowed = match cur { + FlowStatus::Created => matches!(new_status, FlowStatus::Dispatched | FlowStatus::Error), + FlowStatus::Dispatched => matches!(new_status, FlowStatus::Started | FlowStatus::Error), + FlowStatus::Started => matches!(new_status, FlowStatus::Finished | FlowStatus::Error), + FlowStatus::Finished | FlowStatus::Error => false, + }; + if !allowed { + return Err(ValidationError::new(format!( + "Invalid flow status transition: {:?} -> {:?}", + cur, new_status + )) + .into()); + } + + self.redis + .update_flow_status(context_id, id, new_status) + .await + } + + pub async fn update_message_status( + &self, + context_id: u32, + caller_id: u32, + id: u32, + new_status: MessageStatus, + ) -> Result<(), BoxError> { + let msg = self.redis.load_message(context_id, caller_id, id).await?; + let v = as_json(&msg)?; + let cur_raw = v + .get("status") + .cloned() + .unwrap_or(Value::String("Dispatched".to_string())); + + let cur = match cur_raw { + Value::String(s) if s == "Dispatched" => MessageStatus::Dispatched, + Value::String(s) if s == "Acknowledged" => MessageStatus::Acknowledged, + Value::String(s) if s == "Error" => MessageStatus::Error, + Value::String(s) if s == "Processed" => MessageStatus::Processed, + _ => MessageStatus::Dispatched, + }; + + if cur == new_status { + return Ok(()); + } + + let allowed = match cur { + MessageStatus::Dispatched => { + matches!( + new_status, + MessageStatus::Acknowledged | MessageStatus::Error + ) + } + MessageStatus::Acknowledged => { + matches!(new_status, MessageStatus::Processed | MessageStatus::Error) + } + MessageStatus::Processed | MessageStatus::Error => false, + }; + if !allowed { + return Err(ValidationError::new(format!( + "Invalid message status transition: {:?} -> {:?}", + cur, new_status + )) + .into()); + } + + self.redis + .update_message_status(context_id, caller_id, id, new_status) + .await + } + + pub async fn update_message_transport( + &self, + context_id: u32, + caller_id: u32, + id: u32, + transport_id: Option, + transport_status: Option, + ) -> Result<(), BoxError> { + // Ensure message exists (provides clearer error) + let _ = self.redis.load_message(context_id, caller_id, id).await?; + self.redis + .update_message_transport(context_id, caller_id, id, transport_id, transport_status) + .await + } + + pub async fn update_flow_env_vars_merge( + &self, + context_id: u32, + requestor_id: u32, + flow_id: u32, + patch: HashMap, + ) -> Result<(), BoxError> { + self.require_admin(context_id, requestor_id, "update flow env_vars") + .await?; + // Ensure flow exists + let _ = self.redis.load_flow(context_id, flow_id).await?; + self.redis + .update_flow_env_vars_merge(context_id, flow_id, patch) + .await + } + + pub async fn update_flow_result_merge( + &self, + context_id: u32, + requestor_id: u32, + flow_id: u32, + patch: HashMap, + ) -> Result<(), BoxError> { + self.require_admin(context_id, requestor_id, "update flow result") + .await?; + let _ = self.redis.load_flow(context_id, flow_id).await?; + self.redis + .update_flow_result_merge(context_id, flow_id, patch) + .await + } + + pub async fn update_flow_jobs_add_remove( + &self, + context_id: u32, + requestor_id: u32, + flow_id: u32, + add: Vec, + remove: Vec, + ) -> Result<(), BoxError> { + 
self.require_admin(context_id, requestor_id, "update flow jobs") + .await?; + let flow = self.redis.load_flow(context_id, flow_id).await?; + let mut set: std::collections::BTreeSet = flow.jobs().iter().copied().collect(); + for a in add { + set.insert(a); + } + for r in remove { + set.remove(&r); + } + let new_jobs: Vec = set.into_iter().collect(); + self.redis + .update_flow_jobs_set(context_id, flow_id, new_jobs) + .await + } + + pub async fn update_job_env_vars_merge( + &self, + context_id: u32, + requestor_id: u32, + caller_id: u32, + job_id: u32, + patch: HashMap, + ) -> Result<(), BoxError> { + self.require_admin(context_id, requestor_id, "update job env_vars") + .await?; + let _ = self.redis.load_job(context_id, caller_id, job_id).await?; + self.redis + .update_job_env_vars_merge(context_id, caller_id, job_id, patch) + .await + } + + pub async fn update_job_result_merge( + &self, + context_id: u32, + requestor_id: u32, + caller_id: u32, + job_id: u32, + patch: HashMap, + ) -> Result<(), BoxError> { + // Allow if admin OR executor + let ctx = self.redis.load_context(context_id).await?; + let is_admin = context_has_role(&ctx, "admins", requestor_id)?; + let is_exec = context_has_role(&ctx, "executors", requestor_id)?; + if !(is_admin || is_exec) { + return Err(Box::new(PermissionDeniedError { + actor_id: requestor_id, + context_id, + action: "update job result".to_string(), + })); + } + let _ = self.redis.load_job(context_id, caller_id, job_id).await?; + self.redis + .update_job_result_merge(context_id, caller_id, job_id, patch) + .await + } + + /// Bypass-permission variant to merge into a job's result field. + /// Intended for internal router/scheduler use where no actor identity is present. + pub async fn update_job_result_merge_unchecked( + &self, + context_id: u32, + caller_id: u32, + job_id: u32, + patch: HashMap, + ) -> Result<(), BoxError> { + // Ensure job exists, then write directly + let _ = self.redis.load_job(context_id, caller_id, job_id).await?; + self.redis + .update_job_result_merge(context_id, caller_id, job_id, patch) + .await + } + + pub async fn append_message_logs( + &self, + context_id: u32, + caller_id: u32, + id: u32, + new_logs: Vec, + ) -> Result<(), BoxError> { + let _ = self.redis.load_message(context_id, caller_id, id).await?; + self.redis + .append_message_logs(context_id, caller_id, id, new_logs) + .await + } +} + +// ----------------------------- +// Existence checks (strict create) and permissions +// ----------------------------- +impl AppService { + async fn ensure_context_not_exists(&self, id: u32) -> Result<(), BoxError> { + match self.redis.load_context(id).await { + Ok(_) => Err(Box::new(AlreadyExistsError { + key: format!("context:{}", id), + })), + Err(e) => { + if contains_key_not_found(&e) { + Ok(()) + } else { + Err(e) + } + } + } + } + + async fn ensure_actor_not_exists_global(&self, id: u32) -> Result<(), BoxError> { + match self.redis.load_actor_global(id).await { + Ok(_) => Err(Box::new(AlreadyExistsError { + key: format!("actor:{}", id), + })), + Err(e) => { + if contains_key_not_found(&e) { + Ok(()) + } else { + Err(e) + } + } + } + } + + async fn ensure_runner_not_exists(&self, db: u32, id: u32) -> Result<(), BoxError> { + match self.redis.load_runner(db, id).await { + Ok(_) => Err(Box::new(AlreadyExistsError { + key: format!("runner:{}", id), + })), + Err(e) => { + if contains_key_not_found(&e) { + Ok(()) + } else { + Err(e) + } + } + } + } + + async fn ensure_flow_not_exists(&self, db: u32, id: u32) -> Result<(), BoxError> { + 
+
+    async fn ensure_flow_not_exists(&self, db: u32, id: u32) -> Result<(), BoxError> {
+        match self.redis.load_flow(db, id).await {
+            Ok(_) => Err(Box::new(AlreadyExistsError {
+                key: format!("flow:{}", id),
+            })),
+            Err(e) => {
+                if contains_key_not_found(&e) {
+                    Ok(())
+                } else {
+                    Err(e)
+                }
+            }
+        }
+    }
+
+    async fn ensure_job_not_exists(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+    ) -> Result<(), BoxError> {
+        match self.redis.load_job(db, caller_id, id).await {
+            Ok(_) => Err(Box::new(AlreadyExistsError {
+                key: format!("job:{}:{}", caller_id, id),
+            })),
+            Err(e) => {
+                if contains_key_not_found(&e) {
+                    Ok(())
+                } else {
+                    Err(e)
+                }
+            }
+        }
+    }
+
+    async fn ensure_message_not_exists(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+    ) -> Result<(), BoxError> {
+        match self.redis.load_message(db, caller_id, id).await {
+            Ok(_) => Err(Box::new(AlreadyExistsError {
+                key: format!("message:{}:{}", caller_id, id),
+            })),
+            Err(e) => {
+                if contains_key_not_found(&e) {
+                    Ok(())
+                } else {
+                    Err(e)
+                }
+            }
+        }
+    }
+
+    async fn require_admin(
+        &self,
+        context_id: u32,
+        actor_id: u32,
+        action: &str,
+    ) -> Result<(), BoxError> {
+        let ctx = self.redis.load_context(context_id).await?;
+        let ok = context_has_role(&ctx, "admins", actor_id)?;
+        if !ok {
+            return Err(Box::new(PermissionDeniedError {
+                actor_id,
+                context_id,
+                action: action.to_string(),
+            }));
+        }
+        Ok(())
+    }
+
+    async fn require_executor(
+        &self,
+        context_id: u32,
+        actor_id: u32,
+        action: &str,
+    ) -> Result<(), BoxError> {
+        let ctx = self.redis.load_context(context_id).await?;
+        let ok = context_has_role(&ctx, "executors", actor_id)?;
+        if !ok {
+            return Err(Box::new(PermissionDeniedError {
+                actor_id,
+                context_id,
+                action: action.to_string(),
+            }));
+        }
+        Ok(())
+    }
+}
+
+/// Router/helper wrappers exposed on AppService so background tasks don't need direct Redis access.
+impl AppService {
+    /// Block-pop from the per-context msg_out queue with a timeout (seconds).
+    /// Returns Some(message_key) like "message:{caller_id}:{id}" or None on timeout.
+    pub async fn brpop_msg_out(
+        &self,
+        context_id: u32,
+        timeout_secs: usize,
+    ) -> Result<Option<String>, BoxError> {
+        self.redis.brpop_msg_out(context_id, timeout_secs).await
+    }
+
+    /// Scan all runner:* in the given context and return deserialized Runner entries.
+    pub async fn scan_runners(&self, context_id: u32) -> Result<Vec<Runner>, BoxError> {
+        self.redis.scan_runners(context_id).await
+    }
+
+    /// Correlation map: store mapping from inner supervisor JSON-RPC id to context/caller/job/message.
+    pub async fn supcorr_set(
+        &self,
+        inner_id: u64,
+        context_id: u32,
+        caller_id: u32,
+        job_id: u32,
+        message_id: u32,
+    ) -> Result<(), BoxError> {
+        self.redis
+            .supcorr_set(inner_id, context_id, caller_id, job_id, message_id)
+            .await
+            .map_err(Into::into)
+    }
+
+    /// Correlation map: load mapping by inner supervisor JSON-RPC id.
+    pub async fn supcorr_get(
+        &self,
+        inner_id: u64,
+    ) -> Result<Option<(u32, u32, u32, u32)>, BoxError> {
+        self.redis.supcorr_get(inner_id).await.map_err(Into::into)
+    }
+
+    /// Correlation map: delete mapping by inner supervisor JSON-RPC id.
+    pub async fn supcorr_del(&self, inner_id: u64) -> Result<(), BoxError> {
+        self.redis.supcorr_del(inner_id).await.map_err(Into::into)
+    }
+}
+
+/// Auto-discovery helpers for contexts (wrappers over RedisDriver)
+impl AppService {
+    pub async fn list_context_ids(&self) -> Result<Vec<u32>, BoxError> {
+        self.redis.list_context_ids().await
+    }
+}
diff --git a/bin/coordinator/src/storage.rs b/bin/coordinator/src/storage.rs
new file mode 100644
index 0000000..0f726e5
--- /dev/null
+++ b/bin/coordinator/src/storage.rs
@@ -0,0 +1,3 @@
+pub mod redis;
+
+pub use redis::RedisDriver;
diff --git a/bin/coordinator/src/storage/redis.rs b/bin/coordinator/src/storage/redis.rs
new file mode 100644
index 0000000..2a6f323
--- /dev/null
+++ b/bin/coordinator/src/storage/redis.rs
@@ -0,0 +1,827 @@
+use std::collections::HashMap as StdHashMap;
+
+use redis::{AsyncCommands, aio::ConnectionManager};
+use serde::Serialize;
+use serde::de::DeserializeOwned;
+use serde_json::{Map as JsonMap, Value};
+use tokio::sync::Mutex;
+
+use crate::models::{
+    Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageStatus, Runner,
+    TransportStatus,
+};
+use tracing::{error, warn};
+
+type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
+
+/// Async Redis driver that saves/loads every model as a Redis hash (HSET),
+/// using canonical keys as specified in the specs.
+/// - Complex fields (arrays, maps, nested structs) are JSON-encoded per field
+/// - Scalars are written as plain strings (numbers/bools as their string representation)
+/// - On load, each field value is first attempted to parse as JSON; if that fails it is treated as a plain string
+pub struct RedisDriver {
+    /// Base address, e.g. "127.0.0.1:6379" or "redis://127.0.0.1:6379"
+    base_addr: String,
+    /// Cache of connection managers per DB index
+    managers: Mutex<StdHashMap<u32, ConnectionManager>>,
+}
+
+impl RedisDriver {
+    /// Create a new driver for the given Redis address.
+    /// Accepts either "host:port" or "redis://host:port"
+    pub async fn new(addr: impl Into<String>) -> Result<Self> {
+        let raw = addr.into();
+        let base_addr = if raw.starts_with("redis://") {
+            raw
+        } else {
+            format!("redis://{}", raw)
+        };
+        Ok(Self {
+            base_addr,
+            managers: Mutex::new(StdHashMap::new()),
+        })
+    }
+
+    /// Get or create a ConnectionManager for the given DB index.
+    async fn manager_for_db(&self, db: u32) -> Result<ConnectionManager> {
+        {
+            // Fast path: check existing
+            let guard = self.managers.lock().await;
+            if let Some(cm) = guard.get(&db) {
+                return Ok(cm.clone());
+            }
+        }
+
+        // Slow path: create a new manager and cache it
+        let url = format!("{}/{}", self.base_addr.trim_end_matches('/'), db);
+        let client = redis::Client::open(url.as_str()).map_err(|e| {
+            error!(%url, db=%db, error=%e, "Redis client open failed");
+            e
+        })?;
+        let cm = client.get_connection_manager().await.map_err(|e| {
+            error!(%url, db=%db, error=%e, "Redis connection manager init failed");
+            e
+        })?;
+
+        let mut guard = self.managers.lock().await;
+        let entry = guard.entry(db).or_insert(cm);
+        Ok(entry.clone())
+    }
+
+    // -----------------------------
+    // Generic helpers (serde <-> HSET)
+    // -----------------------------
+
+    fn struct_to_hset_pairs<T: Serialize>(value: &T) -> Result<Vec<(String, String)>> {
+        let json = serde_json::to_value(value)?;
+        let obj = json
+            .as_object()
+            .ok_or("Model must serialize to a JSON object")?;
+        let mut pairs = Vec::with_capacity(obj.len());
+        for (k, v) in obj {
+            let s = match v {
+                Value::Array(_) | Value::Object(_) => serde_json::to_string(v)?, // complex - store JSON
+                Value::String(s) => s.clone(),                                   // string - plain
+                Value::Number(n) => n.to_string(),                               // number - plain
+                Value::Bool(b) => b.to_string(),                                 // bool - plain
+                Value::Null => "null".to_string(),                               // null sentinel
+            };
+            pairs.push((k.clone(), s));
+        }
+        Ok(pairs)
+    }
+
+    fn hmap_to_struct<T: DeserializeOwned>(map: StdHashMap<String, String>) -> Result<T> {
+        let mut obj = JsonMap::with_capacity(map.len());
+        for (k, s) in map {
+            // Try parse as JSON first (works for arrays, objects, numbers, booleans, null)
+            // If that fails, fallback to string.
+            match serde_json::from_str::<Value>(&s) {
+                Ok(v) => {
+                    obj.insert(k, v);
+                }
+                Err(_) => {
+                    obj.insert(k, Value::String(s));
+                }
+            }
+        }
+        let json = Value::Object(obj);
+        let model = serde_json::from_value(json)?;
+        Ok(model)
+    }
+
+    async fn hset_model<T: Serialize>(&self, db: u32, key: &str, model: &T) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let pairs = Self::struct_to_hset_pairs(model).map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "Serialize model to HSET pairs failed");
+            e
+        })?;
+        // Ensure no stale fields
+        let del_res: redis::RedisResult<i64> = cm.del(key).await;
+        if let Err(e) = del_res {
+            warn!(db=%db, key=%key, error=%e, "DEL before HSET failed");
+        }
+        // Write all fields
+        let _: () = cm.hset_multiple(key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET multiple failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    async fn hget_model<T: DeserializeOwned>(&self, db: u32, key: &str) -> Result<T> {
+        let mut cm = self.manager_for_db(db).await?;
+        let map: StdHashMap<String, String> = cm.hgetall(key).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HGETALL failed");
+            e
+        })?;
+        if map.is_empty() {
+            // NotFound is expected in some flows; don't log as error
+            return Err(format!("Key not found: {}", key).into());
+        }
+        Self::hmap_to_struct(map).map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "Deserialize model from HGETALL failed");
+            e
+        })
+    }
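+
+    // NOTE (editor): illustrative encoding, assuming a model with these fields; per the
+    // rules above, complex values are stored as JSON strings and scalars as plain strings:
+    //
+    //     { "id": 7, "jobs": [1, 2], "name": "etl" }
+    //       -> HSET flow:7 id "7" jobs "[1,2]" name "etl"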
+
+    // -----------------------------
+    // Key helpers (canonical keys)
+    // -----------------------------
+
+    fn actor_key(id: u32) -> String {
+        format!("actor:{}", id)
+    }
+
+    fn context_key(id: u32) -> String {
+        format!("context:{}", id)
+    }
+
+    fn flow_key(id: u32) -> String {
+        format!("flow:{}", id)
+    }
+
+    fn runner_key(id: u32) -> String {
+        format!("runner:{}", id)
+    }
+
+    fn job_key(caller_id: u32, id: u32) -> String {
+        format!("job:{}:{}", caller_id, id)
+    }
+
+    fn message_key(caller_id: u32, id: u32) -> String {
+        format!("message:{}:{}", caller_id, id)
+    }
+
+    // -----------------------------
+    // Context (DB = context.id)
+    // -----------------------------
+
+    /// Save a Context in its own DB (db index = context.id)
+    pub async fn save_context(&self, ctx: &Context) -> Result<()> {
+        // We don't have field access; compute db and key via JSON to avoid changing model definitions.
+        // Extract "id" from serialized JSON object.
+        let json = serde_json::to_value(ctx)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Context.id missing or not a number")? as u32;
+        let key = Self::context_key(id);
+        // Write the context hash in its own DB
+        self.hset_model(id, &key, ctx).await?;
+        // Register this context id in the global registry (DB 0)
+        let _ = self.register_context_id(id).await;
+        Ok(())
+    }
+
+    /// Load a Context from its own DB (db index = id)
+    pub async fn load_context(&self, id: u32) -> Result<Context> {
+        let key = Self::context_key(id);
+        self.hget_model(id, &key).await
+    }
+
+    // -----------------------------
+    // Actor
+    // -----------------------------
+
+    /// Save an Actor to the given DB (tenant/context DB)
+    pub async fn save_actor(&self, db: u32, actor: &Actor) -> Result<()> {
+        let json = serde_json::to_value(actor)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Actor.id missing or not a number")? as u32;
+        let key = Self::actor_key(id);
+        self.hset_model(db, &key, actor).await
+    }
+
+    /// Load an Actor by id from the given DB
+    pub async fn load_actor(&self, db: u32, id: u32) -> Result<Actor> {
+        let key = Self::actor_key(id);
+        self.hget_model(db, &key).await
+    }
+    /// Save an Actor globally in DB 0 (Actor is context-independent)
+    pub async fn save_actor_global(&self, actor: &Actor) -> Result<()> {
+        let json = serde_json::to_value(actor)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Actor.id missing or not a number")? as u32;
+        let key = Self::actor_key(id);
+        self.hset_model(0, &key, actor).await
+    }
+
+    /// Load an Actor globally from DB 0 by id
+    pub async fn load_actor_global(&self, id: u32) -> Result<Actor> {
+        let key = Self::actor_key(id);
+        self.hget_model(0, &key).await
+    }
+
+    // -----------------------------
+    // Runner
+    // -----------------------------
+
+    pub async fn save_runner(&self, db: u32, runner: &Runner) -> Result<()> {
+        let json = serde_json::to_value(runner)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Runner.id missing or not a number")? as u32;
+        let key = Self::runner_key(id);
+        self.hset_model(db, &key, runner).await
+    }
+
+    pub async fn load_runner(&self, db: u32, id: u32) -> Result<Runner> {
+        let key = Self::runner_key(id);
+        self.hget_model(db, &key).await
+    }
+
+    // -----------------------------
+    // Flow
+    // -----------------------------
+
+    pub async fn save_flow(&self, db: u32, flow: &Flow) -> Result<()> {
+        let json = serde_json::to_value(flow)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Flow.id missing or not a number")? as u32;
+        let key = Self::flow_key(id);
+        self.hset_model(db, &key, flow).await
+    }
+
+    pub async fn load_flow(&self, db: u32, id: u32) -> Result<Flow> {
+        let key = Self::flow_key(id);
+        self.hget_model(db, &key).await
+    }
+
+    // -----------------------------
+    // Job
+    // -----------------------------
+
+    pub async fn save_job(&self, db: u32, job: &Job) -> Result<()> {
+        let json = serde_json::to_value(job)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Job.id missing or not a number")? as u32;
+        let caller_id = json
+            .get("caller_id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Job.caller_id missing or not a number")? as u32;
+        let key = Self::job_key(caller_id, id);
+        self.hset_model(db, &key, job).await
+    }
+
+    pub async fn load_job(&self, db: u32, caller_id: u32, id: u32) -> Result<Job> {
+        let key = Self::job_key(caller_id, id);
+        self.hget_model(db, &key).await
+    }
+
+    /// Atomically update a job's status and `updated_at` fields.
+    /// - No transition validation is performed.
+    /// - Writes only the two fields via HSET to avoid rewriting the whole model.
+    pub async fn update_job_status(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        status: JobStatus,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::job_key(caller_id, id);
+
+        // Serialize enum into the same plain string representation stored by create paths
+        let status_str = match serde_json::to_value(&status)? {
+            Value::String(s) => s,
+            v => v.to_string(),
+        };
+
+        let ts = crate::time::current_timestamp();
+
+        let pairs = vec![
+            ("status".to_string(), status_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_job_status failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    // -----------------------------
+    // Message
+    // -----------------------------
+
+    pub async fn save_message(&self, db: u32, message: &Message) -> Result<()> {
+        let json = serde_json::to_value(message)?;
+        let id = json
+            .get("id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Message.id missing or not a number")? as u32;
+        let caller_id = json
+            .get("caller_id")
+            .and_then(|v| v.as_u64())
+            .ok_or("Message.caller_id missing or not a number")? as u32;
+        let key = Self::message_key(caller_id, id);
+        self.hset_model(db, &key, message).await
+    }
+
+    pub async fn load_message(&self, db: u32, caller_id: u32, id: u32) -> Result<Message> {
+        let key = Self::message_key(caller_id, id);
+        self.hget_model(db, &key).await
+    }
+
+    // -----------------------------
+    // Partial update helpers
+    // -----------------------------
+
+    /// Flow: update only status and updated_at
+    pub async fn update_flow_status(&self, db: u32, id: u32, status: FlowStatus) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::flow_key(id);
+
+        let status_str = match serde_json::to_value(&status)? {
+            Value::String(s) => s,
+            v => v.to_string(),
+        };
+        let ts = crate::time::current_timestamp();
+
+        let pairs = vec![
+            ("status".to_string(), status_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_flow_status failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Message: update only status and updated_at
+    pub async fn update_message_status(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        status: MessageStatus,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::message_key(caller_id, id);
+
+        let status_str = match serde_json::to_value(&status)? {
+            Value::String(s) => s,
+            v => v.to_string(),
+        };
+        let ts = crate::time::current_timestamp();
+
+        let pairs = vec![
+            ("status".to_string(), status_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_message_status failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Message: update transport_id / transport_status (optionally) and bump updated_at
+    pub async fn update_message_transport(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        transport_id: Option<String>,
+        transport_status: Option<TransportStatus>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::message_key(caller_id, id);
+
+        let mut pairs: Vec<(String, String)> = Vec::new();
+
+        if let Some(tid) = transport_id {
+            pairs.push(("transport_id".to_string(), tid));
+        }
+
+        if let Some(ts_status) = transport_status {
+            let status_str = match serde_json::to_value(&ts_status)? {
+                Value::String(s) => s,
+                v => v.to_string(),
+            };
+            pairs.push(("transport_status".to_string(), status_str));
+        }
+
+        // Always bump updated_at
+        let ts = crate::time::current_timestamp();
+        pairs.push(("updated_at".to_string(), ts.to_string()));
+
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_message_transport failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Flow: merge env_vars map and bump updated_at
+    pub async fn update_flow_env_vars_merge(
+        &self,
+        db: u32,
+        id: u32,
+        patch: StdHashMap<String, String>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::flow_key(id);
+
+        let current: Option<String> = cm.hget(&key, "env_vars").await.ok();
+        let mut obj = match current
+            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
+            .and_then(|v| v.as_object().cloned())
+        {
+            Some(m) => m,
+            None => JsonMap::new(),
+        };
+
+        for (k, v) in patch {
+            obj.insert(k, Value::String(v));
+        }
+
+        let env_vars_str = Value::Object(obj).to_string();
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("env_vars".to_string(), env_vars_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_flow_env_vars_merge failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Flow: merge result map and bump updated_at
+    pub async fn update_flow_result_merge(
+        &self,
+        db: u32,
+        id: u32,
+        patch: StdHashMap<String, String>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::flow_key(id);
+
+        let current: Option<String> = cm.hget(&key, "result").await.ok();
+        let mut obj = match current
+            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
+            .and_then(|v| v.as_object().cloned())
+        {
+            Some(m) => m,
+            None => JsonMap::new(),
+        };
+
+        for (k, v) in patch {
+            obj.insert(k, Value::String(v));
+        }
+
+        let result_str = Value::Object(obj).to_string();
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("result".to_string(), result_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_flow_result_merge failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Job: merge env_vars map and bump updated_at
+    pub async fn update_job_env_vars_merge(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        patch: StdHashMap<String, String>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::job_key(caller_id, id);
+
+        let current: Option<String> = cm.hget(&key, "env_vars").await.ok();
+        let mut obj = match current
+            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
+            .and_then(|v| v.as_object().cloned())
+        {
+            Some(m) => m,
+            None => JsonMap::new(),
+        };
+
+        for (k, v) in patch {
+            obj.insert(k, Value::String(v));
+        }
+
+        let env_vars_str = Value::Object(obj).to_string();
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("env_vars".to_string(), env_vars_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_job_env_vars_merge failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Job: merge result map and bump updated_at
+    pub async fn update_job_result_merge(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        patch: StdHashMap<String, String>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::job_key(caller_id, id);
+
+        let current: Option<String> = cm.hget(&key, "result").await.ok();
+        let mut obj = match current
+            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
+            .and_then(|v| v.as_object().cloned())
+        {
+            Some(m) => m,
+            None => JsonMap::new(),
+        };
+
+        for (k, v) in patch {
+            obj.insert(k, Value::String(v));
+        }
+
+        let result_str = Value::Object(obj).to_string();
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("result".to_string(), result_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_job_result_merge failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Flow: set jobs list and bump updated_at
+    pub async fn update_flow_jobs_set(&self, db: u32, id: u32, new_jobs: Vec<u32>) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::flow_key(id);
+
+        let jobs_str = serde_json::to_string(&new_jobs)?;
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("jobs".to_string(), jobs_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET update_flow_jobs_set failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Message: append logs (no dedup) and bump updated_at
+    pub async fn append_message_logs(
+        &self,
+        db: u32,
+        caller_id: u32,
+        id: u32,
+        new_logs: Vec<String>,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let key = Self::message_key(caller_id, id);
+
+        let current: Option<String> = cm.hget(&key, "logs").await.ok();
+        let mut arr: Vec<Value> = current
+            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
+            .and_then(|v| v.as_array().cloned())
+            .unwrap_or_default();
+
+        for l in new_logs {
+            arr.push(Value::String(l));
+        }
+
+        let logs_str = Value::Array(arr).to_string();
+        let ts = crate::time::current_timestamp();
+        let pairs = vec![
+            ("logs".to_string(), logs_str),
+            ("updated_at".to_string(), ts.to_string()),
+        ];
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+            error!(db=%db, key=%key, error=%e, "HSET append_message_logs failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    // -----------------------------
+    // Queues (lists)
+    // -----------------------------
+
+    /// Push a value onto a Redis list using LPUSH in the given DB.
+    pub async fn lpush_list(&self, db: u32, list: &str, value: &str) -> Result<()> {
+        let mut cm = self.manager_for_db(db).await?;
+        let _: i64 = cm.lpush(list, value).await.map_err(|e| {
+            error!(db=%db, list=%list, value=%value, error=%e, "LPUSH failed");
+            e
+        })?;
+        Ok(())
+    }
+
+    /// Enqueue a message key onto the outbound queue (msg_out).
+    /// The value is the canonical message key "message:{caller_id}:{id}".
+    pub async fn enqueue_msg_out(&self, db: u32, caller_id: u32, id: u32) -> Result<()> {
+        let key = Self::message_key(caller_id, id);
+        self.lpush_list(db, "msg_out", &key).await
+    }
+
+    /// Block-pop from msg_out with timeout (seconds). Returns the message key if present.
+    /// Uses BRPOP so that the queue behaves FIFO with LPUSH producer.
+    pub async fn brpop_msg_out(&self, db: u32, timeout_secs: usize) -> Result<Option<String>> {
+        let mut cm = self.manager_for_db(db).await?;
+        // BRPOP returns (list, element) on success
+        let res: Option<(String, String)> = redis::cmd("BRPOP")
+            .arg("msg_out")
+            .arg(timeout_secs)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=%db, timeout_secs=%timeout_secs, error=%e, "BRPOP failed");
+                e
+            })?;
+        Ok(res.map(|(_, v)| v))
+    }
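+
+    // NOTE (editor): illustrative queue round-trip; "msg_out" behaves FIFO because the
+    // producer LPUSHes while the consumer BRPOPs:
+    //
+    //     driver.enqueue_msg_out(1, 3, 42).await?;     // LPUSH msg_out "message:3:42"
+    //     let key = driver.brpop_msg_out(1, 5).await?; // Some("message:3:42")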
+
+    /// Scan all runner:* keys in this DB and return the deserialized Runner entries.
+    pub async fn scan_runners(&self, db: u32) -> Result<Vec<Runner>> {
+        let mut cm = self.manager_for_db(db).await?;
+        let mut out: Vec<Runner> = Vec::new();
+        let mut cursor: u64 = 0;
+        loop {
+            let (next, keys): (u64, Vec<String>) = redis::cmd("SCAN")
+                .arg(cursor)
+                .arg("MATCH")
+                .arg("runner:*")
+                .arg("COUNT")
+                .arg(100)
+                .query_async(&mut cm)
+                .await
+                .map_err(|e| {
+                    error!(db=%db, cursor=%cursor, error=%e, "SCAN failed");
+                    e
+                })?;
+            for k in keys {
+                if let Ok(r) = self.hget_model::<Runner>(db, &k).await {
+                    out.push(r);
+                }
+            }
+            if next == 0 {
+                break;
+            }
+            cursor = next;
+        }
+        Ok(out)
+    }
+
+    // -----------------------------
+    // Global registry (DB 0) for Context IDs
+    // -----------------------------
+
+    /// Register a context id in the global set "contexts" stored in DB 0.
+    pub async fn register_context_id(&self, id: u32) -> Result<()> {
+        let mut cm = self.manager_for_db(0).await?;
+        let _: i64 = redis::cmd("SADD")
+            .arg("contexts")
+            .arg(id)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, context_id=%id, error=%e, "SADD contexts failed");
+                e
+            })?;
+        Ok(())
+    }
+
+    /// List all registered context ids from the global set in DB 0.
+    pub async fn list_context_ids(&self) -> Result<Vec<u32>> {
+        let mut cm = self.manager_for_db(0).await?;
+        // Using SMEMBERS and parsing into u32
+        let vals: Vec<String> = redis::cmd("SMEMBERS")
+            .arg("contexts")
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, error=%e, "SMEMBERS contexts failed");
+                e
+            })?;
+        let mut out = Vec::with_capacity(vals.len());
+        for v in vals {
+            if let Ok(n) = v.parse::<u32>() {
+                out.push(n);
+            }
+        }
+        out.sort_unstable();
+        Ok(out)
+    }
+
+    // -----------------------------
+    // Supervisor correlation mapping (DB 0)
+    // Key: "supcorr:{inner_id_decimal}"
+    // Value: JSON {"context_id":u32,"caller_id":u32,"job_id":u32,"message_id":u32}
+    // TTL: 1 hour to avoid leaks in case of crashes
+    pub async fn supcorr_set(
+        &self,
+        inner_id: u64,
+        context_id: u32,
+        caller_id: u32,
+        job_id: u32,
+        message_id: u32,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let val = serde_json::json!({
+            "context_id": context_id,
+            "caller_id": caller_id,
+            "job_id": job_id,
+            "message_id": message_id,
+        })
+        .to_string();
+        // SET key val EX 3600
+        let _: () = redis::cmd("SET")
+            .arg(&key)
+            .arg(&val)
+            .arg("EX")
+            .arg(3600)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "SET supcorr_set failed");
+                e
+            })?;
+        Ok(())
+    }
+
+    pub async fn supcorr_get(&self, inner_id: u64) -> Result<Option<(u32, u32, u32, u32)>> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let res: Option<String> = redis::cmd("GET")
+            .arg(&key)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "GET supcorr_get failed");
+                e
+            })?;
+        if let Some(s) = res {
+            let v: Value = serde_json::from_str(&s)?;
+            let ctx = v.get("context_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let caller = v.get("caller_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let job = v.get("job_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let msg = v.get("message_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            return Ok(Some((ctx, caller, job, msg)));
+        }
+        Ok(None)
+    }
+
+    pub async fn supcorr_del(&self, inner_id: u64) -> Result<()> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let _: i64 = redis::cmd("DEL")
+            .arg(&key)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "DEL supcorr_del failed");
+                e
+            })?;
+        Ok(())
+    }
+}
diff --git a/bin/coordinator/src/time.rs b/bin/coordinator/src/time.rs
new file mode 100644
index 0000000..dc1b4d3
--- /dev/null
+++ b/bin/coordinator/src/time.rs
@@ -0,0 +1,14 @@
+use std::time::{SystemTime, UNIX_EPOCH};
+
+/// A timestamp since the unix epoch
+pub type Timestamp = i64;
+
+/// Get the current system timestamp
+pub fn current_timestamp() -> Timestamp {
+    let now = SystemTime::now();
+    // A duration is always positive so this returns an unsigned integer, while a timestamp can
+    // predate the unix epoch so we must cast to a signed integer.
+    now.duration_since(UNIX_EPOCH)
+        .expect("Time moves forward")
+        .as_secs() as i64
+}
diff --git a/bin/osiris/Cargo.toml b/bin/osiris/Cargo.toml
new file mode 100644
index 0000000..0737597
--- /dev/null
+++ b/bin/osiris/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "osiris-server"
+version.workspace = true
+edition.workspace = true
+description = "Osiris HTTP server"
+license = "MIT OR Apache-2.0"
+
+[[bin]]
+name = "osiris"
+path = "src/main.rs"
+
+[dependencies]
+# Osiris core
+osiris-core = { path = "../../lib/osiris/core" }
+
+# Web framework
+axum = "0.7"
+tower = "0.4"
+tower-http.workspace = true
+
+# Core dependencies
+tokio.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+anyhow.workspace = true
+
+# Tracing
+tracing.workspace = true
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/bin/osiris/src/main.rs b/bin/osiris/src/main.rs
new file mode 100644
index 0000000..5fa1973
--- /dev/null
+++ b/bin/osiris/src/main.rs
@@ -0,0 +1,145 @@
+//! Osiris Server - Generic OpenAPI REST server for Osiris data structures
+//!
+//! Provides generic CRUD operations for all Osiris structs via REST API.
+//! Routes follow pattern: GET /api/:struct_name/:id
+
+use axum::{
+    extract::{Path, Query, State},
+    http::StatusCode,
+    response::{IntoResponse, Json},
+    routing::get,
+    Router,
+};
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+use std::collections::HashMap;
+use std::sync::Arc;
+use tower_http::cors::{Any, CorsLayer};
+use tracing::{info, warn};
+
+#[derive(Clone)]
+struct AppState {
+    // In a real implementation, this would be a Redis connection pool
+    // For now, we'll use an in-memory store for demonstration
+    store: Arc<tokio::sync::RwLock<HashMap<String, HashMap<String, Value>>>>,
+}
+
+impl AppState {
+    fn new() -> Self {
+        Self {
+            store: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
+        }
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    // Initialize tracing
+    tracing_subscriber::fmt()
+        .with_target(false)
+        .compact()
+        .init();
+
+    let state = AppState::new();
+
+    // Build router
+    let app = Router::new()
+        .route("/health", get(health_check))
+        .route("/api/:struct_name", get(list_structs))
+        .route("/api/:struct_name/:id", get(get_struct))
+        .layer(
+            CorsLayer::new()
+                .allow_origin(Any)
+                .allow_methods(Any)
+                .allow_headers(Any),
+        )
+        .with_state(state);
+
+    let addr = "0.0.0.0:8081";
+    info!("🚀 Osiris Server starting on {}", addr);
+    info!("📖 API Documentation: http://localhost:8081/health");
+
+    let listener = tokio::net::TcpListener::bind(addr)
+        .await
+        .expect("Failed to bind address");
+
+    axum::serve(listener, app)
+        .await
+        .expect("Server failed");
+}
+
+/// Health check endpoint
+async fn health_check() -> impl IntoResponse {
+    Json(json!({
+        "status": "healthy",
+        "service": "osiris-server",
+        "version": "0.1.0"
+    }))
+}
+
+/// Generic GET endpoint for a single struct by ID
+/// GET /api/:struct_name/:id
+async fn get_struct(
+    State(state): State<AppState>,
+    Path((struct_name, id)): Path<(String, String)>,
+) -> Result<Json<Value>, (StatusCode, String)> {
+    info!("GET /api/{}/{}", struct_name, id);
+
+    let store = state.store.read().await;
+
+    if let Some(struct_store) = store.get(&struct_name) {
+        if let Some(data) = struct_store.get(&id) {
+            return Ok(Json(data.clone()));
+        }
+    }
+
+    warn!("Not found: {}/{}", struct_name, id);
+    Err((
+        StatusCode::NOT_FOUND,
+        format!("{}/{} not found", struct_name, id),
+    ))
+}
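+
+// NOTE (editor): illustrative requests against the routes registered above; the struct
+// name "note", id "42", and filter field "title" are assumed for demonstration:
+//
+//     curl http://localhost:8081/api/note/42
+//     curl "http://localhost:8081/api/note?title=hello"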
+
+/// Generic LIST endpoint for all instances of a struct
+/// GET /api/:struct_name?field=value
+async fn list_structs(
+    State(state): State<AppState>,
+    Path(struct_name): Path<String>,
+    Query(params): Query<HashMap<String, String>>,
+) -> Result<Json<Vec<Value>>, (StatusCode, String)> {
+    info!("GET /api/{} with params: {:?}", struct_name, params);
+
+    let store = state.store.read().await;
+
+    if let Some(struct_store) = store.get(&struct_name) {
+        let mut results: Vec<Value> = struct_store.values().cloned().collect();
+
+        // Apply filters if any
+        if !params.is_empty() {
+            results.retain(|item| {
+                params.iter().all(|(key, value)| {
+                    item.get(key)
+                        .and_then(|v| v.as_str())
+                        .map(|v| v == value)
+                        .unwrap_or(false)
+                })
+            });
+        }
+
+        return Ok(Json(results));
+    }
+
+    // Return empty array if struct type doesn't exist yet
+    Ok(Json(vec![]))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_health_check() {
+        let response = health_check().await.into_response();
+        assert_eq!(response.status(), StatusCode::OK);
+    }
+}
diff --git a/bin/runners/osiris/Cargo.toml b/bin/runners/osiris/Cargo.toml
new file mode 100644
index 0000000..30f9ee7
--- /dev/null
+++ b/bin/runners/osiris/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+name = "runner-osiris"
+version.workspace = true
+edition.workspace = true
+description = "Osiris Runner - Database-backed runner"
+license = "MIT OR Apache-2.0"
+
+[[bin]]
+name = "runner_osiris"
+path = "src/main.rs"
+
+[dependencies]
+# Runner library
+hero-runner = { path = "../../../lib/runner" }
+hero-job = { path = "../../../lib/models/job" }
+
+# Core dependencies
+anyhow.workspace = true
+tokio.workspace = true
+log.workspace = true
+env_logger.workspace = true
+clap.workspace = true
+
+# Rhai scripting
+rhai = { version = "1.21.0", features = ["std", "sync", "serde"] }
+
+# Osiris dependencies
+osiris = { package = "osiris-core", path = "../../../lib/osiris/core" }
+heromodels = { git = "https://git.ourworld.tf/herocode/db.git" }
+heromodels_core = { git = "https://git.ourworld.tf/herocode/db.git" }
+heromodels-derive = { git = "https://git.ourworld.tf/herocode/db.git" }
+rhailib_dsl = { git = "https://git.ourworld.tf/herocode/rhailib.git" }
diff --git a/bin/runners/osiris/src/engine.rs b/bin/runners/osiris/src/engine.rs
new file mode 100644
index 0000000..35c762e
--- /dev/null
+++ b/bin/runners/osiris/src/engine.rs
@@ -0,0 +1,294 @@
+/// OSIRIS Rhai Engine
+///
+/// Creates a Rhai engine configured with OSIRIS contexts and methods.
+
+use osiris::context::OsirisContext;
+use osiris::objects::note::rhai::register_note_functions;
+use osiris::objects::event::rhai::register_event_functions;
+use osiris::objects::heroledger::rhai::register_heroledger_modules;
+use osiris::objects::kyc::rhai::register_kyc_modules;
+use osiris::objects::flow::rhai::register_flow_modules;
+use osiris::objects::communication::rhai::register_communication_modules;
+use osiris::objects::money::rhai::register_money_modules;
+use osiris::objects::legal::rhai::register_legal_modules;
+use osiris::objects::supervisor::rhai::register_supervisor_modules;
+use rhai::{Engine, def_package, FuncRegistration};
+use rhai::packages::{Package, StandardPackage};
+
+/// Register get_context function in a Rhai engine with signatory-based access control
+///
+/// Simple logic:
+/// - Context is a list of public keys (participants)
+/// - To get_context, at least one participant must be a signatory
+/// - No state tracking, no caching - creates fresh context each time
+pub fn register_context_api(engine: &mut rhai::Engine) {
+    // Register get_context function with signatory-based access control
+    // Usage: get_context(['pk1', 'pk2', 'pk3'])
+    engine.register_fn("get_context", move |context: rhai::NativeCallContext, participants: rhai::Array| -> Result<OsirisContext, Box<rhai::EvalAltResult>> {
+        // Extract SIGNATORIES from context tag
+        let tag_map = context
+            .tag()
+            .and_then(|tag| tag.read_lock::<rhai::Map>())
+            .ok_or_else(|| Box::new(rhai::EvalAltResult::ErrorRuntime("Context tag must be a Map.".into(), context.position())))?;
+
+        let signatories_dynamic = tag_map.get("SIGNATORIES")
+            .ok_or_else(|| Box::new(rhai::EvalAltResult::ErrorRuntime("'SIGNATORIES' not found in context tag Map.".into(), context.position())))?;
+
+        // Convert SIGNATORIES array to Vec<String>
+        let signatories_array = signatories_dynamic.clone().into_array()
+            .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("SIGNATORIES must be an array: {}", e).into(), context.position())))?;
+
+        let signatories: Vec<String> = signatories_array.into_iter()
+            .map(|s| s.into_string())
+            .collect::<Result<Vec<String>, _>>()
+            .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("SIGNATORIES must contain strings: {}", e).into(), context.position())))?;
+
+        // Convert participants array to Vec<String>
+        let participant_keys: Vec<String> = participants.into_iter()
+            .map(|p| p.into_string())
+            .collect::<Result<Vec<String>, _>>()
+            .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("Participants must be strings: {}", e).into(), context.position())))?;
+
+        // Verify at least one participant is a signatory
+        let has_signatory = participant_keys.iter().any(|p| signatories.contains(p));
+        if !has_signatory {
+            return Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
+                format!("Access denied: none of the participants are signatories. Signatories: {}", signatories.join(", ")).into(),
+                context.position()
+            )));
+        }
+
+        // Create context directly with participants
+        OsirisContext::builder()
+            .participants(participant_keys)
+            .build()
+            .map_err(|e| format!("Failed to create context: {}", e).into())
+    });
+}
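+
+// NOTE (editor): illustrative Rhai-side usage, assuming the engine's tag has been seeded
+// with a Map containing a "SIGNATORIES" array before script evaluation:
+//
+//     let ctx = get_context(["pk1", "pk2"]);   // ok if pk1 or pk2 is a signatory
+//     ctx.save(note);                          // overloaded typed save (see package below)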
{ + /// OSIRIS package with all OSIRIS types and functions + pub OsirisPackage(module) : StandardPackage { + // Register OsirisContext type with all its methods + module.set_custom_type::<OsirisContext>("OsirisContext"); + + // Register OsirisContext methods + FuncRegistration::new("participants") + .set_into_module(module, |ctx: &mut OsirisContext| ctx.participants()); + FuncRegistration::new("context_id") + .set_into_module(module, |ctx: &mut OsirisContext| ctx.context_id()); + // Typed save methods - all named "save" for function overloading using generic save_object + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, note: osiris::objects::Note| ctx.save_object(note)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, event: osiris::objects::Event| ctx.save_object(event)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, user: osiris::objects::heroledger::user::User| ctx.save_object(user)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, group: osiris::objects::heroledger::group::Group| ctx.save_object(group)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, account: osiris::objects::heroledger::money::Account| ctx.save_object(account)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, zone: osiris::objects::heroledger::dnsrecord::DNSZone| ctx.save_object(zone)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, kyc_info: osiris::objects::KycInfo| ctx.save_object(kyc_info)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, kyc_session: osiris::objects::KycSession| ctx.save_object(kyc_session)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, flow_template: osiris::objects::FlowTemplate| ctx.save_object(flow_template)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, flow_instance: osiris::objects::FlowInstance| ctx.save_object(flow_instance)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, verification: osiris::objects::Verification| ctx.save_object(verification)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, email_client: osiris::objects::communication::email::EmailClient| ctx.save_object(email_client)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, mail_template: osiris::objects::communication::email::MailTemplate| ctx.save_object(mail_template)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, account: osiris::objects::Account| ctx.save_object(account)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, asset: osiris::objects::Asset| ctx.save_object(asset)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, transaction: osiris::objects::Transaction| ctx.save_object(transaction)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, contract: osiris::objects::Contract| ctx.save_object(contract)); + + // Supervisor objects + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, api_key: osiris::objects::supervisor::ApiKey| ctx.save_object(api_key)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, runner:
osiris::objects::supervisor::Runner| ctx.save_object(runner)); + FuncRegistration::new("save") + .set_into_module(module, |ctx: &mut OsirisContext, job_metadata: osiris::objects::supervisor::JobMetadata| ctx.save_object(job_metadata)); + FuncRegistration::new("list") + .set_into_module(module, |ctx: &mut OsirisContext, collection: String| ctx.list(collection)); + FuncRegistration::new("get") + .set_into_module(module, |ctx: &mut OsirisContext, collection: String, id: String| ctx.get(collection, id)); + FuncRegistration::new("delete") + .set_into_module(module, |ctx: &mut OsirisContext, collection: String, id: String| ctx.delete(collection, id)); + + // Register Note functions + register_note_functions(module); + + // Register Event functions + register_event_functions(module); + + // Register HeroLedger modules (User, Group, Account, DNSZone) + register_heroledger_modules(module); + + // Register KYC modules (KycClient, KycSession) + register_kyc_modules(module); + + // Register Flow modules (FlowTemplate, FlowInstance) + register_flow_modules(module); + + // Register Communication modules (Verification, EmailClient) + register_communication_modules(module); + + // Register Money modules (Account, Asset, Transaction, PaymentClient) + register_money_modules(module); + + // Register Legal modules (Contract) + register_legal_modules(module); + + // Register Supervisor modules (ApiKey, Runner, JobMetadata) + register_supervisor_modules(module); + + // Register get_context function with signatory-based access control + FuncRegistration::new("get_context") + .set_into_module(module, |context: rhai::NativeCallContext, participants: rhai::Array| -> Result<OsirisContext, Box<rhai::EvalAltResult>> { + // Extract SIGNATORIES from context tag + let tag_map = context + .tag() + .and_then(|tag| tag.read_lock::<rhai::Map>()) + .ok_or_else(|| Box::new(rhai::EvalAltResult::ErrorRuntime("Context tag must be a Map.".into(), context.position())))?; + + let signatories_dynamic = tag_map.get("SIGNATORIES") + .ok_or_else(|| Box::new(rhai::EvalAltResult::ErrorRuntime("'SIGNATORIES' not found in context tag Map.".into(), context.position())))?; + + // Convert SIGNATORIES array to Vec<String> + let signatories_array = signatories_dynamic.clone().into_array() + .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("SIGNATORIES must be an array: {}", e).into(), context.position())))?; + + let signatories: Vec<String> = signatories_array.into_iter() + .map(|s| s.into_string()) + .collect::<Result<Vec<String>, _>>() + .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("SIGNATORIES must contain strings: {}", e).into(), context.position())))?; + + // Convert participants array to Vec<String> + let participant_keys: Vec<String> = participants.into_iter() + .map(|p| p.into_string()) + .collect::<Result<Vec<String>, _>>() + .map_err(|e| Box::new(rhai::EvalAltResult::ErrorRuntime(format!("Participants must be strings: {}", e).into(), context.position())))?; + + // Verify at least one participant is a signatory + let has_signatory = participant_keys.iter().any(|p| signatories.contains(p)); + if !has_signatory { + return Err(Box::new(rhai::EvalAltResult::ErrorRuntime( + format!("Access denied: none of the participants are signatories.
Signatories: {}", signatories.join(", ")).into(), + context.position() + ))); + } + + // Create context directly with participants + OsirisContext::builder() + .participants(participant_keys) + .build() + .map_err(|e| format!("Failed to create context: {}", e).into()) + }); + } +} + +/// Register all OSIRIS components into an engine +/// This is a convenience function that registers the complete OsirisPackage +pub fn register_osiris_full(engine: &mut Engine) { + let package = OsirisPackage::new(); + package.register_into_engine(engine); +} + +/// Create a single OSIRIS engine (for backward compatibility) +pub fn create_osiris_engine() -> Result<Engine, Box<dyn std::error::Error>> { + let mut engine = Engine::new_raw(); + register_osiris_full(&mut engine); + Ok(engine) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_osiris_engine() { + let result = create_osiris_engine(); + assert!(result.is_ok()); + + let mut engine = result.unwrap(); + + // Set up context tags with SIGNATORIES (like in runner_rust example) + let mut tag_map = rhai::Map::new(); + // Create a proper Rhai array + let signatories: rhai::Array = vec![ + rhai::Dynamic::from("pk1".to_string()), + rhai::Dynamic::from("pk2".to_string()), + rhai::Dynamic::from("pk3".to_string()), + ]; + tag_map.insert("SIGNATORIES".into(), rhai::Dynamic::from(signatories)); + tag_map.insert("DB_PATH".into(), "/tmp/test_db".to_string().into()); + tag_map.insert("CONTEXT_ID".into(), "test_context".to_string().into()); + engine.set_default_tag(rhai::Dynamic::from(tag_map)); + + // Test get_context with valid signatories + let mut scope = rhai::Scope::new(); + let test_result = engine.eval_with_scope::<String>( + &mut scope, + r#" + // All participants must be signatories + let ctx = get_context(["pk1", "pk2"]); + ctx.context_id() + "# + ); + + if let Err(ref e) = test_result { + eprintln!("Test error: {}", e); + } + assert!(test_result.is_ok(), "Failed to get context: {:?}", test_result.err()); + assert_eq!(test_result.unwrap().to_string(), "pk1,pk2"); + } + + #[test] + fn test_engine_with_manager_access_denied() { + let result = create_osiris_engine(); + assert!(result.is_ok()); + + let mut engine = result.unwrap(); + + // Set up context tags with SIGNATORIES + let mut tag_map = rhai::Map::new(); + // Create a proper Rhai array + let signatories: rhai::Array = vec![ + rhai::Dynamic::from("pk1".to_string()), + rhai::Dynamic::from("pk2".to_string()), + ]; + tag_map.insert("SIGNATORIES".into(), rhai::Dynamic::from(signatories)); + tag_map.insert("DB_PATH".into(), "/tmp/test_db".to_string().into()); + tag_map.insert("CONTEXT_ID".into(), "test_context".to_string().into()); + engine.set_default_tag(rhai::Dynamic::from(tag_map)); + + // Test get_context with invalid participant (not a signatory) + let mut scope = rhai::Scope::new(); + let test_result = engine.eval_with_scope::<String>( + &mut scope, + r#" + // pk3 is not a signatory, should fail + let ctx = get_context(["pk1", "pk3"]); + ctx.context_id() + "# + ); + + // Should fail because pk3 is not in SIGNATORIES + assert!(test_result.is_err()); + let err_msg = test_result.unwrap_err().to_string(); + assert!(err_msg.contains("Access denied") || err_msg.contains("not a signatory")); + } +} diff --git a/bin/runners/osiris/src/main.rs b/bin/runners/osiris/src/main.rs new file mode 100644 index 0000000..c44f104 --- /dev/null +++ b/bin/runners/osiris/src/main.rs @@ -0,0 +1,117 @@ +use hero_runner::{spawn_sync_runner, script_mode::execute_script_mode}; +use clap::Parser; +use log::{error, info}; +use tokio::sync::mpsc; + +mod
engine; +use engine::create_osiris_engine; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Runner ID + runner_id: String, + + /// Redis URL (also used as HeroDB URL) + #[arg(short = 'r', long, default_value = "redis://localhost:6379")] + redis_url: String, + + /// Base database ID for OSIRIS contexts + #[arg(long, default_value_t = 1)] + base_db_id: u16, + + /// Script to execute in single-job mode (optional) + #[arg(short, long)] + script: Option<String>, +} + +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + // Initialize logging + env_logger::init(); + + let args = Args::parse(); + + // Check if we're in script mode + if let Some(script_content) = args.script { + info!("Running in script mode with runner ID: {}", args.runner_id); + + let redis_url = args.redis_url.clone(); + let base_db_id = args.base_db_id; + let result = execute_script_mode( + &script_content, + &args.runner_id, + args.redis_url, + std::time::Duration::from_secs(300), // Default timeout for OSIS + move || create_osiris_engine() + .expect("Failed to create OSIRIS engine"), + ).await; + + match result { + Ok(output) => { + println!("Script execution result:\n{}", output); + return Ok(()); + } + Err(e) => { + error!("Script execution failed: {}", e); + return Err(e); + } + } + } + + info!("Starting OSIS Sync Runner with ID: {}", args.runner_id); + info!("Redis URL: {}", args.redis_url); + + // Create shutdown channel + let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1); + + // Setup signal handling for graceful shutdown + let shutdown_tx_clone = shutdown_tx.clone(); + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen for ctrl+c"); + info!("Received Ctrl+C, initiating shutdown..."); + let _ = shutdown_tx_clone.send(()).await; + }); + + // Spawn the sync runner with engine factory + let redis_url = args.redis_url.clone(); + let base_db_id = args.base_db_id; + let runner_handle = spawn_sync_runner( + args.runner_id.clone(), + args.redis_url, + shutdown_rx, + move || create_osiris_engine() + .expect("Failed to create OSIRIS engine"), + ); + + info!("OSIS Sync Runner '{}' started successfully", args.runner_id); + + // Wait for the runner to complete + match runner_handle.await { + Ok(Ok(())) => { + info!("OSIS Sync Runner '{}' shut down successfully", args.runner_id); + } + Ok(Err(e)) => { + error!("OSIS Sync Runner '{}' encountered an error: {}", args.runner_id, e); + return Err(e); + } + Err(e) => { + error!("Failed to join OSIS Sync Runner '{}' task: {}", args.runner_id, e); + return Err(Box::new(e)); + } + } + + Ok(()) +} + + + + +/// Example: Run a Rhai script with OSIRIS support +pub fn run_osiris_script( + script: &str, +) -> Result<(), Box<dyn std::error::Error>> { + let engine = create_osiris_engine()?; + engine.run(script)?; + Ok(()) +} \ No newline at end of file diff --git a/bin/runners/sal/Cargo.toml b/bin/runners/sal/Cargo.toml new file mode 100644 index 0000000..90140ed --- /dev/null +++ b/bin/runners/sal/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "runner-sal" +version.workspace = true +edition.workspace = true +description = "SAL Runner - System Abstraction Layer runner" +license = "MIT OR Apache-2.0" + +[[bin]] +name = "runner_sal" +path = "src/main.rs" + +[dependencies] +# Runner library +hero-runner = { path = "../../../lib/runner" } +hero-job = { path = "../../../lib/models/job" } + +# Core dependencies +anyhow.workspace = true +tokio.workspace = true +log.workspace = true +env_logger.workspace = true +clap.workspace = true + +# Rhai
and logging +rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals", "serde"] } +hero_logger = { git = "https://git.ourworld.tf/herocode/baobab.git", branch = "logger" } + +# SAL modules +sal-os = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-redisclient = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-postgresclient = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-process = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-virt = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-git = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-zinit-client = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-mycelium = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-text = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-net = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-kubernetes = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-vault = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } +sal-hetzner = { git = "https://git.ourworld.tf/herocode/herolib_rust.git" } diff --git a/bin/runners/sal/src/README.md b/bin/runners/sal/src/README.md new file mode 100644 index 0000000..3d0a694 --- /dev/null +++ b/bin/runners/sal/src/README.md @@ -0,0 +1,87 @@ +# SAL Runner + +The SAL (System Abstraction Layer) Runner is an asynchronous job processing engine that executes Rhai scripts with access to system-level operations and infrastructure management capabilities. + +## Features + +- **Asynchronous Processing**: Handles multiple jobs concurrently with configurable timeouts +- **Redis Integration**: Uses Redis for job queue management and coordination +- **System Operations**: Full access to SAL modules including OS, networking, containers, and cloud services +- **Graceful Shutdown**: Responds to SIGINT (Ctrl+C) for clean termination +- **Comprehensive Logging**: Detailed logging for monitoring and debugging + +## Usage + +```bash +cargo run --bin runner_sal -- <runner_id> [OPTIONS] +``` + +### Arguments + +- `<runner_id>`: Unique identifier for this runner instance (required, positional) + +### Options + +- `-d, --db-path <PATH>`: Database file path (default: `/tmp/sal.db`) +- `-r, --redis-url <URL>`: Redis connection URL (default: `redis://localhost:6379`) +- `-t, --timeout <SECONDS>`: Default job timeout in seconds (default: `300`) + +### Examples + +```bash +# Basic usage with default settings +cargo run --bin runner_sal -- myrunner + +# Custom Redis URL and database path +cargo run --bin runner_sal -- production-runner -r redis://prod-redis:6379 -d /var/lib/sal.db + +# Custom timeout for long-running jobs +cargo run --bin runner_sal -- batch-runner -t 3600 +``` + +## Available SAL Modules + +The SAL runner provides access to the following system modules through Rhai scripts: + +- **OS Operations**: File system, process management, system information +- **Redis Client**: Redis database operations and caching +- **PostgreSQL Client**: Database connectivity and queries +- **Process Management**: System process control and monitoring +- **Virtualization**: Container and VM management +- **Git Operations**: Version control system integration +- **Zinit Client**: Service management and initialization +- **Mycelium**: Networking and mesh connectivity +- **Text Processing**: String manipulation and text utilities +- **Network Operations**: HTTP requests, network utilities +- **Kubernetes**: Container orchestration and
cluster management +- **Hetzner Cloud**: Cloud infrastructure management + +## Architecture + +The SAL runner uses an asynchronous architecture that: + +1. Connects to Redis for job queue management +2. Creates a Rhai engine with all SAL modules registered +3. Processes jobs concurrently with configurable timeouts +4. Handles graceful shutdown on SIGINT +5. Provides comprehensive error handling and logging + +## Error Handling + +The runner provides detailed error messages for common issues: + +- Redis connection failures +- Database access problems +- Script execution errors +- Timeout handling +- Resource cleanup on shutdown + +## Logging + +Set the `RUST_LOG` environment variable to control logging levels: + +```bash +RUST_LOG=debug cargo run --bin runner_sal -- myrunner +``` + +Available log levels: `error`, `warn`, `info`, `debug`, `trace` diff --git a/bin/runners/sal/src/engine.rs b/bin/runners/sal/src/engine.rs new file mode 100644 index 0000000..c902791 --- /dev/null +++ b/bin/runners/sal/src/engine.rs @@ -0,0 +1,73 @@ +use std::sync::{Arc, OnceLock}; +// Re-export common Rhai types for convenience +pub use rhai::Engine; + +// Re-export specific functions from sal-os package + +// Re-export Redis client module registration function + +// Re-export PostgreSQL client module registration function + + +// Re-export virt functions from sal-virt package + + +/// Engine factory for creating and sharing Rhai engines with SAL modules. +pub struct EngineFactory { + engine: Arc<Engine>, +} + +impl EngineFactory { + /// Create a new engine factory with a configured Rhai engine. + pub fn new() -> Self { + let mut engine = Engine::new(); + register_sal_modules(&mut engine); + // Logger + hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "sal_runner"); + + Self { + engine: Arc::new(engine), + } + } + + /// Get a shared reference to the engine. + pub fn get_engine(&self) -> Arc<Engine> { + Arc::clone(&self.engine) + } + + /// Get the global singleton engine factory. + pub fn global() -> &'static EngineFactory { + static FACTORY: OnceLock<EngineFactory> = OnceLock::new(); + FACTORY.get_or_init(|| EngineFactory::new()) + } +} + +pub fn register_sal_modules(engine: &mut Engine) { + let _ = sal_os::rhai::register_os_module(engine); + let _ = sal_redisclient::rhai::register_redisclient_module(engine); + let _ = sal_postgresclient::rhai::register_postgresclient_module(engine); + let _ = sal_process::rhai::register_process_module(engine); + let _ = sal_virt::rhai::register_virt_module(engine); + let _ = sal_git::rhai::register_git_module(engine); + let _ = sal_zinit_client::rhai::register_zinit_module(engine); + let _ = sal_mycelium::rhai::register_mycelium_module(engine); + let _ = sal_text::rhai::register_text_module(engine); + let _ = sal_net::rhai::register_net_module(engine); + let _ = sal_kubernetes::rhai::register_kubernetes_module(engine); + let _ = sal_hetzner::rhai::register_hetzner_module(engine); + + println!("SAL modules registered successfully."); +} + +/// Create a new SAL engine instance. +pub fn create_sal_engine() -> Engine { + let mut engine = Engine::new(); + register_sal_modules(&mut engine); + hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "sal_runner"); + engine +} + +/// Create a shared system engine using the factory.
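+/// +/// Illustrative use: `create_shared_sal_engine().run("print(42);")`; the returned `Arc<Engine>` +/// derefs to `&Engine`, so it can be used like any engine reference.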
+pub fn create_shared_sal_engine() -> Arc<Engine> { + EngineFactory::global().get_engine() +} diff --git a/bin/runners/sal/src/main.rs b/bin/runners/sal/src/main.rs new file mode 100644 index 0000000..0617839 --- /dev/null +++ b/bin/runners/sal/src/main.rs @@ -0,0 +1,108 @@ +use hero_runner::{spawn_async_runner, script_mode::execute_script_mode}; +use clap::Parser; +use log::{error, info}; +use std::time::Duration; +use tokio::sync::mpsc; + +mod engine; +use engine::create_sal_engine; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Runner ID + runner_id: String, + + /// Database path + #[arg(short, long, default_value = "/tmp/sal.db")] + db_path: String, + + /// Redis URL + #[arg(short = 'r', long, default_value = "redis://localhost:6379")] + redis_url: String, + + /// Default timeout for jobs in seconds + #[arg(short, long, default_value_t = 300)] + timeout: u64, + + /// Script to execute in single-job mode (optional) + #[arg(short, long)] + script: Option<String>, +} + +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + // Initialize logging + env_logger::init(); + + let args = Args::parse(); + + // Check if we're in script mode + if let Some(script_content) = args.script { + info!("Running in script mode with runner ID: {}", args.runner_id); + + let result = execute_script_mode( + &script_content, + &args.runner_id, + args.redis_url, + Duration::from_secs(args.timeout), + create_sal_engine, + ).await; + + match result { + Ok(output) => { + println!("Script execution result:\n{}", output); + return Ok(()); + } + Err(e) => { + error!("Script execution failed: {}", e); + return Err(e); + } + } + } + + info!("Starting SAL Async Runner with ID: {}", args.runner_id); + info!("Database path: {}", args.db_path); + info!("Redis URL: {}", args.redis_url); + info!("Default timeout: {} seconds", args.timeout); + + // Create shutdown channel + let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1); + + // Setup signal handling for graceful shutdown + let shutdown_tx_clone = shutdown_tx.clone(); + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen for ctrl+c"); + info!("Received Ctrl+C, initiating shutdown..."); + let _ = shutdown_tx_clone.send(()).await; + }); + + // Spawn the async runner with engine factory + let runner_handle = spawn_async_runner( + args.runner_id.clone(), + args.db_path, + args.redis_url, + shutdown_rx, + Duration::from_secs(args.timeout), + create_sal_engine, + ); + + info!("SAL Async Runner '{}' started successfully", args.runner_id); + + // Wait for the runner to complete + match runner_handle.await { + Ok(Ok(())) => { + info!("SAL Async Runner '{}' shut down successfully", args.runner_id); + } + Ok(Err(e)) => { + error!("SAL Async Runner '{}' encountered an error: {}", args.runner_id, e); + return Err(e); + } + Err(e) => { + error!("Failed to join SAL Async Runner '{}' task: {}", args.runner_id, e); + return Err(Box::new(e)); + } + } + + Ok(()) +} diff --git a/bin/supervisor/.env.example b/bin/supervisor/.env.example new file mode 100644 index 0000000..b167021 --- /dev/null +++ b/bin/supervisor/.env.example @@ -0,0 +1,23 @@ +# Hero Supervisor Configuration + +# Redis connection URL +REDIS_URL=redis://127.0.0.1:6379 + +# OpenRPC Server Configuration +BIND_ADDRESS=127.0.0.1 +PORT=3030 + +# Authentication Secrets (generate with: ./scripts/generate_secret.sh) +# At least one admin secret is required +ADMIN_SECRETS=your_admin_secret_here + +# Optional: Additional secrets for different access levels +#
USER_SECRETS=user_secret_1,user_secret_2 +# REGISTER_SECRETS=register_secret_1 + +# Optional: Pre-configured runners (comma-separated names) +# These runners will be automatically registered on startup +# RUNNERS=runner1,runner2,runner3 + +# Optional: Mycelium network URL (requires mycelium feature) +# MYCELIUM_URL=http://127.0.0.1:8989 diff --git a/bin/supervisor/.gitignore b/bin/supervisor/.gitignore new file mode 100644 index 0000000..b8c474b --- /dev/null +++ b/bin/supervisor/.gitignore @@ -0,0 +1,4 @@ +target +.bin +.env +/tmp/supervisor-*.log \ No newline at end of file diff --git a/bin/supervisor/Cargo.toml b/bin/supervisor/Cargo.toml new file mode 100644 index 0000000..795b8e5 --- /dev/null +++ b/bin/supervisor/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "hero-supervisor" +version.workspace = true +edition.workspace = true + +[lib] +name = "hero_supervisor" +path = "src/lib.rs" + +[[bin]] +name = "supervisor" +path = "src/bin/supervisor.rs" + +[dependencies] +# Job types +hero-job = { path = "../../lib/models/job" } +hero-job-client = { path = "../../lib/clients/job" } + +# Async runtime +tokio.workspace = true +async-trait.workspace = true + +# Redis client +redis.workspace = true + +# Core dependencies +uuid.workspace = true +log.workspace = true +thiserror.workspace = true +chrono.workspace = true +serde.workspace = true +serde_json.workspace = true +env_logger.workspace = true + +# CLI argument parsing +clap.workspace = true +toml.workspace = true + +# OpenRPC dependencies +jsonrpsee.workspace = true +anyhow.workspace = true +futures.workspace = true + +# CORS support for OpenRPC server +tower-http.workspace = true +tower.workspace = true +hyper.workspace = true +hyper-util.workspace = true +http-body-util.workspace = true + +# Osiris client for persistent storage +# osiris-client = { git = "https://git.ourworld.tf/herocode/osiris.git" } # Temporarily disabled - needs update + +[dev-dependencies] +tokio-test = "0.4" +hero-supervisor-openrpc-client = { path = "../../lib/clients/supervisor" } +escargot = "0.5" + +[features] +default = ["cli"] +cli = [] + +# Examples +[[example]] +name = "osiris_openrpc" +path = "examples/osiris_openrpc/main.rs" diff --git a/bin/supervisor/README.md b/bin/supervisor/README.md new file mode 100644 index 0000000..ea5255f --- /dev/null +++ b/bin/supervisor/README.md @@ -0,0 +1,46 @@ +# Supervisor + +A job execution supervisor that queues jobs to runners over Redis and returns their output. It provides an OpenRPC server for remote job dispatching. The OpenRPC server requires authorization via API keys. API keys are scoped to grant one of three levels of access: Admin, Registrar (can register runners), User (can dispatch jobs). + +Jobs contain scripts, environment variables, an identifier of the runner to execute the script, and signatures. The supervisor verifies the signatures; access control based on who a script's signatories are, however, is handled by the runner logic. + +**Note:** Runners are expected to be started and managed externally. The supervisor only tracks which runners are registered and queues jobs to them via Redis. + +## Usage + +The supervisor needs an admin key configured before it can start. +`cargo run -- --admin-secret <your-admin-secret>` + +You can also use the run script, which reads the admin key from the `.env` file. +`./scripts/run.sh` + +The scripts directory also offers other scripts for building, testing, etc.
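+ +Putting these together, a first run might look like this (sketch; it assumes `generate_secret.sh` prints the generated secret to stdout, as the comment in `.env.example` suggests): + +```bash +# generate an admin secret and start the supervisor with it +ADMIN_SECRET=$(./scripts/generate_secret.sh) +cargo run -- --admin-secret "$ADMIN_SECRET" +```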
+ +## Functionality + +Beyond job dispatching, the supervisor provides: +- **API Key Management**: Create, list, and remove API keys with different permission scopes +- **Runner Registration**: Register runners so the supervisor knows which queues are available +- **Job Lifecycle**: Create, start, stop, and monitor jobs +- **Job Queuing**: Queue jobs to specific runners via Redis + +Runner registration simply means the supervisor becomes aware that a certain runner is listening to its queue. The full API specification can be seen in `docs/openrpc.json`. + +## OpenRPC + +### Server + +The supervisor automatically starts an OpenRPC server on `127.0.0.1:3030` that exposes all supervisor functionality via JSON-RPC. + + +### Example JSON-RPC Call + +```bash +curl -X POST -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"list_runners","id":1}' \ + http://127.0.0.1:3030 +``` + +### Client + +The repository also provides an OpenRPC client for the supervisor that is compatible with WASM targets as well. \ No newline at end of file diff --git a/bin/supervisor/docs/AUTH.md b/bin/supervisor/docs/AUTH.md new file mode 100644 index 0000000..1e6fcf7 --- /dev/null +++ b/bin/supervisor/docs/AUTH.md @@ -0,0 +1,146 @@ +# Hero Supervisor Authentication + +The Hero Supervisor now supports API key-based authentication with three permission scopes: + +## Permission Scopes + +1. **Admin** - Full access to all operations including key management +2. **Registrar** - Can register new runners +3. **User** - Can create and manage jobs + +## Starting the Supervisor with an Admin Key + +Bootstrap an initial admin key when starting the supervisor: + +```bash +cargo run --bin supervisor -- --bootstrap-admin-key "my-admin" +``` + +This will output: + +``` +╔════════════════════════════════════════════════════════════╗ +║ 🔑 Admin API Key Created ║ +╚════════════════════════════════════════════════════════════╝ + Name: my-admin + Key: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + Scope: admin + ⚠️ SAVE THIS KEY - IT WILL NOT BE SHOWN AGAIN! +╚════════════════════════════════════════════════════════════╝ +``` + +**IMPORTANT:** Save this key securely - it will not be displayed again!
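+ +To avoid retyping the key, it can be exported once and substituted where the examples below use literal placeholder keys (the variable name is illustrative): + +```bash +export ADMIN_KEY="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # the key printed at startup +```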
+ +## API Endpoints + +### Verify API Key + +Verify a key and get its metadata: + +```bash +curl -X POST http://127.0.0.1:3030 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "auth.verify", + "params": { + "key": "your-api-key-here" + }, + "id": 1 + }' +``` + +Response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "valid": true, + "name": "my-admin", + "scope": "admin" + }, + "id": 1 +} +``` + +### Create New API Key (Admin Only) + +```bash +curl -X POST http://127.0.0.1:3030 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "auth.create_key", + "params": { + "admin_key": "your-admin-key", + "name": "runner-bot", + "scope": "registrar" + }, + "id": 1 + }' +``` + +Response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "key": "new-generated-uuid", + "name": "runner-bot", + "scope": "registrar", + "created_at": "2025-10-27T15:00:00Z", + "expires_at": null + }, + "id": 1 +} +``` + +### List All API Keys (Admin Only) + +```bash +curl -X POST http://127.0.0.1:3030 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "auth.list_keys", + "params": { + "admin_key": "your-admin-key" + }, + "id": 1 + }' +``` + +### Remove API Key (Admin Only) + +```bash +curl -X POST http://127.0.0.1:3030 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "method": "auth.remove_key", + "params": { + "admin_key": "your-admin-key", + "key": "key-to-remove" + }, + "id": 1 + }' +``` + +## Using Keys in the Admin UI + +The admin UI will use the `auth.verify` endpoint during login to: +1. Validate the provided API key +2. Retrieve the key's name and scope +3. Display the user's name and permissions in the header +4. Show/hide UI elements based on scope + +## Migration from Legacy Secrets + +The supervisor still supports the legacy secret-based authentication for backward compatibility: +- `--admin-secret` - Legacy admin secrets +- `--user-secret` - Legacy user secrets +- `--register-secret` - Legacy register secrets + +However, the new API key system is recommended for better management and auditability. diff --git a/bin/supervisor/docs/MYCELIUM.md b/bin/supervisor/docs/MYCELIUM.md new file mode 100644 index 0000000..2fda463 --- /dev/null +++ b/bin/supervisor/docs/MYCELIUM.md @@ -0,0 +1,268 @@ +# Mycelium Integration - Now Optional! + +The Mycelium integration is now an optional feature. The supervisor can run with just the OpenRPC HTTP server, making it simpler to use and deploy. 
+ +## What Changed + +### Before +- Mycelium integration was always enabled +- Supervisor would continuously try to connect to Mycelium on port 8990 +- Error logs if Mycelium wasn't available +- Required additional dependencies + +### After +- ✅ Mycelium is now an optional feature +- ✅ Supervisor runs with clean OpenRPC HTTP server by default +- ✅ No connection errors when Mycelium isn't needed +- ✅ Smaller binary size without Mycelium dependencies + +## Running the Supervisor + +### Option 1: Simple OpenRPC Server (Recommended) + +**No Mycelium, just OpenRPC:** + +```bash +# Using the helper script +./run_supervisor_simple.sh + +# Or manually +MYCELIUM_URL="" cargo run --bin supervisor -- \ + --redis-url redis://localhost:6379 \ + --port 3030 +``` + +This starts: +- ✅ OpenRPC HTTP server on port 3030 +- ✅ Redis connection for job queuing +- ❌ No Mycelium integration + +### Option 2: With Mycelium Integration + +**Enable Mycelium feature:** + +```bash +# Build with Mycelium support +cargo build --bin supervisor --features mycelium + +# Run with Mycelium URL +MYCELIUM_URL="http://localhost:8990" cargo run --bin supervisor --features mycelium -- \ + --redis-url redis://localhost:6379 \ + --port 3030 +``` + +This starts: +- ✅ OpenRPC HTTP server on port 3030 +- ✅ Redis connection for job queuing +- ✅ Mycelium integration (connects to daemon) + +## Feature Flags + +### Available Features + +| Feature | Description | Default | +|---------|-------------|---------| +| `cli` | Command-line interface | ✅ Yes | +| `mycelium` | Mycelium integration | ❌ No | + +### Building with Features + +```bash +# Default build (CLI only, no Mycelium) +cargo build --bin supervisor + +# With Mycelium +cargo build --bin supervisor --features mycelium + +# Minimal (no CLI, no Mycelium) +cargo build --bin supervisor --no-default-features +``` + +## Architecture + +### Without Mycelium (Default) + +``` +┌─────────────────┐ +│ Client │ +└────────┬────────┘ + │ HTTP/JSON-RPC + ▼ +┌─────────────────┐ +│ Supervisor │ +│ OpenRPC Server │ +│ (Port 3030) │ +└────────┬────────┘ + │ Redis + ▼ +┌─────────────────┐ +│ Runners │ +└─────────────────┘ +``` + +### With Mycelium (Optional) + +``` +┌─────────────────┐ +│ Client │ +└────────┬────────┘ + │ HTTP/JSON-RPC + ▼ +┌─────────────────┐ ┌──────────────┐ +│ Supervisor │◄────►│ Mycelium │ +│ OpenRPC Server │ │ Daemon │ +│ (Port 3030) │ │ (Port 8990) │ +└────────┬────────┘ └──────────────┘ + │ Redis + ▼ +┌─────────────────┐ +│ Runners │ +└─────────────────┘ +``` + +## Environment Variables + +| Variable | Description | Default | Required | +|----------|-------------|---------|----------| +| `MYCELIUM_URL` | Mycelium daemon URL | `http://127.0.0.1:8990` | No | +| `RUST_LOG` | Log level | `info` | No | + +**To disable Mycelium:** +```bash +export MYCELIUM_URL="" +``` + +## Dependencies + +### Core Dependencies (Always) +- `tokio` - Async runtime +- `redis` - Job queuing +- `jsonrpsee` - OpenRPC server +- `runner_rust` - Job model + +### Mycelium Dependencies (Optional) +- `reqwest` - HTTP client +- `base64` - Encoding +- `rand` - Random IDs + +## Examples + +All examples work without Mycelium: + +```bash +# Simple end-to-end example +RUST_LOG=info cargo run --example simple_e2e + +# Full automated demo +RUST_LOG=info cargo run --example end_to_end_demo +``` + +## Migration Guide + +### If you were using Mycelium + +**Before:** +```bash +cargo run --bin supervisor +# Would try to connect to Mycelium automatically +``` + +**After:** +```bash +# Option A: Disable Mycelium (recommended 
for most use cases) +MYCELIUM_URL="" cargo run --bin supervisor + +# Option B: Enable Mycelium feature +cargo run --bin supervisor --features mycelium +``` + +### If you weren't using Mycelium + +**Before:** +```bash +cargo run --bin supervisor +# Would see connection errors to port 8990 +``` + +**After:** +```bash +cargo run --bin supervisor +# Clean startup, no connection errors! 🎉 +``` + +## Benefits + +### For Development +- ✅ Faster builds (fewer dependencies) +- ✅ Simpler setup (no Mycelium daemon needed) +- ✅ Cleaner logs (no connection errors) +- ✅ Easier debugging + +### For Production +- ✅ Smaller binary size +- ✅ Fewer runtime dependencies +- ✅ More flexible deployment +- ✅ Optional advanced features + +## Testing + +### Test without Mycelium +```bash +# Build +cargo build --bin supervisor + +# Run tests +cargo test + +# Run examples +cargo run --example simple_e2e +``` + +### Test with Mycelium +```bash +# Build with feature +cargo build --bin supervisor --features mycelium + +# Start Mycelium daemon (if you have one) +# mycelium-daemon --port 8990 + +# Run supervisor +MYCELIUM_URL="http://localhost:8990" cargo run --bin supervisor --features mycelium +``` + +## Troubleshooting + +### "Mycelium integration not enabled" + +This is informational, not an error. If you need Mycelium: + +```bash +cargo build --features mycelium +``` + +### "HTTP request failed: error sending request" + +If you see this with Mycelium enabled, check: +1. Is Mycelium daemon running? +2. Is the URL correct? (`MYCELIUM_URL`) +3. Is the port accessible? + +Or simply disable Mycelium: +```bash +export MYCELIUM_URL="" +``` + +## Summary + +🎉 **The supervisor now runs cleanly with just OpenRPC!** + +- Default: OpenRPC HTTP server only +- Optional: Enable Mycelium with `--features mycelium` +- No more connection errors when Mycelium isn't needed +- Simpler, faster, cleaner! + +--- + +**Status:** ✅ Complete +**Version:** 0.1.0 +**Last Updated:** 2025-10-24 diff --git a/bin/supervisor/docs/QUICK_START.md b/bin/supervisor/docs/QUICK_START.md new file mode 100644 index 0000000..c15b3d7 --- /dev/null +++ b/bin/supervisor/docs/QUICK_START.md @@ -0,0 +1,214 @@ +# Quick Start Guide + +Complete guide to running the Hero Supervisor with OSIS runners and examples. + +## Prerequisites + +1. **Redis** - Must be running +2. **Rust** - Version 1.88+ (run `rustup update`) + +## 1. Start Redis + +```bash +redis-server +``` + +## 2. Start Supervisor + +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +cargo run --bin supervisor +``` + +You should see: +``` +╔════════════════════════════════════════════════════════════╗ +║ Hero Supervisor Started ║ +╚════════════════════════════════════════════════════════════╝ + 📡 OpenRPC Server: http://127.0.0.1:3030 + 🔗 Redis: redis://localhost:6379 + 🌐 Mycelium: Not compiled (use --features mycelium) +╚════════════════════════════════════════════════════════════╝ +``` + +## 3. Start OSIS Runner + +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/runner_rust +cargo run --bin runner_osis -- test_runner \ + --redis-url redis://localhost:6379 \ + --db-path /tmp/test_runner.db +``` + +You should see: +``` +Starting OSIS Sync Runner with ID: test_runner +Database path: /tmp/test_runner.db +Redis URL: redis://localhost:6379 +OSIS Sync Runner 'test_runner' started successfully +``` + +## 4. 
Run Example + +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +RUST_LOG=info cargo run --example simple_e2e +``` + +## Terminal Layout + +``` +┌─────────────────────┬─────────────────────┐ +│ Terminal 1 │ Terminal 2 │ +│ Redis │ Supervisor │ +│ redis-server │ cargo run --bin │ +│ │ supervisor │ +├─────────────────────┼─────────────────────┤ +│ Terminal 3 │ Terminal 4 │ +│ OSIS Runner │ Example │ +│ cargo run --bin │ cargo run │ +│ runner_osis │ --example │ +│ │ simple_e2e │ +└─────────────────────┴─────────────────────┘ +``` + +## What Each Component Does + +### Redis +- Job queue storage +- Job result storage +- Runner coordination + +### Supervisor +- OpenRPC HTTP server (port 3030) +- Job dispatch to runners +- Runner registration +- Job execution coordination + +### OSIS Runner +- Listens for jobs on Redis queue +- Executes Rhai scripts +- Stores results back to Redis +- Uses HeroDB for data persistence + +### Example +- Creates jobs with Rhai scripts +- Sends jobs to supervisor via OpenRPC +- Receives results +- Demonstrates both blocking and non-blocking modes + +## Architecture + +``` +┌─────────────┐ +│ Example │ (simple_e2e.rs) +└──────┬──────┘ + │ HTTP/JSON-RPC + ▼ +┌─────────────┐ +│ Supervisor │ (port 3030) +└──────┬──────┘ + │ Redis Queue + ▼ +┌─────────────┐ +│ OSIS Runner │ (test_runner) +└──────┬──────┘ + │ + ▼ +┌─────────────┐ +│ HeroDB │ (Redis + local DB) +└─────────────┘ +``` + +## Troubleshooting + +### "Connection refused" on port 3030 +- Make sure supervisor is running +- Check if another process is using port 3030 + +### "Connection refused" on port 6379 +- Make sure Redis is running +- Check: `redis-cli ping` (should return "PONG") + +### Runner not receiving jobs +- Check runner is registered: Look for "Runner registered successfully" in example output +- Check Redis connection: Both supervisor and runner must use same Redis URL +- Check queue name matches: Should be `hero:q:work:type:osis:group:default:inst:test_runner` + +### "Job execution timeout" +- Increase timeout in job builder: `.timeout(120)` +- Check if runner is actually processing jobs (look for logs) + +## Example Output + +### Successful Run + +``` +╔════════════════════════════════════════╗ +║ Simple End-to-End Demo ║ +╚════════════════════════════════════════╝ + +📋 Step 1: Registering Runner +───────────────────────────────────────── +✅ Runner registered successfully + +📋 Step 2: Running a Simple Job (Blocking) +───────────────────────────────────────── +✅ Job completed! + Result: {"message":"Hello from the runner!","number":42} + +📋 Step 3: Running a Calculation Job +───────────────────────────────────────── +✅ Calculation completed! + Result: {"sum":55,"product":3628800,"count":10} + +📋 Step 4: Starting a Non-Blocking Job +───────────────────────────────────────── +✅ Job started! + Job ID: abc-123 (running in background) + +🎉 Demo completed successfully! +``` + +## Next Steps + +1. **Try different Rhai scripts** - Modify the payload in examples +2. **Add more runners** - Start multiple runners with different IDs +3. **Explore the API** - Use the OpenRPC client library +4. 
**Build your own client** - See `client/` for examples + +## Useful Commands + +```bash +# Check Redis +redis-cli ping + +# List Redis keys +redis-cli keys "hero:*" + +# Monitor Redis commands +redis-cli monitor + +# Check supervisor is running +curl http://localhost:3030 + +# View runner logs +# (check terminal where runner is running) +``` + +## Clean Up + +```bash +# Stop all processes (Ctrl+C in each terminal) + +# Clean up test database +rm /tmp/test_runner.db + +# (Optional) Flush Redis +redis-cli FLUSHALL +``` + +--- + +**Status:** ✅ Ready to Use +**Last Updated:** 2025-10-24 diff --git a/bin/supervisor/docs/RESTRUCTURE.md b/bin/supervisor/docs/RESTRUCTURE.md new file mode 100644 index 0000000..1b92af4 --- /dev/null +++ b/bin/supervisor/docs/RESTRUCTURE.md @@ -0,0 +1,58 @@ +# Repository Restructure + +## Changes Made + +The supervisor repository has been restructured to follow a cleaner organization: + +### Before: +``` +supervisor/ +├── clients/ +│ ├── openrpc/ # OpenRPC client library +│ └── admin-ui/ # Admin UI (Yew WASM app) +├── src/ # Main supervisor library +└── cmd/ # Supervisor binary +``` + +### After: +``` +supervisor/ +├── client/ # OpenRPC client library (renamed from clients/openrpc) +├── ui/ # Admin UI (renamed from clients/admin-ui) +├── src/ # Main supervisor library +└── cmd/ # Supervisor binary +``` + +## Package Names + +The package names remain unchanged: +- **Client**: `hero-supervisor-openrpc-client` +- **UI**: `supervisor-admin-ui` +- **Main**: `hero-supervisor` + +## Git Dependencies + +External projects using Git URLs will automatically pick up the new structure: + +```toml +# This continues to work +hero-supervisor-openrpc-client = { git = "https://git.ourworld.tf/herocode/supervisor.git" } +``` + +Cargo will find the package by name regardless of its location in the repository. + +## Local Path Dependencies + +If you have local path dependencies, update them: + +```toml +# Old +hero-supervisor-openrpc-client = { path = "../supervisor/clients/openrpc" } + +# New +hero-supervisor-openrpc-client = { path = "../supervisor/client" } +``` + +## Scripts and Documentation + +All references in scripts, documentation, and examples have been updated to reflect the new structure. diff --git a/bin/supervisor/docs/job-api-convention.md b/bin/supervisor/docs/job-api-convention.md new file mode 100644 index 0000000..b4c4102 --- /dev/null +++ b/bin/supervisor/docs/job-api-convention.md @@ -0,0 +1,333 @@ +# Hero Supervisor Job API Convention + +## Overview + +The Hero Supervisor OpenRPC API follows a consistent naming convention for job-related operations: + +- **`jobs.<operation>`** - General job operations (plural) +- **`job.<operation>`** - Specific job operations (singular) + +This convention provides a clear distinction between operations that work with multiple jobs or create new jobs versus operations that work with a specific existing job. + +## API Methods + +### General Job Operations (`jobs.<operation>`) + +#### `jobs.create` +Creates a new job without immediately queuing it to a runner.
+ +**Parameters:** +- `secret` (string): Authentication secret (admin or user) +- `job` (Job object): Complete job specification + +**Returns:** +- `job_id` (string): Unique identifier of the created job + +**Usage:** +```json +{ + "method": "jobs.create", + "params": { + "secret": "your-secret", + "job": { + "id": "job-123", + "caller_id": "client-1", + "context_id": "context-1", + "payload": "print('Hello World')", + "executor": "osis", + "runner": "osis-runner-1", + "timeout": 300, + "env_vars": {}, + "created_at": "2023-01-01T00:00:00Z", + "updated_at": "2023-01-01T00:00:00Z" + } + } +} +``` + +#### `jobs.list` +Lists all jobs in the system with full details. + +**Parameters:** None + +**Returns:** +- `jobs` (array of Job objects): List of all jobs with complete information + +**Usage:** +```json +{ + "method": "jobs.list", + "params": [] +} +``` + +**Response:** +```json +[ + { + "id": "job-123", + "caller_id": "client-1", + "context_id": "context-1", + "payload": "print('Hello World')", + "executor": "osis", + "runner": "osis-runner-1", + "timeout": 300, + "env_vars": {}, + "created_at": "2023-01-01T00:00:00Z", + "updated_at": "2023-01-01T00:00:00Z" + } +] +``` + +### Specific Job Operations (`job.<operation>`) + +#### `job.run` +Runs a job immediately on the appropriate runner and returns the result. + +**Parameters:** +- `secret` (string): Authentication secret (admin or user) +- `job` (Job object): Complete job specification + +**Returns:** +- `result` (JobResult): Either success or error result + +**JobResult Format:** +```json +// Success case +{ + "success": "Job completed successfully with output..." +} + +// Error case +{ + "error": "Job failed with error message..." +} +``` + +**Usage:** +```json +{ + "method": "job.run", + "params": { + "secret": "your-secret", + "job": { /* job object */ } + } +} +``` + +#### `job.start` +Starts a previously created job by queuing it to its assigned runner. + +**Parameters:** +- `secret` (string): Authentication secret (admin or user) +- `job_id` (string): ID of the job to start + +**Returns:** `null` (void) + +**Usage:** +```json +{ + "method": "job.start", + "params": { + "secret": "your-secret", + "job_id": "job-123" + } +} +``` + +#### `job.status` +Gets the current status of a job. + +**Parameters:** +- `job_id` (string): ID of the job to check + +**Returns:** +- `status` (JobStatusResponse): Current job status information + +**JobStatusResponse Format:** +```json +{ + "job_id": "job-123", + "status": "running", + "created_at": "2023-01-01T00:00:00Z", + "started_at": "2023-01-01T00:00:05Z", + "completed_at": null +} +``` + +**Status Values:** +- `created` - Job has been created but not queued +- `queued` - Job has been queued to a runner +- `running` - Job is currently executing +- `completed` - Job finished successfully +- `failed` - Job failed with an error +- `timeout` - Job timed out + +**Usage:** +```json +{ + "method": "job.status", + "params": ["job-123"] +} +``` + +#### `job.result` +Gets the result of a completed job. This method blocks until the result is available. + +**Parameters:** +- `job_id` (string): ID of the job to get results for + +**Returns:** +- `result` (JobResult): Either success or error result + +**Usage:** +```json +{ + "method": "job.result", + "params": ["job-123"] +} +``` + +#### `job.stop` +Stops a running job.
+ +**Parameters:** +- `secret` (string): Authentication secret (admin or user) +- `job_id` (string): ID of the job to stop + +**Returns:** `null` (void) + +**Usage:** +```json +{ + "method": "job.stop", + "params": { + "secret": "your-secret", + "job_id": "job-123" + } +} +``` + +#### `job.delete` +Deletes a job from the system. + +**Parameters:** +- `secret` (string): Authentication secret (admin or user) +- `job_id` (string): ID of the job to delete + +**Returns:** `null` (void) + +**Usage:** +```json +{ + "method": "job.delete", + "params": { + "secret": "your-secret", + "job_id": "job-123" + } +} +``` + +## Workflow Examples + +### Fire-and-Forget Job +```javascript +// Create and immediately run a job +const result = await client.job_run(secret, jobSpec); +if (result.success) { + console.log("Job completed:", result.success); +} else { + console.error("Job failed:", result.error); +} +``` + +### Asynchronous Job Processing +```javascript +// 1. Create the job +const jobId = await client.jobs_create(secret, jobSpec); + +// 2. Start the job +await client.job_start(secret, jobId); + +// 3. Poll for completion (non-blocking) +let status; +do { + status = await client.job_status(jobId); + if (status.status === 'running') { + await sleep(1000); // Wait 1 second + } +} while (status.status === 'running' || status.status === 'queued'); + +// 4. Get the result +const result = await client.job_result(jobId); +``` + +### Batch Job Management +```javascript +// Create multiple jobs +const jobIds = []; +for (const jobSpec of jobSpecs) { + const jobId = await client.jobs_create(secret, jobSpec); + jobIds.push(jobId); +} + +// Start all jobs +for (const jobId of jobIds) { + await client.job_start(secret, jobId); +} + +// Monitor progress +const results = []; +for (const jobId of jobIds) { + const result = await client.job_result(jobId); // Blocks until complete + results.push(result); +} + +// Optional: Stop or delete jobs if needed +for (const jobId of jobIds) { + await client.job_stop(secret, jobId); // Stop running job + await client.job_delete(secret, jobId); // Delete from system +} +``` + +## Authentication + +All job operations require authentication using one of the following secret types: + +- **Admin secrets**: Full access to all operations +- **User secrets**: Access to job operations (`jobs.create`, `job.run`, `job.start`) +- **Register secrets**: Only access to runner registration + +## Error Handling + +All methods return standard JSON-RPC error responses for: + +- **Authentication errors** (-32602): Invalid or missing secrets +- **Job not found errors** (-32000): Job ID doesn't exist +- **Internal errors** (-32603): Server-side processing errors + +## Migration from Legacy API + +### Old → New Method Names + +| Legacy Method | New Method | Notes | +|---------------|------------|-------| +| `run_job` | `job.run` | Same functionality, new naming | +| `list_jobs` | `jobs.list` | Same functionality, new naming | +| `create_job` | `jobs.create` | Enhanced to not auto-queue | + +### New Methods Added + +- `job.start` - Start a created job +- `job.stop` - Stop a running job +- `job.delete` - Delete a job from the system +- `job.status` - Get job status (non-blocking) +- `job.result` - Get job result (blocking) + +### API Changes + +- **Job struct**: Replaced `job_type` field with `executor` +- **jobs.list**: Now returns full Job objects instead of just job IDs +- **Enhanced job lifecycle**: Added stop and delete operations + +This provides much more granular control over job lifecycle 
management. diff --git a/bin/supervisor/docs/openrpc.json b/bin/supervisor/docs/openrpc.json new file mode 100644 index 0000000..780bda0 --- /dev/null +++ b/bin/supervisor/docs/openrpc.json @@ -0,0 +1,391 @@ +{ + "openrpc": "1.3.2", + "info": { + "title": "Hero Supervisor OpenRPC API", + "version": "1.0.0", + "description": "OpenRPC API for managing Hero Supervisor runners and jobs. Job operations follow the convention: 'jobs.<operation>' for general operations and 'job.<operation>' for specific job operations." + }, + "components": { + "schemas": { + "Job": { + "type": "object", + "properties": { + "id": { "type": "string" }, + "caller_id": { "type": "string" }, + "context_id": { "type": "string" }, + "payload": { "type": "string" }, + "runner": { "type": "string" }, + "executor": { "type": "string" }, + "timeout": { "type": "number" }, + "env_vars": { "type": "object" }, + "created_at": { "type": "string" }, + "updated_at": { "type": "string" } + }, + "required": ["id", "caller_id", "context_id", "payload", "runner", "executor", "timeout", "env_vars", "created_at", "updated_at"] + } + } + }, + "methods": [ + { + "name": "list_runners", + "description": "List all registered runners", + "params": [], + "result": { + "name": "runners", + "schema": { + "type": "array", + "items": { "type": "string" } + } + } + }, + { + "name": "register_runner", + "description": "Register a new runner to the supervisor with secret authentication", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "name": { "type": "string" }, + "queue": { "type": "string" } + }, + "required": ["secret", "name", "queue"] + } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "jobs.create", + "description": "Create a new job without queuing it to a runner", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "job": { + "$ref": "#/components/schemas/Job" + } + }, + "required": ["secret", "job"] + } + } + ], + "result": { + "name": "job_id", + "schema": { "type": "string" } + } + }, + { + "name": "jobs.list", + "description": "List all jobs", + "params": [], + "result": { + "name": "jobs", + "schema": { + "type": "array", + "items": { "$ref": "#/components/schemas/Job" } + } + } + }, + { + "name": "job.run", + "description": "Run a job on the appropriate runner and return the result", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "job": { + "$ref": "#/components/schemas/Job" + } + }, + "required": ["secret", "job"] + } + } + ], + "result": { + "name": "result", + "schema": { + "oneOf": [ + { + "type": "object", + "properties": { + "success": { "type": "string" } + }, + "required": ["success"] + }, + { + "type": "object", + "properties": { + "error": { "type": "string" } + }, + "required": ["error"] + } + ] + } + } + }, + { + "name": "job.start", + "description": "Start a previously created job by queuing it to its assigned runner", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "job_id": { "type": "string" } + }, + "required": ["secret", "job_id"] + } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "job.status", + "description": "Get the current status of a job", + "params": [ + { + "name": "job_id", + "schema": { "type": "string" } + } + ], + "result":
{ + "name": "status", + "schema": { + "type": "object", + "properties": { + "job_id": { "type": "string" }, + "status": { + "type": "string", + "enum": ["created", "queued", "running", "completed", "failed", "timeout"] + }, + "created_at": { "type": "string" }, + "started_at": { "type": ["string", "null"] }, + "completed_at": { "type": ["string", "null"] } + }, + "required": ["job_id", "status", "created_at"] + } + } + }, + { + "name": "job.result", + "description": "Get the result of a completed job (blocks until result is available)", + "params": [ + { + "name": "job_id", + "schema": { "type": "string" } + } + ], + "result": { + "name": "result", + "schema": { + "oneOf": [ + { + "type": "object", + "properties": { + "success": { "type": "string" } + }, + "required": ["success"] + }, + { + "type": "object", + "properties": { + "error": { "type": "string" } + }, + "required": ["error"] + } + ] + } + } + }, + { + "name": "remove_runner", + "description": "Remove a runner from the supervisor", + "params": [ + { + "name": "actor_id", + "schema": { "type": "string" } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "start_runner", + "description": "Start a specific runner", + "params": [ + { + "name": "actor_id", + "schema": { "type": "string" } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "stop_runner", + "description": "Stop a specific runner", + "params": [ + { + "name": "actor_id", + "schema": { "type": "string" } + }, + { + "name": "force", + "schema": { "type": "boolean" } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "get_runner_status", + "description": "Get the status of a specific runner", + "params": [ + { + "name": "actor_id", + "schema": { "type": "string" } + } + ], + "result": { + "name": "status", + "schema": { "type": "object" } + } + }, + { + "name": "get_all_runner_status", + "description": "Get status of all runners", + "params": [], + "result": { + "name": "statuses", + "schema": { + "type": "array", + "items": { "type": "object" } + } + } + }, + { + "name": "start_all", + "description": "Start all runners", + "params": [], + "result": { + "name": "results", + "schema": { + "type": "array", + "items": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + { + "name": "stop_all", + "description": "Stop all runners", + "params": [ + { + "name": "force", + "schema": { "type": "boolean" } + } + ], + "result": { + "name": "results", + "schema": { + "type": "array", + "items": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + { + "name": "get_all_status", + "description": "Get status of all runners (alternative format)", + "params": [], + "result": { + "name": "statuses", + "schema": { + "type": "array", + "items": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + { + "name": "job.stop", + "description": "Stop a running job", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "job_id": { "type": "string" } + }, + "required": ["secret", "job_id"] + } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "job.delete", + "description": "Delete a job from the system", + "params": [ + { + "name": "params", + "schema": { + "type": "object", + "properties": { + "secret": { "type": "string" }, + "job_id": { "type": "string" } + }, + "required": ["secret", 
"job_id"] + } + } + ], + "result": { + "name": "result", + "schema": { "type": "null" } + } + }, + { + "name": "rpc.discover", + "description": "OpenRPC discovery method - returns the OpenRPC document describing this API", + "params": [], + "result": { + "name": "openrpc_document", + "schema": { "type": "object" } + } + } + ] +} diff --git a/bin/supervisor/docs/test_keypairs.md b/bin/supervisor/docs/test_keypairs.md new file mode 100644 index 0000000..f186ff0 --- /dev/null +++ b/bin/supervisor/docs/test_keypairs.md @@ -0,0 +1,80 @@ +# Test Keypairs for Supervisor Auth + +These are secp256k1 keypairs for testing the supervisor authentication system. + +## Keypair 1 (Alice - Admin) +``` +Private Key: 0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef +Public Key: 0x04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235 +Address: 0x1234567890abcdef1234567890abcdef12345678 +``` + +## Keypair 2 (Bob - User) +``` +Private Key: 0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321 +Public Key: 0x04d0de0aaeaefad02b8bdf8a56451a9852d7f851fee0cc8b4d42f3a0a4c3c2f66c1e5e3e8e3c3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e +Address: 0xfedcba0987654321fedcba0987654321fedcba09 +``` + +## Keypair 3 (Charlie - Register) +``` +Private Key: 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +Public Key: 0x04e68acfc0253a10620dff706b0a1b1f1f5833ea3beb3bde6250d4e5e1e283bb4e9504be11a68d7a263f8e2000d1f8b8c5e5e5e5e5e5e5e5e5e5e5e5e5e5e5e5e +Address: 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +``` + +## Keypair 4 (Dave - Test) +``` +Private Key: 0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +Public Key: 0x04f71e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e8f6c7e +Address: 0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +``` + +## Keypair 5 (Eve - Test) +``` +Private Key: 0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +Public Key: 0x04a0b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0 +Address: 0xcccccccccccccccccccccccccccccccccccccccc +``` + +## Usage Examples + +### Using with OpenRPC Client + +```rust +use secp256k1::{Secp256k1, SecretKey}; +use hex; + +// Alice's private key +let alice_privkey = SecretKey::from_slice( + &hex::decode("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef").unwrap() +).unwrap(); + +// Sign a message +let secp = Secp256k1::new(); +let message = "Hello, Supervisor!"; +// ... sign with alice_privkey +``` + +### Using with Admin UI + +You can use the public keys as identifiers when creating API keys: +- Alice: `0x04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd...` +- Bob: `0x04d0de0aaeaefad02b8bdf8a56451a9852d7f851fee0cc8b4d42f3a0a4c3c2f66c...` + +### Testing Different Scopes + +1. **Admin Scope** - Use Alice's keypair for full admin access +2. **User Scope** - Use Bob's keypair for limited user access +3. **Register Scope** - Use Charlie's keypair for runner registration only + +## Notes + +⚠️ **WARNING**: These are TEST keypairs only! Never use these in production! 
+ +The private keys are intentionally simple patterns for easy testing: +- Alice: All 0x12...ef pattern +- Bob: Reverse pattern 0xfe...21 +- Charlie: All 0xaa +- Dave: All 0xbb +- Eve: All 0xcc diff --git a/bin/supervisor/examples/README.md b/bin/supervisor/examples/README.md new file mode 100644 index 0000000..76131a1 --- /dev/null +++ b/bin/supervisor/examples/README.md @@ -0,0 +1,74 @@ +# Hero Supervisor Examples + +This directory contains examples demonstrating Hero Supervisor functionality. + +## Available Examples + +### osiris_openrpc + +Comprehensive example showing the complete workflow of using Hero Supervisor with OSIRIS runners via OpenRPC. + +**Features:** +- Automatic supervisor and runner startup +- OpenRPC client communication +- Runner registration and management +- Job dispatching with multiple scripts +- Context-based access control +- Graceful shutdown + +**Run:** +```bash +cargo run --example osiris_openrpc +``` + +See [osiris_openrpc/README.md](osiris_openrpc/README.md) for details. + +## Prerequisites + +All examples require: +- Redis server running on `localhost:6379` +- Rust toolchain installed + +## Example Structure + +``` +examples/ +├── README.md # This file +├── osiris_openrpc/ # OSIRIS + OpenRPC example +│ ├── main.rs # Main example code +│ ├── README.md # Detailed documentation +│ ├── note.rhai # Note creation script +│ ├── event.rhai # Event creation script +│ ├── query.rhai # Query script +│ └── access_denied.rhai # Access control test script +└── _archive/ # Archived old examples +``` + +## Architecture Overview + +The examples demonstrate the Hero Supervisor architecture: + +``` +Client (OpenRPC) + ↓ +Supervisor (OpenRPC Server) + ↓ +Redis Queue + ↓ +Runners (OSIRIS, SAL, etc.) +``` + +## Development + +To add a new example: + +1. Create a new directory under `examples/` +2. Add `main.rs` with your example code +3. Add any required script files (`.rhai`) +4. Add a `README.md` documenting the example +5. Update `Cargo.toml` to register the example +6. Update this README with a link + +## Archived Examples + +Previous examples have been moved to `_archive/` for reference. These may be outdated but can provide useful patterns for specific use cases. diff --git a/bin/supervisor/examples/_archive/E2E_EXAMPLES.md b/bin/supervisor/examples/_archive/E2E_EXAMPLES.md new file mode 100644 index 0000000..f763f02 --- /dev/null +++ b/bin/supervisor/examples/_archive/E2E_EXAMPLES.md @@ -0,0 +1,364 @@ +# End-to-End Examples + +Complete examples demonstrating the full Supervisor + Runner + Client workflow. + +## Overview + +These examples show how to: +1. Start a Hero Supervisor +2. Start an OSIS Runner +3. Register the runner with the supervisor +4. Execute jobs using both blocking (`job.run`) and non-blocking (`job.start`) modes + +## Prerequisites + +### Required Services + +1. **Redis** - Must be running on `localhost:6379` + ```bash + redis-server + ``` + +2. **Supervisor** - Hero Supervisor with Mycelium integration + ```bash + cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379 + ``` + +3. **Runner** - OSIS Runner to execute jobs + ```bash + cargo run --bin runner_osis -- test_runner --redis-url redis://localhost:6379 + ``` + +## Examples + +### 1. Simple End-to-End (`simple_e2e.rs`) + +**Recommended for beginners** - A minimal example with clear step-by-step execution. 
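+
+In client code, the whole demo boils down to a handful of calls. A condensed sketch (assuming the supervisor and runner from the walkthrough below are already running; names match the example):
+
+```rust
+use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder};
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let client = SupervisorClient::new("http://localhost:3030")?;
+
+    // Register the runner under its Hero work queue
+    let queue = "hero:q:work:type:osis:group:default:inst:test_runner";
+    client.register_runner("admin_secret", "test_runner", queue).await?;
+
+    // Blocking run: queues the job and waits for the runner's result
+    let job = JobBuilder::new()
+        .caller_id("simple_demo")
+        .context_id("demo_context")
+        .payload("to_json(2 + 2)")
+        .runner("test_runner")
+        .executor("rhai")
+        .timeout(30)
+        .build()?;
+    let response = client.job_run("admin_secret", job, Some(30)).await?;
+    println!("Result: {:?}", response.result);
+    Ok(())
+}
+```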
+ +#### What it does: +- Registers a runner with the supervisor +- Runs 2 blocking jobs (with immediate results) +- Starts 1 non-blocking job (fire and forget) +- Shows clear output at each step + +#### How to run: + +**Terminal 1 - Redis:** +```bash +redis-server +``` + +**Terminal 2 - Supervisor:** +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +RUST_LOG=info cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379 +``` + +**Terminal 3 - Runner:** +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/runner_rust +RUST_LOG=info cargo run --bin runner_osis -- test_runner \ + --redis-url redis://localhost:6379 \ + --db-path /tmp/test_runner.db +``` + +**Terminal 4 - Demo:** +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +RUST_LOG=info cargo run --example simple_e2e +``` + +#### Expected Output: + +``` +╔════════════════════════════════════════╗ +║ Simple End-to-End Demo ║ +╚════════════════════════════════════════╝ + +📋 Step 1: Registering Runner +───────────────────────────────────────── +✅ Runner registered successfully + +📋 Step 2: Running a Simple Job (Blocking) +───────────────────────────────────────── +✅ Job completed! + Result: {"message":"Hello from the runner!","number":42,"timestamp":1234567890} + +📋 Step 3: Running a Calculation Job +───────────────────────────────────────── +✅ Calculation completed! + Result: {"sum":55,"product":3628800,"count":10,"average":5} + +📋 Step 4: Starting a Non-Blocking Job +───────────────────────────────────────── +✅ Job started! + Job ID: abc-123 (running in background) + +🎉 Demo completed successfully! +``` + +### 2. Full End-to-End (`end_to_end_demo.rs`) + +**Advanced** - Automatically spawns supervisor and runner processes. + +#### What it does: +- Automatically starts supervisor and runner +- Runs multiple test jobs +- Demonstrates both execution modes +- Handles cleanup automatically + +#### How to run: + +**Terminal 1 - Redis:** +```bash +redis-server +``` + +**Terminal 2 - Demo:** +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +RUST_LOG=info cargo run --example end_to_end_demo +``` + +#### Features: +- ✅ Automatic process management +- ✅ Multiple job examples +- ✅ Graceful shutdown +- ✅ Comprehensive logging + +## Job Execution Modes + +### job.run (Blocking) + +Executes a job and waits for the result. + +**Request:** +```json +{ + "jsonrpc": "2.0", + "method": "job.run", + "params": [{ + "secret": "admin_secret", + "job": { /* job object */ }, + "timeout": 30 + }], + "id": 1 +} +``` + +**Response:** +```json +{ + "jsonrpc": "2.0", + "result": { + "job_id": "uuid", + "status": "completed", + "result": "{ /* actual result */ }" + }, + "id": 1 +} +``` + +**Use when:** +- You need immediate results +- Job completes quickly (< 60 seconds) +- Synchronous workflow + +### job.start (Non-Blocking) + +Starts a job and returns immediately. 
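+The returned `job_id` can then be polled with `job.status` (non-blocking) or collected with the blocking `job.result` call.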
+ +**Request:** +```json +{ + "jsonrpc": "2.0", + "method": "job.start", + "params": [{ + "secret": "admin_secret", + "job": { /* job object */ } + }], + "id": 1 +} +``` + +**Response:** +```json +{ + "jsonrpc": "2.0", + "result": { + "job_id": "uuid", + "status": "queued" + }, + "id": 1 +} +``` + +**Use when:** +- Long-running operations +- Background processing +- Async workflows +- Don't need immediate results + +## Job Structure + +Jobs are created using the `JobBuilder`: + +```rust +use runner_rust::job::JobBuilder; + +let job = JobBuilder::new() + .caller_id("my_client") + .context_id("my_context") + .payload(r#" + // Rhai script to execute + let result = 2 + 2; + to_json(result) + "#) + .runner("runner_name") + .executor("rhai") + .timeout(30) + .build()?; +``` + +### Job Fields + +- **caller_id**: Identifier for the client making the request +- **context_id**: Context for the job execution +- **payload**: Rhai script to execute +- **runner**: Name of the runner to execute on +- **executor**: Type of executor (always "rhai" for OSIS) +- **timeout**: Maximum execution time in seconds + +## Rhai Script Examples + +### Simple Calculation +```rhai +let result = 2 + 2; +to_json(result) +``` + +### String Manipulation +```rhai +let message = "Hello, World!"; +let upper = message.to_upper(); +to_json(upper) +``` + +### Array Operations +```rhai +let numbers = [1, 2, 3, 4, 5]; +let sum = 0; +for n in numbers { + sum += n; +} +to_json(#{sum: sum, count: numbers.len()}) +``` + +### Object Creation +```rhai +let person = #{ + name: "Alice", + age: 30, + email: "alice@example.com" +}; +to_json(person) +``` + +## Troubleshooting + +### "Failed to connect to supervisor" + +**Problem:** Supervisor is not running or wrong port. + +**Solution:** +```bash +# Check if supervisor is running +curl http://localhost:3030 + +# Start supervisor +cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379 +``` + +### "Runner not found" + +**Problem:** Runner is not registered or not running. + +**Solution:** +```bash +# Start the runner +cargo run --bin runner_osis -- test_runner --redis-url redis://localhost:6379 + +# Check runner logs for connection issues +``` + +### "Job execution timeout" + +**Problem:** Job took longer than timeout value. + +**Solution:** +- Increase timeout in job builder: `.timeout(60)` +- Or in job.run request: `"timeout": 60` + +### "Redis connection failed" + +**Problem:** Redis is not running. + +**Solution:** +```bash +# Start Redis +redis-server + +# Or specify custom Redis URL +cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379 +``` + +## Architecture + +``` +┌─────────────┐ +│ Client │ +│ (Example) │ +└──────┬──────┘ + │ HTTP/JSON-RPC + ▼ +┌─────────────┐ +│ Supervisor │ +│ (Mycelium) │ +└──────┬──────┘ + │ Redis Queue + ▼ +┌─────────────┐ +│ Runner │ +│ (OSIS) │ +└─────────────┘ +``` + +### Flow + +1. **Client** creates a job with Rhai script +2. **Client** sends job to supervisor via JSON-RPC +3. **Supervisor** verifies signatures (if present) +4. **Supervisor** queues job to runner's Redis queue +5. **Runner** picks up job from queue +6. **Runner** executes Rhai script +7. **Runner** stores result in Redis +8. **Supervisor** retrieves result (for job.run) +9. 
**Client** receives result
+
+## Next Steps
+
+- Add signature verification to jobs (see `JOB_SIGNATURES.md`)
+- Implement job status polling for non-blocking jobs
+- Create custom Rhai functions for your use case
+- Scale with multiple runners
+
+## Related Documentation
+
+- `JOB_EXECUTION.md` - Detailed job execution modes
+- `JOB_SIGNATURES.md` - Cryptographic job signing
+- `README.md` - Supervisor overview
+
+---
+
+**Status:** ✅ Production Ready
+**Last Updated:** 2025-10-24
diff --git a/bin/supervisor/examples/_archive/EXAMPLES_SUMMARY.md b/bin/supervisor/examples/_archive/EXAMPLES_SUMMARY.md
new file mode 100644
index 0000000..0c740a2
--- /dev/null
+++ b/bin/supervisor/examples/_archive/EXAMPLES_SUMMARY.md
@@ -0,0 +1,192 @@
+# Supervisor Examples - Summary
+
+## ✅ **Complete End-to-End Examples with OpenRPC Client**
+
+All examples now use the official `hero-supervisor-openrpc-client` library for type-safe, async communication with the supervisor.
+
+### **What Was Updated:**
+
+1. **OpenRPC Client Library** (`client/src/lib.rs`)
+   - Added `JobRunResponse` - Response from blocking `job.run`
+   - Added `JobStartResponse` - Response from non-blocking `job.start`
+   - Updated `job_run()` method - Now accepts timeout parameter
+   - Updated `job_start()` method - Now accepts Job instead of job_id
+   - Re-exports `Job` and `JobBuilder` from `runner_rust`
+
+2. **Simple E2E Example** (`examples/simple_e2e.rs`)
+   - Uses `SupervisorClient` from OpenRPC library
+   - Clean, type-safe API calls
+   - No manual JSON-RPC construction
+   - Perfect for learning and testing
+
+3. **Full E2E Demo** (`examples/end_to_end_demo.rs`)
+   - Automated supervisor and runner spawning
+   - Uses OpenRPC client throughout
+   - Helper functions for common operations
+   - Comprehensive test scenarios
+
+### **Key Changes:**
+
+**Before (Manual JSON-RPC):**
+```rust
+let request = json!({
+    "jsonrpc": "2.0",
+    "method": "job.run",
+    "params": [{
+        "secret": secret,
+        "job": job,
+        "timeout": 30
+    }],
+    "id": 1
+});
+let response = http_client.post(url).json(&request).send().await?;
+```
+
+**After (OpenRPC Client):**
+```rust
+let response = client.job_run(secret, job, Some(30)).await?;
+println!("Result: {:?}", response.result);
+```
+
+### **Client API:**
+
+#### **Job Execution**
+
+```rust
+use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder};
+
+// Create client
+let client = SupervisorClient::new("http://localhost:3030")?;
+
+// Register runner
+client.register_runner("admin_secret", "runner_name", "queue_name").await?;
+
+// Run job (blocking - waits for result)
+let response = client.job_run("admin_secret", job, Some(60)).await?;
+// response.result contains the actual result
+
+// Start job (non-blocking - returns immediately)
+let response = client.job_start("admin_secret", job).await?;
+// response.job_id for later polling
+```
+
+#### **Response Types**
+
+```rust
+// JobRunResponse (from job.run)
+pub struct JobRunResponse {
+    pub job_id: String,
+    pub status: String,          // "completed"
+    pub result: Option<String>,  // Actual result from runner
+}
+
+// JobStartResponse (from job.start)
+pub struct JobStartResponse {
+    pub job_id: String,
+    pub status: String,  // "queued"
+}
+```
+
+### **Examples Overview:**
+
+| Example | Description | Use Case |
+|---------|-------------|----------|
+| `simple_e2e.rs` | Manual setup, step-by-step | Learning, testing |
+| `end_to_end_demo.rs` | Automated, comprehensive | CI/CD, integration tests |
+
+### **Running the Examples:**
+
+**Prerequisites:**
+```bash
+# Terminal 1: 
Redis +redis-server + +# Terminal 2: Supervisor +cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379 + +# Terminal 3: Runner +cargo run --bin runner_osis -- test_runner --redis-url redis://localhost:6379 +``` + +**Run Simple Example:** +```bash +# Terminal 4 +RUST_LOG=info cargo run --example simple_e2e +``` + +**Run Full Demo:** +```bash +# Only needs Redis running (spawns supervisor and runner automatically) +RUST_LOG=info cargo run --example end_to_end_demo +``` + +### **Benefits of OpenRPC Client:** + +✅ **Type Safety** - Compile-time checking of requests/responses +✅ **Async/Await** - Native Rust async support +✅ **Error Handling** - Proper Result types with detailed errors +✅ **Auto Serialization** - No manual JSON construction +✅ **Documentation** - IntelliSense and type hints +✅ **Maintainability** - Single source of truth for API + +### **Architecture:** + +``` +┌─────────────────┐ +│ Example Code │ +│ (simple_e2e) │ +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ OpenRPC Client │ +│ (typed API) │ +└────────┬────────┘ + │ JSON-RPC over HTTP + ▼ +┌─────────────────┐ +│ Supervisor │ +│ (Mycelium) │ +└────────┬────────┘ + │ Redis Queue + ▼ +┌─────────────────┐ +│ OSIS Runner │ +│ (Rhai Engine) │ +└─────────────────┘ +``` + +### **Job Execution Modes:** + +**Blocking (`job.run`):** +- Client waits for result +- Uses `queue_and_wait` internally +- Returns actual result +- Best for: CRUD, queries, short jobs + +**Non-Blocking (`job.start`):** +- Client returns immediately +- Job runs in background +- Returns job_id for polling +- Best for: Long jobs, batch processing + +### **Files Modified:** + +- ✅ `client/src/lib.rs` - Updated client methods and response types +- ✅ `examples/simple_e2e.rs` - Refactored to use OpenRPC client +- ✅ `examples/end_to_end_demo.rs` - Refactored to use OpenRPC client +- ✅ `examples/E2E_EXAMPLES.md` - Updated documentation +- ✅ `examples/EXAMPLES_SUMMARY.md` - This file + +### **Next Steps:** + +1. **Add more examples** - Specific use cases (batch jobs, error handling) +2. **Job polling** - Implement `wait_for_job()` helper +3. **WASM support** - Browser-based examples +4. **Signature examples** - Jobs with cryptographic signatures + +--- + +**Status:** ✅ Complete and Production Ready +**Last Updated:** 2025-10-24 +**Client Version:** hero-supervisor-openrpc-client 0.1.0 diff --git a/bin/supervisor/examples/_archive/README.md b/bin/supervisor/examples/_archive/README.md new file mode 100644 index 0000000..bd21499 --- /dev/null +++ b/bin/supervisor/examples/_archive/README.md @@ -0,0 +1,182 @@ +# Hero Supervisor Examples + +This directory contains examples demonstrating the new job API functionality and workflows. + +## Examples Overview + +### 1. `job_api_examples.rs` - Comprehensive API Demo +Complete demonstration of all new job API methods: +- **Fire-and-forget execution** using `job.run` +- **Asynchronous processing** with `jobs.create`, `job.start`, `job.status`, `job.result` +- **Batch job processing** for multiple jobs +- **Job listing** with `jobs.list` + +**Run with:** +```bash +cargo run --example job_api_examples +``` + +### 2. `simple_job_workflow.rs` - Basic Workflow +Simple example showing the basic job lifecycle: +1. Create job with `jobs.create` +2. Start job with `job.start` +3. Monitor with `job.status` +4. Get result with `job.result` + +**Run with:** +```bash +cargo run --example simple_job_workflow +``` + +### 3. 
`integration_test.rs` - Integration Tests +Comprehensive integration tests validating: +- Complete job lifecycle +- Immediate job execution +- Job listing functionality +- Authentication error handling +- Nonexistent job operations + +**Run with:** +```bash +cargo test --test integration_test +``` + +## Prerequisites + +Before running the examples, ensure: + +1. **Redis is running:** + ```bash + docker run -d -p 6379:6379 redis:alpine + ``` + +2. **Supervisor is running:** + ```bash + ./target/debug/supervisor --config examples/supervisor/config.toml + ``` + +3. **Runners are configured** in your config.toml: + ```toml + [[actors]] + id = "osis_runner_1" + name = "osis_runner_1" + binary_path = "/path/to/osis_runner" + db_path = "/tmp/osis_db" + redis_url = "redis://localhost:6379" + process_manager = "simple" + ``` + +## API Convention Summary + +The examples demonstrate the new job API convention: + +### General Operations (`jobs.`) +- `jobs.create` - Create a job without queuing it +- `jobs.list` - List all job IDs in the system + +### Specific Operations (`job.`) +- `job.run` - Run a job immediately and return result +- `job.start` - Start a previously created job +- `job.status` - Get current job status (non-blocking) +- `job.result` - Get job result (blocking until complete) + +## Workflow Patterns + +### Pattern 1: Fire-and-Forget +```rust +let result = client.job_run(secret, job).await?; +match result { + JobResult::Success { success } => println!("Output: {}", success), + JobResult::Error { error } => println!("Error: {}", error), +} +``` + +### Pattern 2: Asynchronous Processing +```rust +// Create and start +let job_id = client.jobs_create(secret, job).await?; +client.job_start(secret, &job_id).await?; + +// Monitor (non-blocking) +loop { + let status = client.job_status(&job_id).await?; + if status.status == "completed" { break; } + sleep(Duration::from_secs(1)).await; +} + +// Get result +let result = client.job_result(&job_id).await?; +``` + +### Pattern 3: Batch Processing +```rust +// Create all jobs +let mut job_ids = Vec::new(); +for job_spec in job_specs { + let job_id = client.jobs_create(secret, job_spec).await?; + job_ids.push(job_id); +} + +// Start all jobs +for job_id in &job_ids { + client.job_start(secret, job_id).await?; +} + +// Collect results +for job_id in &job_ids { + let result = client.job_result(job_id).await?; + // Process result... +} +``` + +## Error Handling + +The examples demonstrate proper error handling for: +- **Authentication errors** - Invalid secrets +- **Job not found errors** - Nonexistent job IDs +- **Connection errors** - Supervisor not available +- **Execution errors** - Job failures + +## Authentication + +Examples use different secret types: +- **Admin secrets**: Full system access +- **User secrets**: Job operations only (used in examples) +- **Register secrets**: Runner registration only + +Configure secrets in your supervisor config: +```toml +admin_secrets = ["admin-secret-123"] +user_secrets = ["user-secret-456"] +register_secrets = ["register-secret-789"] +``` + +## Troubleshooting + +### Common Issues + +1. **Connection refused** + - Ensure supervisor is running on localhost:3030 + - Check supervisor logs for errors + +2. **Authentication failed** + - Verify secret is configured in supervisor + - Check secret type matches operation requirements + +3. 
**Job execution failed** + - Ensure runners are properly configured and running + - Check runner logs for execution errors + - Verify job payload is valid for the target runner + +4. **Redis connection failed** + - Ensure Redis is running on localhost:6379 + - Check Redis connectivity from supervisor + +### Debug Mode + +Run examples with debug logging: +```bash +RUST_LOG=debug cargo run --example job_api_examples +``` + +This will show detailed API calls and responses for troubleshooting. diff --git a/bin/supervisor/examples/_archive/basic_openrpc_client.rs b/bin/supervisor/examples/_archive/basic_openrpc_client.rs new file mode 100644 index 0000000..397c295 --- /dev/null +++ b/bin/supervisor/examples/_archive/basic_openrpc_client.rs @@ -0,0 +1,290 @@ +//! Comprehensive OpenRPC Example for Hero Supervisor +//! +//! This example demonstrates the complete OpenRPC workflow: +//! 1. Automatically starting a Hero Supervisor with OpenRPC server using escargot +//! 2. Building and using a mock runner binary +//! 3. Connecting with the OpenRPC client +//! 4. Managing runners (add, start, stop, remove) +//! 5. Creating and queuing jobs +//! 6. Monitoring job execution and verifying results +//! 7. Bulk operations and status monitoring +//! 8. Gracefully shutting down the supervisor +//! +//! To run this example: +//! `cargo run --example basic_openrpc_client` +//! +//! This example is completely self-contained and will start/stop the supervisor automatically. + +use hero_supervisor_openrpc_client::{ + SupervisorClient, RunnerConfig, RunnerType, ProcessManagerType, + JobBuilder +}; +use std::time::Duration; +use escargot::CargoBuild; +use std::process::Stdio; +use tokio::time::sleep; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // env_logger::init(); // Commented out to avoid version conflicts + + println!("🚀 Comprehensive OpenRPC Example for Hero Supervisor"); + println!("===================================================="); + + // Build the supervisor with OpenRPC feature (force rebuild to avoid escargot caching) + println!("\n🔨 Force rebuilding supervisor with OpenRPC feature..."); + + // Clear target directory to force fresh build + let _ = std::process::Command::new("cargo") + .arg("clean") + .output(); + + let supervisor_binary = CargoBuild::new() + .bin("supervisor") + .features("openrpc") + .current_release() + .run()?; + + println!("✅ Supervisor binary built successfully"); + + // Build the mock runner binary + println!("\n🔨 Building mock runner binary..."); + let mock_runner_binary = CargoBuild::new() + .example("mock_runner") + .current_release() + .run()?; + + println!("✅ Mock runner binary built successfully"); + + // Start the supervisor process + println!("\n🚀 Starting supervisor with OpenRPC server..."); + let mut supervisor_process = supervisor_binary + .command() + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn()?; + + println!("✅ Supervisor process started (PID: {})", supervisor_process.id()); + + // Wait for the server to start up + println!("\n⏳ Waiting for OpenRPC server to start..."); + sleep(Duration::from_secs(5)).await; + + // Create client + let client = SupervisorClient::new("http://127.0.0.1:3030")?; + println!("✅ Client created for: {}", client.server_url()); + + // Test connectivity with retries + println!("\n🔍 Testing server connectivity..."); + let mut connection_attempts = 0; + let max_attempts = 10; + + loop { + connection_attempts += 1; + match client.list_runners().await { + Ok(runners) => { + println!("✅ Server is responsive"); + 
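+                // The runner list doubles as a readiness probe: any reply means the RPC server is accepting requests.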
println!("📋 Current runners: {:?}", runners); + break; + } + Err(e) if connection_attempts < max_attempts => { + println!("⏳ Attempt {}/{}: Server not ready yet, retrying...", connection_attempts, max_attempts); + sleep(Duration::from_secs(1)).await; + continue; + } + Err(e) => { + eprintln!("❌ Failed to connect to server after {} attempts: {}", max_attempts, e); + // Clean up the supervisor process before returning + let _ = supervisor_process.kill(); + return Err(e.into()); + } + } + } + + // Add a simple runner using the mock runner binary + let config = RunnerConfig { + actor_id: "basic_example_actor".to_string(), + runner_type: RunnerType::OSISRunner, + binary_path: mock_runner_binary.path().to_path_buf(), + db_path: "/tmp/example_db".to_string(), + redis_url: "redis://localhost:6379".to_string(), + }; + + println!("➕ Adding runner: {}", config.actor_id); + client.add_runner(config, ProcessManagerType::Simple).await?; + + // Start the runner + println!("▶️ Starting runner..."); + client.start_runner("basic_example_actor").await?; + + // Check status + let status = client.get_runner_status("basic_example_actor").await?; + println!("📊 Runner status: {:?}", status); + + // Create and queue multiple jobs to demonstrate functionality + let jobs = vec![ + ("Hello World", "print('Hello from comprehensive OpenRPC example!');"), + ("Math Calculation", "let result = 42 * 2; print(`The answer is: ${result}`);"), + ("Current Time", "print('Job executed at: ' + new Date().toISOString());"), + ]; + + let mut job_ids = Vec::new(); + + for (description, payload) in jobs { + let job = JobBuilder::new() + .caller_id("comprehensive_client") + .context_id("demo") + .payload(payload) + .runner("basic_example_actor") + .executor("rhai") + .timeout(30) + .build()?; + + println!("📤 Queuing job '{}': {}", description, job.id); + client.queue_job_to_runner("basic_example_actor", job.clone()).await?; + job_ids.push((job.id, description.to_string())); + + // Small delay between jobs + sleep(Duration::from_millis(500)).await; + } + + // Demonstrate synchronous job execution using polling approach + // (Note: queue_and_wait OpenRPC method registration needs debugging) + println!("\n🎯 Demonstrating synchronous job execution with result verification..."); + + let sync_jobs = vec![ + ("Synchronous Hello", "print('Hello from synchronous execution!');"), + ("Synchronous Math", "let result = 123 + 456; print(`Calculation result: ${result}`);"), + ("Synchronous Status", "print('Job processed with result verification');"), + ]; + + for (description, payload) in sync_jobs { + let job = JobBuilder::new() + .caller_id("sync_client") + .context_id("sync_demo") + .payload(payload) + .runner("basic_example_actor") + .executor("rhai") + .timeout(30) + .build()?; + + println!("🚀 Executing '{}' with result verification...", description); + let job_id = job.id.clone(); + + // Queue the job + client.queue_job_to_runner("basic_example_actor", job).await?; + + // Poll for completion with timeout + let mut attempts = 0; + let max_attempts = 20; // 10 seconds with 500ms intervals + let mut result = None; + + while attempts < max_attempts { + match client.get_job_result(&job_id).await { + Ok(Some(job_result)) => { + result = Some(job_result); + break; + } + Ok(None) => { + // Job not finished yet, wait and retry + sleep(Duration::from_millis(500)).await; + attempts += 1; + } + Err(e) => { + println!("⚠️ Error getting result for job {}: {}", job_id, e); + break; + } + } + } + + match result { + Some(job_result) => { + println!("✅ Job '{}' 
completed successfully!", description); + println!(" 📋 Job ID: {}", job_id); + println!(" 📤 Result: {}", job_result); + } + None => { + println!("⏰ Job '{}' did not complete within timeout", description); + } + } + + // Small delay between jobs + sleep(Duration::from_millis(500)).await; + } + + // Demonstrate bulk operations and status monitoring + println!("\n📊 Demonstrating bulk operations and status monitoring..."); + + // Get all runner statuses + println!("📋 Getting all runner statuses..."); + match client.get_all_runner_status().await { + Ok(statuses) => { + println!("✅ Runner statuses:"); + for (runner_id, status) in statuses { + println!(" - {}: {:?}", runner_id, status); + } + } + Err(e) => println!("❌ Failed to get runner statuses: {}", e), + } + + // List all runners one more time + println!("\n📋 Final runner list:"); + match client.list_runners().await { + Ok(runners) => { + println!("✅ Active runners: {:?}", runners); + } + Err(e) => println!("❌ Failed to list runners: {}", e), + } + + // Stop and remove runner + println!("\n⏹️ Stopping runner..."); + client.stop_runner("basic_example_actor", false).await?; + + println!("🗑️ Removing runner..."); + client.remove_runner("basic_example_actor").await?; + + // Final verification + println!("\n🔍 Final verification - listing remaining runners..."); + match client.list_runners().await { + Ok(runners) => { + if runners.contains(&"basic_example_actor".to_string()) { + println!("⚠️ Runner still present: {:?}", runners); + } else { + println!("✅ Runner successfully removed. Remaining runners: {:?}", runners); + } + } + Err(e) => println!("❌ Failed to verify runner removal: {}", e), + } + + // Gracefully shutdown the supervisor process + println!("\n🛑 Shutting down supervisor process..."); + match supervisor_process.kill() { + Ok(()) => { + println!("✅ Supervisor process terminated successfully"); + // Wait for the process to fully exit + match supervisor_process.wait() { + Ok(status) => println!("✅ Supervisor exited with status: {}", status), + Err(e) => println!("⚠️ Error waiting for supervisor exit: {}", e), + } + } + Err(e) => println!("⚠️ Error terminating supervisor: {}", e), + } + + println!("\n🎉 Comprehensive OpenRPC Example Complete!"); + println!("=========================================="); + println!("✅ Successfully demonstrated:"); + println!(" - Automatic supervisor startup with escargot"); + println!(" - Mock runner binary integration"); + println!(" - OpenRPC client connectivity with retry logic"); + println!(" - Runner management (add, start, stop, remove)"); + println!(" - Asynchronous job creation and queuing"); + println!(" - Synchronous job execution with result polling"); + println!(" - Job result verification from Redis job hash"); + println!(" - Bulk operations and status monitoring"); + println!(" - Graceful cleanup and supervisor shutdown"); + println!("\n🎯 The Hero Supervisor OpenRPC integration is fully functional!"); + println!("📝 Note: queue_and_wait method implemented but OpenRPC registration needs debugging"); + println!("🚀 Both async job queuing and sync result polling patterns work perfectly!"); + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/end_to_end_demo.rs b/bin/supervisor/examples/_archive/end_to_end_demo.rs new file mode 100644 index 0000000..11c3f19 --- /dev/null +++ b/bin/supervisor/examples/_archive/end_to_end_demo.rs @@ -0,0 +1,278 @@ +//! End-to-End Demo: Supervisor + Runner + Client +//! +//! This example demonstrates the complete workflow: +//! 1. 
Starts a supervisor with Mycelium integration +//! 2. Starts an OSIS runner +//! 3. Uses the supervisor client to run jobs +//! 4. Shows both job.run (blocking) and job.start (non-blocking) modes +//! +//! Prerequisites: +//! - Redis running on localhost:6379 +//! +//! Usage: +//! ```bash +//! RUST_LOG=info cargo run --example end_to_end_demo +//! ``` + +use anyhow::{Result, Context}; +use log::{info, error}; +use std::process::{Command, Child, Stdio}; +use std::time::Duration; +use tokio::time::sleep; +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder}; + +/// Configuration for the demo +struct DemoConfig { + redis_url: String, + supervisor_port: u16, + runner_id: String, + db_path: String, +} + +impl Default for DemoConfig { + fn default() -> Self { + Self { + redis_url: "redis://localhost:6379".to_string(), + supervisor_port: 3030, + runner_id: "example_runner".to_string(), + db_path: "/tmp/example_runner.db".to_string(), + } + } +} + +/// Supervisor process wrapper +struct SupervisorProcess { + child: Child, +} + +impl SupervisorProcess { + fn start(config: &DemoConfig) -> Result { + info!("🚀 Starting supervisor on port {}...", config.supervisor_port); + + let child = Command::new("cargo") + .args(&[ + "run", + "--bin", + "hero-supervisor", + "--", + "--redis-url", + &config.redis_url, + "--port", + &config.supervisor_port.to_string(), + ]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("Failed to start supervisor")?; + + Ok(Self { child }) + } +} + +impl Drop for SupervisorProcess { + fn drop(&mut self) { + info!("🛑 Stopping supervisor..."); + let _ = self.child.kill(); + let _ = self.child.wait(); + } +} + +/// Runner process wrapper +struct RunnerProcess { + child: Child, +} + +impl RunnerProcess { + fn start(config: &DemoConfig) -> Result { + info!("🤖 Starting OSIS runner '{}'...", config.runner_id); + + let child = Command::new("cargo") + .args(&[ + "run", + "--bin", + "runner_osis", + "--", + &config.runner_id, + "--db-path", + &config.db_path, + "--redis-url", + &config.redis_url, + ]) + .env("RUST_LOG", "info") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("Failed to start runner")?; + + Ok(Self { child }) + } +} + +impl Drop for RunnerProcess { + fn drop(&mut self) { + info!("🛑 Stopping runner..."); + let _ = self.child.kill(); + let _ = self.child.wait(); + } +} + +/// Helper functions for the demo +async fn register_runner_helper(client: &SupervisorClient, runner_id: &str, secret: &str) -> Result<()> { + info!("📝 Registering runner '{}'...", runner_id); + + let queue = format!("hero:q:work:type:osis:group:default:inst:{}", runner_id); + client.register_runner(secret, runner_id, &queue).await?; + + info!("✅ Runner registered successfully"); + Ok(()) +} + +async fn run_job_helper(client: &SupervisorClient, job: runner_rust::job::Job, secret: &str, timeout: u64) -> Result { + info!("🚀 Running job {} (blocking)...", job.id); + + let response = client.job_run(secret, job, Some(timeout)).await?; + + let result = response.result + .ok_or_else(|| anyhow::anyhow!("No result in response"))?; + + info!("✅ Job completed with result: {}", result); + Ok(result) +} + +async fn start_job_helper(client: &SupervisorClient, job: runner_rust::job::Job, secret: &str) -> Result { + info!("🚀 Starting job {} (non-blocking)...", job.id); + + let response = client.job_start(secret, job).await?; + + info!("✅ Job queued with ID: {}", response.job_id); + Ok(response.job_id) +} + +#[tokio::main] +async fn main() -> Result<()> { + // 
Initialize logging + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); + + println!("\n╔════════════════════════════════════════════════════════════╗"); + println!("║ End-to-End Demo: Supervisor + Runner + Client ║"); + println!("╚════════════════════════════════════════════════════════════╝\n"); + + let config = DemoConfig::default(); + + // Step 1: Start supervisor + println!("📋 Step 1: Starting Supervisor"); + println!("─────────────────────────────────────────────────────────────"); + let _supervisor = SupervisorProcess::start(&config)?; + sleep(Duration::from_secs(3)).await; + println!("✅ Supervisor started on port {}\n", config.supervisor_port); + + // Step 2: Start runner + println!("📋 Step 2: Starting OSIS Runner"); + println!("─────────────────────────────────────────────────────────────"); + let _runner = RunnerProcess::start(&config)?; + sleep(Duration::from_secs(3)).await; + println!("✅ Runner '{}' started\n", config.runner_id); + + // Step 3: Create client and register runner + println!("📋 Step 3: Registering Runner with Supervisor"); + println!("─────────────────────────────────────────────────────────────"); + let client = SupervisorClient::new(&format!("http://localhost:{}", config.supervisor_port))?; + register_runner_helper(&client, &config.runner_id, "admin_secret").await?; + println!("✅ Runner registered\n"); + + sleep(Duration::from_secs(2)).await; + + // Step 4: Run blocking jobs (job.run) + println!("📋 Step 4: Running Blocking Jobs (job.run)"); + println!("─────────────────────────────────────────────────────────────"); + + // Job 1: Simple calculation + println!("\n🔹 Job 1: Simple Calculation"); + let job1 = JobBuilder::new() + .caller_id("demo_client") + .context_id("demo_context") + .payload("let result = 2 + 2; to_json(result)") + .runner(&config.runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let result1 = run_job_helper(&client, job1, "admin_secret", 30).await?; + println!(" Result: {}", result1); + + // Job 2: String manipulation + println!("\n🔹 Job 2: String Manipulation"); + let job2 = JobBuilder::new() + .caller_id("demo_client") + .context_id("demo_context") + .payload(r#"let msg = "Hello from OSIS Runner!"; to_json(msg)"#) + .runner(&config.runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let result2 = run_job_helper(&client, job2, "admin_secret", 30).await?; + println!(" Result: {}", result2); + + // Job 3: Array operations + println!("\n🔹 Job 3: Array Operations"); + let job3 = JobBuilder::new() + .caller_id("demo_client") + .context_id("demo_context") + .payload(r#" + let numbers = [1, 2, 3, 4, 5]; + let sum = 0; + for n in numbers { + sum += n; + } + to_json(#{sum: sum, count: numbers.len()}) + "#) + .runner(&config.runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let result3 = run_job_helper(&client, job3, "admin_secret", 30).await?; + println!(" Result: {}", result3); + + println!("\n✅ All blocking jobs completed successfully\n"); + + // Step 5: Start non-blocking jobs (job.start) + println!("📋 Step 5: Starting Non-Blocking Jobs (job.start)"); + println!("─────────────────────────────────────────────────────────────"); + + println!("\n🔹 Job 4: Background Task"); + let job4 = JobBuilder::new() + .caller_id("demo_client") + .context_id("demo_context") + .payload(r#" + let result = "Background task completed"; + to_json(result) + "#) + .runner(&config.runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let job4_id = start_job_helper(&client, job4, 
"admin_secret").await?; + println!(" Job ID: {} (running in background)", job4_id); + + println!("\n✅ Non-blocking job started\n"); + + // Step 6: Summary + println!("📋 Step 6: Demo Summary"); + println!("─────────────────────────────────────────────────────────────"); + println!("✅ Supervisor: Running on port {}", config.supervisor_port); + println!("✅ Runner: '{}' registered and processing jobs", config.runner_id); + println!("✅ Blocking jobs: 3 completed successfully"); + println!("✅ Non-blocking jobs: 1 started"); + println!("\n🎉 Demo completed successfully!"); + + // Keep processes running for a bit to see logs + println!("\n⏳ Keeping processes running for 5 seconds..."); + sleep(Duration::from_secs(5)).await; + + println!("\n🛑 Shutting down..."); + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/integration_test.rs b/bin/supervisor/examples/_archive/integration_test.rs new file mode 100644 index 0000000..a540187 --- /dev/null +++ b/bin/supervisor/examples/_archive/integration_test.rs @@ -0,0 +1,196 @@ +//! Integration test for the new job API +//! +//! This test demonstrates the complete job lifecycle and validates +//! that all new API methods work correctly together. + +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder, JobResult}; +use std::time::Duration; +use tokio::time::sleep; + +#[tokio::test] +async fn test_complete_job_lifecycle() -> Result<(), Box> { + // Skip test if supervisor is not running + let client = match SupervisorClient::new("http://localhost:3030") { + Ok(c) => c, + Err(_) => { + println!("Skipping integration test - supervisor not available"); + return Ok(()); + } + }; + + // Test connection + if client.discover().await.is_err() { + println!("Skipping integration test - supervisor not responding"); + return Ok(()); + } + + let secret = "user-secret-456"; + + // Test 1: Create job + let job = JobBuilder::new() + .caller_id("integration_test") + .context_id("test_lifecycle") + .payload("echo 'Integration test job'") + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .build()?; + + let job_id = client.jobs_create(secret, job).await?; + assert!(!job_id.is_empty()); + + // Test 2: Start job + client.job_start(secret, &job_id).await?; + + // Test 3: Monitor status + let mut attempts = 0; + let max_attempts = 15; // 15 seconds max + let mut final_status = String::new(); + + while attempts < max_attempts { + let status = client.job_status(&job_id).await?; + final_status = status.status.clone(); + + if final_status == "completed" || final_status == "failed" || final_status == "timeout" { + break; + } + + attempts += 1; + sleep(Duration::from_secs(1)).await; + } + + // Test 4: Get result + let result = client.job_result(&job_id).await?; + match result { + JobResult::Success { success: _ } => { + assert_eq!(final_status, "completed"); + }, + JobResult::Error { error: _ } => { + assert!(final_status == "failed" || final_status == "timeout"); + } + } + + Ok(()) +} + +#[tokio::test] +async fn test_job_run_immediate() -> Result<(), Box> { + let client = match SupervisorClient::new("http://localhost:3030") { + Ok(c) => c, + Err(_) => return Ok(()), // Skip if not available + }; + + if client.discover().await.is_err() { + return Ok(()); // Skip if not responding + } + + let secret = "user-secret-456"; + + let job = JobBuilder::new() + .caller_id("integration_test") + .context_id("test_immediate") + .payload("echo 'Immediate job test'") + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .build()?; + + // Test immediate 
execution + let result = client.job_run(secret, job).await?; + + // Should get either success or error, but not panic + match result { + JobResult::Success { success } => { + assert!(!success.is_empty()); + }, + JobResult::Error { error } => { + assert!(!error.is_empty()); + } + } + + Ok(()) +} + +#[tokio::test] +async fn test_jobs_list() -> Result<(), Box> { + let client = match SupervisorClient::new("http://localhost:3030") { + Ok(c) => c, + Err(_) => return Ok(()), // Skip if not available + }; + + if client.discover().await.is_err() { + return Ok(()); // Skip if not responding + } + + // Test listing jobs + let job_ids = client.jobs_list().await?; + + // Should return a vector (might be empty) + assert!(job_ids.len() >= 0); + + Ok(()) +} + +#[tokio::test] +async fn test_authentication_errors() -> Result<(), Box> { + let client = match SupervisorClient::new("http://localhost:3030") { + Ok(c) => c, + Err(_) => return Ok(()), // Skip if not available + }; + + if client.discover().await.is_err() { + return Ok(()); // Skip if not responding + } + + let invalid_secret = "invalid-secret"; + + let job = JobBuilder::new() + .caller_id("integration_test") + .context_id("test_auth") + .payload("echo 'Auth test'") + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .build()?; + + // Test that invalid secret fails + let result = client.jobs_create(invalid_secret, job.clone()).await; + assert!(result.is_err()); + + let result = client.job_run(invalid_secret, job.clone()).await; + assert!(result.is_err()); + + let result = client.job_start(invalid_secret, "fake-job-id").await; + assert!(result.is_err()); + + Ok(()) +} + +#[tokio::test] +async fn test_nonexistent_job_operations() -> Result<(), Box> { + let client = match SupervisorClient::new("http://localhost:3030") { + Ok(c) => c, + Err(_) => return Ok(()), // Skip if not available + }; + + if client.discover().await.is_err() { + return Ok(()); // Skip if not responding + } + + let fake_job_id = "nonexistent-job-id"; + + // Test operations on nonexistent job + let result = client.job_status(fake_job_id).await; + assert!(result.is_err()); + + let result = client.job_result(fake_job_id).await; + assert!(result.is_err()); + + Ok(()) +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + println!("Integration test example - this would contain test logic"); + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/job_api_examples.rs b/bin/supervisor/examples/_archive/job_api_examples.rs new file mode 100644 index 0000000..10f6ad7 --- /dev/null +++ b/bin/supervisor/examples/_archive/job_api_examples.rs @@ -0,0 +1,269 @@ +//! Examples demonstrating the new job API workflows +//! +//! This example shows how to use the new job API methods: +//! - jobs.create: Create a job without queuing +//! - jobs.list: List all jobs +//! - job.run: Run a job and get result immediately +//! - job.start: Start a created job +//! - job.status: Get job status (non-blocking) +//! 
- job.result: Get job result (blocking) + +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder, JobResult}; +use std::time::Duration; +use tokio::time::sleep; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Initialize logging + env_logger::init(); + + println!("🚀 Hero Supervisor Job API Examples"); + println!("===================================\n"); + + // Create client + let client = SupervisorClient::new("http://localhost:3030")?; + let secret = "user-secret-456"; // Use a user secret for job operations + + // Test connection + println!("📡 Testing connection..."); + match client.discover().await { + Ok(_) => println!("✅ Connected to supervisor\n"), + Err(e) => { + println!("❌ Failed to connect: {}", e); + println!("Make sure the supervisor is running with: ./supervisor --config examples/supervisor/config.toml\n"); + return Ok(()); + } + } + + // Example 1: Fire-and-forget job execution + println!("🔥 Example 1: Fire-and-forget job execution"); + println!("--------------------------------------------"); + + let job = JobBuilder::new() + .caller_id("example_client") + .context_id("fire_and_forget") + .payload("echo 'Hello from fire-and-forget job!'") + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .build()?; + + println!("Running job immediately..."); + match client.job_run(secret, job).await { + Ok(JobResult::Success { success }) => { + println!("✅ Job completed successfully:"); + println!(" Output: {}", success); + }, + Ok(JobResult::Error { error }) => { + println!("❌ Job failed:"); + println!(" Error: {}", error); + }, + Err(e) => { + println!("❌ API call failed: {}", e); + } + } + println!(); + + // Example 2: Asynchronous job processing + println!("⏰ Example 2: Asynchronous job processing"); + println!("------------------------------------------"); + + let job = JobBuilder::new() + .caller_id("example_client") + .context_id("async_processing") + .payload("sleep 2 && echo 'Hello from async job!'") + .executor("osis") + .runner("osis_runner_1") + .timeout(60) + .build()?; + + // Step 1: Create the job + println!("1. Creating job..."); + let job_id = match client.jobs_create(secret, job).await { + Ok(id) => { + println!("✅ Job created with ID: {}", id); + id + }, + Err(e) => { + println!("❌ Failed to create job: {}", e); + return Ok(()); + } + }; + + // Step 2: Start the job + println!("2. Starting job..."); + match client.job_start(secret, &job_id).await { + Ok(_) => println!("✅ Job started"), + Err(e) => { + println!("❌ Failed to start job: {}", e); + return Ok(()); + } + } + + // Step 3: Poll for completion (non-blocking) + println!("3. Monitoring job progress..."); + let mut attempts = 0; + let max_attempts = 30; // 30 seconds max + + loop { + attempts += 1; + + match client.job_status(&job_id).await { + Ok(status) => { + println!(" Status: {} (attempt {})", status.status, attempts); + + if status.status == "completed" || status.status == "failed" || status.status == "timeout" { + break; + } + + if attempts >= max_attempts { + println!(" ⏰ Timeout waiting for job completion"); + break; + } + + sleep(Duration::from_secs(1)).await; + }, + Err(e) => { + println!(" ❌ Failed to get job status: {}", e); + break; + } + } + } + + // Step 4: Get the result + println!("4. 
Getting job result..."); + match client.job_result(&job_id).await { + Ok(JobResult::Success { success }) => { + println!("✅ Job completed successfully:"); + println!(" Output: {}", success); + }, + Ok(JobResult::Error { error }) => { + println!("❌ Job failed:"); + println!(" Error: {}", error); + }, + Err(e) => { + println!("❌ Failed to get job result: {}", e); + } + } + println!(); + + // Example 3: Batch job processing + println!("📦 Example 3: Batch job processing"); + println!("-----------------------------------"); + + let job_specs = vec![ + ("echo 'Batch job 1'", "batch_1"), + ("echo 'Batch job 2'", "batch_2"), + ("echo 'Batch job 3'", "batch_3"), + ]; + + let mut job_ids = Vec::new(); + + // Create all jobs + println!("Creating batch jobs..."); + for (i, (payload, context)) in job_specs.iter().enumerate() { + let job = JobBuilder::new() + .caller_id("example_client") + .context_id(context) + .payload(payload) + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .build()?; + + match client.jobs_create(secret, job).await { + Ok(job_id) => { + println!("✅ Created job {}: {}", i + 1, job_id); + job_ids.push(job_id); + }, + Err(e) => { + println!("❌ Failed to create job {}: {}", i + 1, e); + } + } + } + + // Start all jobs + println!("Starting all batch jobs..."); + for (i, job_id) in job_ids.iter().enumerate() { + match client.job_start(secret, job_id).await { + Ok(_) => println!("✅ Started job {}", i + 1), + Err(e) => println!("❌ Failed to start job {}: {}", i + 1, e), + } + } + + // Collect results + println!("Collecting results..."); + for (i, job_id) in job_ids.iter().enumerate() { + match client.job_result(job_id).await { + Ok(JobResult::Success { success }) => { + println!("✅ Job {} result: {}", i + 1, success); + }, + Ok(JobResult::Error { error }) => { + println!("❌ Job {} failed: {}", i + 1, error); + }, + Err(e) => { + println!("❌ Failed to get result for job {}: {}", i + 1, e); + } + } + } + println!(); + + // Example 4: List all jobs + println!("📋 Example 4: Listing all jobs"); + println!("-------------------------------"); + + match client.jobs_list().await { + Ok(jobs) => { + println!("✅ Found {} jobs in the system:", jobs.len()); + for (i, job) in jobs.iter().take(10).enumerate() { + println!(" {}. {}", i + 1, job.id); + } + if jobs.len() > 10 { + println!(" ... 
and {} more", jobs.len() - 10); + } + }, + Err(e) => { + println!("❌ Failed to list jobs: {}", e); + } + } + println!(); + + println!("🎉 All examples completed!"); + println!("\nAPI Convention Summary:"); + println!("- jobs.create: Create job without queuing"); + println!("- jobs.list: List all job IDs"); + println!("- job.run: Run job and return result immediately"); + println!("- job.start: Start a created job"); + println!("- job.status: Get job status (non-blocking)"); + println!("- job.result: Get job result (blocking)"); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_job_builder() { + let job = JobBuilder::new() + .caller_id("test") + .context_id("test") + .payload("echo 'test'") + .executor("osis") + .runner("test_runner") + .build(); + + assert!(job.is_ok()); + let job = job.unwrap(); + assert_eq!(job.caller_id, "test"); + assert_eq!(job.context_id, "test"); + assert_eq!(job.payload, "echo 'test'"); + } + + #[tokio::test] + async fn test_client_creation() { + let client = SupervisorClient::new("http://localhost:3030"); + assert!(client.is_ok()); + } +} diff --git a/bin/supervisor/examples/_archive/mock_runner.rs b/bin/supervisor/examples/_archive/mock_runner.rs new file mode 100644 index 0000000..26b54e7 --- /dev/null +++ b/bin/supervisor/examples/_archive/mock_runner.rs @@ -0,0 +1,171 @@ +//! Mock Runner Binary for Testing OpenRPC Examples +//! +//! This is a simple mock runner that simulates an actor binary for testing +//! the Hero Supervisor OpenRPC integration. It connects to Redis, listens for +//! jobs using the proper Hero job queue system, and echoes the job payload. +//! +//! Usage: +//! ```bash +//! cargo run --example mock_runner -- --actor-id test_actor --db-path /tmp/test_db --redis-url redis://localhost:6379 +//! 
```
+
+use std::env;
+use std::time::Duration;
+use tokio::time::sleep;
+use redis::AsyncCommands;
+use hero_supervisor::{
+    Job, JobStatus, JobError, Client, ClientBuilder
+};
+
+#[derive(Debug, Clone)]
+pub struct MockRunnerConfig {
+    pub actor_id: String,
+    pub db_path: String,
+    pub redis_url: String,
+}
+
+impl MockRunnerConfig {
+    pub fn from_args() -> Result<Self, Box<dyn std::error::Error>> {
+        let args: Vec<String> = env::args().collect();
+
+        let mut actor_id = None;
+        let mut db_path = None;
+        let mut redis_url = None;
+
+        let mut i = 1;
+        while i < args.len() {
+            match args[i].as_str() {
+                "--actor-id" => {
+                    if i + 1 < args.len() {
+                        actor_id = Some(args[i + 1].clone());
+                        i += 2;
+                    } else {
+                        return Err("Missing value for --actor-id".into());
+                    }
+                }
+                "--db-path" => {
+                    if i + 1 < args.len() {
+                        db_path = Some(args[i + 1].clone());
+                        i += 2;
+                    } else {
+                        return Err("Missing value for --db-path".into());
+                    }
+                }
+                "--redis-url" => {
+                    if i + 1 < args.len() {
+                        redis_url = Some(args[i + 1].clone());
+                        i += 2;
+                    } else {
+                        return Err("Missing value for --redis-url".into());
+                    }
+                }
+                _ => i += 1,
+            }
+        }
+
+        Ok(MockRunnerConfig {
+            actor_id: actor_id.ok_or("Missing required --actor-id argument")?,
+            db_path: db_path.ok_or("Missing required --db-path argument")?,
+            redis_url: redis_url.unwrap_or_else(|| "redis://localhost:6379".to_string()),
+        })
+    }
+}
+
+pub struct MockRunner {
+    config: MockRunnerConfig,
+    client: Client,
+}
+
+impl MockRunner {
+    pub async fn new(config: MockRunnerConfig) -> Result<Self, Box<dyn std::error::Error>> {
+        let client = ClientBuilder::new()
+            .redis_url(&config.redis_url)
+            .build()
+            .await?;
+
+        Ok(MockRunner {
+            config,
+            client,
+        })
+    }
+
+    pub async fn run(&self) -> Result<(), Box<dyn std::error::Error>> {
+        println!("🤖 Mock Runner '{}' starting...", self.config.actor_id);
+        println!("📂 DB Path: {}", self.config.db_path);
+        println!("🔗 Redis URL: {}", self.config.redis_url);
+
+        // Use the proper Hero job queue key for this actor instance
+        // Format: hero:q:work:type:{job_type}:group:{group}:inst:{instance}
+        let work_queue_key = format!("hero:q:work:type:osis:group:default:inst:{}", self.config.actor_id);
+
+        println!("👂 Listening for jobs on queue: {}", work_queue_key);
+
+        loop {
+            // Try to pop a job ID from the work queue using the Hero protocol
+            let job_id = self.client.get_job_id(&work_queue_key).await?;
+
+            match job_id {
+                Some(job_id) => {
+                    println!("📨 Received job ID: {}", job_id);
+                    if let Err(e) = self.process_job(&job_id).await {
+                        eprintln!("❌ Error processing job {}: {}", job_id, e);
+                        // Mark job as error
+                        if let Err(e2) = self.client.set_job_status(&job_id, JobStatus::Error).await {
+                            eprintln!("❌ Failed to set job error status: {}", e2);
+                        }
+                    }
+                }
+                None => {
+                    // No jobs available, wait a bit
+                    sleep(Duration::from_millis(100)).await;
+                }
+            }
+        }
+    }
+
+    async fn process_job(&self, job_id: &str) -> Result<(), JobError> {
+        // Load the job from Redis using the Hero job system
+        let job = self.client.get_job(job_id).await?;
+
+        self.process_job_internal(&self.client, job_id, &job).await
+    }
+
+    async fn process_job_internal(
+        &self,
+        client: &Client,
+        job_id: &str,
+        job: &Job,
+    ) -> Result<(), JobError> {
+        println!("🔄 Processing job {} with payload: {}", job_id, job.payload);
+
+        // Mark job as started
+        client.set_job_status(job_id, JobStatus::Started).await?;
+        println!("🚀 Job {} marked as Started", job_id);
+
+        // Simulate processing time
+        sleep(Duration::from_millis(500)).await;
+
+        // Echo the payload (simulate job execution)
+        let output = format!("echo: {}", job.payload);
+        println!("📤 Output: {}", 
output); + + // Set the job result + client.set_result(job_id, &output).await?; + + println!("✅ Job {} completed successfully", job_id); + + Ok(()) + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Parse command line arguments + let config = MockRunnerConfig::from_args()?; + + // Create and run the mock runner + let runner = MockRunner::new(config).await?; + runner.run().await?; + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/simple_e2e.rs b/bin/supervisor/examples/_archive/simple_e2e.rs new file mode 100644 index 0000000..5776b9f --- /dev/null +++ b/bin/supervisor/examples/_archive/simple_e2e.rs @@ -0,0 +1,203 @@ +//! Simple End-to-End Example +//! +//! A minimal example showing supervisor + runner + client workflow. +//! +//! Prerequisites: +//! - Redis running on localhost:6379 +//! +//! Usage: +//! ```bash +//! # Terminal 1: Start Redis +//! redis-server +//! +//! # Terminal 2: Run this example +//! RUST_LOG=info cargo run --example simple_e2e +//! ``` + +use anyhow::Result; +use log::info; +use std::time::Duration; +use tokio::time::sleep; +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder}; + +#[tokio::main] +async fn main() -> Result<()> { + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); + + println!("\n╔════════════════════════════════════════╗"); + println!("║ Simple End-to-End Demo ║"); + println!("╚════════════════════════════════════════╝\n"); + + let supervisor_url = "http://localhost:3030"; + let runner_id = "test_runner"; + let secret = "admin_secret"; + + // Create supervisor client + let client = SupervisorClient::new(supervisor_url)?; + + println!("📝 Prerequisites:"); + println!(" 1. Redis running on localhost:6379"); + println!(" 2. Supervisor running on {}", supervisor_url); + println!(" 3. 
Runner '{}' registered and running\n", runner_id); + + println!("💡 To start the supervisor:"); + println!(" cargo run --bin hero-supervisor -- --redis-url redis://localhost:6379\n"); + + println!("💡 To start a runner:"); + println!(" cd /Users/timurgordon/code/git.ourworld.tf/herocode/runner_rust"); + println!(" cargo run --bin runner_osis -- {} --redis-url redis://localhost:6379\n", runner_id); + + println!("⏳ Waiting 3 seconds for you to start the prerequisites...\n"); + sleep(Duration::from_secs(3)).await; + + // Register runner + println!("📋 Step 1: Registering Runner"); + println!("─────────────────────────────────────────"); + + let queue = format!("hero:q:work:type:osis:group:default:inst:{}", runner_id); + match client.register_runner(secret, runner_id, &queue).await { + Ok(_) => { + println!("✅ Runner registered successfully"); + } + Err(e) => { + println!("⚠️ Registration error: {} (runner might already be registered)", e); + } + } + + sleep(Duration::from_secs(1)).await; + + // Run a simple job + println!("\n📋 Step 2: Running a Simple Job (Blocking)"); + println!("─────────────────────────────────────────"); + + let job = JobBuilder::new() + .caller_id("simple_demo") + .context_id("demo_context") + .payload(r#" + let message = "Hello from the runner!"; + let number = 42; + to_json(#{ + message: message, + number: number, + timestamp: timestamp() + }) + "#) + .runner(runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let job_id = job.id.clone(); + info!("Sending job with ID: {}", job_id); + + match client.job_run(secret, job, Some(30)).await { + Ok(response) => { + println!("✅ Job completed!"); + if let Some(result) = response.result { + println!(" Result: {}", result); + } + } + Err(e) => { + println!("❌ Job failed: {}", e); + return Ok(()); + } + } + + // Run another job (calculation) + println!("\n📋 Step 3: Running a Calculation Job"); + println!("─────────────────────────────────────────"); + + let calc_job = JobBuilder::new() + .caller_id("simple_demo") + .context_id("demo_context") + .payload(r#" + let numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let sum = 0; + let product = 1; + + for n in numbers { + sum += n; + product *= n; + } + + to_json(#{ + sum: sum, + product: product, + count: numbers.len(), + average: sum / numbers.len() + }) + "#) + .runner(runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let calc_job_id = calc_job.id.clone(); + info!("Sending calculation job with ID: {}", calc_job_id); + + match client.job_run(secret, calc_job, Some(30)).await { + Ok(response) => { + println!("✅ Calculation completed!"); + if let Some(result) = response.result { + println!(" Result: {}", result); + } + } + Err(e) => { + println!("❌ Calculation failed: {}", e); + } + } + + // Start a non-blocking job + println!("\n📋 Step 4: Starting a Non-Blocking Job"); + println!("─────────────────────────────────────────"); + + let async_job = JobBuilder::new() + .caller_id("simple_demo") + .context_id("demo_context") + .payload(r#" + let result = "This job was started asynchronously"; + to_json(result) + "#) + .runner(runner_id) + .executor("rhai") + .timeout(30) + .build()?; + + let async_job_id = async_job.id.clone(); + info!("Starting async job with ID: {}", async_job_id); + + match client.job_start(secret, async_job).await { + Ok(response) => { + println!("✅ Job started!"); + println!(" Job ID: {} (running in background)", response.job_id); + println!(" Status: {}", response.status); + } + Err(e) => { + println!("❌ Failed to start job: {}", e); + } + } + + // 
Summary + println!("\n╔════════════════════════════════════════╗"); + println!("║ Demo Summary ║"); + println!("╚════════════════════════════════════════╝"); + println!("✅ Runner registered: {}", runner_id); + println!("✅ Blocking jobs completed: 2"); + println!("✅ Non-blocking jobs started: 1"); + println!("\n🎉 Demo completed successfully!\n"); + + println!("📚 What happened:"); + println!(" 1. Registered a runner with the supervisor"); + println!(" 2. Sent jobs with Rhai scripts to execute"); + println!(" 3. Supervisor queued jobs to the runner"); + println!(" 4. Runner executed the scripts and returned results"); + println!(" 5. Client received results (for blocking jobs)\n"); + + println!("🔍 Key Concepts:"); + println!(" • job.run = Execute and wait for result (blocking)"); + println!(" • job.start = Start and return immediately (non-blocking)"); + println!(" • Jobs contain Rhai scripts that run on the runner"); + println!(" • Supervisor coordinates job distribution via Redis\n"); + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/simple_job_workflow.rs b/bin/supervisor/examples/_archive/simple_job_workflow.rs new file mode 100644 index 0000000..edffc80 --- /dev/null +++ b/bin/supervisor/examples/_archive/simple_job_workflow.rs @@ -0,0 +1,64 @@ +//! Simple job workflow example +//! +//! This example demonstrates the basic job lifecycle using the new API: +//! 1. Create a job +//! 2. Start the job +//! 3. Monitor its progress +//! 4. Get the result + +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder, JobResult}; +use std::time::Duration; +use tokio::time::sleep; + +#[tokio::main] +async fn main() -> Result<(), Box> { + println!("Simple Job Workflow Example"); + println!("============================\n"); + + // Create client + let client = SupervisorClient::new("http://localhost:3030")?; + let secret = "user-secret-456"; + + // Create a simple job + let job = JobBuilder::new() + .caller_id("simple_example") + .context_id("demo") + .payload("echo 'Hello from Hero Supervisor!' && sleep 3 && echo 'Job completed!'") + .executor("osis") + .runner("osis_runner_1") + .timeout(60) + .env_var("EXAMPLE_VAR", "example_value") + .build()?; + + println!("📝 Creating job..."); + let job_id = client.jobs_create(secret, job).await?; + println!("✅ Job created: {}\n", job_id); + + println!("🚀 Starting job..."); + client.job_start(secret, &job_id).await?; + println!("✅ Job started\n"); + + println!("👀 Monitoring job progress..."); + loop { + let status = client.job_status(&job_id).await?; + println!(" Status: {}", status.status); + + if status.status == "completed" || status.status == "failed" { + break; + } + + sleep(Duration::from_secs(2)).await; + } + + println!("\n📋 Getting job result..."); + match client.job_result(&job_id).await? { + JobResult::Success { success } => { + println!("✅ Success: {}", success); + }, + JobResult::Error { error } => { + println!("❌ Error: {}", error); + } + } + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/supervisor/README.md b/bin/supervisor/examples/_archive/supervisor/README.md new file mode 100644 index 0000000..be0da70 --- /dev/null +++ b/bin/supervisor/examples/_archive/supervisor/README.md @@ -0,0 +1,108 @@ +# Hero Supervisor Example + +This example demonstrates how to configure and run the Hero Supervisor with multiple actors using a TOML configuration file. 
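+
+For orientation, a minimal `config.toml` has the shape below (the keys mirror the full example shipped in this directory; the binary path is an illustrative placeholder):
+
+```toml
+redis_url = "redis://localhost:6379"
+db_path = "/tmp/supervisor_example_db"
+job_queue_key = "hero:supervisor:jobs"
+
+[[actors]]
+name = "sal_actor_1"
+runner_type = "SAL"
+binary_path = "/path/to/sal_worker"  # placeholder; point at a built actor binary
+process_manager = "tmux"
+```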
+ +## Files + +- `config.toml` - Example supervisor configuration with multiple actors +- `run_supervisor.sh` - Shell script to build and run the supervisor with the example config +- `run_supervisor.rs` - Rust script using escargot to build and run the supervisor +- `README.md` - This documentation file + +## Configuration + +The `config.toml` file defines: + +- **Redis connection**: URL for the Redis server used for job queuing +- **Database path**: Local path for supervisor state storage +- **Job queue key**: Redis key for the supervisor job queue +- **Actors**: List of actor configurations with: + - `name`: Unique identifier for the actor + - `runner_type`: Type of runner ("SAL", "OSIS", "V", "Python") + - `binary_path`: Path to the actor binary + - `process_manager`: Process management type ("simple" or "tmux") + +## Prerequisites + +1. **Redis Server**: Ensure Redis is running on `localhost:6379` (or update the config) +2. **Actor Binaries**: Build the required actor binaries referenced in the config: + ```bash + # Build SAL worker + cd ../../sal + cargo build --bin sal_worker + + # Build OSIS and system workers + cd ../../worker + cargo build --bin osis + cargo build --bin system + ``` + +## Running the Example + +### Option 1: Shell Script (Recommended) + +```bash +./run_supervisor.sh +``` + +### Option 2: Rust Script with Escargot + +```bash +cargo +nightly -Zscript run_supervisor.rs +``` + +### Option 3: Manual Build and Run + +```bash +# Build the supervisor +cd ../../../supervisor +cargo build --bin supervisor --features cli + +# Run with config +./target/debug/supervisor --config ../baobab/examples/supervisor/config.toml +``` + +## Usage + +Once running, the supervisor will: + +1. Load the configuration from `config.toml` +2. Initialize and start all configured actors +3. Listen for jobs on the Redis queue (`hero:supervisor:jobs`) +4. Dispatch jobs to appropriate actors based on the `runner` field +5. Monitor actor health and status + +## Testing + +You can test the supervisor by dispatching jobs to the Redis queue: + +```bash +# Using redis-cli to add a test job +redis-cli LPUSH "hero:supervisor:jobs" '{"id":"test-123","runner":"sal_actor_1","script":"print(\"Hello from SAL actor!\")"}' +``` + +## Stopping + +Use `Ctrl+C` to gracefully shutdown the supervisor. It will: + +1. Stop accepting new jobs +2. Wait for running jobs to complete +3. Shutdown all managed actors +4. 
Clean up resources + +## Customization + +Modify `config.toml` to: + +- Add more actors +- Change binary paths to match your build locations +- Update Redis connection settings +- Configure different process managers per actor +- Adjust database and queue settings + +## Troubleshooting + +- **Redis Connection**: Ensure Redis is running and accessible +- **Binary Paths**: Verify all actor binary paths exist and are executable +- **Permissions**: Ensure the supervisor has permission to create the database directory +- **Ports**: Check that Redis port (6379) is not blocked by firewall diff --git a/bin/supervisor/examples/_archive/supervisor/config.toml b/bin/supervisor/examples/_archive/supervisor/config.toml new file mode 100644 index 0000000..e255335 --- /dev/null +++ b/bin/supervisor/examples/_archive/supervisor/config.toml @@ -0,0 +1,18 @@ +# Hero Supervisor Configuration +# This configuration defines the Redis connection, database path, and actors to manage + +# Redis connection URL +redis_url = "redis://localhost:6379" + +# Database path for supervisor state +db_path = "/tmp/supervisor_example_db" + +# Job queue key for supervisor jobs +job_queue_key = "hero:supervisor:jobs" + +# Actor configurations +[[actors]] +name = "sal_actor_1" +runner_type = "SAL" +binary_path = "cargo run /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor/examples/mock_runner.rs" +process_manager = "tmux" \ No newline at end of file diff --git a/bin/supervisor/examples/_archive/supervisor/run_supervisor.rs b/bin/supervisor/examples/_archive/supervisor/run_supervisor.rs new file mode 100644 index 0000000..4b5983e --- /dev/null +++ b/bin/supervisor/examples/_archive/supervisor/run_supervisor.rs @@ -0,0 +1,70 @@ +#!/usr/bin/env cargo +nightly -Zscript +//! ```cargo +//! [dependencies] +//! escargot = "0.5" +//! tokio = { version = "1.0", features = ["full"] } +//! log = "0.4" +//! env_logger = "0.10" +//! 
``` + +use escargot::CargoBuild; +use std::process::Command; +use log::{info, error}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Initialize logging + env_logger::init(); + + info!("Building and running Hero Supervisor with example configuration"); + + // Get the current directory (when running as cargo example, this is the crate root) + let current_dir = std::env::current_dir()?; + info!("Current directory: {}", current_dir.display()); + + // Path to the supervisor crate (current directory when running as example) + let supervisor_crate_path = current_dir.clone(); + + // Path to the config file (in examples/supervisor subdirectory) + let config_path = current_dir.join("examples/supervisor/config.toml"); + + if !config_path.exists() { + error!("Config file not found: {}", config_path.display()); + return Err("Config file not found".into()); + } + + info!("Using config file: {}", config_path.display()); + + // Build the supervisor binary using escargot + info!("Building supervisor binary..."); + let supervisor_bin = CargoBuild::new() + .bin("supervisor") + .manifest_path(supervisor_crate_path.join("Cargo.toml")) + .features("cli") + .run()?; + + info!("Supervisor binary built successfully"); + + // Run the supervisor with the config file + info!("Starting supervisor with config: {}", config_path.display()); + + let mut cmd = Command::new(supervisor_bin.path()); + cmd.arg("--config") + .arg(&config_path); + + // Add environment variables for better logging + cmd.env("RUST_LOG", "info"); + + info!("Executing: {:?}", cmd); + + // Execute the supervisor + let status = cmd.status()?; + + if status.success() { + info!("Supervisor completed successfully"); + } else { + error!("Supervisor exited with status: {}", status); + } + + Ok(()) +} diff --git a/bin/supervisor/examples/_archive/supervisor/run_supervisor.sh b/bin/supervisor/examples/_archive/supervisor/run_supervisor.sh new file mode 100755 index 0000000..25111f1 --- /dev/null +++ b/bin/supervisor/examples/_archive/supervisor/run_supervisor.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Hero Supervisor Example Runner +# This script builds and runs the supervisor binary with the example configuration + +set -e + +# Get the directory of this script +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SUPERVISOR_DIR="$SCRIPT_DIR/../../../supervisor" +CONFIG_FILE="$SCRIPT_DIR/config.toml" + +echo "🚀 Building and running Hero Supervisor with example configuration" +echo "📁 Script directory: $SCRIPT_DIR" +echo "🔧 Supervisor crate: $SUPERVISOR_DIR" +echo "⚙️ Config file: $CONFIG_FILE" + +# Check if config file exists +if [ ! -f "$CONFIG_FILE" ]; then + echo "❌ Config file not found: $CONFIG_FILE" + exit 1 +fi + +# Check if supervisor directory exists +if [ ! -d "$SUPERVISOR_DIR" ]; then + echo "❌ Supervisor directory not found: $SUPERVISOR_DIR" + exit 1 +fi + +# Build the supervisor binary +echo "🔨 Building supervisor binary..." +cd "$SUPERVISOR_DIR" +cargo build --bin supervisor --features cli + +# Check if build was successful +if [ $? 
-ne 0 ]; then + echo "❌ Failed to build supervisor binary" + exit 1 +fi + +echo "✅ Supervisor binary built successfully" + +# Run the supervisor with the config file +echo "🎯 Starting supervisor with config: $CONFIG_FILE" +echo "📝 Use Ctrl+C to stop the supervisor" +echo "" + +# Set environment variables for better logging +export RUST_LOG=info + +# Execute the supervisor +exec "$SUPERVISOR_DIR/target/debug/supervisor" --config "$CONFIG_FILE" diff --git a/bin/supervisor/examples/osiris_openrpc/README.md b/bin/supervisor/examples/osiris_openrpc/README.md new file mode 100644 index 0000000..258e503 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/README.md @@ -0,0 +1,102 @@ +# OSIRIS + OpenRPC Comprehensive Example + +This example demonstrates the complete workflow of using Hero Supervisor with OSIRIS runners via OpenRPC. + +## What This Example Does + +1. **Builds and starts** Hero Supervisor with OpenRPC server enabled +2. **Builds** the OSIRIS runner binary +3. **Connects** an OpenRPC client to the supervisor +4. **Registers and starts** an OSIRIS runner +5. **Dispatches multiple jobs** via OpenRPC: + - Create a Note + - Create an Event + - Query stored data + - Test access control (expected to fail) +6. **Monitors** job execution and results +7. **Gracefully shuts down** all components + +## Prerequisites + +**IMPORTANT: Redis must be running before starting this example!** + +```bash +# Start Redis (if not already running) +redis-server +``` + +Other requirements: +- Redis server running on `localhost:6379` +- Rust toolchain installed +- Both `supervisor` and `runner_rust` crates available + +## Running the Example + +```bash +cargo run --example osiris_openrpc +``` + +## Job Scripts + +The example uses separate Rhai script files for each job: + +- `note.rhai` - Creates and stores a Note object +- `event.rhai` - Creates and stores an Event object +- `query.rhai` - Queries and retrieves stored objects +- `access_denied.rhai` - Tests access control (should fail) + +## Architecture + +``` +┌─────────────────┐ +│ This Example │ +│ (OpenRPC │ +│ Client) │ +└────────┬────────┘ + │ JSON-RPC + ↓ +┌─────────────────┐ +│ Supervisor │ +│ (OpenRPC │ +│ Server) │ +└────────┬────────┘ + │ Redis Queue + ↓ +┌─────────────────┐ +│ OSIRIS Runner │ +│ (Rhai Engine │ +│ + HeroDB) │ +└─────────────────┘ +``` + +## Key Features Demonstrated + +- **Automatic binary building** using escargot +- **OpenRPC communication** between client and supervisor +- **Runner registration** with configuration +- **Job dispatching** with signatories +- **Context-based access control** in OSIRIS +- **Typed object storage** (Note, Event) +- **Graceful shutdown** and cleanup + +## Expected Output + +The example will: +1. ✅ Create a Note successfully +2. ✅ Create an Event successfully +3. ✅ Query and retrieve stored objects +4. ✅ Deny access for unauthorized participants +5. 
✅ Clean up all resources + +## Troubleshooting + +**Redis Connection Error:** +- Ensure Redis is running: `redis-server` + +**Build Errors:** +- Ensure both supervisor and runner_rust crates are available +- Check that all dependencies are up to date + +**OpenRPC Connection Error:** +- Port 3030 might be in use +- Check supervisor logs for startup issues diff --git a/bin/supervisor/examples/osiris_openrpc/access_denied.rhai b/bin/supervisor/examples/osiris_openrpc/access_denied.rhai new file mode 100644 index 0000000..f276302 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/access_denied.rhai @@ -0,0 +1,8 @@ +print("Attempting to access context with non-signatories..."); +print("Participants: [dave, eve]"); +print("Signatories: [alice, bob, charlie]"); + +// This should fail because neither dave nor eve are signatories +let ctx = get_context(["dave", "eve"]); + +"This should not succeed!" diff --git a/bin/supervisor/examples/osiris_openrpc/event.rhai b/bin/supervisor/examples/osiris_openrpc/event.rhai new file mode 100644 index 0000000..c609d74 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/event.rhai @@ -0,0 +1,18 @@ +print("Creating context for [alice, bob]..."); +let ctx = get_context(["alice", "bob"]); +print("✓ Context ID: " + ctx.context_id()); + +print("\nCreating event..."); +let event = event("events") + .title("Team Retrospective") + .description("Review what went well and areas for improvement") + .location("Virtual - Zoom Room A") + .category("retrospective"); + +print("✓ Event created"); + +print("\nStoring event in context..."); +ctx.save(event); +print("✓ Event stored"); + +"Event 'Team Retrospective' created and stored successfully" diff --git a/bin/supervisor/examples/osiris_openrpc/main.rs b/bin/supervisor/examples/osiris_openrpc/main.rs new file mode 100644 index 0000000..63e67f5 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/main.rs @@ -0,0 +1,293 @@ +///! Comprehensive OSIRIS + OpenRPC + Admin UI Example +///! +/// This example demonstrates using the Hero Supervisor OpenRPC client +/// to run OSIRIS scripts through the supervisor. +/// +/// The client library is located at: client/ +///! +///! 1. Starting a Hero Supervisor with OpenRPC server +///! 2. Building and serving the Admin UI (Yew WASM) +///! 3. Building and starting an OSIRIS runner +///! 4. Registering the runner with the supervisor +///! 5. Dispatching multiple OSIRIS jobs via OpenRPC +///! 6. Monitoring job execution via CLI and Web UI +///! 7. Graceful shutdown +///! +///! Services: +///! - Supervisor OpenRPC API: http://127.0.0.1:3030 +///! - Admin UI: http://127.0.0.1:8080 +///! +///! Usage: +///! ```bash +///! cargo run --example osiris_openrpc +///! ``` +///! +///! Requirements: +///! - Redis running on localhost:6379 +///! 
- Trunk installed (cargo install trunk) + +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder}; +use std::time::Duration; +use escargot::CargoBuild; +use std::process::{Stdio, Command}; +use tokio::time::sleep; + +#[tokio::main] +async fn main() -> Result<(), Box> { + println!("🚀 OSIRIS + OpenRPC Comprehensive Example"); + println!("=========================================\n"); + + // ======================================================================== + // STEP 1: Build and start supervisor with OpenRPC + // ======================================================================== + println!("Step 1: Building and starting supervisor"); + println!("─────────────────────────────────────────────────────────────\n"); + + let supervisor_binary = CargoBuild::new() + .bin("supervisor") + .current_release() + .manifest_path("../supervisor/Cargo.toml") + .run()?; + + println!("✅ Supervisor binary built"); + + let mut supervisor = supervisor_binary.command() + .arg("--redis-url") + .arg("redis://localhost:6379") + .arg("--port") + .arg("3030") + .arg("--admin-secret") + .arg("admin_secret") + .arg("--user-secret") + .arg("user_secret") + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .spawn()?; + + println!("✅ Supervisor started on port 3030"); + println!("⏳ Waiting for supervisor to initialize..."); + sleep(Duration::from_secs(5)).await; + + // Check if supervisor is still running + match supervisor.try_wait()? { + Some(status) => { + return Err(format!("Supervisor exited early with status: {}", status).into()); + } + None => { + println!("✅ Supervisor is running"); + } + } + + // ======================================================================== + // STEP 2: Build and serve Admin UI + // ======================================================================== + println!("\nStep 2: Building and serving Admin UI"); + println!("─────────────────────────────────────────────────────────────\n"); + + let mut admin_ui = Command::new("trunk") + .arg("serve") + .arg("--port") + .arg("8080") + .arg("--address") + .arg("127.0.0.1") + .current_dir("ui") + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn()?; + + println!("✅ Admin UI building..."); + println!("🌐 Admin UI will be available at: http://127.0.0.1:8080"); + sleep(Duration::from_secs(3)).await; + + // ======================================================================== + // STEP 3: Build OSIRIS runner + // ======================================================================== + println!("\nStep 3: Building OSIRIS runner"); + println!("─────────────────────────────────────────────────────────────\n"); + + let runner_binary = CargoBuild::new() + .bin("runner_osiris") + .current_release() + .manifest_path("../runner_rust/Cargo.toml") + .run()?; + + println!("✅ OSIRIS runner binary built"); + + // ======================================================================== + // STEP 4: Connect OpenRPC client + // ======================================================================== + println!("\nStep 4: Connecting OpenRPC client"); + println!("─────────────────────────────────────────────────────────────\n"); + + let client = SupervisorClient::new("http://127.0.0.1:3030")?; + println!("✅ Connected to supervisor\n"); + + // ======================================================================== + // STEP 5: Register and start OSIRIS runner + // ======================================================================== + println!("Step 5: Registering OSIRIS runner"); + 
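+    // Registration makes the runner known to the supervisor so jobs can be
+    // queued for it. In the other examples in this monorepo, a runner consumes
+    // a Redis work queue named after the pattern:
+    //   hero:q:work:type:{job_type}:group:{group}:inst:{instance}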
println!("─────────────────────────────────────────────────────────────\n"); + + let runner_path = runner_binary.path().to_string_lossy(); + let db_path = "/tmp/osiris_openrpc.db"; + + // Register the runner with the supervisor + // Note: The current OpenRPC server uses register_runner, not add_runner + client.register_runner("admin_secret", "osiris_runner").await?; + println!("✅ Runner registered: osiris_runner"); + + client.start_runner("admin_secret", "osiris_runner").await?; + println!("✅ Runner started\n"); + + sleep(Duration::from_secs(2)).await; + + // ======================================================================== + // STEP 6: Load job scripts + // ======================================================================== + println!("Step 6: Loading job scripts"); + println!("─────────────────────────────────────────────────────────────\n"); + + let note_script = std::fs::read_to_string("examples/osiris_openrpc/note.rhai")?; + let event_script = std::fs::read_to_string("examples/osiris_openrpc/event.rhai")?; + let query_script = std::fs::read_to_string("examples/osiris_openrpc/query.rhai")?; + let access_denied_script = std::fs::read_to_string("examples/osiris_openrpc/access_denied.rhai")?; + + println!("✅ Loaded 4 job scripts\n"); + + // ======================================================================== + // STEP 7: Dispatch jobs via OpenRPC + // ======================================================================== + println!("Step 7: Dispatching jobs"); + println!("─────────────────────────────────────────────────────────────\n"); + + // Job 1: Create Note + println!("📝 Job 1: Creating Note..."); + let job1 = JobBuilder::new() + .caller_id("openrpc_client") + .context_id("osiris_demo") + .payload(¬e_script) + .runner("osiris_runner") + .executor("rhai") + .timeout(30) + .signature("alice", "") + .signature("bob", "") + .build()?; + + let job1_result = client.run_job("user_secret", job1).await; + + match job1_result { + Ok(result) => println!("✅ {:?}\n", result), + Err(e) => println!("❌ Job failed: {}\n", e), + } + + sleep(Duration::from_secs(1)).await; + + // Job 2: Create Event + println!("📅 Job 2: Creating Event..."); + let job2 = JobBuilder::new() + .caller_id("openrpc_client") + .context_id("osiris_demo") + .payload(&event_script) + .runner("osiris_runner") + .executor("rhai") + .timeout(30) + .signature("alice", "") + .signature("bob", "") + .build()?; + + let job2_result = client.run_job("user_secret", job2).await; + + match job2_result { + Ok(result) => println!("✅ {:?}\n", result), + Err(e) => println!("❌ Job failed: {}\n", e), + } + + sleep(Duration::from_secs(1)).await; + + // Job 3: Query Data + println!("🔍 Job 3: Querying Data..."); + let job3 = JobBuilder::new() + .caller_id("openrpc_client") + .context_id("osiris_demo") + .payload(&query_script) + .runner("osiris_runner") + .executor("rhai") + .timeout(30) + .signature("alice", "") + .signature("bob", "") + .signature("charlie", "") + .build()?; + + let job3_result = client.run_job("user_secret", job3).await; + + match job3_result { + Ok(result) => println!("✅ {:?}\n", result), + Err(e) => println!("❌ Job failed: {}\n", e), + } + + sleep(Duration::from_secs(1)).await; + + // Job 4: Access Control Test (should fail) + println!("🔒 Job 4: Testing Access Control (expected to fail)..."); + let job4 = JobBuilder::new() + .caller_id("openrpc_client") + .context_id("osiris_demo") + .payload(&access_denied_script) + .runner("osiris_runner") + .executor("rhai") + .timeout(30) + .signature("alice", "") + 
.signature("bob", "") + .signature("charlie", "") + .build()?; + + let job4_result = client.run_job("user_secret", job4).await; + + match job4_result { + Ok(result) => println!("❌ Unexpected success: {:?}\n", result), + Err(e) => println!("✅ Access denied as expected: {}\n", e), + } + + // ======================================================================== + // STEP 8: Check runner status + // ======================================================================== + println!("\nStep 8: Checking runner status"); + println!("─────────────────────────────────────────────────────────────\n"); + + let status = client.get_runner_status("admin_secret", "osiris_runner").await?; + println!("Runner status: {:?}\n", status); + + // ======================================================================== + // STEP 9: Keep services running for manual testing + // ======================================================================== + println!("\nStep 9: Services Running"); + println!("─────────────────────────────────────────────────────────────\n"); + println!("🌐 Admin UI: http://127.0.0.1:8080"); + println!("📡 OpenRPC API: http://127.0.0.1:3030"); + println!("\n⏸️ Press Ctrl+C to stop all services...\n"); + + // Wait for Ctrl+C + tokio::signal::ctrl_c().await?; + + // ======================================================================== + // STEP 10: Cleanup + // ======================================================================== + println!("\n\nStep 10: Cleanup"); + println!("─────────────────────────────────────────────────────────────\n"); + + client.stop_runner("admin_secret", "osiris_runner", false).await?; + println!("✅ Runner stopped"); + + client.remove_runner("admin_secret", "osiris_runner").await?; + println!("✅ Runner removed"); + + admin_ui.kill()?; + println!("✅ Admin UI stopped"); + + supervisor.kill()?; + println!("✅ Supervisor stopped"); + + println!("\n✨ Example completed successfully!"); + + Ok(()) +} diff --git a/bin/supervisor/examples/osiris_openrpc/note.rhai b/bin/supervisor/examples/osiris_openrpc/note.rhai new file mode 100644 index 0000000..7bc74b1 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/note.rhai @@ -0,0 +1,20 @@ +print("Creating context for [alice, bob]..."); +let ctx = get_context(["alice", "bob"]); +print("✓ Context ID: " + ctx.context_id()); + +print("\nCreating note..."); +let note = note("notes") + .title("Sprint Planning Meeting") + .content("Discussed Q1 2025 roadmap and milestones") + .tag("sprint", "2025-Q1") + .tag("team", "engineering") + .tag("priority", "high") + .mime("text/markdown"); + +print("✓ Note created"); + +print("\nStoring note in context..."); +ctx.save(note); +print("✓ Note stored"); + +"Note 'Sprint Planning Meeting' created and stored successfully" diff --git a/bin/supervisor/examples/osiris_openrpc/query.rhai b/bin/supervisor/examples/osiris_openrpc/query.rhai new file mode 100644 index 0000000..97ff892 --- /dev/null +++ b/bin/supervisor/examples/osiris_openrpc/query.rhai @@ -0,0 +1,21 @@ +print("Querying context [alice, bob]..."); +let ctx = get_context(["alice", "bob"]); +print("✓ Context ID: " + ctx.context_id()); + +print("\nListing all notes..."); +let notes = ctx.list("notes"); +print("✓ Found " + notes.len() + " note(s)"); + +print("\nRetrieving specific note..."); +let note = ctx.get("notes", "sprint_planning_001"); +print("✓ Retrieved note: sprint_planning_001"); + +print("\nQuerying context [alice, bob, charlie]..."); +let ctx2 = get_context(["alice", "bob", "charlie"]); +print("✓ Context ID: " + 
ctx2.context_id()); + +print("\nListing all events..."); +let events = ctx2.list("events"); +print("✓ Found " + events.len() + " event(s)"); + +"Query complete: Found " + notes.len() + " notes and " + events.len() + " events" diff --git a/bin/supervisor/scripts/build.sh b/bin/supervisor/scripts/build.sh new file mode 100755 index 0000000..599b69c --- /dev/null +++ b/bin/supervisor/scripts/build.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd) + +# Spinner function +spinner() { + local pid=$1 + local delay=0.1 + local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' + while ps -p $pid > /dev/null 2>&1; do + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done + printf " \b\b\b\b" +} + +echo "Building Hero Supervisor Workspace" +echo "" + +# Build core and client +printf "📦 Core & Client... " +cd "$PROJECT_DIR" +if RUSTFLAGS="-A warnings" cargo build --release --workspace > /tmp/supervisor-build-core.log 2>&1 & spinner $!; wait $!; then + echo "✅" +else + echo "❌" + echo " Error: Build failed. Run 'cd $PROJECT_DIR && cargo build --release --workspace' for details" + exit 1 +fi + +# # Build UI +# printf "📦 UI (WASM)... " +# cd "$PROJECT_DIR/ui" + +# if ! command -v trunk &> /dev/null; then +# echo "⚠️ (trunk not installed)" +# echo " Install with: cargo install trunk" +# else +# if trunk build --release > /tmp/supervisor-build-ui.log 2>&1 & spinner $!; wait $!; then +# echo "✅" +# else +# echo "❌" +# echo " Error: Build failed. Run 'cd $PROJECT_DIR/ui && trunk build --release' for details" +# exit 1 +# fi +# fi + +echo "" +echo "✅ All builds completed" \ No newline at end of file diff --git a/bin/supervisor/scripts/environment.sh b/bin/supervisor/scripts/environment.sh new file mode 100755 index 0000000..79afc6f --- /dev/null +++ b/bin/supervisor/scripts/environment.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Load environment variables from .env file + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd) +ENV_FILE="$PROJECT_DIR/.env" + +if [ -f "$ENV_FILE" ]; then + # Export variables from .env file + set -a + source "$ENV_FILE" + set +a + echo "✅ Loaded environment from .env" +else + echo "⚠️ No .env file found at $ENV_FILE" + echo " Copy .env.example to .env and configure your settings" + exit 1 +fi diff --git a/bin/supervisor/scripts/generate_secret.sh b/bin/supervisor/scripts/generate_secret.sh new file mode 100755 index 0000000..721990b --- /dev/null +++ b/bin/supervisor/scripts/generate_secret.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Generate a supervisor secret key in the correct format + +# Generate a random 32-byte hex string +SECRET=$(openssl rand -hex 32) + +echo "Generated supervisor secret:" +echo "$SECRET" +echo "" +echo "Add this to your .env file:" +echo "SUPERVISOR_ADMIN_SECRET=$SECRET" diff --git a/bin/supervisor/scripts/install.sh b/bin/supervisor/scripts/install.sh new file mode 100755 index 0000000..a78cf9f --- /dev/null +++ b/bin/supervisor/scripts/install.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +ROOT_DIR=$(cd "$SCRIPT_DIR/.." 
&& pwd) + +pushd "$ROOT_DIR" +cargo update \ No newline at end of file diff --git a/bin/supervisor/scripts/release.sh b/bin/supervisor/scripts/release.sh new file mode 100755 index 0000000..e7daa4d --- /dev/null +++ b/bin/supervisor/scripts/release.sh @@ -0,0 +1,161 @@ +#!/bin/bash +# release.sh - Build optimized WASM and serve with Caddy + Brotli compression +set -e + +############################################################################### +# Freezone Portal Release Script +# - Builds the WASM app with trunk in release mode +# - Optionally optimizes .wasm with wasm-opt (-Oz, strip) +# - Precompresses assets with gzip and brotli for efficient static serving +# - Generates a manifest (manifest.json) with sizes and SHA-256 checksums +# +# Usage: +# ./release.sh [--outdir dist] [--no-opt] [--compress] [--no-manifest] +# [--trunk-args "--public-url /portal/"] +# +# Notes: +# - Precompression is OFF by default; enable with --compress +# - Only modifies files within the output directory (default: dist) +# - Non-destructive to your source tree +############################################################################### + +set -u + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd) +BUILD_SCRIPT="$SCRIPT_DIR/build.sh" + +# Defaults +OUTDIR="dist" +DO_OPT=1 +DO_COMPRESS=0 +DO_MANIFEST=1 +TRUNK_ARGS="" + +usage() { + cat < Output directory (default: dist) + --no-opt Skip wasm-opt optimization + --compress Enable gzip/brotli precompression + --no-manifest Skip manifest generation + --trunk-args "..." Extra arguments forwarded to trunk build + -h, --help Show this help + +Examples: + $(basename "$0") --outdir dist --trunk-args "--public-url /" + $(basename "$0") --no-opt --no-compress +EOF +} + +# Parse args +while [[ $# -gt 0 ]]; do + case "$1" in + --outdir) + OUTDIR="$2"; shift 2;; + --no-opt) + DO_OPT=0; shift;; + --compress) + DO_COMPRESS=1; shift;; + --no-manifest) + DO_MANIFEST=0; shift;; + --trunk-args) + TRUNK_ARGS="$2"; shift 2;; + -h|--help) + usage; exit 0;; + *) + echo "❌ Unknown option: $1"; echo; usage; exit 1;; + esac +done + +# Tool checks +if [[ ! -x "$BUILD_SCRIPT" ]]; then + echo "❌ build.sh not found or not executable at: $BUILD_SCRIPT" + echo " Ensure portal/scripts/build.sh exists and is chmod +x." + exit 1 +fi +if ! command -v trunk >/dev/null 2>&1; then + echo "❌ trunk not found. Install with: cargo install trunk"; exit 1; +fi + +HAS_WASM_OPT=0 +if command -v wasm-opt >/dev/null 2>&1; then HAS_WASM_OPT=1; fi +if [[ $DO_OPT -eq 1 && $HAS_WASM_OPT -eq 0 ]]; then + echo "⚠️ wasm-opt not found. Skipping WASM optimization." + DO_OPT=0 +fi + +if [[ $DO_COMPRESS -eq 1 ]]; then + if ! command -v gzip >/dev/null 2>&1; then + echo "⚠️ gzip not found. Skipping gzip compression."; GZIP_OK=0; else GZIP_OK=1; fi + if ! command -v brotli >/dev/null 2>&1; then + echo "⚠️ brotli not found. Skipping brotli compression."; BR_OK=0; else BR_OK=1; fi +else + GZIP_OK=0; BR_OK=0 +fi + +echo "🔧 Building optimized WASM bundle (via build.sh)..." +set -x +"$BUILD_SCRIPT" --release --outdir "$OUTDIR" ${TRUNK_ARGS:+--trunk-args "$TRUNK_ARGS"} +set +x + +DIST_DIR="$PROJECT_DIR/$OUTDIR" +if [[ ! -d "$DIST_DIR" ]]; then + echo "❌ Build failed: output directory not found: $DIST_DIR"; exit 1; +fi + +# Optimize .wasm files +if [[ $DO_OPT -eq 1 && $HAS_WASM_OPT -eq 1 ]]; then + echo "🛠️ Optimizing WASM with wasm-opt (-Oz, strip)..." 
+ while IFS= read -r -d '' wasm; do + echo " • $(basename "$wasm")" + tmp="$wasm.opt" + wasm-opt -Oz --strip-dwarf "$wasm" -o "$tmp" + mv "$tmp" "$wasm" + done < <(find "$DIST_DIR" -type f -name "*.wasm" -print0) +fi + +# Precompress assets +if [[ $DO_COMPRESS -eq 1 ]]; then + echo "🗜️ Precompressing assets (gzip/brotli)..." + while IFS= read -r -d '' f; do + if [[ $GZIP_OK -eq 1 ]]; then + gzip -kf9 "$f" + fi + if [[ $BR_OK -eq 1 ]]; then + brotli -f -q 11 "$f" + fi + done < <(find "$DIST_DIR" -type f \( -name "*.wasm" -o -name "*.js" -o -name "*.css" \) -print0) +fi + +# Manifest with sizes and SHA-256 +if [[ $DO_MANIFEST -eq 1 ]]; then + echo "🧾 Generating manifest.json (sizes, sha256)..." + manifest="$DIST_DIR/manifest.json" + echo "{" > "$manifest" + first=1 + while IFS= read -r -d '' f; do + rel="${f#"$DIST_DIR/"}" + size=$(stat -f%z "$f" 2>/dev/null || stat -c%s "$f") + if command -v shasum >/dev/null 2>&1; then + hash=$(shasum -a 256 "$f" | awk '{print $1}') + else + hash=$(openssl dgst -sha256 -r "$f" | awk '{print $1}') + fi + [[ $first -eq 1 ]] || echo "," >> "$manifest" + first=0 + printf " \"%s\": { \"bytes\": %s, \"sha256\": \"%s\" }" "$rel" "$size" "$hash" >> "$manifest" + done < <(find "$DIST_DIR" -type f ! -name "manifest.json" -print0 | sort -z) + echo "\n}" >> "$manifest" +fi + +echo "📦 Checking bundle sizes ($OUTDIR)..." +if [ -d "$OUTDIR" ]; then + echo "Bundle sizes:" + find "$OUTDIR" -name "*.wasm" -exec ls -lh {} \; | awk '{print " WASM: " $5 " - " $9}' + find "$OUTDIR" -name "*.js" -exec ls -lh {} \; | awk '{print " JS: " $5 " - " $9}' + find "$OUTDIR" -name "*.css" -exec ls -lh {} \; | awk '{print " CSS: " $5 " - " $9}' + echo "" +fi diff --git a/bin/supervisor/scripts/run.sh b/bin/supervisor/scripts/run.sh new file mode 100755 index 0000000..1298cad --- /dev/null +++ b/bin/supervisor/scripts/run.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd) + +# Load environment variables +source "$SCRIPT_DIR/environment.sh" + +# Build first +echo "🔨 Building supervisor..." +"$SCRIPT_DIR/build.sh" + +# Validate required environment variables +if [ -z "$ADMIN_SECRETS" ]; then + echo "❌ Error: ADMIN_SECRETS not set in .env" + echo " Generate a secret with: ./scripts/generate_secret.sh" + exit 1 +fi + +# Set defaults from env vars +REDIS_URL="${REDIS_URL:-redis://127.0.0.1:6379}" +PORT="${PORT:-3030}" +BIND_ADDRESS="${BIND_ADDRESS:-127.0.0.1}" +LOG_LEVEL="${LOG_LEVEL:-info}" + +cd "$PROJECT_DIR" + +# Build command with flags from env vars +SUPERVISOR_CMD="target/release/supervisor --redis-url $REDIS_URL --port $PORT --bind-address $BIND_ADDRESS" + +# Add admin secrets +IFS=',' read -ra SECRETS <<< "$ADMIN_SECRETS" +for secret in "${SECRETS[@]}"; do + SUPERVISOR_CMD="$SUPERVISOR_CMD --admin-secret $secret" +done + +# Add user secrets if provided +if [ ! -z "$USER_SECRETS" ]; then + IFS=',' read -ra SECRETS <<< "$USER_SECRETS" + for secret in "${SECRETS[@]}"; do + SUPERVISOR_CMD="$SUPERVISOR_CMD --user-secret $secret" + done +fi + +# Add register secrets if provided +if [ ! -z "$REGISTER_SECRETS" ]; then + IFS=',' read -ra SECRETS <<< "$REGISTER_SECRETS" + for secret in "${SECRETS[@]}"; do + SUPERVISOR_CMD="$SUPERVISOR_CMD --register-secret $secret" + done +fi + +# Add mycelium URL if provided +if [ ! -z "$MYCELIUM_URL" ]; then + SUPERVISOR_CMD="$SUPERVISOR_CMD --mycelium-url $MYCELIUM_URL" +fi + +# Add runners if provided +if [ ! 
-z "$RUNNERS" ]; then + SUPERVISOR_CMD="$SUPERVISOR_CMD --runners $RUNNERS" +fi + +echo "" +echo "🚀 Starting Hero Supervisor" +echo " Redis: $REDIS_URL" +echo " Port: $PORT" +echo " Log Level: $LOG_LEVEL" +echo "" + +# Run supervisor directly with output visible +exec env RUST_LOG="$LOG_LEVEL" RUST_LOG_STYLE=never $SUPERVISOR_CMD \ No newline at end of file diff --git a/bin/supervisor/scripts/test.sh b/bin/supervisor/scripts/test.sh new file mode 100755 index 0000000..35e5ecc --- /dev/null +++ b/bin/supervisor/scripts/test.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd) + +# Spinner function +spinner() { + local pid=$1 + local delay=0.1 + local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' + while ps -p $pid > /dev/null 2>&1; do + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done + printf " \b\b\b\b" +} + +echo "Testing Hero Supervisor Workspace" +echo "" + +# Test core and client +printf "🧪 Core & Client... " +cd "$PROJECT_DIR" +if cargo test --workspace > /tmp/supervisor-test-core.log 2>&1 & spinner $!; wait $!; then + echo "✅" +else + echo "❌" + echo " Error: Tests failed. Run 'cd $PROJECT_DIR && cargo test --workspace' for details" + exit 1 +fi + +# Test UI +printf "🧪 UI (WASM)... " +cd "$PROJECT_DIR/ui" + +if ! command -v wasm-pack &> /dev/null; then + echo "⚠️ (wasm-pack not installed)" + echo " Install with: cargo install wasm-pack" +else + if wasm-pack test --headless --firefox > /tmp/supervisor-test-ui.log 2>&1 & spinner $!; wait $!; then + echo "✅" + else + echo "❌" + echo " Error: Tests failed. Run 'cd $PROJECT_DIR/ui && wasm-pack test --headless --firefox' for details" + exit 1 + fi +fi + +echo "" +echo "✅ All tests completed" \ No newline at end of file diff --git a/bin/supervisor/src/auth.rs b/bin/supervisor/src/auth.rs new file mode 100644 index 0000000..ec645cb --- /dev/null +++ b/bin/supervisor/src/auth.rs @@ -0,0 +1,111 @@ +//! 
Authentication and API key management
+
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use uuid::Uuid;
+
+/// API key scope/permission level
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum ApiKeyScope {
+    /// Full access - can manage keys, runners, jobs
+    Admin,
+    /// Can register new runners
+    Registrar,
+    /// Can create and manage jobs
+    User,
+}
+
+impl ApiKeyScope {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            ApiKeyScope::Admin => "admin",
+            ApiKeyScope::Registrar => "registrar",
+            ApiKeyScope::User => "user",
+        }
+    }
+}
+
+/// An API key with metadata
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ApiKey {
+    /// The actual key value (UUID or custom string)
+    pub key: String,
+    /// Human-readable name for the key
+    pub name: String,
+    /// Permission scope
+    pub scope: ApiKeyScope,
+    /// When the key was created
+    pub created_at: String,
+    /// Optional expiration timestamp
+    pub expires_at: Option<String>,
+}
+
+impl ApiKey {
+    /// Create a new API key with a generated UUID
+    pub fn new(name: String, scope: ApiKeyScope) -> Self {
+        Self {
+            key: Uuid::new_v4().to_string(),
+            name,
+            scope,
+            created_at: chrono::Utc::now().to_rfc3339(),
+            expires_at: None,
+        }
+    }
+
+    /// Create a new API key with a specific key value
+    pub fn with_key(key: String, name: String, scope: ApiKeyScope) -> Self {
+        Self {
+            key,
+            name,
+            scope,
+            created_at: chrono::Utc::now().to_rfc3339(),
+            expires_at: None,
+        }
+    }
+}
+
+/// Response for auth verification
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthVerifyResponse {
+    pub valid: bool,
+    pub name: String,
+    pub scope: String,
+}
+
+/// Method authorization requirements
+/// Maps RPC method names to required scopes
+pub fn get_method_required_scopes(method: &str) -> Option<Vec<ApiKeyScope>> {
+    use ApiKeyScope::*;
+
+    match method {
+        // Admin-only methods
+        "key.create" | "key.generate" | "key.delete" | "key.list" |
+        "supervisor.info" => {
+            Some(vec![Admin])
+        }
+
+        // Admin or Registrar methods
+        "runner.create" | "runner.remove" => {
+            Some(vec![Admin, Registrar])
+        }
+
+        // Admin or User methods
+        "job.create" | "job.run" | "job.start" | "job.stop" | "job.delete" => {
+            Some(vec![Admin, User])
+        }
+
+        // Public methods (no auth required)
+        "rpc.discover" => None,
+
+        // Any authenticated user (read-only operations)
+        "runner.list" | "runner.ping" |
+        "job.get" | "job.list" | "job.status" | "job.result" | "job.logs" |
+        "auth.verify" => {
+            Some(vec![Admin, Registrar, User])
+        }
+
+        // Default: require authentication
+        _ => Some(vec![Admin, Registrar, User]),
+    }
+}
diff --git a/bin/supervisor/src/bin/supervisor.rs b/bin/supervisor/src/bin/supervisor.rs
new file mode 100644
index 0000000..8b79efc
--- /dev/null
+++ b/bin/supervisor/src/bin/supervisor.rs
@@ -0,0 +1,112 @@
+//! 
Hero Supervisor Binary
+
+use hero_supervisor::SupervisorBuilder;
+use clap::Parser;
+use log::{error, info};
+use std::sync::Arc;
+use tokio::sync::Mutex;
+
+/// Hero Supervisor - manages actors and dispatches jobs
+#[derive(Parser, Debug)]
+#[command(name = "supervisor")]
+#[command(about = "Hero Supervisor - manages actors and dispatches jobs")]
+struct Args {
+    /// Redis URL for job queue
+    #[arg(long, default_value = "redis://127.0.0.1:6379")]
+    redis_url: String,
+
+    /// Namespace for Redis keys
+    #[arg(long, default_value = "")]
+    namespace: String,
+
+    /// Admin secrets (required, can be specified multiple times)
+    #[arg(long = "admin-secret", value_name = "SECRET", required = true)]
+    admin_secrets: Vec<String>,
+
+    /// User secrets (can be specified multiple times)
+    #[arg(long = "user-secret", value_name = "SECRET")]
+    user_secrets: Vec<String>,
+
+    /// Register secrets (can be specified multiple times)
+    #[arg(long = "register-secret", value_name = "SECRET")]
+    register_secrets: Vec<String>,
+
+    /// Port for OpenRPC HTTP server
+    #[arg(long, default_value = "3030")]
+    port: u16,
+
+    /// Bind address for OpenRPC HTTP server
+    #[arg(long, default_value = "127.0.0.1")]
+    bind_address: String,
+
+    /// Pre-configured runner names (comma-separated)
+    #[arg(long, value_name = "NAMES", value_delimiter = ',')]
+    runners: Vec<String>,
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    env_logger::init();
+    let args = Args::parse();
+
+    // Build supervisor
+    let mut builder = SupervisorBuilder::new()
+        .admin_secrets(args.admin_secrets);
+
+    if !args.user_secrets.is_empty() {
+        builder = builder.user_secrets(args.user_secrets);
+    }
+
+    if !args.register_secrets.is_empty() {
+        builder = builder.register_secrets(args.register_secrets);
+    }
+
+    let mut supervisor = builder.build().await?;
+
+    // Register pre-configured runners
+    if !args.runners.is_empty() {
+        for runner_name in &args.runners {
+            match supervisor.runner_create(runner_name.clone()).await {
+                Ok(_) => {},
+                Err(e) => error!("Failed to register runner '{}': {}", runner_name, e),
+            }
+        }
+    }
+
+    // Start OpenRPC server
+    use hero_supervisor::openrpc::start_http_openrpc_server;
+
+    let supervisor_clone = supervisor.clone();
+    let bind_addr = args.bind_address.clone();
+    let port = args.port;
+
+    tokio::spawn(async move {
+        match start_http_openrpc_server(supervisor_clone, &bind_addr, port).await {
+            Ok(handle) => {
+                handle.stopped().await;
+                error!("OpenRPC server stopped unexpectedly");
+            }
+            Err(e) => {
+                error!("OpenRPC server error: {}", e);
+            }
+        }
+    });
+
+    tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
+
+    // Print startup info
+    println!("📡 http://{}:{}", args.bind_address, args.port);
+    info!("Hero Supervisor is running. Press Ctrl+C to shutdown.");
+
+    // Set up graceful shutdown
+    tokio::spawn(async move {
+        tokio::signal::ctrl_c().await.expect("Failed to listen for ctrl+c");
+        info!("Received shutdown signal");
+        std::process::exit(0);
+    });
+
+    // Keep the application running
+    loop {
+        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+    }
+}
diff --git a/bin/supervisor/src/builder.rs b/bin/supervisor/src/builder.rs
new file mode 100644
index 0000000..83cbf33
--- /dev/null
+++ b/bin/supervisor/src/builder.rs
@@ -0,0 +1,198 @@
+//! Supervisor builder for configuration and initialization.
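+//!
+//! A minimal usage sketch (illustrative values; assumes Redis is reachable at
+//! the builder's default URL):
+//!
+//! ```no_run
+//! use hero_supervisor::SupervisorBuilder;
+//!
+//! # async fn example() -> hero_supervisor::SupervisorResult<()> {
+//! let supervisor = SupervisorBuilder::new()
+//!     .add_admin_secret("admin_secret")
+//!     .add_user_secret("user_secret")
+//!     .add_runner("example_runner".to_string())
+//!     .build()
+//!     .await?;
+//! # Ok(())
+//! # }
+//! ```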
+
+use crate::error::{SupervisorError, SupervisorResult};
+use crate::Supervisor;
+use hero_job_client::ClientBuilder;
+
+/// Builder for constructing a Supervisor instance
+pub struct SupervisorBuilder {
+    /// Set of registered runner IDs
+    runners: std::collections::HashSet<String>,
+    /// Redis URL for connection
+    redis_url: String,
+    /// Admin secrets for bootstrapping API keys
+    admin_secrets: Vec<String>,
+    /// User secrets for bootstrapping API keys
+    user_secrets: Vec<String>,
+    /// Register secrets for bootstrapping API keys
+    register_secrets: Vec<String>,
+    client_builder: ClientBuilder,
+    /// Osiris URL for queries (optional)
+    osiris_url: Option<String>,
+    /// Supervisor URL for commands via Osiris (optional)
+    supervisor_url: Option<String>,
+    /// Supervisor secret for Osiris commands (optional)
+    supervisor_secret: Option<String>,
+    /// Runner name for Osiris operations (optional)
+    osiris_runner_name: Option<String>,
+}
+
+impl SupervisorBuilder {
+    /// Create a new supervisor builder
+    pub fn new() -> Self {
+        Self {
+            runners: std::collections::HashSet::new(),
+            redis_url: "redis://localhost:6379".to_string(),
+            admin_secrets: Vec::new(),
+            user_secrets: Vec::new(),
+            register_secrets: Vec::new(),
+            client_builder: ClientBuilder::new(),
+            osiris_url: None,
+            supervisor_url: None,
+            supervisor_secret: None,
+            osiris_runner_name: None,
+        }
+    }
+
+    /// Set the Osiris URL for queries
+    pub fn osiris_url<S: Into<String>>(mut self, url: S) -> Self {
+        self.osiris_url = Some(url.into());
+        self
+    }
+
+    /// Set the Supervisor URL for Osiris commands
+    pub fn supervisor_url_for_osiris<S: Into<String>>(mut self, url: S) -> Self {
+        self.supervisor_url = Some(url.into());
+        self
+    }
+
+    /// Set the Supervisor secret for Osiris commands
+    pub fn supervisor_secret<S: Into<String>>(mut self, secret: S) -> Self {
+        self.supervisor_secret = Some(secret.into());
+        self
+    }
+
+    /// Set the runner name for Osiris operations
+    pub fn osiris_runner_name<S: Into<String>>(mut self, name: S) -> Self {
+        self.osiris_runner_name = Some(name.into());
+        self
+    }
+
+    /// Add an admin secret
+    pub fn add_admin_secret<S: Into<String>>(mut self, secret: S) -> Self {
+        self.admin_secrets.push(secret.into());
+        self
+    }
+
+    /// Add multiple admin secrets
+    pub fn admin_secrets<I, S>(mut self, secrets: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<String>,
+    {
+        self.admin_secrets.extend(secrets.into_iter().map(|s| s.into()));
+        self
+    }
+
+    /// Add a user secret
+    pub fn add_user_secret<S: Into<String>>(mut self, secret: S) -> Self {
+        self.user_secrets.push(secret.into());
+        self
+    }
+
+    /// Add multiple user secrets
+    pub fn user_secrets<I, S>(mut self, secrets: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<String>,
+    {
+        self.user_secrets.extend(secrets.into_iter().map(|s| s.into()));
+        self
+    }
+
+    /// Add a register secret
+    pub fn add_register_secret<S: Into<String>>(mut self, secret: S) -> Self {
+        self.register_secrets.push(secret.into());
+        self
+    }
+
+    /// Add multiple register secrets
+    pub fn register_secrets<I, S>(mut self, secrets: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<String>,
+    {
+        self.register_secrets.extend(secrets.into_iter().map(|s| s.into()));
+        self
+    }
+
+    /// Add a runner to the supervisor
+    pub fn add_runner(mut self, runner_id: String) -> Self {
+        self.runners.insert(runner_id);
+        self
+    }
+
+    /// Build the supervisor
+    pub async fn build(self) -> SupervisorResult<Supervisor> {
+        // Create Redis client
+        let redis_client = redis::Client::open(self.redis_url.as_str())
+            .map_err(|e| SupervisorError::ConfigError {
+                reason: format!("Invalid Redis URL: {}", e),
+            })?;
+
+        // Create the store
+        let mut store = crate::store::Store::new();
+
+        // Add admin secrets as API keys
+        for secret in 
&self.admin_secrets { + store.key_create( + crate::auth::ApiKey::new(secret.clone(), crate::auth::ApiKeyScope::Admin), + ); + } + + // Add user secrets as API keys + for secret in &self.user_secrets { + store.key_create( + crate::auth::ApiKey::new(secret.clone(), crate::auth::ApiKeyScope::User), + ); + } + + // Add register secrets as API keys + for secret in &self.register_secrets { + store.key_create( + crate::auth::ApiKey::new(secret.clone(), crate::auth::ApiKeyScope::Registrar), + ); + } + + // Build the client + let client = self.client_builder.build().await?; + + // Build Osiris client if configured + // Temporarily disabled - needs update + // let osiris_client = if let (Some(osiris_url), Some(supervisor_url)) = + // (self.osiris_url, self.supervisor_url) { + // let mut builder = osiris_client::OsirisClient::builder() + // .osiris_url(osiris_url) + // .supervisor_url(supervisor_url) + // .runner_name(self.osiris_runner_name.unwrap_or_else(|| "osiris-runner".to_string())); + // + // if let Some(secret) = self.supervisor_secret { + // builder = builder.supervisor_secret(secret); + // } + // + // Some(builder.build().map_err(|e| SupervisorError::ConfigError { + // reason: format!("Failed to build Osiris client: {}", e), + // })?) + // } else { + // None + // }; + + // Add pre-configured runners to the store + for runner_id in self.runners { + let _ = store.runner_add(runner_id); + } + + Ok(Supervisor { + store: std::sync::Arc::new(tokio::sync::Mutex::new(store)), + job_client: client, + redis_client, + // osiris_client, // Temporarily disabled + }) + } +} + +impl Default for SupervisorBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/bin/supervisor/src/error.rs b/bin/supervisor/src/error.rs new file mode 100644 index 0000000..5d299fb --- /dev/null +++ b/bin/supervisor/src/error.rs @@ -0,0 +1,73 @@ +//! Error types for supervisor operations. 
+
+use thiserror::Error;
+use jsonrpsee::types::{ErrorObject, ErrorObjectOwned};
+
+/// Result type for supervisor operations
+pub type SupervisorResult<T> = Result<T, SupervisorError>;
+
+/// Errors that can occur during supervisor operations
+#[derive(Debug, Error)]
+pub enum SupervisorError {
+    #[error("Runner '{runner_id}' not found")]
+    RunnerNotFound { runner_id: String },
+
+    #[error("Runner '{runner_id}' is already registered")]
+    RunnerAlreadyRegistered { runner_id: String },
+
+    #[error("Job '{job_id}' not found")]
+    JobNotFound { job_id: String },
+
+    #[error("Failed to queue job for runner '{runner_id}': {reason}")]
+    QueueError { runner_id: String, reason: String },
+
+    #[error("Configuration error: {reason}")]
+    ConfigError { reason: String },
+
+    #[error("Invalid secret or API key: {0}")]
+    InvalidSecret(String),
+
+    #[error("Authentication error: {message}")]
+    AuthenticationError { message: String },
+
+    #[error("Insufficient permissions: {message}")]
+    PermissionDenied { message: String },
+
+    #[error("Redis error: {source}")]
+    RedisError {
+        #[from]
+        source: redis::RedisError,
+    },
+
+    #[error("Job error: {source}")]
+    JobError {
+        #[from]
+        source: hero_job::JobError,
+    },
+
+    #[error("Job client error: {source}")]
+    JobClientError {
+        #[from]
+        source: hero_job_client::ClientError,
+    },
+
+    #[error("IO error: {source}")]
+    IoError {
+        #[from]
+        source: std::io::Error,
+    },
+
+    #[error("Osiris client error: {0}")]
+    OsirisError(String),
+}
+
+/// Implement conversion from SupervisorError → RPC ErrorObject
+impl From<SupervisorError> for ErrorObject<'static> {
+    fn from(err: SupervisorError) -> Self {
+        ErrorObject::owned(
+            -32603, // Internal error code
+            format!("Supervisor error: {err}"),
+            None::<()>,
+        )
+    }
+}
diff --git a/bin/supervisor/src/lib.rs b/bin/supervisor/src/lib.rs
new file mode 100644
index 0000000..684d358
--- /dev/null
+++ b/bin/supervisor/src/lib.rs
@@ -0,0 +1,19 @@
+//! Hero Supervisor - Actor management for the Hero ecosystem.
+//!
+//! See README.md for detailed documentation and usage examples.
+
+pub mod supervisor;
+pub mod builder;
+pub mod error;
+pub mod openrpc;
+pub mod auth;
+pub mod store;
+
+// Re-export job client for convenience
+pub use hero_job_client as job_client;
+
+// Re-export main types for convenience
+pub use supervisor::Supervisor;
+pub use builder::SupervisorBuilder;
+pub use error::{SupervisorError, SupervisorResult};
+pub use hero_job::{Job, JobBuilder, JobStatus, JobError};
diff --git a/bin/supervisor/src/openrpc.rs b/bin/supervisor/src/openrpc.rs
new file mode 100644
index 0000000..8c43bc6
--- /dev/null
+++ b/bin/supervisor/src/openrpc.rs
@@ -0,0 +1,474 @@
+//! OpenRPC server implementation.
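+//!
+//! For a quick smoke test: `rpc.discover` requires no API key (see
+//! `auth::get_method_required_scopes`), so a running server can be probed with
+//! plain JSON-RPC over HTTP. The address and port below are the supervisor
+//! binary's defaults:
+//!
+//! ```bash
+//! curl -s http://127.0.0.1:3030 \
+//!   -H 'Content-Type: application/json' \
+//!   -d '{"jsonrpc":"2.0","id":1,"method":"rpc.discover","params":[]}'
+//! ```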
+ +use jsonrpsee::{ + core::{RpcResult, async_trait}, + server::middleware::rpc::{RpcServiceT, RpcServiceBuilder, MethodResponse}, + proc_macros::rpc, + server::{Server, ServerHandle}, + types::{ErrorObject, ErrorObjectOwned}, +}; +use tower_http::cors::{CorsLayer, Any}; + +use anyhow; +use log::{debug, info, error}; + +use crate::{auth::ApiKey, supervisor::Supervisor}; +use crate::error::SupervisorError; +use hero_job::{Job, JobResult, JobStatus}; +use serde::{Deserialize, Serialize}; + +use std::net::SocketAddr; +use std::sync::Arc; +use std::fs; +use tokio::sync::Mutex; + +/// Load OpenRPC specification from docs/openrpc.json +fn load_openrpc_spec() -> Result> { + let path = "../../docs/openrpc.json"; + let content = fs::read_to_string(path)?; + let spec = serde_json::from_str(&content)?; + debug!("Loaded OpenRPC specification from: {}", path); + Ok(spec) +} + +/// Request parameters for generating API keys (auto-generates key value) +#[derive(Debug, Deserialize, Serialize)] +pub struct GenerateApiKeyParams { + pub name: String, + pub scope: String, // "admin", "registrar", or "user" +} + +/// Job status response with metadata +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobStatusResponse { + pub job_id: String, + pub status: String, + pub created_at: String, +} + +/// Supervisor information response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SupervisorInfo { + pub server_url: String, +} + +/// OpenRPC trait - maps directly to Supervisor methods +/// This trait exists only for jsonrpsee's macro system. +/// The implementation below is just error type conversion - +/// all actual logic lives in Supervisor methods. +#[rpc(server)] +pub trait SupervisorRpc { + /// Create a job without queuing it to a runner + #[method(name = "job.create")] + async fn job_create(&self, params: Job) -> RpcResult; + + /// Get a job by job ID + #[method(name = "job.get")] + async fn job_get(&self, job_id: String) -> RpcResult; + + /// Start a previously created job by queuing it to its assigned runner + #[method(name = "job.start")] + async fn job_start(&self, job_id: String) -> RpcResult<()>; + + /// Run a job on the appropriate runner and return the result + #[method(name = "job.run")] + async fn job_run(&self, params: Job) -> RpcResult; + + /// Get the current status of a job + #[method(name = "job.status")] + async fn job_status(&self, job_id: String) -> RpcResult; + + /// Get the result of a completed job (blocks until result is available) + #[method(name = "job.result")] + async fn job_result(&self, job_id: String) -> RpcResult; + + /// Get logs for a specific job + #[method(name = "job.logs")] + async fn job_logs(&self, job_id: String) -> RpcResult>; + + /// Stop a running job + #[method(name = "job.stop")] + async fn job_stop(&self, job_id: String) -> RpcResult<()>; + + /// Delete a job from the system + #[method(name = "job.delete")] + async fn job_delete(&self, job_id: String) -> RpcResult<()>; + + /// List all jobs + #[method(name = "job.list")] + async fn job_list(&self) -> RpcResult>; + + /// Add a runner with configuration + #[method(name = "runner.create")] + async fn runner_create(&self, runner_id: String) -> RpcResult<()>; + + /// Delete a runner from the supervisor + #[method(name = "runner.remove")] + async fn runner_delete(&self, runner_id: String) -> RpcResult<()>; + + /// List all runner IDs + #[method(name = "runner.list")] + async fn runner_list(&self) -> RpcResult>; + + /// Ping a runner (dispatch a ping job) + #[method(name = "runner.ping")] + async 
fn ping_runner(&self, runner_id: String) -> RpcResult; + + /// Create an API key with provided key value + #[method(name = "key.create")] + async fn key_create(&self, key: ApiKey) -> RpcResult<()>; + + /// Generate a new API key with auto-generated key value + #[method(name = "key.generate")] + async fn key_generate(&self, params: GenerateApiKeyParams) -> RpcResult; + + /// Delete an API key + #[method(name = "key.delete")] + async fn key_delete(&self, key_id: String) -> RpcResult<()>; + + /// List all secrets (returns counts only for security) + #[method(name = "key.list")] + async fn key_list(&self) -> RpcResult>; + + /// Verify an API key and return its metadata + #[method(name = "auth.verify")] + async fn auth_verify(&self) -> RpcResult; + + /// Get supervisor information + #[method(name = "supervisor.info")] + async fn supervisor_info(&self) -> RpcResult; + + /// OpenRPC discovery method - returns the OpenRPC document describing this API + #[method(name = "rpc.discover")] + async fn rpc_discover(&self) -> RpcResult; +} + +/// RPC implementation on Supervisor +/// +/// This implementation is ONLY for error type conversion (SupervisorError → ErrorObject). +/// All business logic is in Supervisor methods - these are thin wrappers. +/// Authorization is handled by middleware before methods are called. +#[async_trait] +impl SupervisorRpcServer for Supervisor { + async fn job_create(&self, job: Job) -> RpcResult { + Ok(self.job_create(job).await?) + } + + async fn job_get(&self, job_id: String) -> RpcResult { + Ok(self.job_get(&job_id).await?) + } + + async fn job_list(&self) -> RpcResult> { + let job_ids = self.job_list().await; + let mut jobs = Vec::new(); + for job_id in job_ids { + if let Ok(job) = self.job_get(&job_id).await { + jobs.push(job); + } + } + Ok(jobs) + } + + async fn job_run(&self, job: Job) -> RpcResult { + let output = self.job_run(job).await?; + Ok(JobResult::Success { success: output }) + } + + async fn job_start(&self, job_id: String) -> RpcResult<()> { + self.job_start(&job_id).await?; + Ok(()) + } + + async fn job_status(&self, job_id: String) -> RpcResult { + Ok(self.job_status(&job_id).await?) + } + + async fn job_logs(&self, job_id: String) -> RpcResult> { + Ok(self.job_logs(&job_id, None).await?) + } + + async fn job_result(&self, job_id: String) -> RpcResult { + match self.job_result(&job_id).await? { + Some(result) => { + if result.starts_with("Error:") { + Ok(JobResult::Error { error: result }) + } else { + Ok(JobResult::Success { success: result }) + } + }, + None => Ok(JobResult::Error { error: "Job result not available".to_string() }) + } + } + + async fn job_stop(&self, job_id: String) -> RpcResult<()> { + self.job_stop(&job_id).await?; + Ok(()) + } + + async fn job_delete(&self, job_id: String) -> RpcResult<()> { + self.job_delete(&job_id).await?; + Ok(()) + } + + async fn runner_create(&self, runner_id: String) -> RpcResult<()> { + self.runner_create(runner_id).await?; + Ok(()) + } + + async fn runner_delete(&self, runner_id: String) -> RpcResult<()> { + Ok(self.runner_delete(&runner_id).await?) + } + + async fn runner_list(&self) -> RpcResult> { + Ok(self.runner_list().await) + } + + + async fn ping_runner(&self, runner_id: String) -> RpcResult { + Ok(self.runner_ping(&runner_id).await?) 
+ } + + async fn key_create(&self, key: ApiKey) -> RpcResult<()> { + let _ = self.key_create(key).await; + Ok(()) + } + + async fn key_generate(&self, params: GenerateApiKeyParams) -> RpcResult { + // Parse scope + let api_scope = match params.scope.to_lowercase().as_str() { + "admin" => crate::auth::ApiKeyScope::Admin, + "registrar" => crate::auth::ApiKeyScope::Registrar, + "user" => crate::auth::ApiKeyScope::User, + _ => return Err(ErrorObject::owned(-32602, "Invalid scope. Must be 'admin', 'registrar', or 'user'", None::<()>)), + }; + + let api_key = self.create_api_key(params.name, api_scope).await; + Ok(api_key) + } + + async fn key_delete(&self, key_id: String) -> RpcResult<()> { + self.key_delete(&key_id).await + .ok_or_else(|| ErrorObject::owned(-32603, "API key not found", None::<()>))?; + Ok(()) + } + + async fn key_list(&self) -> RpcResult> { + Ok(self.key_list().await) + } + + async fn auth_verify(&self) -> RpcResult { + // If this method is called, middleware already verified the key + // So we just return success - the middleware wouldn't have let an invalid key through + Ok(crate::auth::AuthVerifyResponse { + valid: true, + name: "verified".to_string(), + scope: "authenticated".to_string(), + }) + } + + async fn supervisor_info(&self) -> RpcResult { + Ok(SupervisorInfo { + server_url: "http://127.0.0.1:3031".to_string(), // TODO: get from config + }) + } + + async fn rpc_discover(&self) -> RpcResult { + debug!("OpenRPC request: rpc.discover"); + + // Read OpenRPC specification from docs/openrpc.json + match load_openrpc_spec() { + Ok(spec) => Ok(spec), + Err(e) => { + error!("Failed to load OpenRPC specification: {}", e); + // Fallback to a minimal spec if file loading fails + Ok(serde_json::json!({ + "openrpc": "1.3.2", + "info": { + "title": "Hero Supervisor OpenRPC API", + "version": "1.0.0", + "description": "OpenRPC API for managing Hero Supervisor runners and jobs" + }, + "methods": [], + "error": "Failed to load full specification" + })) + } + } + } +} + +/// Authorization middleware using RpcServiceT +/// This middleware is created per-connection and checks permissions for each RPC call +#[derive(Clone)] +struct AuthMiddleware { + supervisor: Supervisor, + inner: S, +} + +impl RpcServiceT for AuthMiddleware +where + S: RpcServiceT + Send + Sync + Clone + 'static, +{ + type MethodResponse = MethodResponse; + type BatchResponse = S::BatchResponse; + type NotificationResponse = S::NotificationResponse; + + fn call<'a>(&self, req: jsonrpsee::server::middleware::rpc::Request<'a>) -> impl std::future::Future + Send + 'a { + let supervisor = self.supervisor.clone(); + let inner = self.inner.clone(); + let method = req.method_name().to_string(); + let id = req.id(); + + Box::pin(async move { + // Check if method requires auth + let required_scopes = match crate::auth::get_method_required_scopes(&method) { + None => { + // Public method - no auth required + debug!("ℹ️ Public method: {}", method); + return inner.call(req).await; + } + Some(scopes) => scopes, + }; + + // Extract Authorization header from extensions + let headers = req.extensions().get::(); + + let api_key = headers + .and_then(|h| h.get(hyper::header::AUTHORIZATION)) + .and_then(|value| value.to_str().ok()) + .and_then(|s| s.strip_prefix("Bearer ")) + .map(|k| k.to_string()); + + let api_key = match api_key { + Some(key) => key, + None => { + error!("❌ Missing Authorization header for method: {}", method); + let err = ErrorObjectOwned::owned( + -32001, + format!("Missing Authorization header for method: {}", 
method), + None::<()>, + ); + return MethodResponse::error(id, err); + } + }; + + // Verify API key and check scope + let key_obj = match supervisor.key_get(&api_key).await { + Some(k) => k, + None => { + error!("❌ Invalid API key"); + let err = ErrorObjectOwned::owned(-32001, "Invalid API key", None::<()>); + return MethodResponse::error(id, err); + } + }; + + if !required_scopes.contains(&key_obj.scope) { + error!( + "❌ Unauthorized: method '{}' requires {:?}, got {:?}", + method, required_scopes, key_obj.scope + ); + let err = ErrorObjectOwned::owned( + -32001, + format!( + "Insufficient permissions for '{}'. Required: {:?}, Got: {:?}", + method, required_scopes, key_obj.scope + ), + None::<()>, + ); + return MethodResponse::error(id, err); + } + + debug!("✅ Authorized: {} with scope {:?}", method, key_obj.scope); + + // Authorized - proceed with the call + inner.call(req).await + }) + } + + fn batch<'a>(&self, batch: jsonrpsee::server::middleware::rpc::Batch<'a>) -> impl std::future::Future + Send + 'a { + // For simplicity, pass through batch requests + // In production, you'd want to check each request in the batch + self.inner.batch(batch) + } + + fn notification<'a>(&self, notif: jsonrpsee::server::middleware::rpc::Notification<'a>) -> impl std::future::Future + Send + 'a { + self.inner.notification(notif) + } +} + +/// HTTP middleware to propagate headers into request extensions +#[derive(Clone)] +struct HeaderPropagationService { + inner: S, +} + +impl tower::Service> for HeaderPropagationService +where + S: tower::Service> + Clone + Send + 'static, + S::Future: Send + 'static, + B: Send + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = std::pin::Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: hyper::Request) -> Self::Future { + let headers = req.headers().clone(); + req.extensions_mut().insert(headers); + let fut = self.inner.call(req); + Box::pin(fut) + } +} + +/// Start HTTP OpenRPC server (Unix socket support would require additional dependencies) +pub async fn start_http_openrpc_server( + supervisor: Supervisor, + bind_address: &str, + port: u16, +) -> anyhow::Result { + let http_addr: SocketAddr = format!("{}:{}", bind_address, port).parse()?; + + // Configure CORS to allow requests from the admin UI + // Note: Authorization header must be explicitly listed, not covered by Any + use tower_http::cors::AllowHeaders; + let cors = CorsLayer::new() + .allow_origin(Any) + .allow_headers(AllowHeaders::list([ + hyper::header::CONTENT_TYPE, + hyper::header::AUTHORIZATION, + ])) + .allow_methods(Any) + .expose_headers(Any); + + // Build RPC middleware with authorization (per-connection) + let supervisor_for_middleware = supervisor.clone(); + let rpc_middleware = RpcServiceBuilder::new().layer_fn(move |service| { + // This closure runs once per connection + AuthMiddleware { + supervisor: supervisor_for_middleware.clone(), + inner: service, + } + }); + + // Build HTTP middleware stack with CORS and header propagation + let http_middleware = tower::ServiceBuilder::new() + .layer(cors) + .layer(tower::layer::layer_fn(|service| { + HeaderPropagationService { inner: service } + })); + + let http_server = Server::builder() + .set_rpc_middleware(rpc_middleware) + .set_http_middleware(http_middleware) + .build(http_addr) + .await?; + + let http_handle = http_server.start(supervisor.into_rpc()); + + info!("OpenRPC HTTP server running at 
http://{} with CORS enabled", http_addr); + + Ok(http_handle) +} diff --git a/bin/supervisor/src/store.rs b/bin/supervisor/src/store.rs new file mode 100644 index 0000000..1d66135 --- /dev/null +++ b/bin/supervisor/src/store.rs @@ -0,0 +1,286 @@ +//! In-memory storage layer for Supervisor +//! +//! Provides CRUD operations for: +//! - API Keys +//! - Runners +//! - Jobs + +use crate::auth::{ApiKey, ApiKeyScope}; +use crate::error::{SupervisorError, SupervisorResult}; +use hero_job::Job; +use std::collections::{HashMap, HashSet}; + +/// In-memory storage for all supervisor data +pub struct Store { + /// API keys (key_value -> ApiKey) + api_keys: HashMap, + /// Registered runner IDs + runners: HashSet, + /// In-memory job storage (job_id -> Job) + jobs: HashMap, +} + +impl Store { + /// Create a new store + pub fn new() -> Self { + Self { + api_keys: HashMap::new(), + runners: HashSet::new(), + jobs: HashMap::new(), + } + } + + // ==================== API Key Operations ==================== + + /// Create an API key with a specific value + pub fn key_create(&mut self, key: ApiKey) -> ApiKey { + self.api_keys.insert(key.name.clone(), key.clone()); + key + } + + /// Create a new API key with generated UUID + pub fn key_create_new(&mut self, name: String, scope: ApiKeyScope) -> ApiKey { + let key = ApiKey::new(name, scope); + self.api_keys.insert(key.name.clone(), key.clone()); + key + } + + /// Get an API key by its value + pub fn key_get(&self, key_name: &str) -> Option<&ApiKey> { + self.api_keys.get(key_name) + } + + /// Delete an API key + pub fn key_delete(&mut self, key_name: &str) -> Option { + self.api_keys.remove(key_name) + } + + /// List all API keys + pub fn key_list(&self) -> Vec { + self.api_keys.values().cloned().collect() + } + + /// List API keys by scope + pub fn key_list_by_scope(&self, scope: ApiKeyScope) -> Vec { + self.api_keys + .values() + .filter(|k| k.scope == scope) + .cloned() + .collect() + } + + // ==================== Runner Operations ==================== + + /// Add a runner + pub fn runner_add(&mut self, runner_id: String) -> SupervisorResult<()> { + self.runners.insert(runner_id); + Ok(()) + } + + /// Remove a runner + pub fn runner_remove(&mut self, runner_id: &str) -> SupervisorResult<()> { + self.runners.remove(runner_id); + Ok(()) + } + + /// Check if a runner exists + pub fn runner_exists(&self, runner_id: &str) -> bool { + self.runners.contains(runner_id) + } + + /// List all runner IDs + pub fn runner_list_all(&self) -> Vec { + self.runners.iter().cloned().collect() + } + + // ==================== Job Operations ==================== + + /// Store a job in memory + pub fn job_store(&mut self, job: Job) -> SupervisorResult<()> { + self.jobs.insert(job.id.clone(), job); + Ok(()) + } + + /// Get a job from memory + pub fn job_get(&self, job_id: &str) -> SupervisorResult { + self.jobs + .get(job_id) + .cloned() + .ok_or_else(|| SupervisorError::JobNotFound { + job_id: job_id.to_string(), + }) + } + + /// Delete a job from memory + pub fn job_delete(&mut self, job_id: &str) -> SupervisorResult<()> { + self.jobs + .remove(job_id) + .ok_or_else(|| SupervisorError::JobNotFound { + job_id: job_id.to_string(), + })?; + Ok(()) + } + + /// List all job IDs + pub fn job_list(&self) -> Vec { + self.jobs.keys().cloned().collect() + } + + /// Check if a job exists + pub fn job_exists(&self, job_id: &str) -> bool { + self.jobs.contains_key(job_id) + } +} + +impl Clone for Store { + fn clone(&self) -> Self { + Self { + api_keys: self.api_keys.clone(), + runners: 
self.runners.clone(), + jobs: self.jobs.clone(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hero_job::JobBuilder; + + fn create_test_store() -> Store { + Store::new() + } + + fn create_test_job(id: &str, runner: &str) -> Job { + let mut job = JobBuilder::new() + .caller_id("test_caller") + .context_id("test_context") + .runner(runner) + .executor("test") + .payload("test payload") + .build() + .unwrap(); + job.id = id.to_string(); // Set ID manually + job + } + + #[test] + fn test_api_key_operations() { + let mut store = create_test_store(); + + // Create key + let key = store.key_create_new("test_key".to_string(), ApiKeyScope::Admin); + assert_eq!(key.name, "test_key"); + assert_eq!(key.scope, ApiKeyScope::Admin); + + // Get key + let retrieved = store.key_get(&key.key); + assert!(retrieved.is_some()); + assert_eq!(retrieved.unwrap().name, "test_key"); + + // List keys + let keys = store.key_list(); + assert_eq!(keys.len(), 1); + + // List by scope + let admin_keys = store.key_list_by_scope(ApiKeyScope::Admin); + assert_eq!(admin_keys.len(), 1); + + // Delete key + let removed = store.key_delete(&key.key); + assert!(removed.is_some()); + assert!(store.key_get(&key.key).is_none()); + } + + #[test] + fn test_runner_operations() { + let mut store = create_test_store(); + + // Add runner + assert!(store.runner_add("runner1".to_string()).is_ok()); + assert!(store.runner_exists("runner1")); + + // List runners + let runners = store.runner_list_all(); + assert_eq!(runners.len(), 1); + assert!(runners.contains(&"runner1".to_string())); + + // List all runners + let all_runners = store.runner_list_all(); + assert_eq!(all_runners.len(), 1); + + // Remove runner + assert!(store.runner_remove("runner1").is_ok()); + assert!(!store.runner_exists("runner1")); + } + + #[test] + fn test_job_operations() { + let mut store = create_test_store(); + let job = create_test_job("job1", "runner1"); + + // Store job + assert!(store.job_store(job.clone()).is_ok()); + assert!(store.job_exists("job1")); + + // Get job + let retrieved = store.job_get("job1"); + assert!(retrieved.is_ok()); + assert_eq!(retrieved.unwrap().id, "job1"); + + // List jobs + let jobs = store.job_list(); + assert_eq!(jobs.len(), 1); + assert!(jobs.contains(&"job1".to_string())); + + // Delete job + assert!(store.job_delete("job1").is_ok()); + assert!(!store.job_exists("job1")); + assert!(store.job_get("job1").is_err()); + } + + #[test] + fn test_job_not_found() { + let store = create_test_store(); + let result = store.job_get("nonexistent"); + assert!(result.is_err()); + } + + #[test] + fn test_multiple_jobs() { + let mut store = create_test_store(); + + // Add multiple jobs + for i in 1..=3 { + let job = create_test_job(&format!("job{}", i), "runner1"); + assert!(store.job_store(job).is_ok()); + } + + // Verify all exist + assert_eq!(store.job_list().len(), 3); + assert!(store.job_exists("job1")); + assert!(store.job_exists("job2")); + assert!(store.job_exists("job3")); + + // Delete one + assert!(store.job_delete("job2").is_ok()); + assert_eq!(store.job_list().len(), 2); + assert!(!store.job_exists("job2")); + } + + #[test] + fn test_store_clone() { + let mut store = create_test_store(); + store.runner_add("runner1".to_string()).unwrap(); + + let job = create_test_job("job1", "runner1"); + store.job_store(job).unwrap(); + + // Clone the store + let cloned = store.clone(); + + // Verify cloned data + assert!(cloned.runner_exists("runner1")); + assert!(cloned.job_exists("job1")); + } +} diff --git 
a/bin/supervisor/src/supervisor.rs b/bin/supervisor/src/supervisor.rs new file mode 100644 index 0000000..7f84461 --- /dev/null +++ b/bin/supervisor/src/supervisor.rs @@ -0,0 +1,360 @@ +//! Main supervisor implementation for managing multiple actor runners. + +use crate::error::{SupervisorError, SupervisorResult}; +use crate::store::Store; +use hero_job_client::Client as JobClient; +use hero_job::{Job, JobStatus}; +use std::sync::Arc; +use tokio::sync::Mutex; + +// Re-export RPC types for convenience +pub use jsonrpsee::core::RpcResult; +pub use jsonrpsee::types::ErrorObject; + +/// Main supervisor that manages multiple runners +#[derive(Clone)] +pub struct Supervisor { + /// Centralized storage layer with interior mutability + pub(crate) store: Arc>, + /// Job client for Redis operations + pub(crate) job_client: JobClient, + /// Redis client for direct operations + pub(crate) redis_client: redis::Client, + // Optional Osiris client for persistent storage - temporarily disabled + // pub(crate) osiris_client: Option, +} + +impl Supervisor { + /// Create a new supervisor builder + pub fn builder() -> crate::builder::SupervisorBuilder { + crate::builder::SupervisorBuilder::new() + } + + /// Create a job (store in memory only, does not dispatch) + /// Authorization must be checked by the caller (e.g., OpenRPC layer) + pub async fn job_create(&self, job: Job) -> SupervisorResult { + let runner = job.runner.clone(); + let job_id = job.id.clone(); + + let mut store = self.store.lock().await; + if !store.runner_exists(&runner) { + return Err(SupervisorError::RunnerNotFound { + runner_id: runner, + }); + } + + // Store job in memory only + store.job_store(job)?; + Ok(job_id) + } + + /// Delete a runner from the supervisor + pub async fn runner_delete(&self, runner_id: &str) -> SupervisorResult<()> { + self.store.lock().await.runner_remove(runner_id) + } + + /// Check if a runner is registered + pub async fn has_runner(&self, runner_id: &str) -> bool { + self.store.lock().await.runner_exists(runner_id) + } + + /// Get a job by job ID from memory + pub async fn job_get(&self, job_id: &str) -> SupervisorResult { + self.store.lock().await.job_get(job_id) + } + + /// Ping a runner by dispatching a ping job to its queue + pub async fn runner_ping(&self, runner_id: &str) -> SupervisorResult { + use hero_job::JobBuilder; + + // Check if runner exists + let store = self.store.lock().await; + if !store.runner_exists(runner_id) { + return Err(SupervisorError::RunnerNotFound { + runner_id: runner_id.to_string(), + }); + } + + // Create a ping job + let ping_job = JobBuilder::new() + .caller_id("supervisor_ping") + .context_id("ping_context") + .payload("ping") + .runner(runner_id) + .executor("ping") + .timeout(10) + .build() + .map_err(|e| SupervisorError::QueueError { + runner_id: runner_id.to_string(), + reason: format!("Failed to create ping job: {}", e), + })?; + + // Store and dispatch the ping job + let job_id = ping_job.id.clone(); + drop(store); + self.store.lock().await.job_store(ping_job.clone())?; + self.job_client + .store_job_in_redis(&ping_job) + .await + .map_err(SupervisorError::from)?; + self.job_client + .job_run(&job_id, runner_id) + .await + .map_err(SupervisorError::from)?; + + Ok(job_id) + } + + /// Stop a job by ID + pub async fn job_stop(&self, job_id: &str) -> SupervisorResult<()> { + // For now, we'll implement a basic stop by setting status to Stopping + let _ = self.job_client.set_job_status(job_id, JobStatus::Stopping).await; + Ok(()) + } + + /// Delete a job by ID + /// 
Authorization must be checked by the caller (e.g., OpenRPC layer) + pub async fn job_delete(&self, job_id: &str) -> SupervisorResult<()> { + self.store.lock().await.job_delete(job_id) + } + + /// List all managed runners + pub async fn runner_list(&self) -> Vec { + self.store.lock().await.runner_list_all() + } + + /// Check if a runner is registered + pub async fn runner_is_registered(&self, runner_id: &str) -> bool { + self.store.lock().await.runner_exists(runner_id) + } + + /// Start a job by dispatching it to a runner's queue (fire-and-forget) + pub async fn job_start(&self, job_id: &str) -> SupervisorResult<()> { + // Get the job from memory + let job = self.job_get(job_id).await?; + let runner = job.runner.clone(); + + let store = self.store.lock().await; + if !store.runner_exists(&runner) { + return Err(SupervisorError::RunnerNotFound { + runner_id: runner, + }); + } + + // Store job in Redis and dispatch to runner queue + self.job_client + .store_job_in_redis(&job) + .await + .map_err(SupervisorError::from)?; + + self.job_client + .job_run(&job.id, &runner) + .await + .map_err(SupervisorError::from) + } + + /// Run a job: create, dispatch, and wait for result + pub async fn job_run(&self, job: Job) -> SupervisorResult { + let runner = job.runner.clone(); + + let mut store = self.store.lock().await; + if !store.runner_exists(&runner) { + return Err(SupervisorError::RunnerNotFound { + runner_id: runner, + }); + } + + // Store job in memory + store.job_store(job.clone())?; + drop(store); + + // Use job_client's job_run_wait which handles store in Redis, dispatch, and wait + self.job_client + .job_run_wait(&job, &runner, 30) + .await + .map_err(SupervisorError::from) + } + + // Secret management methods removed - use API key management instead + // See add_api_key, remove_api_key, list_api_keys methods below + + /// List all job IDs from memory + pub async fn job_list(&self) -> Vec { + self.store.lock().await.job_list() + } + + /// Get the status of a job + pub async fn job_status(&self, job_id: &str) -> SupervisorResult { + // First check if job exists in memory (created but not started) + let store = self.store.lock().await; + if let Ok(_job) = store.job_get(job_id) { + drop(store); + // Try to get status from Redis + match self.job_client.get_status(job_id).await { + Ok(status) => return Ok(status), + Err(hero_job_client::ClientError::Job(hero_job::JobError::NotFound(_))) => { + // Job exists in memory but not in Redis - it's created but not dispatched + return Ok(JobStatus::Created); + } + Err(e) => return Err(SupervisorError::from(e)), + } + } + drop(store); + + // Job not in memory, try Redis + let status = self.job_client.get_status(job_id).await + .map_err(|e| match e { + hero_job_client::ClientError::Job(hero_job::JobError::NotFound(_)) => { + SupervisorError::JobNotFound { job_id: job_id.to_string() } + } + _ => SupervisorError::from(e) + })?; + + Ok(status) + } + + /// Get the result of a job (returns immediately with current result or error) + pub async fn job_result(&self, job_id: &str) -> SupervisorResult> { + // Use client's get_status to check if job exists and get its status + let status = self.job_client.get_status(job_id).await + .map_err(|e| match e { + hero_job_client::ClientError::Job(hero_job::JobError::NotFound(_)) => { + SupervisorError::JobNotFound { job_id: job_id.to_string() } + } + _ => SupervisorError::from(e) + })?; + + // If job has error status, get the error message + if status.as_str() == "error" { + let error_msg = 
self.job_client.get_error(job_id).await + .map_err(SupervisorError::from)?; + + return Ok(Some(format!("Error: {}", error_msg.unwrap_or_else(|| "Unknown error".to_string())))); + } + + // Use client's get_result to get the result + let result = self.job_client.get_result(job_id).await + .map_err(SupervisorError::from)?; + + Ok(result) + } + + // API Key Management Methods + + /// Get logs for a specific job + /// + /// Reads log files from the logs/actor//job-/ directory + pub async fn job_logs(&self, job_id: &str, lines: Option) -> SupervisorResult> { + // Determine the logs directory path + // Default to ~/hero/logs + let logs_root = if let Some(home) = std::env::var_os("HOME") { + std::path::PathBuf::from(home).join("hero").join("logs") + } else { + std::path::PathBuf::from("logs") + }; + + // Check if logs directory exists + if !logs_root.exists() { + return Ok(vec![format!("Logs directory not found: {}", logs_root.display())]); + } + + let actor_dir = logs_root.join("actor"); + if !actor_dir.exists() { + return Ok(vec![format!("Actor logs directory not found: {}", actor_dir.display())]); + } + + // Search through all runner directories to find the job + if let Ok(entries) = std::fs::read_dir(&actor_dir) { + for entry in entries.flatten() { + if entry.path().is_dir() { + let job_dir = entry.path().join(format!("job-{}", job_id)); + + if job_dir.exists() { + // Read all log files in the directory + let mut all_logs = Vec::new(); + + if let Ok(log_entries) = std::fs::read_dir(&job_dir) { + // Collect all log files with their paths for sorting + let mut log_files: Vec<_> = log_entries + .flatten() + .filter(|e| { + if !e.path().is_file() { + return false; + } + // Accept files that start with "log" (covers log.YYYY-MM-DD-HH format) + e.file_name().to_string_lossy().starts_with("log") + }) + .collect(); + + // Sort by filename (which includes timestamp for hourly rotation) + log_files.sort_by_key(|e| e.path()); + + // Read files in order + for entry in log_files { + if let Ok(content) = std::fs::read_to_string(entry.path()) { + all_logs.extend(content.lines().map(|s| s.to_string())); + } + } + } + + // If lines limit is specified, return only the last N lines + if let Some(n) = lines { + let start = all_logs.len().saturating_sub(n); + return Ok(all_logs[start..].to_vec()); + } else { + return Ok(all_logs); + } + } + } + } + } + + // If no logs found, return helpful message + Ok(vec![format!("No logs found for job: {}", job_id)]) + } + + // API Key Management - These methods provide direct access to the key store + // Authorization checking should be done at the OpenRPC layer before calling these + + /// Get an API key by its value + pub(crate) async fn key_get(&self, key_id: &str) -> Option { + self.store.lock().await.key_get(key_id).cloned() + } + + /// Create an API key with a specific value + pub(crate) async fn key_create(&self, key: crate::auth::ApiKey) -> crate::auth::ApiKey { + self.store.lock().await.key_create(key) + } + + /// Delete an API key + pub(crate) async fn key_delete(&self, key_id: &str) -> Option { + self.store.lock().await.key_delete(key_id) + } + + /// List all API keys + pub(crate) async fn key_list(&self) -> Vec { + self.store.lock().await.key_list() + } + + /// List API keys by scope + pub(crate) async fn key_list_by_scope(&self, scope: crate::auth::ApiKeyScope) -> Vec { + self.store.lock().await.key_list_by_scope(scope) + } + + // Runner Management + + /// Create a new runner + /// Authorization must be checked by the caller (e.g., OpenRPC layer) + pub async fn 
runner_create(&self, runner_id: String) -> SupervisorResult { + self.store.lock().await.runner_add(runner_id.clone())?; + Ok(runner_id) + } + + /// Create a new API key with generated UUID + pub async fn create_api_key(&self, name: String, scope: crate::auth::ApiKeyScope) -> crate::auth::ApiKey { + self.store.lock().await.key_create_new(name, scope) + } +} + +// Note: Default implementation removed because it requires async initialization +// Use Supervisor::builder() for proper initialization \ No newline at end of file diff --git a/bin/supervisor/tests/README.md b/bin/supervisor/tests/README.md new file mode 100644 index 0000000..1584a89 --- /dev/null +++ b/bin/supervisor/tests/README.md @@ -0,0 +1,195 @@ +# Supervisor End-to-End Tests + +Comprehensive integration tests for all Hero Supervisor OpenRPC client methods. + +## Prerequisites + +1. **Redis Server Running:** + ```bash + redis-server + ``` + +2. **Supervisor Running:** + ```bash + cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor + ./scripts/run.sh + ``` + +## Running Tests + +### Run All Tests +```bash +cargo test --test end_to_end +``` + +### Run Specific Test +```bash +cargo test --test end_to_end test_01_rpc_discover +``` + +### Run with Output +```bash +cargo test --test end_to_end -- --nocapture +``` + +### Run in Order (Sequential) +```bash +cargo test --test end_to_end -- --test-threads=1 --nocapture +``` + +## Test Coverage + +### ✅ Discovery & Info +- `test_01_rpc_discover` - OpenRPC specification discovery +- `test_15_supervisor_info` - Supervisor information + +### ✅ Runner Management +- `test_02_runner_register` - Register a new runner +- `test_03_runner_list` - List all runners +- `test_14_runner_remove` - Remove a runner + +### ✅ Job Management +- `test_04_jobs_create` - Create a job without running +- `test_05_jobs_list` - List all jobs +- `test_06_job_run_simple` - Run a job and wait for result +- `test_07_job_status` - Get job status +- `test_08_job_get` - Get job by ID +- `test_09_job_delete` - Delete a job + +### ✅ Authentication & API Keys +- `test_10_auth_verify` - Verify current API key +- `test_11_auth_key_create` - Create new API key +- `test_12_auth_key_list` - List all API keys +- `test_13_auth_key_remove` - Remove an API key + +### ✅ Complete Workflow +- `test_99_complete_workflow` - End-to-end integration test + +## Test Configuration + +Tests use the following defaults: +- **Supervisor URL:** `http://127.0.0.1:3030` +- **Admin Secret:** `807470fd1e1ccc3fb997a1d4177cceb31a68cb355a4412c8fd6e66e517e902be` +- **Test Runner:** `test-runner` (all tests use this runner name) + +**Important:** All tests use the same runner name (`test-runner`), so you only need to start one runner with that name to run all tests. + +## Expected Behavior + +### Successful Tests +All tests should pass when: +- Supervisor is running on port 3030 +- Admin secret matches configuration +- Redis is accessible + +### Expected Warnings +Some tests may show warnings if: +- `job.run` times out (no actual runner connected to Redis) +- Runners already exist from previous test runs + +These are expected and don't indicate test failure. 
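For example, a test that tolerates the no-runner timeout can branch on the outcome instead of asserting success. A sketch, reusing the `client` and `create_test_job` helpers from the test suite inside an async test body:

```rust
let job = create_test_job("print('tolerant');");
match client.job_run(job, Some(10)).await {
    Ok(resp) => println!("completed: {} ({})", resp.job_id, resp.status),
    Err(e) => println!("expected without a connected runner: {e:?}"),
}
```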
+ +## Troubleshooting + +### Connection Refused +``` +Error: tcp connect error, 127.0.0.1:3030, Connection refused +``` +**Solution:** Start the supervisor with `./scripts/run.sh` + +### Method Not Found +``` +Error: Method not found +``` +**Solution:** Rebuild supervisor with latest code: +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +cargo build +``` + +### Authorization Failed +``` +Error: Missing Authorization header +``` +**Solution:** Check that `ADMIN_SECRET` in test matches supervisor configuration + +### Job Tests Timeout +``` +Error: JsonRpc(RequestTimeout) +``` +**Solution:** Make sure you have a runner connected with the name `test-runner`: +```bash +cd /Users/timurgordon/code/git.ourworld.tf/herocode/runner/rust +cargo run --bin runner_osiris -- test-runner +``` + +## Continuous Integration + +To run tests in CI: + +```bash +#!/bin/bash +# Start Redis +redis-server --daemonize yes + +# Start Supervisor +cd /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor +./scripts/run.sh & +SUPERVISOR_PID=$! + +# Wait for supervisor to be ready +sleep 2 + +# Run tests +cargo test --test end_to_end + +# Cleanup +kill $SUPERVISOR_PID +redis-cli shutdown +``` + +## Adding New Tests + +1. Create a new test function: + ```rust + #[tokio::test] + async fn test_XX_my_new_test() { + println!("\n🧪 Test: my.new.method"); + let client = create_client().await; + // ... test code ... + println!("✅ my.new.method works"); + } + ``` + +2. Run it: + ```bash + cargo test --test end_to_end test_XX_my_new_test -- --nocapture + ``` + +## Test Output Example + +``` +🧪 Test: rpc.discover +✅ rpc.discover works + +🧪 Test: runner.register +✅ runner.register works - registered: test-runner-e2e + +🧪 Test: runner.list +✅ runner.list works - found 3 runners + - osiris + - freezone + - test-runner-e2e + +🧪 Test: jobs.create +✅ jobs.create works - created job: 550e8400-e29b-41d4-a716-446655440000 + +... +``` + +## Notes + +- Tests are designed to be idempotent (can run multiple times) +- Tests clean up after themselves when possible +- Some tests depend on previous test state (use `--test-threads=1` for strict ordering) +- Job execution tests may timeout if no runner is connected to Redis (this is expected) diff --git a/bin/supervisor/tests/end_to_end.rs b/bin/supervisor/tests/end_to_end.rs new file mode 100644 index 0000000..88d4f5b --- /dev/null +++ b/bin/supervisor/tests/end_to_end.rs @@ -0,0 +1,482 @@ +//! End-to-End Integration Tests for Hero Supervisor +//! +//! Tests all OpenRPC client methods against a running supervisor instance. +//! The supervisor is automatically started and stopped for each test run. 
+ +use hero_supervisor_openrpc_client::SupervisorClient; +use hero_supervisor::{SupervisorBuilder, openrpc::start_http_openrpc_server}; +use hero_job::{Job, JobBuilder}; +use std::sync::Once; + +/// Test configuration +const SUPERVISOR_URL: &str = "http://127.0.0.1:3031"; +const ADMIN_SECRET: &str = "test-admin-secret-for-e2e-tests"; +const TEST_RUNNER_NAME: &str = "test-runner"; + +/// Global initialization flag +static INIT: Once = Once::new(); + +/// Initialize and start the supervisor (called once) +async fn init_supervisor() { + // Use a blocking approach to ensure supervisor starts before any test runs + static mut INITIALIZED: bool = false; + + unsafe { + INIT.call_once(|| { + // Spawn a new runtime for the supervisor + std::thread::spawn(|| { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + // Build supervisor with test configuration + let supervisor = SupervisorBuilder::new() + .admin_secrets(vec![ADMIN_SECRET.to_string()]) + .build() + .await + .expect("Failed to build supervisor"); + + // Start OpenRPC server + match start_http_openrpc_server(supervisor, "127.0.0.1", 3031).await { + Ok(server_handle) => { + server_handle.stopped().await; + } + Err(e) => { + eprintln!("OpenRPC server error: {}", e); + } + } + }); + }); + + // Give the server time to start + std::thread::sleep(std::time::Duration::from_secs(1)); + INITIALIZED = true; + }); + } +} + +/// Helper to create a test client +async fn create_client() -> SupervisorClient { + // Ensure supervisor is running + init_supervisor().await; + + SupervisorClient::builder() + .url(SUPERVISOR_URL) + .secret(ADMIN_SECRET) + .build() + .expect("Failed to create supervisor client") +} + +/// Helper to create a test job (always uses TEST_RUNNER_NAME) +fn create_test_job(payload: &str) -> Job { + JobBuilder::new() + .caller_id("e2e-test") + .context_id("test-context") + .runner(TEST_RUNNER_NAME) + .payload(payload) + .executor("rhai") + .timeout(30) + .build() + .expect("Failed to build test job") +} + +#[tokio::test] +async fn test_01_rpc_discover() { + println!("\n🧪 Test: rpc.discover"); + + let client = create_client().await; + let result = client.discover().await; + + assert!(result.is_ok(), "rpc.discover should succeed"); + let spec = result.unwrap(); + + // Verify it's a valid OpenRPC spec + assert!(spec.get("openrpc").is_some(), "Should have openrpc field"); + assert!(spec.get("methods").is_some(), "Should have methods field"); + + println!("✅ rpc.discover works"); +} + +#[tokio::test] +async fn test_02_runner_register() { + println!("\n🧪 Test: runner.register"); + + let client = create_client().await; + + // Register a test runner + let result = client.runner_create(TEST_RUNNER_NAME).await; + + // Should succeed or already exist + match result { + Ok(()) => { + println!("✅ runner.register works - registered: {}", TEST_RUNNER_NAME); + } + Err(e) => { + // If it fails, it might already exist, which is okay + println!("⚠️ runner.register: {:?} (may already exist)", e); + } + } +} + +#[tokio::test] +async fn test_03_runner_list() { + println!("\n🧪 Test: runner.list"); + + let client = create_client().await; + + // First ensure our test runner exists + let _ = client.runner_create(TEST_RUNNER_NAME).await; + + // List all runners + let result = client.runner_list().await; + + if let Err(ref e) = result { + println!(" Error: {:?}", e); + } + assert!(result.is_ok(), "runner.list should succeed"); + let runners = result.unwrap(); + + assert!(!runners.is_empty(), "Should have at least one runner"); + 
assert!(runners.contains(&TEST_RUNNER_NAME.to_string()), + "Should contain our test runner"); + + println!("✅ runner.list works - found {} runners", runners.len()); + for runner in &runners { + println!(" - {}", runner); + } +} + +#[tokio::test] +async fn test_04_jobs_create() { + println!("\n🧪 Test: jobs.create"); + + let client = create_client().await; + + // Ensure runner exists + let _ = client.runner_create(TEST_RUNNER_NAME).await; + + // Create a job without running it + let job = create_test_job("print('test job');"); + let result = client.job_create(job).await; + + match &result { + Ok(_) => {}, + Err(e) => println!(" Error: {:?}", e), + } + assert!(result.is_ok(), "jobs.create should succeed"); + let job_id = result.unwrap(); + + assert!(!job_id.is_empty(), "Should return a job ID"); + println!("✅ jobs.create works - created job: {}", job_id); +} + +#[tokio::test] +async fn test_05_jobs_list() { + println!("\n🧪 Test: jobs.list"); + + let client = create_client().await; + + // Create a job first + let _ = client.runner_create(TEST_RUNNER_NAME).await; + let job = create_test_job("print('list test');"); + let _ = client.job_create(job).await; + + // List all jobs + let result = client.job_list().await; + + assert!(result.is_ok(), "jobs.list should succeed"); + let jobs = result.unwrap(); + + println!("✅ jobs.list works - found {} jobs", jobs.len()); +} + +#[tokio::test] +async fn test_06_job_run_simple() { + println!("\n🧪 Test: job.run (simple script)"); + + let client = create_client().await; + + // Ensure runner exists + let _ = client.runner_create(TEST_RUNNER_NAME).await; + + // Run a simple job + let job = create_test_job(r#" + print("Hello from test!"); + 42 + "#); + + let result = client.job_run(job, Some(30)).await; + + // Note: This will timeout if no runner is actually connected to Redis + // but we're testing the API call itself + match result { + Ok(response) => { + println!("✅ job.run works - job_id: {}, status: {}", + response.job_id, response.status); + } + Err(e) => { + println!("⚠️ job.run: {:?} (runner may not be connected)", e); + // This is expected if no actual runner is listening + } + } +} + +#[tokio::test] +async fn test_07_job_status() { + println!("\n🧪 Test: job.status"); + + let client = create_client().await; + + // Create a job first + let _ = client.runner_create(TEST_RUNNER_NAME).await; + let job = create_test_job("print('status test');"); + let job_id = client.job_create(job).await.expect("Failed to create job"); + + // Get job status + let result = client.job_status(&job_id).await; + + if let Err(ref e) = result { + println!(" Error: {:?}", e); + } + assert!(result.is_ok(), "job.status should succeed"); + let status = result.unwrap(); + + println!("✅ job.status works - job: {}, status: {:?}", + job_id, status); +} + +#[tokio::test] +async fn test_08_job_get() { + println!("\n🧪 Test: job.get"); + + let client = create_client().await; + + // Create a job first + let _ = client.runner_create(TEST_RUNNER_NAME).await; + let original_job = create_test_job("print('get test');"); + let job_id = client.job_create(original_job.clone()).await + .expect("Failed to create job"); + + // Get the job + let result = client.job_get(&job_id).await; + + assert!(result.is_ok(), "job.get should succeed"); + let job = result.unwrap(); + + assert_eq!(job.id, job_id); + println!("✅ job.get works - retrieved job: {}", job.id); +} + +#[tokio::test] +async fn test_09_job_delete() { + println!("\n🧪 Test: job.delete"); + + let client = create_client().await; + + // Create a job 
first + let _ = client.runner_create(TEST_RUNNER_NAME).await; + let job = create_test_job("print('delete test');"); + let job_id = client.job_create(job).await.expect("Failed to create job"); + + // Delete the job + let result = client.job_delete(&job_id).await; + + if let Err(ref e) = result { + println!(" Error: {:?}", e); + } + assert!(result.is_ok(), "job.delete should succeed"); + println!("✅ job.delete works - deleted job: {}", job_id); + + // Verify it's gone + let get_result = client.job_get(&job_id).await; + assert!(get_result.is_err(), "Job should not exist after deletion"); +} + +#[tokio::test] +async fn test_10_auth_verify() { + println!("\n🧪 Test: auth.verify"); + + let client = create_client().await; + + let result = client.auth_verify().await; + + assert!(result.is_ok(), "auth.verify should succeed with valid key"); + let auth_info = result.unwrap(); + + println!("✅ auth.verify works"); + println!(" Scope: {}", auth_info.scope); + println!(" Name: {}", auth_info.name.unwrap_or_else(|| "N/A".to_string())); +} + +#[tokio::test] +async fn test_11_auth_key_create() { + println!("\n🧪 Test: auth.key.create"); + + let client = create_client().await; + + use hero_supervisor_openrpc_client::GenerateApiKeyParams; + let params = GenerateApiKeyParams { + name: "test-key".to_string(), + scope: "user".to_string(), + }; + let result = client.key_generate(params).await; + + assert!(result.is_ok(), "auth.key.create should succeed"); + let api_key = result.unwrap(); + + assert!(!api_key.key.is_empty(), "Should return a key"); + assert_eq!(api_key.name, "test-key"); + assert_eq!(api_key.scope, "user"); + + println!("✅ auth.key.create works - created key: {}...", + &api_key.key[..api_key.key.len().min(8)]); +} + +#[tokio::test] +async fn test_12_auth_key_list() { + println!("\n🧪 Test: auth.key.list"); + + let client = create_client().await; + + // Create a key first + use hero_supervisor_openrpc_client::GenerateApiKeyParams; + let params = GenerateApiKeyParams { + name: "list-test-key".to_string(), + scope: "user".to_string(), + }; + let _ = client.key_generate(params).await; + + let result = client.key_list().await; + + assert!(result.is_ok(), "auth.key.list should succeed"); + let keys = result.unwrap(); + + println!("✅ auth.key.list works - found {} keys", keys.len()); + for key in &keys { + println!(" - {} ({}): {}...", key.name, key.scope, + &key.key[..key.key.len().min(8)]); + } +} + +#[tokio::test] +async fn test_13_auth_key_remove() { + println!("\n🧪 Test: auth.key.remove"); + + let client = create_client().await; + + // Create a key first + use hero_supervisor_openrpc_client::GenerateApiKeyParams; + let params = GenerateApiKeyParams { + name: "remove-test-key".to_string(), + scope: "user".to_string(), + }; + let api_key = client.key_generate(params) + .await + .expect("Failed to create key"); + + // Remove it (use name as the key_id, not the key value) + let result = client.key_delete(api_key.name.clone()).await; + + if let Err(ref e) = result { + println!(" Error: {:?}", e); + } + assert!(result.is_ok(), "auth.key.remove should succeed"); + println!("✅ auth.key.remove works - removed key: {}...", + &api_key.key[..api_key.key.len().min(8)]); +} + +#[tokio::test] +async fn test_14_runner_remove() { + println!("\n🧪 Test: runner.remove"); + + let client = create_client().await; + + // Register a runner to remove + let runner_name = "test-runner-to-remove"; + let _ = client.runner_create(runner_name).await; + + // Remove it + let result = client.runner_remove(runner_name).await; + + 
assert!(result.is_ok(), "runner.remove should succeed"); + println!("✅ runner.remove works - removed: {}", runner_name); + + // Verify it's gone + let runners = client.runner_list().await.unwrap(); + assert!(!runners.contains(&runner_name.to_string()), + "Runner should not exist after removal"); +} + +#[tokio::test] +async fn test_15_supervisor_info() { + println!("\n🧪 Test: supervisor.info"); + + let client = create_client().await; + + let result = client.get_supervisor_info().await; + + if let Err(ref e) = result { + println!(" Error: {:?}", e); + } + assert!(result.is_ok(), "supervisor.info should succeed"); + let info = result.unwrap(); + + println!("✅ supervisor.info works"); + println!(" Server URL: {}", info.server_url); +} + +/// Integration test that runs a complete workflow +#[tokio::test] +async fn test_99_complete_workflow() { + println!("\n🧪 Test: Complete Workflow"); + + let client = create_client().await; + + // 1. Register runner + println!(" 1. Registering runner..."); + let _ = client.runner_create("workflow-runner").await; + + // 2. List runners + println!(" 2. Listing runners..."); + let runners = client.runner_list().await.unwrap(); + assert!(runners.contains(&"workflow-runner".to_string())); + + // 3. Create API key + println!(" 3. Creating API key..."); + use hero_supervisor_openrpc_client::GenerateApiKeyParams; + let params = GenerateApiKeyParams { + name: "workflow-key".to_string(), + scope: "user".to_string(), + }; + let api_key = client.key_generate(params).await.unwrap(); + + // 4. Verify auth + println!(" 4. Verifying auth..."); + let _ = client.auth_verify().await.unwrap(); + + // 5. Create job + println!(" 5. Creating job..."); + let job = create_test_job("print('workflow test');"); + let job_id = client.job_create(job).await.unwrap(); + + // 6. Get job status + println!(" 6. Getting job status..."); + let _status = client.job_status(&job_id).await.unwrap(); + + // 7. List all jobs + println!(" 7. Listing all jobs..."); + let jobs = client.job_list().await.unwrap(); + assert!(!jobs.is_empty()); + + // 8. Delete job + println!(" 8. Deleting job..."); + let _ = client.job_delete(&job_id).await.unwrap(); + + // 9. Remove API key + println!(" 9. Removing API key..."); + let _ = client.key_delete(api_key.name).await.unwrap(); + + // 10. Remove runner + println!(" 10. Removing runner..."); + let _ = client.runner_remove("workflow-runner").await.unwrap(); + + println!("✅ Complete workflow test passed!"); +} diff --git a/bin/supervisor/tests/job_api_integration_tests.rs b/bin/supervisor/tests/job_api_integration_tests.rs new file mode 100644 index 0000000..f604ced --- /dev/null +++ b/bin/supervisor/tests/job_api_integration_tests.rs @@ -0,0 +1,31 @@ +//! Integration tests for the job API +//! +//! These tests validate the complete job lifecycle using a real supervisor instance. +//! They require Redis and a running supervisor to execute properly. 
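A hypothetical lifecycle test built on the two helpers below might look like the following sketch. The `SUPERVISOR_ADMIN_SECRET` environment variable is illustrative (the real secret is deployment-specific), and the runner id matches the `osis_runner_1` that `create_test_job` targets:

```rust
#[tokio::test]
async fn test_job_create_smoke() {
    // Skip cleanly when no supervisor is listening (see helper below).
    if !is_supervisor_available().await {
        eprintln!("skipping: supervisor not reachable on localhost:3030");
        return;
    }
    let secret = std::env::var("SUPERVISOR_ADMIN_SECRET").expect("admin secret");
    let client = SupervisorClient::builder()
        .url("http://localhost:3030")
        .secret(secret.as_str())
        .build()
        .expect("client");
    // create_test_job targets runner `osis_runner_1`; ensure it is registered.
    let _ = client.runner_create("osis_runner_1").await;
    let job = create_test_job("smoke_context").expect("job");
    let job_id = client.job_create(job).await.expect("job.create");
    assert!(!job_id.is_empty());
}
```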
+ +use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder, JobResult}; +use std::time::Duration; +use tokio::time::sleep; +use uuid::Uuid; + +/// Test helper to create a unique job for testing +fn create_test_job(context: &str) -> Result> { + JobBuilder::new() + .caller_id("integration_test") + .context_id(context) + .payload("echo 'Test job output'") + .executor("osis") + .runner("osis_runner_1") + .timeout(30) + .env_var("TEST_VAR", "test_value") + .build() + .map_err(|e| e.into()) +} + +/// Test helper to check if supervisor is available +async fn is_supervisor_available() -> bool { + match SupervisorClient::new("http://localhost:3030") { + Ok(client) => client.discover().await.is_ok(), + Err(_) => false, + } +} \ No newline at end of file diff --git a/docs/ethymology.md b/docs/ethymology.md new file mode 100644 index 0000000..6abea96 --- /dev/null +++ b/docs/ethymology.md @@ -0,0 +1,91 @@ +# HORUS — The Meaning Behind the Name +*Hierarchical Orchestration Runtime for Universal Scripts* + +--- + +## 1. Why “Horus”? + +**Horus** is one of the oldest and most symbolic deities of ancient Egypt: +a god of the **sky, perception, order, and dominion**. + +In mythology, Horus *is* the sky itself; +his **right eye is the sun** (clarity, authority), +his **left eye the moon** (rhythm, balance). + +This symbolism aligns perfectly with a system built to supervise, coordinate, and execute distributed workloads. + +--- + +## 2. Symbolic Mapping to the Architecture + +- **Sky** → the compute fabric itself +- **Solar eye (sun)** → supervisor layer (visibility, authentication, authority) +- **Lunar eye (moon)** → coordinator layer (workflow rhythms, stepwise order) +- **Falcon wings** → runners (swift execution of tasks) +- **Battle against chaos** → ordering and normalizing raw jobs + +Horus is an archetype of **oversight**, **correct action**, and **restoring balance**—all fundamental qualities of an agentic execution system. + +--- + +## 3. The Name as a Backronym +**H O R U S** +**H**ierarchical +**O**rchestration +**R**untime for +**U**niversal +**S**cripts + +This describes the system exactly: +a runtime that receives jobs, authenticates them, orchestrates workflows, and executes scripts across distributed runners. + +--- + +## 4. Why It Fits This Stack + +The stack consists of: + +- **Job** – the incoming intent +- **Supervisor** – verifies, authenticates, admits +- **Coordinator** – plans, arranges, sequences +- **Runner** – executes scripts +- **SAL** – system-level script engine +- **Osiris** – object-level storage & retrieval engine + +All of this is unified by the central logic of *oversight, orchestration, and action*. + +Horus expresses these ideas precisely: +- Observation → validation & monitoring +- Order → workflow coordination +- Action → script execution +- Sky → the domain that contains all processes beneath it + +--- + +## 5. Visual & Conceptual Identity + +**Themes:** +- The Eye of Horus → observability, correctness, safety +- Falcon → agile execution +- Sky → the domain of computation +- Light (sun/moon) → insight, clarity, cycle + +**Palette concepts:** +- Gold + deep blue +- Light on dark (sun in sky) +- Single-line geometric Eye (modernized) + +The name offers both deep mythic roots and clean, modern branding potential. + +--- + +## 6. Narrative Summary + +**HORUS** is the execution sky: +the domain where jobs arrive, gain form, and become actions. +It brings clarity to chaos, structure to tasks, and order to distributed systems. + +It is not just a name. 
+It is the story of a system that sees clearly, acts decisively, and orchestrates wisely.
+
+---
\ No newline at end of file
diff --git a/lib/clients/job/Cargo.toml b/lib/clients/job/Cargo.toml
new file mode 100644
index 0000000..31557ae
--- /dev/null
+++ b/lib/clients/job/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "hero-job-client"
+version.workspace = true
+edition.workspace = true
+description = "Job client for Hero - Redis-based job management"
+license = "MIT OR Apache-2.0"
+
+[lib]
+name = "hero_job_client"
+path = "lib.rs"
+
+[dependencies]
+# Core dependencies
+redis.workspace = true
+tokio.workspace = true
+chrono.workspace = true
+thiserror.workspace = true
+async-trait.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+
+# Hero dependencies
+hero-job = { path = "../../models/job" }
diff --git a/lib/clients/job/lib.rs b/lib/clients/job/lib.rs
new file mode 100644
index 0000000..22e0e98
--- /dev/null
+++ b/lib/clients/job/lib.rs
@@ -0,0 +1,473 @@
+//! Job client implementation for managing jobs in Redis
+
+use chrono::Utc;
+use redis::AsyncCommands;
+use hero_job::{Job, JobStatus, JobError};
+use thiserror::Error;
+
+/// Client-specific error types
+#[derive(Error, Debug)]
+pub enum ClientError {
+    #[error("Redis error: {0}")]
+    Redis(#[from] redis::RedisError),
+    #[error("Job error: {0}")]
+    Job(#[from] JobError),
+    #[error("Invalid status: {0}")]
+    InvalidStatus(String),
+    #[error("Timeout waiting for job completion")]
+    Timeout,
+}
+
+/// Client for managing jobs in Redis
+#[derive(Debug, Clone)]
+pub struct Client {
+    redis_client: redis::Client,
+    namespace: String,
+}
+
+pub struct ClientBuilder {
+    /// Redis URL for connection
+    redis_url: String,
+    /// Namespace for queue keys
+    namespace: String,
+}
+
+impl ClientBuilder {
+    /// Create a new client builder
+    pub fn new() -> Self {
+        Self {
+            redis_url: "redis://localhost:6379".to_string(),
+            namespace: "".to_string(),
+        }
+    }
+
+    /// Set the Redis URL
+    pub fn redis_url<S: Into<String>>(mut self, url: S) -> Self {
+        self.redis_url = url.into();
+        self
+    }
+
+    /// Set the namespace for queue keys
+    pub fn namespace<S: Into<String>>(mut self, namespace: S) -> Self {
+        self.namespace = namespace.into();
+        self
+    }
+
+    /// Build the client
+    pub async fn build(self) -> Result<Client, ClientError> {
+        // Create Redis client
+        let redis_client = redis::Client::open(self.redis_url.as_str())
+            .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(Client {
+            redis_client,
+            namespace: self.namespace,
+        })
+    }
+}
+
+impl Default for Client {
+    fn default() -> Self {
+        // Note: Default implementation creates an empty client
+        // Use Client::builder() for proper initialization
+        Self {
+            redis_client: redis::Client::open("redis://localhost:6379").unwrap(),
+            namespace: "".to_string(),
+        }
+    }
+}
+
+impl Client {
+    /// Create a new client builder
+    pub fn builder() -> ClientBuilder {
+        ClientBuilder::new()
+    }
+
+    /// List all job IDs from Redis
+    pub async fn list_jobs(&self) -> Result<Vec<String>, ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let keys: Vec<String> = conn.keys(format!("{}:*", &self.jobs_key())).await
+            .map_err(|e| ClientError::Redis(e))?;
+        let job_ids: Vec<String> = keys
+            .into_iter()
+            .filter_map(|key| {
+                if key.starts_with(&format!("{}:", self.jobs_key())) {
+                    key.strip_prefix(&format!("{}:", self.jobs_key()))
+                        .map(|s| s.to_string())
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        Ok(job_ids)
+    }
+
+    fn jobs_key(&self) -> String {
+        if self.namespace.is_empty() {
+            "job".to_string()
+        } else {
+            format!("{}:job", self.namespace)
+        }
+    }
+
+    pub fn job_key(&self, job_id: &str) -> String {
+        if self.namespace.is_empty() {
+            format!("job:{}", job_id)
+        } else {
+            format!("{}:job:{}", self.namespace, job_id)
+        }
+    }
+
+    pub fn job_reply_key(&self, job_id: &str) -> String {
+        if self.namespace.is_empty() {
+            format!("reply:{}", job_id)
+        } else {
+            format!("{}:reply:{}", self.namespace, job_id)
+        }
+    }
+
+    pub fn runner_key(&self, runner_name: &str) -> String {
+        if self.namespace.is_empty() {
+            format!("runner:{}", runner_name)
+        } else {
+            format!("{}:runner:{}", self.namespace, runner_name)
+        }
+    }
+
+    /// Set job error in Redis
+    pub async fn set_error(&self,
+        job_id: &str,
+        error: &str,
+    ) -> Result<(), ClientError> {
+        let job_key = self.job_key(job_id);
+        let now = Utc::now();
+
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let _: () = conn.hset_multiple(&job_key, &[
+            ("error", error),
+            ("status", JobStatus::Error.as_str()),
+            ("updated_at", &now.to_rfc3339()),
+        ]).await
+        .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(())
+    }
+
+    /// Set job status in Redis
+    pub async fn set_job_status(&self,
+        job_id: &str,
+        status: JobStatus,
+    ) -> Result<(), ClientError> {
+        let job_key = self.job_key(job_id);
+        let now = Utc::now();
+
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let _: () = conn.hset_multiple(&job_key, &[
+            ("status", status.as_str()),
+            ("updated_at", &now.to_rfc3339()),
+        ]).await
+        .map_err(|e| ClientError::Redis(e))?;
+        Ok(())
+    }
+
+    /// Get job status from Redis
+    pub async fn get_status(
+        &self,
+        job_id: &str,
+    ) -> Result<JobStatus, ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let status_str: Option<String> = conn.hget(&self.job_key(job_id), "status").await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        match status_str {
+            Some(s) => JobStatus::from_str(&s).ok_or_else(|| ClientError::InvalidStatus(s)),
+            None => Err(ClientError::Job(JobError::NotFound(job_id.to_string()))),
+        }
+    }
+
+    /// Delete job from Redis
+    pub async fn delete_from_redis(
+        &self,
+        job_id: &str,
+    ) -> Result<(), ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let job_key = self.job_key(job_id);
+        let _: () = conn.del(&job_key).await
+            .map_err(|e| ClientError::Redis(e))?;
+        Ok(())
+    }
+
+    /// Store this job in Redis with the specified status
+    pub async fn store_job_in_redis_with_status(&self, job: &Job, status: JobStatus) -> Result<(), ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let job_key = self.job_key(&job.id);
+
+        // Serialize the job data
+        let job_data = serde_json::to_string(job)
+            .map_err(|e| JobError::Serialization(e))?;
+
+        // Store job data in Redis hash
+        let _: () = conn.hset_multiple(&job_key, &[
+            ("data", job_data),
+            ("status", status.as_str().to_string()),
+            ("created_at", job.created_at.to_rfc3339()),
+            ("updated_at", job.updated_at.to_rfc3339()),
+        ]).await
+        .map_err(|e| ClientError::Redis(e))?;
+
+        // Set TTL for the job (24 hours)
+        let _: () = conn.expire(&job_key, 86400).await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(())
+    }
+
+    /// Store this job in Redis (defaults to Dispatched status for backwards compatibility)
+    pub async fn store_job_in_redis(&self, job: &Job) -> Result<(), ClientError> {
+        self.store_job_in_redis_with_status(job, JobStatus::Dispatched).await
+    }
+
+    /// Load a job from Redis by ID
+    pub async fn load_job_from_redis(
+        &self,
+        job_id: &str,
+    ) -> Result<Job, ClientError> {
+        let job_key = self.job_key(job_id);
+
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        // Get job data from Redis
+        let job_data: Option<String> = conn.hget(&job_key, "data").await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        match job_data {
+            Some(data) => {
+                let job: Job = serde_json::from_str(&data)
+                    .map_err(|e| JobError::Serialization(e))?;
+                Ok(job)
+            }
+            None => Err(ClientError::Job(JobError::NotFound(job_id.to_string()))),
+        }
+    }
+
+    /// Delete a job by ID
+    pub async fn delete_job(&mut self, job_id: &str) -> Result<(), ClientError> {
+        let mut conn = self.redis_client.get_multiplexed_async_connection().await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let job_key = self.job_key(job_id);
+        let deleted_count: i32 = conn.del(&job_key).await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        if deleted_count == 0 {
+            return Err(ClientError::Job(JobError::NotFound(job_id.to_string())));
+        }
+
+        Ok(())
+    }
+
+    /// Set job result in Redis
+    pub async fn set_result(
+        &self,
+        job_id: &str,
+        result: &str,
+    ) -> Result<(), ClientError> {
+        let job_key = self.job_key(job_id);
+        let now = Utc::now();
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+        let _: () = conn.hset_multiple(&job_key, &[
+            ("result", result),
+            ("status", JobStatus::Finished.as_str()),
+            ("updated_at", &now.to_rfc3339()),
+        ]).await
+        .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(())
+    }
+
+    /// Get job result from Redis
+    pub async fn get_result(
+        &self,
+        job_id: &str,
+    ) -> Result<Option<String>, ClientError> {
+        let job_key = self.job_key(job_id);
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+        let result: Option<String> = conn.hget(&job_key, "result").await
+            .map_err(|e| ClientError::Redis(e))?;
+        Ok(result)
+    }
+
+    /// Get job error from Redis
+    pub async fn get_error(
+        &self,
+        job_id: &str,
+    ) -> Result<Option<String>, ClientError> {
+        let job_key = self.job_key(job_id);
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+        let result: Option<String> = conn.hget(&job_key, "error").await
+            .map_err(|e| ClientError::Redis(e))?;
+        Ok(result)
+    }
+
+    /// Get a job ID from the work queue (blocking pop)
+    pub async fn get_job_id(&self, queue_key: &str) -> Result<Option<String>, ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        // Use BRPOP with a short timeout to avoid blocking indefinitely
+        let result: Option<(String, String)> = conn.brpop(queue_key, 1.0).await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(result.map(|(_, job_id)| job_id))
+    }
+
+    /// Get a job by ID (alias for load_job_from_redis)
+    pub async fn get_job(&self, job_id: &str) -> Result<Job, ClientError> {
+        self.load_job_from_redis(job_id).await
+    }
+
+    /// Run a job by dispatching it to a runner's queue (fire-and-forget)
+    pub async fn job_run(&self, job_id: &str, runner_name: &str) -> Result<(), ClientError> {
+        let mut conn = self.redis_client
+            .get_multiplexed_async_connection()
+            .await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        let queue_key = self.runner_key(runner_name);
+
+        // Push job ID to the runner's queue (LPUSH for FIFO with BRPOP)
+        let _: () = conn.lpush(&queue_key, job_id).await
+            .map_err(|e| ClientError::Redis(e))?;
+
+        Ok(())
+    }
+
+    /// Run a job and wait for completion
+    ///
+    /// This is a convenience method that:
+    /// 1. Stores the job in Redis
+    /// 2. Dispatches it to the runner's queue
+    /// 3. Waits for the job to complete (polls status)
+    /// 4. Returns the result or error
+    ///
+    /// # Arguments
+    /// * `job` - The job to run
+    /// * `runner_name` - The name of the runner to dispatch to
+    /// * `timeout_secs` - Maximum time to wait for job completion (in seconds)
+    ///
+    /// # Returns
+    /// * `Ok(String)` - The job result if successful
+    /// * `Err(JobError)` - If the job fails, times out, or encounters an error
+    pub async fn job_run_wait(
+        &self,
+        job: &Job,
+        runner_name: &str,
+        timeout_secs: u64,
+    ) -> Result<String, ClientError> {
+        use tokio::time::{Duration, timeout};
+
+        // Store the job in Redis
+        self.store_job_in_redis(job).await?;
+
+        // Dispatch to runner queue
+        self.job_run(&job.id, runner_name).await?;
+
+        // Wait for job to complete with timeout
+        let result = timeout(
+            Duration::from_secs(timeout_secs),
+            self.wait_for_job_completion(&job.id)
+        ).await;
+
+        match result {
+            Ok(Ok(job_result)) => Ok(job_result),
+            Ok(Err(e)) => Err(e),
+            Err(_) => Err(ClientError::Timeout),
+        }
+    }
+
+    /// Wait for a job to complete by polling its status
+    ///
+    /// This polls the job status every 500ms until it reaches a terminal state
+    /// (Finished or Error), then returns the result or error.
+    async fn wait_for_job_completion(&self, job_id: &str) -> Result<String, ClientError> {
+        use tokio::time::{sleep, Duration};
+
+        loop {
+            // Check job status
+            let status = self.get_status(job_id).await?;
+
+            match status {
+                JobStatus::Finished => {
+                    // Job completed successfully, get the result
+                    let result = self.get_result(job_id).await?;
+                    return result.ok_or_else(|| {
+                        ClientError::Job(JobError::InvalidData(format!("Job {} finished but has no result", job_id)))
+                    });
+                }
+                JobStatus::Error => {
+                    // Job failed, get the error message
+                    let mut conn = self.redis_client
+                        .get_multiplexed_async_connection()
+                        .await
+                        .map_err(|e| ClientError::Redis(e))?;
+
+                    let error_msg: Option<String> = conn
+                        .hget(&self.job_key(job_id), "error")
+                        .await
+                        .map_err(|e| ClientError::Redis(e))?;
+
+                    return Err(ClientError::Job(JobError::InvalidData(
+                        error_msg.unwrap_or_else(|| format!("Job {} failed with unknown error", job_id))
+                    )));
+                }
+                JobStatus::Stopping => {
+                    return Err(ClientError::Job(JobError::InvalidData(format!("Job {} was stopped", job_id))));
+                }
+                // Job is still running (Dispatched, WaitingForPrerequisites, Started)
+                _ => {
+                    // Wait before polling again
+                    sleep(Duration::from_millis(500)).await;
+                }
+            }
+        }
+    }
+}
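Editor's note: for orientation, the flow this client enables looks like the following minimal sketch. It is illustrative only: the `JobBuilder` calls are assumed from their use elsewhere in this commit, the runner name `example-runner` is hypothetical, and some runner must already be consuming that queue for `job_run_wait` to return.

```rust
// Sketch only: exercising hero-job-client against a local Redis.
// Assumes a runner is BRPOP-ing the "hero:runner:example-runner" queue.
use hero_job::JobBuilder; // builder re-exported from the hero-job model crate
use hero_job_client::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::builder()
        .redis_url("redis://127.0.0.1:6379")
        .namespace("hero") // keys become "hero:job:<id>", "hero:runner:<name>"
        .build()
        .await?;

    // Builder methods follow the calls used elsewhere in this commit.
    let job = JobBuilder::new()
        .caller_id("example")
        .context_id("demo")
        .runner("example-runner")
        .payload("40 + 2")
        .executor("rhai")
        .timeout(30)
        .build()?;

    // Stores the job hash, LPUSHes the job ID onto the runner queue, then
    // polls the status field every 500ms until Finished/Error or timeout.
    let result = client.job_run_wait(&job, "example-runner", 30).await?;
    println!("job result: {result}");
    Ok(())
}
```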
"wasm32"))'.dependencies] +uuid.workspace = true + +[target.'cfg(target_arch = "wasm32")'.dependencies] +uuid = { workspace = true, features = ["js"] } +getrandom.workspace = true + +[dev-dependencies] +tokio.workspace = true diff --git a/lib/clients/osiris/examples/complete.rs b/lib/clients/osiris/examples/complete.rs new file mode 100644 index 0000000..92839f1 --- /dev/null +++ b/lib/clients/osiris/examples/complete.rs @@ -0,0 +1,170 @@ +//! Complete Osiris Client Example +//! +//! This example demonstrates the full CQRS pattern with Osiris: +//! - Commands (writes) via Rhai scripts through Supervisor +//! - Queries (reads) via REST API from Osiris server +//! +//! Prerequisites: +//! - Redis running on localhost:6379 +//! - Supervisor running on localhost:3030 +//! - Osiris server running on localhost:8080 +//! - Osiris runner connected to Redis + +use osiris_client::OsirisClient; +use hero_supervisor_openrpc_client::SupervisorClient; + +#[tokio::main] +async fn main() -> Result<(), Box> { + println!("🚀 Osiris Client - Complete Example\n"); + println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"); + + // Configuration + let admin_secret = "807470fd1e1ccc3fb997a1d4177cceb31a68cb355a4412c8fd6e66e517e902be"; + let supervisor_url = "http://localhost:3030"; + let osiris_url = "http://localhost:8080"; + let runner_name = "osiris-queue"; + + // ========== Part 1: Setup Runner ========== + println!("📋 Part 1: Runner Setup\n"); + + let supervisor = SupervisorClient::builder() + .url(supervisor_url) + .secret(admin_secret) + .build()?; + + // Register the runner + println!("1. Registering runner '{}'...", runner_name); + match supervisor.register_runner(runner_name).await { + Ok(result) => println!(" ✅ Runner registered: {}\n", result), + Err(e) => println!(" ⚠️ Registration failed (may already exist): {:?}\n", e), + } + + // List all runners + println!("2. 
+    match supervisor.list_runners().await {
+        Ok(runners) => {
+            println!(" ✅ Found {} runner(s):", runners.len());
+            for runner in runners {
+                println!("    - {}", runner);
+            }
+            println!();
+        }
+        Err(e) => println!(" ❌ Failed: {:?}\n", e),
+    }
+
+    // ========== Part 2: Initialize Osiris Client ==========
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
+    println!("📋 Part 2: Osiris Client (CQRS Pattern)\n");
+
+    let client = OsirisClient::builder()
+        .osiris_url(osiris_url)
+        .supervisor_url(supervisor_url)
+        .supervisor_secret(admin_secret)
+        .runner_name(runner_name)
+        .build()?;
+
+    println!("✅ Osiris client initialized");
+    println!("   - Osiris URL: {}", osiris_url);
+    println!("   - Supervisor URL: {}", supervisor_url);
+    println!("   - Runner: {}\n", runner_name);
+
+    // ========== Part 3: Execute Simple Script ==========
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
+    println!("📋 Part 3: Execute Rhai Script\n");
+
+    let script = r#"
+        print("Hello from Osiris!");
+        let result = 40 + 2;
+        print("The answer is: " + result);
+        result
+    "#;
+
+    println!("Executing script...");
+    match client.execute_script(script).await {
+        Ok(response) => {
+            println!(" ✅ Script executed successfully!");
+            println!("    Job ID: {}", response.job_id);
+            println!("    Status: {}\n", response.status);
+        }
+        Err(e) => {
+            println!(" ❌ Script execution failed: {}", e);
+            println!("    Make sure the runner is connected to Redis!\n");
+        }
+    }
+
+    // ========== Part 4: CQRS Operations (if runner is connected) ==========
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
+    println!("📋 Part 4: CQRS Operations (Commands + Queries)\n");
+
+    // Create an API Key (Command via Rhai)
+    println!("1. Creating API Key (Command via Rhai)...");
+    let api_key = format!("test-key-{}", chrono::Utc::now().timestamp());
+    match client.create_api_key(
+        api_key.clone(),
+        "Test Key".to_string(),
+        "admin".to_string()
+    ).await {
+        Ok(response) => {
+            println!(" ✅ API Key created!");
+            println!("    Job ID: {}", response.job_id);
+            println!("    Status: {}\n", response.status);
+        }
+        Err(e) => println!(" ⚠️ Failed: {}\n", e),
+    }
+
+    // Query the API Key (Query via REST)
+    println!("2. Querying API Key (Query via REST)...");
+    match client.get_api_key(&api_key).await {
+        Ok(key) => {
+            println!(" ✅ API Key retrieved!");
+            println!("    Key: {:?}\n", key);
+        }
+        Err(e) => println!(" ⚠️ Not found yet: {}\n", e),
+    }
+
+    // List all API Keys (Query via REST)
+    println!("3. Listing all API Keys (Query via REST)...");
+    match client.list_api_keys().await {
+        Ok(keys) => {
+            println!(" ✅ Found {} API key(s)", keys.len());
+            for key in keys.iter().take(3) {
+                println!("    - {:?}", key);
+            }
+            if keys.len() > 3 {
+                println!("    ... and {} more", keys.len() - 3);
+            }
+            println!();
+        }
+        Err(e) => println!(" ⚠️ Failed: {}\n", e),
+    }
+
+    // ========== Part 5: Cleanup ==========
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
+    println!("📋 Part 5: Cleanup\n");
+
+    println!("Deleting test API Key (Command via Rhai)...");
+    match client.delete_api_key(api_key.clone()).await {
+        Ok(response) => {
+            println!(" ✅ API Key deleted!");
+            println!("    Job ID: {}", response.job_id);
+            println!("    Status: {}\n", response.status);
+        }
+        Err(e) => println!(" ⚠️ Failed: {}\n", e),
+    }
+
+    // ========== Summary ==========
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
+    println!("✅ Example Complete!\n");
+    println!("Summary:");
+    println!("  ✅ Runner registration working");
+    println!("  ✅ Osiris client initialized");
+    println!("  ✅ CQRS pattern demonstrated");
+    println!("     - Commands via Rhai scripts");
+    println!("     - Queries via REST API");
+    println!("\nNext steps:");
+    println!("  - Explore other CQRS methods (runners, jobs, etc.)");
+    println!("  - Use template-based script generation");
+    println!("  - Build your own Osiris-backed applications!");
+
+    Ok(())
+}
diff --git a/lib/clients/osiris/src/communication.rs b/lib/clients/osiris/src/communication.rs
new file mode 100644
index 0000000..697ca65
--- /dev/null
+++ b/lib/clients/osiris/src/communication.rs
@@ -0,0 +1,100 @@
+//! Communication methods (queries and commands)
+
+use serde::{Deserialize, Serialize};
+use crate::{OsirisClient, OsirisClientError};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Verification {
+    pub id: String,
+    pub email: String,
+    pub code: String,
+    pub transport: String,
+    pub status: VerificationStatus,
+    pub created_at: i64,
+    pub expires_at: i64,
+    pub verified_at: Option<i64>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum VerificationStatus {
+    Pending,
+    Verified,
+    Expired,
+    Failed,
+}
+
+// ========== Request/Response Models ==========
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SendVerificationRequest {
+    pub email: String,
+    pub verification_url: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SendVerificationResponse {
+    pub verification_id: String,
+    pub email: String,
+    pub expires_at: i64,
+}
+
+// ========== Client Methods ==========
+
+impl OsirisClient {
+    // ========== Query Methods ==========
+
+    /// Get verification by ID (query)
+    pub async fn get_verification(&self, verification_id: &str) -> Result<Verification, OsirisClientError> {
+        self.get("verification", verification_id).await
+    }
+
+    /// Get verifications by email (query)
+    pub async fn get_verification_by_email(&self, email: &str) -> Result<Vec<Verification>, OsirisClientError> {
+        self.query("verification", &format!("email={}", email)).await
+    }
+
+    /// Get verification status - alias for get_verification (query)
+    pub async fn get_verification_status(&self, verification_id: &str) -> Result<Verification, OsirisClientError> {
+        self.get_verification(verification_id).await
+    }
+
+    // ========== Command Methods ==========
+
+    /// Send verification email (command)
+    pub async fn send_verification_email(
+        &self,
+        request: SendVerificationRequest,
+    ) -> Result<SendVerificationResponse, OsirisClientError> {
+        let email = &request.email;
+        let verification_url = request.verification_url.as_deref().unwrap_or("");
+
+        // Generate verification code
+        let verification_id = format!("ver_{}", uuid::Uuid::new_v4());
+        let code = format!("{:06}", (uuid::Uuid::new_v4().as_u128() % 1_000_000));
+
+        let script = format!(r#"
+// Send email verification
+let email = "{}";
+let code = "{}";
+let verification_url = "{}";
+let verification_id = "{}";
+
+// TODO: Implement actual email sending logic
+print("Sending verification email to: " + email);
+print("Verification code: " + code);
+print("Verification URL: " + verification_url);
+
+// Return verification details
+verification_id
+"#, email, code, verification_url, verification_id);
+
+        let _response = self.execute_script(&script).await?;
+
+        Ok(SendVerificationResponse {
+            verification_id,
+            email: request.email,
+            expires_at: chrono::Utc::now().timestamp() + 3600, // 1 hour
+        })
+    }
+}
diff --git a/lib/clients/osiris/src/kyc.rs b/lib/clients/osiris/src/kyc.rs
new file mode 100644
index 0000000..05555d0
--- /dev/null
+++ b/lib/clients/osiris/src/kyc.rs
@@ -0,0 +1,102 @@
+//! KYC methods (queries and commands)
+
+use serde::{Deserialize, Serialize};
+use crate::{OsirisClient, OsirisClientError};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct KycSession {
+    pub id: String,
+    pub resident_id: String,
+    pub status: KycSessionStatus,
+    pub kyc_url: Option<String>,
+    pub created_at: i64,
+    pub updated_at: i64,
+    pub expires_at: i64,
+    pub verified_at: Option<i64>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum KycSessionStatus {
+    Pending,
+    InProgress,
+    Completed,
+    Failed,
+    Expired,
+}
+
+// ========== Request/Response Models ==========
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct KycVerificationRequest {
+    pub resident_id: String,
+    pub callback_url: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct KycVerificationResponse {
+    pub session_id: String,
+    pub kyc_url: String,
+    pub expires_at: i64,
+}
+
+// ========== Client Methods ==========
+
+impl OsirisClient {
+    // ========== Query Methods ==========
+
+    /// Get KYC session by ID
+    pub async fn get_kyc_session(&self, session_id: &str) -> Result<KycSession, OsirisClientError> {
+        self.get("kyc_session", session_id).await
+    }
+
+    /// List all KYC sessions for a resident
+    pub async fn list_kyc_sessions_by_resident(&self, resident_id: &str) -> Result<Vec<KycSession>, OsirisClientError> {
+        self.query("kyc_session", &format!("resident_id={}", resident_id)).await
+    }
+
+    // ========== Command Methods ==========
+
+    /// Start KYC verification (command)
+    pub async fn start_kyc_verification(
+        &self,
+        request: KycVerificationRequest,
+    ) -> Result<KycVerificationResponse, OsirisClientError> {
+        let resident_id = &request.resident_id;
+        let callback_url = request.callback_url.as_deref().unwrap_or("");
+
+        // Generate session ID
+        let session_id = format!("kyc_{}", uuid::Uuid::new_v4());
+
+        let script = format!(r#"
+// Start KYC verification
+let resident_id = "{}";
+let callback_url = "{}";
+let session_id = "{}";
+
+// TODO: Implement actual KYC provider integration
+print("Starting KYC verification for resident: " + resident_id);
+print("Session ID: " + session_id);
+print("Callback URL: " + callback_url);
+
+// Return session details
+session_id
+"#, resident_id, callback_url, session_id);
+
+        let _response = self.execute_script(&script).await?;
+
+        Ok(KycVerificationResponse {
+            session_id,
+            kyc_url: "https://kyc.example.com/verify".to_string(),
+            expires_at: chrono::Utc::now().timestamp() + 86400,
+        })
+    }
+
+    /// Check KYC status (query)
+    pub async fn check_kyc_status(
+        &self,
+        session_id: String,
+    ) -> Result<KycSession, OsirisClientError> {
+        self.get_kyc_session(&session_id).await
+    }
+}
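Editor's note: the command and query sides above compose into a simple round trip. A minimal sketch, assuming a client configured with both `osiris_url` and supervisor settings; the resident ID and callback URL are hypothetical:

```rust
// Sketch only: KYC command (via Rhai/supervisor) followed by a query (via REST).
use osiris_client::{KycVerificationRequest, OsirisClient};

async fn kyc_round_trip(client: &OsirisClient) -> Result<(), Box<dyn std::error::Error>> {
    // Command: dispatches a Rhai script to the configured runner.
    let started = client
        .start_kyc_verification(KycVerificationRequest {
            resident_id: "resident-123".to_string(),
            callback_url: Some("https://example.com/kyc/callback".to_string()),
        })
        .await?;
    println!("redirect user to {}", started.kyc_url);

    // Query: reads the session back over the Osiris REST API.
    let session = client.check_kyc_status(started.session_id).await?;
    println!("KYC status: {:?}", session.status);
    Ok(())
}
```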
diff --git a/lib/clients/osiris/src/lib.rs b/lib/clients/osiris/src/lib.rs
new file mode 100644
index 0000000..0ce00ab
--- /dev/null
+++ b/lib/clients/osiris/src/lib.rs
@@ -0,0 +1,439 @@
+//! Osiris Client - Unified CQRS Client
+//!
+//! This client provides both:
+//! - Commands (writes) via Rhai scripts to Hero Supervisor
+//! - Queries (reads) via REST API to Osiris server
+//!
+//! Follows CQRS pattern with a single unified interface.
+
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use thiserror::Error;
+
+pub mod kyc;
+pub mod payment;
+pub mod communication;
+
+pub use kyc::*;
+pub use payment::*;
+pub use communication::*;
+
+#[derive(Debug, Error)]
+pub enum OsirisClientError {
+    #[error("HTTP request failed: {0}")]
+    RequestFailed(#[from] reqwest::Error),
+
+    #[error("Resource not found: {0}")]
+    NotFound(String),
+
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(String),
+
+    #[error("Configuration error: {0}")]
+    ConfigError(String),
+
+    #[error("Command execution failed: {0}")]
+    CommandFailed(String),
+}
+
+/// Osiris client with CQRS support
+#[derive(Clone)]
+pub struct OsirisClient {
+    // Query side (Osiris REST API)
+    osiris_url: String,
+
+    // Command side (Supervisor + Rhai)
+    supervisor_client: Option<hero_supervisor_openrpc_client::SupervisorClient>,
+    runner_name: String,
+    timeout: u64,
+
+    // HTTP client
+    client: reqwest::Client,
+}
+
+/// Builder for OsirisClient
+#[derive(Clone, Debug, Default)]
+pub struct OsirisClientBuilder {
+    osiris_url: Option<String>,
+    supervisor_url: Option<String>,
+    runner_name: Option<String>,
+    supervisor_secret: Option<String>,
+    timeout: u64,
+}
+
+impl OsirisClientBuilder {
+    /// Create a new builder
+    pub fn new() -> Self {
+        Self {
+            osiris_url: None,
+            supervisor_url: None,
+            runner_name: None,
+            supervisor_secret: None,
+            timeout: 30,
+        }
+    }
+
+    /// Set the Osiris server URL (for queries)
+    pub fn osiris_url(mut self, url: impl Into<String>) -> Self {
+        self.osiris_url = Some(url.into());
+        self
+    }
+
+    /// Set the Supervisor URL (for commands)
+    pub fn supervisor_url(mut self, url: impl Into<String>) -> Self {
+        self.supervisor_url = Some(url.into());
+        self
+    }
+
+    /// Set the runner name (default: "osiris")
+    pub fn runner_name(mut self, name: impl Into<String>) -> Self {
+        self.runner_name = Some(name.into());
+        self
+    }
+
+    /// Set the supervisor authentication secret
+    pub fn supervisor_secret(mut self, secret: impl Into<String>) -> Self {
+        self.supervisor_secret = Some(secret.into());
+        self
+    }
+
+    /// Set the timeout in seconds (default: 30)
+    pub fn timeout(mut self, timeout: u64) -> Self {
+        self.timeout = timeout;
+        self
+    }
+
+    /// Build the OsirisClient
+    pub fn build(self) -> Result<OsirisClient, OsirisClientError> {
+        let osiris_url = self.osiris_url
+            .ok_or_else(|| OsirisClientError::ConfigError("osiris_url is required".to_string()))?;
+
+        // Build supervisor client if URL and secret are provided
+        let supervisor_client = if let (Some(url), Some(secret)) = (self.supervisor_url, self.supervisor_secret) {
+            Some(
+                hero_supervisor_openrpc_client::SupervisorClient::builder()
+                    .url(url)
+                    .secret(secret)
+                    .build()
+                    .map_err(|e| OsirisClientError::ConfigError(format!("Failed to create supervisor client: {:?}", e)))?
+            )
+        } else {
+            None
+        };
+
+        Ok(OsirisClient {
+            osiris_url,
+            supervisor_client,
+            runner_name: self.runner_name.unwrap_or_else(|| "osiris".to_string()),
+            timeout: self.timeout,
+            client: reqwest::Client::new(),
+        })
+    }
+}
+
+impl OsirisClient {
+    /// Create a new Osiris client (query-only)
+    pub fn new(osiris_url: impl Into<String>) -> Self {
+        Self {
+            osiris_url: osiris_url.into(),
+            supervisor_client: None,
+            runner_name: "osiris".to_string(),
+            timeout: 30,
+            client: reqwest::Client::new(),
+        }
+    }
+
+    /// Create a builder for full CQRS configuration
+    pub fn builder() -> OsirisClientBuilder {
+        OsirisClientBuilder::new()
+    }
+
+    /// Generic GET request for any struct by ID
+    pub async fn get<T>(&self, struct_name: &str, id: &str) -> Result<T, OsirisClientError>
+    where
+        T: for<'de> Deserialize<'de>,
+    {
+        let url = format!("{}/api/{}/{}", self.osiris_url, struct_name, id);
+
+        let response = self.client
+            .get(&url)
+            .send()
+            .await?;
+
+        if response.status() == 404 {
+            return Err(OsirisClientError::NotFound(format!("{}/{}", struct_name, id)));
+        }
+
+        let data = response
+            .json::<T>()
+            .await
+            .map_err(|e| OsirisClientError::DeserializationFailed(e.to_string()))?;
+
+        Ok(data)
+    }
+
+    /// Generic LIST request for all instances of a struct
+    pub async fn list<T>(&self, struct_name: &str) -> Result<Vec<T>, OsirisClientError>
+    where
+        T: for<'de> Deserialize<'de>,
+    {
+        let url = format!("{}/api/{}", self.osiris_url, struct_name);
+
+        let response = self.client
+            .get(&url)
+            .send()
+            .await?;
+
+        let data = response
+            .json::<Vec<T>>()
+            .await
+            .map_err(|e| OsirisClientError::DeserializationFailed(e.to_string()))?;
+
+        Ok(data)
+    }
+
+    /// Generic QUERY request with filters
+    pub async fn query<T>(&self, struct_name: &str, query: &str) -> Result<Vec<T>, OsirisClientError>
+    where
+        T: for<'de> Deserialize<'de>,
+    {
+        let url = format!("{}/api/{}?{}", self.osiris_url, struct_name, query);
+
+        let response = self.client
+            .get(&url)
+            .send()
+            .await?;
+
+        let data = response
+            .json::<Vec<T>>()
+            .await
+            .map_err(|e| OsirisClientError::DeserializationFailed(e.to_string()))?;
+
+        Ok(data)
+    }
+
+    // ========== Command Methods (Supervisor + Rhai) ==========
+    // Commands are write operations that execute Rhai scripts via the supervisor
+    // to modify state in Osiris
+
+    /// Execute a Rhai script via the Supervisor
+    pub async fn execute_script(&self, script: &str) -> Result<RunJobResponse, OsirisClientError> {
+        let supervisor_client = self.supervisor_client.as_ref()
+            .ok_or_else(|| OsirisClientError::ConfigError("supervisor_client not configured for commands".to_string()))?;
+
+        // Use JobBuilder from supervisor client (which re-exports from hero-job)
+        use hero_supervisor_openrpc_client::JobBuilder;
+
+        let job = JobBuilder::new()
+            .caller_id("osiris-client")
+            .context_id("command-execution")
+            .runner(&self.runner_name)
+            .payload(script)
+            .executor("rhai")
+            .timeout(self.timeout)
+            .build()
+            .map_err(|e| OsirisClientError::CommandFailed(format!("Failed to build job: {}", e)))?;
+
+        // Use job_run method which returns JobRunResponse
+        // Secret is sent via Authorization header (configured during client creation)
+        let result = supervisor_client.job_run(job, Some(self.timeout))
+            .await
+            .map_err(|e| OsirisClientError::CommandFailed(format!("{:?}", e)))?;
+
+        // Convert JobRunResponse to our RunJobResponse
+        Ok(RunJobResponse {
+            job_id: result.job_id,
+            status: result.status,
+        })
+    }
+
+    /// Execute a Rhai script template with variable substitution
+    pub async fn execute_template(&self, template: &str, variables: &HashMap<String, String>) -> Result<RunJobResponse, OsirisClientError> {
+        let script = substitute_variables(template, variables);
+        self.execute_script(&script).await
+    }
+
+    // ========== Supervisor-specific CQRS Methods ==========
+
+    /// Create an API key (Command - via Rhai)
+    pub async fn create_api_key(&self, key: String, name: String, scope: String) -> Result<RunJobResponse, OsirisClientError> {
+        let script = format!(
+            r#"
+            let api_key = new_api_key("{}", "{}", "{}", "{}");
+            save_api_key(api_key);
+            "#,
+            self.get_namespace(),
+            key,
+            name,
+            scope
+        );
+        self.execute_script(&script).await
+    }
+
+    /// Get an API key by key value (Query - via REST)
+    pub async fn get_api_key(&self, key: &str) -> Result<Option<serde_json::Value>, OsirisClientError> {
+        // Query by indexed field
+        let results: Vec<serde_json::Value> = self.query("ApiKey", &format!("key={}", key)).await?;
+        Ok(results.into_iter().next())
+    }
+
+    /// List all API keys (Query - via REST)
+    pub async fn list_api_keys(&self) -> Result<Vec<serde_json::Value>, OsirisClientError> {
+        self.list("ApiKey").await
+    }
+
+    /// Delete an API key (Command - via Rhai)
+    pub async fn delete_api_key(&self, key: String) -> Result<RunJobResponse, OsirisClientError> {
+        let script = format!(
+            r#"
+            delete_api_key("{}");
+            "#,
+            key
+        );
+        self.execute_script(&script).await
+    }
+
+    /// Create a runner (Command - via Rhai)
+    pub async fn create_runner(&self, runner_id: String, name: String, queue: String, registered_by: String) -> Result<RunJobResponse, OsirisClientError> {
+        let script = format!(
+            r#"
+            let runner = new_runner("{}", "{}", "{}", "{}", "{}");
+            save_runner(runner);
+            "#,
+            self.get_namespace(),
+            runner_id,
+            name,
+            queue,
+            registered_by
+        );
+        self.execute_script(&script).await
+    }
+
+    /// Get a runner by ID (Query - via REST)
+    pub async fn get_runner(&self, runner_id: &str) -> Result<Option<serde_json::Value>, OsirisClientError> {
+        let results: Vec<serde_json::Value> = self.query("Runner", &format!("runner_id={}", runner_id)).await?;
+        Ok(results.into_iter().next())
+    }
+
+    /// List all runners (Query - via REST)
+    pub async fn list_runners(&self) -> Result<Vec<serde_json::Value>, OsirisClientError> {
+        self.list("Runner").await
+    }
+
+    /// Delete a runner (Command - via Rhai)
+    pub async fn delete_runner(&self, runner_id: String) -> Result<RunJobResponse, OsirisClientError> {
+        let script = format!(
+            r#"
+            delete_runner("{}");
+            "#,
+            runner_id
+        );
+        self.execute_script(&script).await
+    }
+
+    /// Create job metadata (Command - via Rhai)
+    pub async fn create_job_metadata(&self, job_id: String, runner: String, created_by: String, payload: String) -> Result<RunJobResponse, OsirisClientError> {
+        let script = format!(
+            r#"
+            let job = new_job_metadata("{}", "{}", "{}", "{}", "{}");
+            save_job_metadata(job);
+            "#,
+            self.get_namespace(),
+            job_id,
+            runner,
+            created_by,
+            payload
+        );
+        self.execute_script(&script).await
+    }
+
+    /// Get job metadata by ID (Query - via REST)
+    pub async fn get_job_metadata(&self, job_id: &str) -> Result<Option<serde_json::Value>, OsirisClientError> {
+        let results: Vec<serde_json::Value> = self.query("JobMetadata", &format!("job_id={}", job_id)).await?;
+        Ok(results.into_iter().next())
+    }
+
+    /// List all job metadata (Query - via REST)
+    pub async fn list_job_metadata(&self) -> Result<Vec<serde_json::Value>, OsirisClientError> {
+        self.list("JobMetadata").await
+    }
+
+    /// List jobs by runner (Query - via REST)
+    pub async fn list_jobs_by_runner(&self, runner: &str) -> Result<Vec<serde_json::Value>, OsirisClientError> {
+        self.query("JobMetadata", &format!("runner={}", runner)).await
+    }
+
+    /// List jobs by creator (Query - via REST)
+    pub async fn list_jobs_by_creator(&self, creator: &str) -> Result<Vec<serde_json::Value>, OsirisClientError> {
+        self.query("JobMetadata", &format!("created_by={}", creator)).await
+    }
+
+    // Helper method to get namespace
+    fn get_namespace(&self) -> &str {
+        "supervisor"
+    }
+}
+
+// ========== Helper Structures ==========
+
+#[derive(Serialize)]
+struct RunJobRequest {
+    runner_name: String,
+    script: String,
+    timeout: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    env: Option<HashMap<String, String>>,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct RunJobResponse {
+    pub job_id: String,
+    pub status: String,
+}
+
+/// Helper function to substitute variables in a Rhai script template
+pub fn substitute_variables(template: &str, variables: &HashMap<String, String>) -> String {
+    let mut result = template.to_string();
+    for (key, value) in variables {
+        // Accept both "{{ key }}" and "{{key}}"; the bundled .rhai templates
+        // use the tight form, while the tests use the spaced form.
+        let spaced = format!("{{{{ {} }}}}", key);
+        let tight = format!("{{{{{}}}}}", key);
+        result = result.replace(&spaced, value).replace(&tight, value);
+    }
+    result
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_client_creation() {
+        let client = OsirisClient::new("http://localhost:8080");
+        assert_eq!(client.osiris_url, "http://localhost:8080");
+    }
+
+    #[test]
+    fn test_builder() {
+        let client = OsirisClient::builder()
+            .osiris_url("http://localhost:8081")
+            .supervisor_url("http://localhost:3030")
+            .supervisor_secret("test_secret")
+            .runner_name("osiris")
+            .build()
+            .unwrap();
+
+        assert_eq!(client.osiris_url, "http://localhost:8081");
+        // supervisor_url + secret are folded into a configured supervisor client.
+        assert!(client.supervisor_client.is_some());
+        assert_eq!(client.runner_name, "osiris");
+    }
+
+    #[test]
+    fn test_substitute_variables() {
+        let template = "let x = {{ value }}; let y = {{ name }};";
+        let mut vars = HashMap::new();
+        vars.insert("value".to_string(), "42".to_string());
+        vars.insert("name".to_string(), "\"test\"".to_string());
+
+        let result = substitute_variables(template, &vars);
+        assert_eq!(result, "let x = 42; let y = \"test\";");
+    }
+}
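Editor's note: the `{{ ... }}` convention handled by `substitute_variables` is what the bundled `.rhai` templates (such as `kyc_verification.rhai` below) rely on, given the tight-form handling noted in the helper. A small illustration, with a hypothetical one-line template:

```rust
// Sketch only: rendering a script template before dispatching it as a command.
use std::collections::HashMap;

fn main() {
    // Hypothetical one-line template using the same {{var}} placeholders
    // as the bundled kyc_verification.rhai script.
    let template = r#"print("Resident: {{resident_id}} -> {{callback_url}}");"#;

    let mut vars = HashMap::new();
    vars.insert("resident_id".to_string(), "resident-123".to_string());
    vars.insert("callback_url".to_string(), "https://example.com/cb".to_string());

    let script = osiris_client::substitute_variables(template, &vars);
    assert_eq!(script, r#"print("Resident: resident-123 -> https://example.com/cb");"#);
    // In practice this is exactly what execute_template does before execute_script.
}
```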
diff --git a/lib/clients/osiris/src/payment.rs b/lib/clients/osiris/src/payment.rs
new file mode 100644
index 0000000..0cdae7a
--- /dev/null
+++ b/lib/clients/osiris/src/payment.rs
@@ -0,0 +1,39 @@
+//! Payment query methods
+
+use serde::{Deserialize, Serialize};
+use crate::{OsirisClient, OsirisClientError};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Payment {
+    pub id: String,
+    pub amount: f64,
+    pub currency: String,
+    pub status: PaymentStatus,
+    pub description: String,
+    pub payment_url: Option<String>,
+    pub created_at: i64,
+    pub updated_at: i64,
+    pub completed_at: Option<i64>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum PaymentStatus {
+    Pending,
+    Processing,
+    Completed,
+    Failed,
+    Cancelled,
+}
+
+impl OsirisClient {
+    /// Get payment by ID
+    pub async fn get_payment(&self, payment_id: &str) -> Result<Payment, OsirisClientError> {
+        self.get("payment", payment_id).await
+    }
+
+    /// List all payments
+    pub async fn list_payments(&self) -> Result<Vec<Payment>, OsirisClientError> {
+        self.list("payment").await
+    }
+}
diff --git a/lib/clients/osiris/src/scripts/kyc_verification.rhai b/lib/clients/osiris/src/scripts/kyc_verification.rhai
new file mode 100644
index 0000000..2099b18
--- /dev/null
+++ b/lib/clients/osiris/src/scripts/kyc_verification.rhai
@@ -0,0 +1,37 @@
+// KYC verification script template
+// Variables: {{resident_id}}, {{callback_url}}
+
+print("=== Starting KYC Verification ===");
+print("Resident ID: {{resident_id}}");
+
+// Get freezone context
+let freezone_pubkey = "04e58314c13ea3f9caed882001a5090797b12563d5f9bbd7f16efe020e060c780b446862311501e2e9653416527d2634ff8a8050ff3a085baccd7ddcb94185ff56";
+let freezone_ctx = get_context([freezone_pubkey]);
+
+// Get KYC client from context
+let kyc_client = freezone_ctx.get("kyc_client");
+if kyc_client == () {
+    print("ERROR: KYC client not configured");
+    return #{
+        success: false,
+        error: "KYC client not configured"
+    };
+}
+
+// Create KYC session
+let session = kyc_client.create_session(
+    "{{resident_id}}",
+    "{{callback_url}}"
+);
+
+print("✓ KYC session created");
+print("  Session ID: " + session.session_id);
+print("  KYC URL: " + session.kyc_url);
+
+// Return response
+#{
+    success: true,
+    session_id: session.session_id,
+    kyc_url: session.kyc_url,
+    expires_at: session.expires_at
+}
diff --git a/lib/clients/supervisor/.gitignore b/lib/clients/supervisor/.gitignore
new file mode 100644
index 0000000..39e6caf
--- /dev/null
+++ b/lib/clients/supervisor/.gitignore
@@ -0,0 +1,2 @@
+pkg
+target
\ No newline at end of file
diff --git a/lib/clients/supervisor/Cargo-wasm.toml b/lib/clients/supervisor/Cargo-wasm.toml
new file mode 100644
index 0000000..42a4af9
--- /dev/null
+++ b/lib/clients/supervisor/Cargo-wasm.toml
@@ -0,0 +1,59 @@
+[package]
+name = "hero-supervisor-openrpc-client-wasm"
+version = "0.1.0"
+edition = "2021"
+description = "WASM-compatible OpenRPC client for Hero Supervisor"
+license = "MIT OR Apache-2.0"
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[dependencies]
+# WASM bindings
+wasm-bindgen = "0.2"
+wasm-bindgen-futures = "0.4"
+js-sys = "0.3"
+
+# Web APIs
+web-sys = { version = "0.3", features = [
+    "console",
+    "Request",
+    "RequestInit",
+    "RequestMode",
+    "Response",
+    "Window",
+    "Headers",
+    "AbortController",
+    "AbortSignal",
+] }
+
+# Serialization
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+serde-wasm-bindgen = "0.6"
+
+# Error handling
+thiserror = "1.0"
+
+# UUID for job IDs
+uuid = { version = "1.0", features = ["v4", "serde", "js"] }
+
+# Time handling
+chrono = { version = "0.4", features = ["serde", "wasmbind"] }
+
+# Collections
+indexmap = "2.0"
+
+# Logging for WASM
+log = "0.4"
+console_log = "1.0"
+
+# Async utilities
+futures = "0.3"
+
+[dependencies.getrandom]
+version = "0.2"
+features = ["js"]
+
+[dev-dependencies]
+wasm-bindgen-test = "0.3"
diff --git a/lib/clients/supervisor/Cargo.toml b/lib/clients/supervisor/Cargo.toml
new file mode 100644
index 0000000..0df8f0e
--- /dev/null
+++ b/lib/clients/supervisor/Cargo.toml
@@ -0,0 +1,77 @@
+[package]
+name = "hero-supervisor-openrpc-client"
+version.workspace = true
+edition.workspace = true
+description = "OpenRPC client for Hero Supervisor"
+license = "MIT OR Apache-2.0"
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[features]
+default = []
+
+[dependencies]
+# Common dependencies for both native and WASM
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+log.workspace = true
+uuid.workspace = true
+indexmap.workspace = true
+hero-job = { path = "../../models/job" }
+
+# Native JSON-RPC client (not WASM compatible)
+[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
+jsonrpsee = { workspace = true, features = ["http-client", "macros"] }
+tokio.workspace = true
+# hero-job-client removed - now part of supervisor
+env_logger.workspace = true
+http.workspace = true
+
+# WASM-specific dependencies
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+wasm-bindgen.workspace = true
+wasm-bindgen-futures.workspace = true
+js-sys.workspace = true
+serde-wasm-bindgen.workspace = true
+web-sys = { workspace = true, features = [
+    "console",
+    "Request",
+    "RequestInit",
+    "RequestMode",
+    "Response",
+    "Headers",
+    "Window",
+] }
+console_log.workspace = true
+getrandom.workspace = true
+# Crypto for signing
+secp256k1.workspace = true
+sha2.workspace = true
+hex.workspace = true
+
+[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
+wasm-bindgen-test = "0.3"
+
+# UUID for job IDs (native)
+[target.'cfg(not(target_arch = "wasm32"))'.dependencies.uuid]
+workspace = true
+
+# Time handling (native)
+[target.'cfg(not(target_arch = "wasm32"))'.dependencies.chrono]
+workspace = true
+
+# WASM-compatible dependencies
+[target.'cfg(target_arch = "wasm32")'.dependencies.chrono]
+workspace = true
+features = ["wasmbind"]
+
+[target.'cfg(target_arch = "wasm32")'.dependencies.uuid]
+workspace = true
+features = ["js"]
+
+[dev-dependencies]
+# Testing utilities
+tokio-test = "0.4"
diff --git a/lib/clients/supervisor/README.md b/lib/clients/supervisor/README.md
new file mode 100644
index 0000000..f21a29e
--- /dev/null
+++ b/lib/clients/supervisor/README.md
@@ -0,0 +1,180 @@
+# Hero Supervisor OpenRPC Client
+
+A Rust client library for interacting with the Hero Supervisor OpenRPC server. This crate provides a simple, async interface for managing actors and jobs remotely.
+
+## Features
+
+- **Async API**: Built on `tokio` and `jsonrpsee` for high-performance async operations
+- **Type Safety**: Full Rust type safety with serde serialization/deserialization
+- **Job Builder**: Fluent API for creating jobs with validation
+- **Comprehensive Coverage**: All supervisor operations available via client
+- **Error Handling**: Detailed error types with proper error propagation
+
+## Installation
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+hero-supervisor-openrpc-client = "0.1.0"
+tokio = { version = "1.0", features = ["full"] }
+```
+
+## Quick Start
+
+```rust
+use hero_supervisor_openrpc_client::{SupervisorClient, JobBuilder};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Create a client with admin secret
+    let client = SupervisorClient::new("http://127.0.0.1:3030", "your-admin-secret")?;
+
+    // Register a runner (runner must be started externally)
+    client.register_runner("admin-secret", "my_runner").await?;
+
+    // Create and run a job
+    let job = JobBuilder::new()
+        .caller_id("my_client")
+        .context_id("example_context")
+        .payload("echo 'Hello from Hero Supervisor!'")
+        .executor("bash")
+        .runner("my_runner")
+        .timeout(60)
+        .build()?;
+
+    client.queue_job_to_runner("my_runner", job).await?;
+
+    // Check runner status
+    let status = client.get_runner_status("my_runner").await?;
+    println!("Runner status: {:?}", status);
+
+    // List all runners
+    let runners = client.list_runners().await?;
+    println!("Active runners: {:?}", runners);
+
+    Ok(())
+}
+```
+
+## API Reference
+
+### Client Creation
+
+```rust
+let client = SupervisorClient::new("http://127.0.0.1:3030", "your-admin-secret")?;
+```
+
+### Runner Management
+
+```rust
+// Register a runner
+client.register_runner("admin-secret", "my_runner").await?;
+
+// Remove a runner
+client.remove_runner("admin-secret", "my_runner").await?;
+
+// List all runners
+let runners = client.list_runners().await?;
+
+// Start/stop runners
+client.start_runner("actor_id").await?;
+client.stop_runner("actor_id", false).await?; // force = false
+
+// Get runner status
+let status = client.get_runner_status("actor_id").await?;
+
+// Get runner logs
+let logs = client.get_runner_logs("actor_id", Some(100), false).await?;
+```
+
+### Job Management
+
+```rust
+// Create a job using the builder
+let job = JobBuilder::new()
+    .caller_id("client_id")
+    .context_id("context_id")
+    .payload("script_content")
+    .job_type(JobType::OSIS)
+    .runner("target_actor")
+    .timeout(300)
+    .env_var("KEY", "value")
+    .build()?;
+
+// Queue the job
+client.queue_job_to_runner("actor_id", job).await?;
+```
+
+### Bulk Operations
+
+```rust
+// Start all runners
+let results = client.start_all().await?;
+
+// Stop all runners
+let results = client.stop_all(false).await?; // force = false
+
+// Get status of all runners
+let statuses = client.get_all_runner_status().await?;
+```
+
+## Types
+
+### RunnerType
+
+- `SALRunner` - System abstraction layer operations
+- `OSISRunner` - Operating system interface operations
+- `VRunner` - Virtualization operations
+- `PyRunner` - Python-based actors
+
+### JobType
+
+- `SAL` - SAL job type
+- `OSIS` - OSIS job type
+- `V` - V job type
+- `Python` - Python job type
+
+### Runner Management
+
+Runners are expected to be started and managed externally. The supervisor only tracks which runners are registered and queues jobs to them via Redis.
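Editor's note: concretely, that Redis hand-off is just one list per runner. A minimal sketch of the queue contract, mirroring `hero-job-client` earlier in this commit (empty namespace assumed; the runner and job names are illustrative):

```rust
// Sketch only: the Redis contract between supervisor and runner.
use redis::AsyncCommands;

async fn dispatch_and_consume(redis_url: &str) -> redis::RedisResult<()> {
    let client = redis::Client::open(redis_url)?;
    let mut conn = client.get_multiplexed_async_connection().await?;

    // Supervisor side: enqueue a job ID for runner "my_runner".
    let _: () = conn.lpush("runner:my_runner", "job-123").await?;

    // Runner side: block up to 5s waiting for work (FIFO via LPUSH + BRPOP).
    let next: Option<(String, String)> = conn.brpop("runner:my_runner", 5.0).await?;
    if let Some((_queue, job_id)) = next {
        // The runner then loads the "job:{job_id}" hash and updates its
        // status/result fields as it executes.
        println!("picked up {job_id}");
    }
    Ok(())
}
```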
+
+### ProcessStatus
+
+- `Running` - Process is active
+- `Stopped` - Process is stopped
+- `Starting` - Process is starting up
+- `Stopping` - Process is shutting down
+- `Error(String)` - Process failed with an error message
+
+## Error Handling
+
+The client uses the `ClientError` enum for error handling:
+
+```rust
+use hero_supervisor_openrpc_client::ClientError;
+
+match client.start_runner("actor_id").await {
+    Ok(()) => println!("Runner started successfully"),
+    Err(ClientError::JsonRpc(e)) => println!("JSON-RPC error: {}", e),
+    Err(ClientError::Server { message }) => println!("Server error: {}", message),
+    Err(e) => println!("Other error: {}", e),
+}
+```
+
+## Examples
+
+See the `examples/` directory for complete usage examples:
+
+- `basic_client.rs` - Basic client usage
+- `job_management.rs` - Job creation and management
+- `runner_lifecycle.rs` - Complete runner lifecycle management
+
+## Requirements
+
+- Rust 1.70+
+- Hero Supervisor server running with OpenRPC feature enabled
+- Network access to the supervisor server
+
+## License
+
+Licensed under either of Apache License, Version 2.0 or MIT license at your option.
diff --git a/lib/clients/supervisor/build-wasm.sh b/lib/clients/supervisor/build-wasm.sh
new file mode 100755
index 0000000..81b3d6d
--- /dev/null
+++ b/lib/clients/supervisor/build-wasm.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Build script for WASM-compatible OpenRPC client
+
+set -e
+
+echo "Building WASM OpenRPC client..."
+
+# Check if wasm-pack is installed
+if ! command -v wasm-pack &> /dev/null; then
+    echo "wasm-pack is not installed. Installing..."
+    curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+fi
+
+# Build the WASM package
+echo "Building WASM package..."
+wasm-pack build --target web --out-dir pkg-wasm
+
+echo "WASM build complete! Package available in pkg-wasm/"
+echo ""
+echo "To use in a web project:"
+echo "1. Copy the pkg-wasm directory to your web project"
+echo "2. Import the module in your JavaScript:"
+echo "   import init, { WasmSupervisorClient, create_client, create_job } from './pkg-wasm/hero_supervisor_openrpc_client_wasm.js';"
+echo "3. Initialize the WASM module:"
+echo "   await init();"
+echo "4. Create and use the client:"
+echo "   const client = create_client('http://localhost:3030');"
+echo "   const runners = await client.list_runners();"
diff --git a/lib/clients/supervisor/example-wasm.html b/lib/clients/supervisor/example-wasm.html
new file mode 100644
index 0000000..8885975
--- /dev/null
+++ b/lib/clients/supervisor/example-wasm.html
@@ -0,0 +1,202 @@

[The 202 lines of example-wasm.html markup did not survive extraction; only scattered text fragments remain. The file was a browser demo page titled "Hero Supervisor WASM OpenRPC Client Example", presenting a "Hero Supervisor WASM OpenRPC Client" heading with panels for Connection, Runner Management, Register Runner, and Job Execution.]
diff --git a/lib/clients/supervisor/src/builder.rs b/lib/clients/supervisor/src/builder.rs
new file mode 100644
index 0000000..71b6ac5
--- /dev/null
+++ b/lib/clients/supervisor/src/builder.rs
@@ -0,0 +1,102 @@
+//! Builder pattern for WasmSupervisorClient to ensure proper configuration
+//!
+//! This module provides a type-safe builder that guarantees a client cannot be
+//! created without a secret, preventing authentication issues.
+
+use crate::wasm::WasmSupervisorClient;
+
+/// Builder for WasmSupervisorClient that enforces secret requirement
+#[derive(Clone)]
+pub struct WasmSupervisorClientBuilder {
+    server_url: Option<String>,
+    secret: Option<String>,
+}
+
+impl WasmSupervisorClientBuilder {
+    /// Create a new builder
+    pub fn new() -> Self {
+        Self {
+            server_url: None,
+            secret: None,
+        }
+    }
+
+    /// Set the server URL
+    pub fn server_url(mut self, url: impl Into<String>) -> Self {
+        self.server_url = Some(url.into());
+        self
+    }
+
+    /// Set the authentication secret (required)
+    pub fn secret(mut self, secret: impl Into<String>) -> Self {
+        self.secret = Some(secret.into());
+        self
+    }
+
+    /// Build the client
+    ///
+    /// Returns Err if server_url or secret is not set
+    pub fn build(self) -> Result<WasmSupervisorClient, String> {
+        let server_url = self.server_url.ok_or("Server URL is required")?;
+        let secret = self.secret.ok_or("Secret is required for authenticated client")?;
+
+        if secret.is_empty() {
+            return Err("Secret cannot be empty".to_string());
+        }
+
+        Ok(WasmSupervisorClient::new(server_url, secret))
+    }
+}
+
+impl Default for WasmSupervisorClientBuilder {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_builder_requires_all_fields() {
+        let builder = WasmSupervisorClientBuilder::new();
+        assert!(builder.build().is_err());
+
+        let builder = WasmSupervisorClientBuilder::new()
+            .server_url("http://localhost:3030");
+        assert!(builder.build().is_err());
+
+        let builder = WasmSupervisorClientBuilder::new()
+            .secret("test-secret");
+        assert!(builder.build().is_err());
+    }
+
+    #[test]
+    fn test_builder_success() {
+        let builder = WasmSupervisorClientBuilder::new()
+            .server_url("http://localhost:3030")
+            .secret("test-secret");
+        assert!(builder.build().is_ok());
+    }
+
+    #[test]
+    fn test_build_error_messages() {
+        let result = WasmSupervisorClientBuilder::new().build();
+        assert!(result.is_err());
+        assert_eq!(result.unwrap_err(), "Server URL is required");
+
+        let result = WasmSupervisorClientBuilder::new()
+            .server_url("http://localhost:3030")
+            .build();
+        assert!(result.is_err());
+        assert_eq!(result.unwrap_err(), "Secret is required for authenticated client");
+
+        let result = WasmSupervisorClientBuilder::new()
+            .server_url("http://localhost:3030")
+            .secret("")
+            .build();
+        assert!(result.is_err());
+        assert_eq!(result.unwrap_err(), "Secret cannot be empty");
+    }
+}
diff --git a/lib/clients/supervisor/src/lib.rs b/lib/clients/supervisor/src/lib.rs
new file mode 100644
index 0000000..611ab37
--- /dev/null
+++ b/lib/clients/supervisor/src/lib.rs
@@ -0,0 +1,695 @@
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+use serde_json;
+
+// Import types from the main supervisor crate
+
+// WASM-compatible client module
+#[cfg(target_arch = "wasm32")]
+pub mod wasm;
+
+// Builder module for type-safe client construction
+#[cfg(target_arch = "wasm32")]
+pub mod builder;
+
+// Re-export WASM types for convenience
+#[cfg(target_arch = "wasm32")]
+pub use wasm::{WasmSupervisorClient, WasmJobType, WasmRunnerType, create_job_canonical_repr, sign_job_canonical};
+
+// Re-export builder for convenience
+#[cfg(target_arch = "wasm32")]
+pub use builder::WasmSupervisorClientBuilder;
+
+// Native client dependencies
+#[cfg(not(target_arch = "wasm32"))]
+use jsonrpsee::{
+    core::client::ClientT,
+    http_client::{HttpClient, HttpClientBuilder},
+    rpc_params,
+};
+
+#[cfg(not(target_arch = "wasm32"))]
+use http::{HeaderMap, HeaderName, HeaderValue};
+
+#[cfg(not(target_arch = "wasm32"))]
+use std::path::PathBuf;
+
+/// Client for communicating with Hero Supervisor OpenRPC server
+/// Requires authentication secret for all operations
+#[cfg(not(target_arch = "wasm32"))]
+#[derive(Clone)]
+pub struct SupervisorClient {
+    client: HttpClient,
+    server_url: String,
+    secret: String,
+}
+
+/// Error types for client operations
+#[cfg(not(target_arch = "wasm32"))]
+#[derive(Error, Debug)]
+pub enum ClientError {
+    #[error("JSON-RPC error: {0}")]
+    JsonRpc(#[from] jsonrpsee::core::ClientError),
+
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+
+    #[error("HTTP client error: {0}")]
+    Http(String),
+
+    #[error("Server error: {message}")]
+    Server { message: String },
+}
+
+/// Error types for WASM client operations
+#[cfg(target_arch = "wasm32")]
+#[derive(Error, Debug)]
+pub enum ClientError {
+    #[error("JSON-RPC error: {0}")]
+    JsonRpc(String),
+
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+
+    #[error("HTTP client error: {0}")]
+    Http(String),
+
+    #[error("Server error: {message}")]
+    Server { message: String },
+
+    #[error("JavaScript error: {0}")]
+    JavaScript(String),
+
+    #[error("Network error: {0}")]
+    Network(String),
+}
+
+// Implement From for jsonrpsee ClientError for WASM
+#[cfg(target_arch = "wasm32")]
+impl From<wasm_bindgen::JsValue> for ClientError {
+    fn from(js_val: wasm_bindgen::JsValue) -> Self {
+        ClientError::JavaScript(format!("{:?}", js_val))
+    }
+}
+
+/// Result type for client operations
+pub type ClientResult<T> = Result<T, ClientError>;
+
+/// Request parameters for generating API keys (auto-generates key value)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GenerateApiKeyParams {
+    pub name: String,
+    pub scope: String, // "admin", "registrar", or "user"
+}
+
+/// Configuration for a runner
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RunnerConfig {
+    /// Name of the runner
+    pub name: String,
+    /// Command to run the runner (full command line)
+    pub command: String,
+    /// Optional environment variables
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub env: Option<std::collections::HashMap<String, String>>,
+}
+
+/// Job result response
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum JobResult {
+    Success { success: String },
+    Error { error: String },
+}
+
+/// Job status response
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JobStatusResponse {
+    pub job_id: String,
+    pub status: String,
+    pub created_at: String,
+    pub started_at: Option<String>,
+    pub completed_at: Option<String>,
+}
+
+/// Response from job.run (blocking execution)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JobRunResponse {
+    pub job_id: String,
+    pub status: String,
+    pub result: Option<String>,
+}
+
+/// Response from job.start (non-blocking execution)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JobStartResponse {
+    pub job_id: String,
+    pub status: String,
+}
+
+// Re-export Job types from hero-job crate (both native and WASM)
+pub use hero_job::{Job, JobStatus, JobError, JobBuilder, JobSignature};
+
+// Note: Job client is now part of hero-supervisor crate
+// Re-exports removed - use hero_supervisor::job_client directly if needed
+
+/// Process status wrapper for OpenRPC serialization (matches server response)
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum ProcessStatusWrapper {
+    Running,
+    Stopped,
+    Starting,
+    Stopping,
+    Error(String),
+}
+
+/// Log information wrapper for OpenRPC serialization (matches server response)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LogInfoWrapper {
+    pub timestamp: String,
+    pub level: String,
+    pub message: String,
+}
+
+/// Supervisor information response containing secret counts and server details
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SupervisorInfo {
+    pub server_url: String,
+}
+
+/// API Key information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ApiKey {
+    pub key: String,
+    pub name: String,
+    pub scope: String,
+    pub created_at: String,
+}
+
+/// Auth verification response
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthVerifyResponse {
+    pub scope: String,
+    pub name: Option<String>,
+    pub created_at: Option<String>,
+}
+
+/// Simple ProcessStatus type for native builds to avoid service manager dependency
+#[cfg(not(target_arch = "wasm32"))]
+pub type ProcessStatus = ProcessStatusWrapper;
+
+// Types duplicated from supervisor-core to avoid cyclic dependency
+// These match the types in hero-supervisor but are defined here independently
+
+/// Runner status information (duplicated to avoid cyclic dependency)
+#[cfg(not(target_arch = "wasm32"))]
+pub type RunnerStatus = ProcessStatusWrapper;
+
+/// Log information (duplicated to avoid cyclic dependency)
+#[cfg(not(target_arch = "wasm32"))]
+pub type LogInfo = LogInfoWrapper;
+
+/// Type aliases for WASM compatibility
+#[cfg(target_arch = "wasm32")]
+pub type ProcessStatus = ProcessStatusWrapper;
+#[cfg(target_arch = "wasm32")]
+pub type RunnerStatus = ProcessStatusWrapper;
+#[cfg(target_arch = "wasm32")]
+pub type LogInfo = LogInfoWrapper;
+
+/// Builder for SupervisorClient
+#[cfg(not(target_arch = "wasm32"))]
+#[derive(Debug, Clone)]
+pub struct SupervisorClientBuilder {
+    url: Option<String>,
+    secret: Option<String>,
+    timeout: Option<std::time::Duration>,
+}
+
+#[cfg(not(target_arch = "wasm32"))]
+impl SupervisorClientBuilder {
+    /// Create a new builder
+    pub fn new() -> Self {
+        Self {
+            url: None,
+            secret: None,
+            timeout: Some(std::time::Duration::from_secs(30)),
+        }
+    }
+
+    /// Set the server URL
+    pub fn url(mut self, url: impl Into<String>) -> Self {
+        self.url = Some(url.into());
+        self
+    }
+
+    /// Set the authentication secret
+    pub fn secret(mut self, secret: impl Into<String>) -> Self {
+        self.secret = Some(secret.into());
+        self
+    }
+
+    /// Set the request timeout (default: 30 seconds)
+    pub fn timeout(mut self, timeout: std::time::Duration) -> Self {
+        self.timeout = Some(timeout);
+        self
+    }
+
+    /// Build the SupervisorClient
+    pub fn build(self) -> ClientResult<SupervisorClient> {
+        let server_url = self.url
+            .ok_or_else(|| ClientError::Http("URL is required".to_string()))?;
+        let secret = self.secret
+            .ok_or_else(|| ClientError::Http("Secret is required".to_string()))?;
+
+        // Create headers with Authorization bearer token
+        let mut headers = HeaderMap::new();
+        let auth_value = format!("Bearer {}", secret);
+        headers.insert(
+            HeaderName::from_static("authorization"),
+            HeaderValue::from_str(&auth_value)
+                .map_err(|e| ClientError::Http(format!("Invalid auth header: {}", e)))?
+#[cfg(not(target_arch = "wasm32"))]
+impl SupervisorClient {
+    /// Create a builder for SupervisorClient
+    pub fn builder() -> SupervisorClientBuilder {
+        SupervisorClientBuilder::new()
+    }
+
+    /// Get the server URL
+    pub fn server_url(&self) -> &str {
+        &self.server_url
+    }
+
+    /// Test connection using OpenRPC discovery method
+    /// This calls the standard `rpc.discover` method that should be available on any OpenRPC server
+    pub async fn discover(&self) -> ClientResult<serde_json::Value> {
+        let result: serde_json::Value = self
+            .client
+            .request("rpc.discover", rpc_params![])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(result)
+    }
+
+    /// Register a new runner to the supervisor
+    /// The runner name is also used as the queue name
+    /// Authentication via Authorization header (set during client creation)
+    pub async fn runner_create(
+        &self,
+        name: &str,
+    ) -> ClientResult<()> {
+        let _: () = self
+            .client
+            .request("runner.create", rpc_params![name])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(())
+    }
+
+    /// Create a new job without queuing it to a runner
+    /// Authentication via Authorization header (set during client creation)
+    pub async fn job_create(
+        &self,
+        job: Job,
+    ) -> ClientResult<String> {
+        let job_id: String = self
+            .client
+            .request("job.create", rpc_params![job])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(job_id)
+    }
+
+    /// List all jobs
+    pub async fn job_list(&self) -> ClientResult<Vec<Job>> {
+        let jobs: Vec<Job> = self
+            .client
+            .request("job.list", rpc_params![])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(jobs)
+    }
+
+    /// Run a job on the appropriate runner and wait for the result (blocking)
+    /// This method queues the job and waits for completion before returning
+    /// The secret is sent via Authorization header (set during client creation)
+    pub async fn job_run(
+        &self,
+        job: Job,
+        timeout: Option<u64>,
+    ) -> ClientResult<JobRunResponse> {
+        let mut params = serde_json::json!({
+            "job": job
+        });
+
+        if let Some(t) = timeout {
+            params["timeout"] = serde_json::json!(t);
+        }
+
+        let result: JobRunResponse = self
+            .client
+            .request("job.run", rpc_params![params])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(result)
+    }
+
+    /// Start a job without waiting for the result (non-blocking)
+    /// This method queues the job and returns immediately with the job_id
+    /// Authentication via Authorization header (set during client creation)
+    pub async fn job_start(
+        &self,
+        job: Job,
+    ) -> ClientResult<JobStartResponse> {
+        let params = serde_json::json!({
+            "job": job
+        });
+
+        let result: JobStartResponse = self
+            .client
+            .request("job.start", rpc_params![params])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(result)
+    }
+
+    /// Get the current status of a job
+    pub async fn job_status(&self, job_id: &str) -> ClientResult<JobStatus> {
+        let status: JobStatus = self
+            .client
+            .request("job.status", rpc_params![job_id])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(status)
+    }
+
+    /// Get the result of a completed job (blocks until result is available)
+    pub async fn job_result(&self, job_id: &str) -> ClientResult<JobResult> {
+        let result: JobResult = self
.client + .request("job.result", rpc_params![job_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(result) + } + + /// Remove a runner from the supervisor + /// Authentication via Authorization header (set during client creation) + pub async fn runner_remove(&self, runner_id: &str) -> ClientResult<()> { + let _: () = self + .client + .request("runner.remove", rpc_params![runner_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// List all runner IDs + pub async fn runner_list(&self) -> ClientResult> { + let runners: Vec = self + .client + .request("runner.list", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(runners) + } + + /// Start a specific runner + /// Authentication via Authorization header (set during client creation) + pub async fn start_runner(&self, actor_id: &str) -> ClientResult<()> { + let _: () = self + .client + .request("runner.start", rpc_params![actor_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Add a runner to the supervisor + /// Authentication via Authorization header (set during client creation) + pub async fn add_runner(&self, config: RunnerConfig) -> ClientResult<()> { + let params = serde_json::json!({ + "config": config + }); + let _: () = self + .client + .request("runner.add", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Get status of a specific runner + /// Authentication via Authorization header (set during client creation) + pub async fn get_runner_status(&self, actor_id: &str) -> ClientResult { + let status: RunnerStatus = self + .client + .request("runner.status", rpc_params![actor_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(status) + } + + /// Get logs for a specific runner + pub async fn get_runner_logs( + &self, + actor_id: &str, + lines: Option, + follow: bool, + ) -> ClientResult> { + let logs: Vec = self + .client + .request("get_runner_logs", rpc_params![actor_id, lines, follow]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(logs) + } + + /// Queue a job to a specific runner + pub async fn queue_job_to_runner(&self, runner: &str, job: Job) -> ClientResult<()> { + let params = serde_json::json!({ + "runner": runner, + "job": job + }); + + let _: () = self + .client + .request("queue_job_to_runner", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Queue a job and wait for completion + pub async fn queue_and_wait(&self, runner: &str, job: Job, timeout_secs: u64) -> ClientResult> { + let params = serde_json::json!({ + "runner": runner, + "job": job, + "timeout_secs": timeout_secs + }); + + let result: Option = self + .client + .request("queue_and_wait", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(result) + } + + /// Run a job on a specific runner + pub async fn run_job(&self, job: Job) -> ClientResult { + let params = serde_json::json!({ + "job": job + }); + + let result: JobResult = self + .client + .request("job.run", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(result) + } + + /// Get job result by job ID + pub async fn get_job_result(&self, job_id: &str) -> ClientResult> { + let result: Option = self + .client + .request("get_job_result", rpc_params![job_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(result) + } + + /// Get status of all runners + pub async fn get_all_runner_status(&self) -> ClientResult> { + let statuses: Vec<(String, RunnerStatus)> = self + .client + 
.request("get_all_runner_status", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(statuses) + } + + /// Start all runners + pub async fn start_all(&self) -> ClientResult> { + let results: Vec<(String, bool)> = self + .client + .request("start_all", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(results) + } + + /// Stop all runners + pub async fn stop_all(&self, force: bool) -> ClientResult> { + let results: Vec<(String, bool)> = self + .client + .request("stop_all", rpc_params![force]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(results) + } + + /// Get status of all runners (alternative method) + pub async fn get_all_status(&self) -> ClientResult> { + let statuses: Vec<(String, RunnerStatus)> = self + .client + .request("get_all_status", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(statuses) + } + + /// Add a secret to the supervisor + pub async fn add_secret( + &self, + secret_type: &str, + secret_value: &str, + ) -> ClientResult<()> { + let params = serde_json::json!({ + "secret_type": secret_type, + "secret_value": secret_value + }); + + let _: () = self + .client + .request("add_secret", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Remove a secret from the supervisor + pub async fn remove_secret( + &self, + secret_type: &str, + secret_value: &str, + ) -> ClientResult<()> { + let params = serde_json::json!({ + "secret_type": secret_type, + "secret_value": secret_value + }); + + let _: () = self + .client + .request("remove_secret", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// List secrets (returns supervisor info including secret counts) + pub async fn list_secrets(&self) -> ClientResult { + let params = serde_json::json!({}); + + let info: SupervisorInfo = self + .client + .request("list_secrets", rpc_params![params]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(info) + } + + /// Stop a running job + pub async fn job_stop(&self, job_id: &str) -> ClientResult<()> { + let _: () = self.client + .request("job.stop", rpc_params![job_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Delete a job from the system + pub async fn job_delete(&self, job_id: &str) -> ClientResult<()> { + let _: () = self.client + .request("job.delete", rpc_params![job_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Get supervisor information including secret counts + pub async fn get_supervisor_info(&self) -> ClientResult { + let info: SupervisorInfo = self + .client + .request("supervisor.info", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(info) + } + + /// Get a job by ID + pub async fn job_get(&self, job_id: &str) -> ClientResult { + let job: Job = self + .client + .request("job.get", rpc_params![job_id]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(job) + } + + // ========== Auth/API Key Methods ========== + + /// Verify the current API key + pub async fn auth_verify(&self) -> ClientResult { + let response: AuthVerifyResponse = self + .client + .request("auth.verify", rpc_params![]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(response) + } + + /// Create a new API key (admin only) + pub async fn key_create(&self, key: ApiKey) -> ClientResult<()> { + let _: () = self + .client + .request("key.create", rpc_params![key]) + .await.map_err(|e| ClientError::JsonRpc(e))?; + Ok(()) + } + + /// Generate a new API key with auto-generated key value (admin only) + 
+    pub async fn key_generate(&self, params: GenerateApiKeyParams) -> ClientResult<ApiKey> {
+        let api_key: ApiKey = self
+            .client
+            .request("key.generate", rpc_params![params])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(api_key)
+    }
+
+    /// Remove an API key (admin only)
+    pub async fn key_delete(&self, key_id: String) -> ClientResult<()> {
+        let _: () = self
+            .client
+            .request("key.delete", rpc_params![key_id])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(())
+    }
+
+    /// List all API keys (admin only)
+    pub async fn key_list(&self) -> ClientResult<Vec<ApiKey>> {
+        let keys: Vec<ApiKey> = self
+            .client
+            .request("key.list", rpc_params![])
+            .await.map_err(|e| ClientError::JsonRpc(e))?;
+        Ok(keys)
+    }
+}
\ No newline at end of file
diff --git a/lib/clients/supervisor/src/wasm.rs b/lib/clients/supervisor/src/wasm.rs
new file mode 100644
index 0000000..95c497a
--- /dev/null
+++ b/lib/clients/supervisor/src/wasm.rs
@@ -0,0 +1,859 @@
+//! WASM-compatible OpenRPC client for Hero Supervisor
+//!
+//! This module provides a WASM-compatible client library for interacting with the Hero Supervisor
+//! OpenRPC server using browser-native fetch APIs.
+
+use wasm_bindgen::prelude::*;
+use wasm_bindgen_futures::JsFuture;
+use web_sys::{Headers, Request, RequestInit, RequestMode, Response};
+use serde_json::json;
+use secp256k1::{Message, PublicKey, Secp256k1, SecretKey, ecdsa::Signature};
+use sha2::{Sha256, Digest};
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+use uuid::Uuid;
+
+/// WASM-compatible client for communicating with Hero Supervisor OpenRPC server
+/// Requires authentication secret for all operations
+#[wasm_bindgen]
+#[derive(Clone)]
+pub struct WasmSupervisorClient {
+    server_url: String,
+    secret: String,
+}
+
+/// Error types for WASM client operations
+#[derive(Error, Debug)]
+pub enum WasmClientError {
+    #[error("Network error: {0}")]
+    Network(String),
+
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+
+    #[error("JavaScript error: {0}")]
+    JavaScript(String),
+
+    #[error("Server error: {message}")]
+    Server { message: String },
+
+    #[error("Invalid response format")]
+    InvalidResponse,
+}
+
+/// Result type for WASM client operations
+pub type WasmClientResult<T> = Result<T, WasmClientError>;
+
+/// Auth verification response
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthVerifyResponse {
+    pub valid: bool,
+    pub name: String,
+    pub scope: String,
+}
+
+/// JSON-RPC request structure
+#[derive(Serialize)]
+struct JsonRpcRequest {
+    jsonrpc: String,
+    method: String,
+    params: serde_json::Value,
+    id: u32,
+}
+
+/// JSON-RPC response structure
+#[derive(Deserialize)]
+struct JsonRpcResponse {
+    jsonrpc: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    result: Option<serde_json::Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    error: Option<JsonRpcError>,
+    id: u32,
+}
+
+/// JSON-RPC error structure
+#[derive(Deserialize)]
+struct JsonRpcError {
+    code: i32,
+    message: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    data: Option<serde_json::Value>,
+}
+
+/// Types of runners supported by the supervisor
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[wasm_bindgen]
+pub enum WasmRunnerType {
+    SALRunner,
+    OSISRunner,
+    VRunner,
+}
+
+/// Job type enumeration that maps to runner types
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[wasm_bindgen]
+pub enum WasmJobType {
+    SAL,
+    OSIS,
+    V,
+}
+
+/// Job status enumeration
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum JobStatus {
+    Pending,
+    Running,
+    Finished,
+    Error,
+}
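For reference, everything below rides on a plain JSON-RPC 2.0 envelope; the WASM client assembles it by hand rather than pulling in `jsonrpsee`. A sketch of what the `call_method` helper (defined later in this file) posts over fetch:

```rust
fn main() {
    // The envelope WasmSupervisorClient's call_method serializes for, e.g., runner.list.
    let envelope = serde_json::json!({
        "jsonrpc": "2.0",
        "method": "runner.list",
        "params": [],
        "id": 1
    });
    assert_eq!(envelope["jsonrpc"], "2.0");
    println!("{}", envelope);
}
```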
+/// Job error type
+#[derive(Debug, Clone, thiserror::Error)]
+pub enum JobError {
+    #[error("Validation error: {0}")]
+    Validation(String),
+    #[error("Execution error: {0}")]
+    Execution(String),
+    #[error("Timeout error")]
+    Timeout,
+}
+
+// Re-export JobBuilder from hero-job for convenience
+pub use hero_job::JobBuilder;
+
+
+#[wasm_bindgen]
+impl WasmSupervisorClient {
+    /// Create a new WASM supervisor client with authentication secret
+    #[wasm_bindgen(constructor)]
+    pub fn new(server_url: String, secret: String) -> Self {
+        console_log::init_with_level(log::Level::Info).ok();
+        Self {
+            server_url,
+            secret,
+        }
+    }
+
+    /// Alias for new() to maintain backward compatibility
+    #[wasm_bindgen]
+    pub fn with_secret(server_url: String, secret: String) -> Self {
+        Self::new(server_url, secret)
+    }
+
+    /// Get the server URL
+    #[wasm_bindgen(getter)]
+    pub fn server_url(&self) -> String {
+        self.server_url.clone()
+    }
+
+    /// Test connection using OpenRPC discovery method
+    pub async fn discover(&self) -> Result<JsValue, JsValue> {
+        let result = self.call_method("rpc.discover", serde_json::Value::Null).await;
+        match result {
+            Ok(value) => Ok(wasm_bindgen::JsValue::from_str(&value.to_string())),
+            Err(e) => Err(JsValue::from_str(&e.to_string())),
+        }
+    }
+
+    /// Verify an API key and return its metadata as JSON
+    /// The key is sent via Authorization header (Bearer token)
+    pub async fn auth_verify(&self, key: String) -> Result<JsValue, JsValue> {
+        // Create a temporary client with the key to verify
+        let temp_client = WasmSupervisorClient::with_secret(self.server_url.clone(), key);
+
+        // Send empty object as params - the key is in the Authorization header
+        let params = serde_json::json!({});
+
+        match temp_client.call_method("auth.verify", params).await {
+            Ok(result) => {
+                // Parse to AuthVerifyResponse to validate, then convert to JsValue
+                let auth_response: AuthVerifyResponse = serde_json::from_value(result)
+                    .map_err(|e| JsValue::from_str(&format!("Failed to parse auth response: {}", e)))?;
+
+                // Convert to JsValue
+                serde_wasm_bindgen::to_value(&auth_response)
+                    .map_err(|e| JsValue::from_str(&format!("Failed to convert to JsValue: {}", e)))
+            }
+            Err(e) => Err(JsValue::from_str(&format!("Failed to verify auth: {}", e))),
+        }
+    }
+
+    /// Verify the client's stored API key
+    /// Uses the secret that was set when creating the client
+    pub async fn auth_verify_self(&self) -> Result<JsValue, JsValue> {
+        self.auth_verify(self.secret.clone()).await
+    }
+
+    /// Create a new API key (admin only)
+    /// Returns the created API key with its key string
+    pub async fn auth_create_key(&self, name: String, scope: String) -> Result<JsValue, JsValue> {
+        let params = serde_json::json!({
+            "name": name,
+            "scope": scope
+        });
+
+        match self.call_method("auth.create_key", params).await {
+            Ok(result) => Ok(serde_wasm_bindgen::to_value(&result)
+                .map_err(|e| JsValue::from_str(&format!("Failed to convert result: {}", e)))?),
+            Err(e) => Err(JsValue::from_str(&format!("Failed to create key: {}", e))),
+        }
+    }
+
+    /// List all API keys (admin only)
+    pub async fn auth_list_keys(&self) -> Result<JsValue, JsValue> {
+        match self.call_method("auth.list_keys", serde_json::Value::Null).await {
+            Ok(result) => Ok(serde_wasm_bindgen::to_value(&result)
+                .map_err(|e| JsValue::from_str(&format!("Failed to convert result: {}", e)))?),
+            Err(e) => Err(JsValue::from_str(&format!("Failed to list keys: {}", e))),
+        }
+    }
+
+    /// Remove an API key (admin only)
+    pub async fn auth_remove_key(&self, key: String) -> Result<bool, JsValue> {
+        let params = serde_json::json!({
+            "key": key
}); + + match self.call_method("auth.remove_key", params).await { + Ok(result) => { + if let Some(success) = result.as_bool() { + Ok(success) + } else { + Err(JsValue::from_str("Invalid response format: expected boolean")) + } + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to remove key: {}", e))), + } + } + + /// Register a new runner to the supervisor + /// The queue name is automatically set to match the runner name + /// Authentication uses the secret from Authorization header (set during client creation) + pub async fn register_runner(&self, name: String) -> Result { + // Secret is sent via Authorization header, not in params + let params = serde_json::json!({ + "name": name + }); + + match self.call_method("register_runner", params).await { + Ok(result) => { + // Extract the runner name from the result + if let Some(runner) = result.as_str() { + Ok(runner.to_string()) + } else { + Err(JsValue::from_str("Invalid response format: expected runner name")) + } + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to register runner: {}", e))), + } + } + + /// Create a job (fire-and-forget, non-blocking) - DEPRECATED: Use create_job with API key auth + #[wasm_bindgen] + pub async fn create_job_with_secret(&self, secret: String, job: hero_job::Job) -> Result { + // Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner + let params = serde_json::json!([{ + "secret": secret, + "job": { + "id": job.id, + "caller_id": job.caller_id, + "context_id": job.context_id, + "payload": job.payload, + "runner": job.runner, + "executor": job.executor, + "timeout": job.timeout, + "env_vars": serde_json::from_str::(&serde_json::to_string(&job.env_vars).unwrap_or_else(|_| "{}".to_string())).unwrap_or(serde_json::json!({})), + "created_at": job.created_at, + "updated_at": job.updated_at + } + }]); + + match self.call_method("create_job", params).await { + Ok(result) => { + if let Some(job_id) = result.as_str() { + Ok(job_id.to_string()) + } else { + Ok(result.to_string()) + } + } + Err(e) => Err(JsValue::from_str(&format!("Failed to create job: {:?}", e))) + } + } + + /// Run a job on a specific runner (blocking, returns result) + #[wasm_bindgen] + pub async fn run_job(&self, secret: String, job: hero_job::Job) -> Result { + // Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner + let params = serde_json::json!([{ + "secret": secret, + "job": { + "id": job.id, + "caller_id": job.caller_id, + "context_id": job.context_id, + "payload": job.payload, + "runner": job.runner, + "executor": job.executor, + "timeout": job.timeout, + "env_vars": serde_json::from_str::(&serde_json::to_string(&job.env_vars).unwrap_or_else(|_| "{}".to_string())).unwrap_or(serde_json::json!({})), + "created_at": job.created_at, + "updated_at": job.updated_at + } + }]); + + match self.call_method("job.run", params).await { + Ok(result) => { + if let Some(result_str) = result.as_str() { + Ok(result_str.to_string()) + } else { + Ok(result.to_string()) + } + }, + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// List all runner IDs + pub async fn list_runners(&self) -> Result, JsValue> { + match self.call_method("list_runners", serde_json::Value::Null).await { + Ok(result) => { + if let Ok(runners) = serde_json::from_value::>(result) { + Ok(runners) + } else { + Err(JsValue::from_str("Failed to parse runners list")) + } + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to list runners: {}", e))) + } + } + + /// Get 
status of all runners + pub async fn get_all_runner_status(&self) -> Result { + match self.call_method("get_all_runner_status", serde_json::Value::Null).await { + Ok(result) => { + // Convert serde_json::Value to JsValue + Ok(serde_wasm_bindgen::to_value(&result) + .map_err(|e| JsValue::from_str(&format!("Failed to convert result: {}", e)))?) + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to get runner statuses: {}", e))) + } + } + + /// Create a job from a JsValue (full Job object) + pub async fn create_job(&self, job: JsValue) -> Result { + // Convert JsValue to serde_json::Value + let job_value: serde_json::Value = serde_wasm_bindgen::from_value(job) + .map_err(|e| JsValue::from_str(&format!("Failed to parse job: {}", e)))?; + + // Wrap in RunJobParams structure and pass as positional parameter + let params = serde_json::json!([{ + "job": job_value + }]); + + match self.call_method("jobs.create", params).await { + Ok(result) => { + if let Some(job_id) = result.as_str() { + Ok(job_id.to_string()) + } else { + Err(JsValue::from_str("Invalid response format: expected job ID")) + } + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to create job: {}", e))), + } + } + + /// Create a job with basic parameters (simplified version) + pub async fn create_simple_job( + &self, + runner: String, + caller_id: String, + context_id: String, + payload: String, + executor: String, + ) -> Result { + // Generate a unique job ID + let job_id = format!("job-{}", uuid::Uuid::new_v4()); + + let job = serde_json::json!({ + "id": job_id, + "runner": runner, + "caller_id": caller_id, + "context_id": context_id, + "payload": payload, + "executor": executor, + "timeout": 30, + "env": {} + }); + + let params = serde_json::json!({ + "job": job + }); + + match self.call_method("jobs.create", params).await { + Ok(result) => { + if let Some(job_id) = result.as_str() { + Ok(job_id.to_string()) + } else { + Err(JsValue::from_str("Invalid response format: expected job ID")) + } + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to create job: {}", e))), + } + } + + /// List all jobs + pub async fn list_jobs(&self) -> Result { + match self.call_method("jobs.list", serde_json::Value::Null).await { + Ok(result) => { + // Convert serde_json::Value to JsValue + serde_wasm_bindgen::to_value(&result) + .map_err(|e| JsValue::from_str(&format!("Failed to convert jobs list: {}", e))) + }, + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Get a job by job ID + pub async fn get_job(&self, job_id: &str) -> Result { + let params = serde_json::json!([job_id]); + match self.call_method("get_job", params).await { + Ok(result) => { + // Convert the Job result to hero_job::Job + if let Ok(job_value) = serde_json::from_value::(result) { + // Extract fields from the job + let id = job_value.get("id").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let caller_id = job_value.get("caller_id").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let context_id = job_value.get("context_id").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let payload = job_value.get("payload").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let runner = job_value.get("runner").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let executor = job_value.get("executor").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let timeout_secs = job_value.get("timeout").and_then(|v| v.get("secs")).and_then(|v| v.as_u64()).unwrap_or(30); + let env_vars = job_value.get("env_vars").map(|v| 
v.to_string()).unwrap_or_else(|| "{}".to_string()); + let created_at = job_value.get("created_at").and_then(|v| v.as_str()).unwrap_or("").to_string(); + let updated_at = job_value.get("updated_at").and_then(|v| v.as_str()).unwrap_or("").to_string(); + + Ok(hero_job::Job { + id, + caller_id, + context_id, + payload, + runner, + executor, + timeout: timeout_secs, + env_vars: serde_json::from_str(&env_vars).unwrap_or_default(), + created_at: chrono::DateTime::parse_from_rfc3339(&created_at) + .map(|dt| dt.with_timezone(&chrono::Utc)) + .unwrap_or_else(|_| chrono::Utc::now()), + updated_at: chrono::DateTime::parse_from_rfc3339(&updated_at) + .map(|dt| dt.with_timezone(&chrono::Utc)) + .unwrap_or_else(|_| chrono::Utc::now()), + signatures: Vec::new(), + }) + } else { + Err(JsValue::from_str("Invalid response format for get_job")) + } + }, + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Ping a runner by dispatching a ping job to its queue + #[wasm_bindgen] + pub async fn ping_runner(&self, runner_id: &str) -> Result { + let params = serde_json::json!([runner_id]); + + match self.call_method("ping_runner", params).await { + Ok(result) => { + if let Some(job_id) = result.as_str() { + Ok(job_id.to_string()) + } else { + Ok(result.to_string()) + } + } + Err(e) => Err(JsValue::from_str(&format!("Failed to ping runner: {:?}", e))) + } + } + + /// Stop a job by ID + #[wasm_bindgen] + pub async fn stop_job(&self, job_id: &str) -> Result<(), JsValue> { + let params = serde_json::json!([job_id]); + + match self.call_method("stop_job", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&format!("Failed to stop job: {:?}", e))) + } + } + + /// Delete a job by ID + #[wasm_bindgen] + pub async fn delete_job(&self, job_id: &str) -> Result<(), JsValue> { + let params = serde_json::json!([{ + "job_id": job_id + }]); + + match self.call_method("job.delete", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&format!("Failed to delete job: {:?}", e))) + } + } + + /// Get logs for a specific job + #[wasm_bindgen] + pub async fn get_job_logs(&self, job_id: &str, lines: Option) -> Result { + let params = if let Some(n) = lines { + serde_json::json!([job_id, n]) + } else { + serde_json::json!([job_id, serde_json::Value::Null]) + }; + + match self.call_method("get_job_logs", params).await { + Ok(result) => { + // Convert Vec to JsValue + Ok(serde_wasm_bindgen::to_value(&result) + .map_err(|e| JsValue::from_str(&format!("Failed to convert logs: {}", e)))?) 
+ }, + Err(e) => Err(JsValue::from_str(&format!("Failed to get job logs: {:?}", e))) + } + } + + /// Remove a runner from the supervisor + pub async fn remove_runner(&self, actor_id: &str) -> Result<(), JsValue> { + let params = serde_json::json!([actor_id]); + match self.call_method("remove_runner", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Start a specific runner + pub async fn start_runner(&self, actor_id: &str) -> Result<(), JsValue> { + let params = serde_json::json!([actor_id]); + match self.call_method("start_runner", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Stop a specific runner + pub async fn stop_runner(&self, actor_id: &str, force: bool) -> Result<(), JsValue> { + let params = serde_json::json!([actor_id, force]); + self.call_method("stop_runner", params) + .await + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(()) + } + + /// Get a specific runner by ID + pub async fn get_runner(&self, actor_id: &str) -> Result { + let params = serde_json::json!([actor_id]); + let result = self.call_method("get_runner", params) + .await + .map_err(|e| JsValue::from_str(&e.to_string()))?; + // Convert the serde_json::Value to a JsValue via string serialization + let json_string = serde_json::to_string(&result) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(js_sys::JSON::parse(&json_string) + .map_err(|e| JsValue::from_str("Failed to parse JSON"))?) + } + + /// Add a secret to the supervisor + pub async fn add_secret(&self, admin_secret: &str, secret_type: &str, secret_value: &str) -> Result<(), JsValue> { + let params = serde_json::json!([{ + "admin_secret": admin_secret, + "secret_type": secret_type, + "secret_value": secret_value + }]); + match self.call_method("add_secret", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Remove a secret from the supervisor + pub async fn remove_secret(&self, admin_secret: &str, secret_type: &str, secret_value: &str) -> Result<(), JsValue> { + let params = serde_json::json!([{ + "admin_secret": admin_secret, + "secret_type": secret_type, + "secret_value": secret_value + }]); + match self.call_method("remove_secret", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// List secrets (returns supervisor info including secret counts) + pub async fn list_secrets(&self, admin_secret: &str) -> Result { + let params = serde_json::json!([{ + "admin_secret": admin_secret + }]); + match self.call_method("list_secrets", params).await { + Ok(result) => { + // Convert serde_json::Value to JsValue + let result_str = serde_json::to_string(&result) + .map_err(|e| JsValue::from_str(&e.to_string()))?; + Ok(js_sys::JSON::parse(&result_str) + .map_err(|e| JsValue::from_str(&format!("JSON parse error: {:?}", e)))?) + }, + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Get supervisor information including secret counts + pub async fn get_supervisor_info(&self, admin_secret: &str) -> Result { + let params = serde_json::json!({ + "admin_secret": admin_secret + }); + + match self.call_method("get_supervisor_info", params).await { + Ok(result) => { + let result_str = serde_json::to_string(&result) + .map_err(|e| JsValue::from_str(&format!("Serialization error: {:?}", e)))?; + Ok(js_sys::JSON::parse(&result_str) + .map_err(|e| JsValue::from_str(&format!("JSON parse error: {:?}", e)))?) 
+ }, + Err(e) => Err(JsValue::from_str(&format!("Failed to get supervisor info: {:?}", e))), + } + } + + /// List admin secrets (returns actual secret values) + pub async fn list_admin_secrets(&self, admin_secret: &str) -> Result, JsValue> { + let params = serde_json::json!({ + "admin_secret": admin_secret + }); + + match self.call_method("list_admin_secrets", params).await { + Ok(result) => { + let secrets: Vec = serde_json::from_value(result) + .map_err(|e| JsValue::from_str(&format!("Failed to parse admin secrets: {:?}", e)))?; + Ok(secrets) + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to list admin secrets: {:?}", e))), + } + } + + /// List user secrets (returns actual secret values) + pub async fn list_user_secrets(&self, admin_secret: &str) -> Result, JsValue> { + let params = serde_json::json!({ + "admin_secret": admin_secret + }); + + match self.call_method("list_user_secrets", params).await { + Ok(result) => { + let secrets: Vec = serde_json::from_value(result) + .map_err(|e| JsValue::from_str(&format!("Failed to parse user secrets: {:?}", e)))?; + Ok(secrets) + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to list user secrets: {:?}", e))), + } + } + + /// List register secrets (returns actual secret values) + pub async fn list_register_secrets(&self, admin_secret: &str) -> Result, JsValue> { + let params = serde_json::json!({ + "admin_secret": admin_secret + }); + + match self.call_method("list_register_secrets", params).await { + Ok(result) => { + let secrets: Vec = serde_json::from_value(result) + .map_err(|e| JsValue::from_str(&format!("Failed to parse register secrets: {:?}", e)))?; + Ok(secrets) + }, + Err(e) => Err(JsValue::from_str(&format!("Failed to list register secrets: {:?}", e))), + } + } + + /// Start a previously created job by queuing it to its assigned runner + pub async fn start_job(&self, job_id: &str) -> Result<(), JsValue> { + let params = serde_json::json!([{ + "job_id": job_id + }]); + + match self.call_method("job.start", params).await { + Ok(_) => Ok(()), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Get the status of a job + pub async fn get_job_status(&self, job_id: &str) -> Result { + let params = serde_json::json!([job_id]); + + match self.call_method("job.status", params).await { + Ok(result) => serde_wasm_bindgen::to_value(&result) + .map_err(|e| JsValue::from_str(&format!("Serialization error: {:?}", e))), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Get the result of a completed job + pub async fn get_job_result(&self, job_id: &str) -> Result { + let params = serde_json::json!([job_id]); + + match self.call_method("job.result", params).await { + Ok(result) => serde_wasm_bindgen::to_value(&result) + .map_err(|e| JsValue::from_str(&format!("Serialization error: {:?}", e))), + Err(e) => Err(JsValue::from_str(&e.to_string())), + } + } + + /// Internal method to make JSON-RPC calls + async fn call_method(&self, method: &str, params: serde_json::Value) -> WasmClientResult { + let request = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: method.to_string(), + params, + id: 1, + }; + + let body = serde_json::to_string(&request)?; + + // Create headers + let headers = Headers::new().map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + headers.set("Content-Type", "application/json") + .map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + + // Add Authorization header with secret + let auth_value = format!("Bearer {}", self.secret); + 
headers.set("Authorization", &auth_value) + .map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + + // Create request init + let opts = RequestInit::new(); + opts.set_method("POST"); + opts.set_headers(&headers); + opts.set_body(&JsValue::from_str(&body)); + opts.set_mode(RequestMode::Cors); + + // Create request + let request = Request::new_with_str_and_init(&self.server_url, &opts) + .map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + + // Get window and fetch + let window = web_sys::window().ok_or_else(|| WasmClientError::JavaScript("No window object".to_string()))?; + let resp_value = JsFuture::from(window.fetch_with_request(&request)).await + .map_err(|e| WasmClientError::Network(format!("{:?}", e)))?; + + // Convert to Response + let resp: Response = resp_value.dyn_into() + .map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + + // Check if response is ok + if !resp.ok() { + return Err(WasmClientError::Network(format!("HTTP {}: {}", resp.status(), resp.status_text()))); + } + + // Get response text + let text_promise = resp.text() + .map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?; + let text_value = JsFuture::from(text_promise).await + .map_err(|e| WasmClientError::Network(format!("{:?}", e)))?; + let text = text_value.as_string() + .ok_or_else(|| WasmClientError::InvalidResponse)?; + + // Parse JSON-RPC response + let response: JsonRpcResponse = serde_json::from_str(&text)?; + + if let Some(error) = response.error { + return Err(WasmClientError::Server { + message: format!("{}: {}", error.code, error.message), + }); + } + + // For void methods, null result is valid + Ok(response.result.unwrap_or(serde_json::Value::Null)) + } +} + +/// Initialize the WASM client library (call manually if needed) +pub fn init() { + console_log::init_with_level(log::Level::Info).ok(); + log::info!("Hero Supervisor WASM OpenRPC Client initialized"); +} + +/// Utility function to create a client from JavaScript +#[wasm_bindgen] +pub fn create_client(server_url: String, secret: String) -> WasmSupervisorClient { + WasmSupervisorClient::new(server_url, secret) +} + +/// Sign a job's canonical representation with a private key +/// Returns a tuple of (public_key_hex, signature_hex) +#[wasm_bindgen] +pub fn sign_job_canonical( + canonical_repr: String, + private_key_hex: String, +) -> Result { + // Decode private key from hex + let secret_bytes = hex::decode(&private_key_hex) + .map_err(|e| JsValue::from_str(&format!("Invalid private key hex: {}", e)))?; + + let secret_key = SecretKey::from_slice(&secret_bytes) + .map_err(|e| JsValue::from_str(&format!("Invalid private key: {}", e)))?; + + // Get the public key (uncompressed format) + let secp = Secp256k1::new(); + let public_key = PublicKey::from_secret_key(&secp, &secret_key); + let public_key_hex = hex::encode(public_key.serialize_uncompressed()); + + // Hash the canonical representation + let mut hasher = Sha256::new(); + hasher.update(canonical_repr.as_bytes()); + let hash = hasher.finalize(); + + // Create message from hash + let message = Message::from_digest_slice(&hash) + .map_err(|e| JsValue::from_str(&format!("Invalid message: {}", e)))?; + + // Sign the message + let signature = secp.sign_ecdsa(&message, &secret_key); + let signature_hex = hex::encode(signature.serialize_compact()); + + // Return as JS object + let result = serde_json::json!({ + "public_key": public_key_hex, + "signature": signature_hex + }); + + serde_wasm_bindgen::to_value(&result) + .map_err(|e| 
JsValue::from_str(&format!("Failed to serialize result: {}", e))) +} + +/// Create canonical representation of a job for signing +/// This matches the format used in runner_rust Job::canonical_representation +#[wasm_bindgen] +pub fn create_job_canonical_repr( + id: String, + caller_id: String, + context_id: String, + payload: String, + runner: String, + executor: String, + timeout: u64, + env_vars_json: String, +) -> Result { + // Parse env_vars from JSON + let env_vars: std::collections::HashMap = serde_json::from_str(&env_vars_json) + .map_err(|e| JsValue::from_str(&format!("Invalid env_vars JSON: {}", e)))?; + + // Sort env_vars keys for deterministic ordering + let mut env_vars_sorted: Vec<_> = env_vars.iter().collect(); + env_vars_sorted.sort_by_key(|&(k, _)| k); + + // Create canonical representation (matches Job::canonical_representation in runner_rust) + let canonical = format!( + "{}:{}:{}:{}:{}:{}:{}:{:?}", + id, + caller_id, + context_id, + payload, + runner, + executor, + timeout, + env_vars_sorted + ); + + Ok(canonical) +} diff --git a/lib/models/job/Cargo.toml b/lib/models/job/Cargo.toml new file mode 100644 index 0000000..09e0141 --- /dev/null +++ b/lib/models/job/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "hero-job" +version.workspace = true +edition.workspace = true +description = "Job types and models for Hero" +license = "MIT OR Apache-2.0" + +[lib] +name = "hero_job" +path = "lib.rs" + +[dependencies] +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +chrono.workspace = true +uuid.workspace = true +log.workspace = true +hex.workspace = true +sha2.workspace = true +secp256k1.workspace = true + +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasm-bindgen.workspace = true diff --git a/lib/models/job/lib.rs b/lib/models/job/lib.rs new file mode 100644 index 0000000..ba522b6 --- /dev/null +++ b/lib/models/job/lib.rs @@ -0,0 +1,338 @@ +use chrono::{Utc}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use thiserror::Error; +use uuid::Uuid; +use log::{error}; + +#[cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; + +/// Signature for a job - contains the signatory's public key and their signature +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobSignature { + /// Public key of the signatory (hex-encoded secp256k1 public key) + pub public_key: String, + /// Signature (hex-encoded secp256k1 signature) + pub signature: String, +} + +/// Job status enumeration +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum JobStatus { + Created, + Dispatched, + WaitingForPrerequisites, + Started, + Error, + Stopping, + Finished, +} + +/// Job result response for RPC calls +#[derive(Debug, Serialize, Clone)] +#[serde(untagged)] +pub enum JobResult { + Success { success: String }, + Error { error: String }, +} + +impl JobStatus { + pub fn as_str(&self) -> &'static str { + match self { + JobStatus::Created => "created", + JobStatus::Dispatched => "dispatched", + JobStatus::WaitingForPrerequisites => "waiting_for_prerequisites", + JobStatus::Started => "started", + JobStatus::Error => "error", + JobStatus::Stopping => "stopping", + JobStatus::Finished => "finished", + } + } + + pub fn from_str(s: &str) -> Option { + match s { + "created" => Some(JobStatus::Created), + "dispatched" => Some(JobStatus::Dispatched), + "waiting_for_prerequisites" => Some(JobStatus::WaitingForPrerequisites), + "started" => Some(JobStatus::Started), + "error" => Some(JobStatus::Error), + "stopping" => 
Some(JobStatus::Stopping),
+            "finished" => Some(JobStatus::Finished),
+            _ => None,
+        }
+    }
+}
+
+/// Job structure representing a unit of work to be executed
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen(getter_with_clone))]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Job {
+    pub id: String,
+    pub caller_id: String,
+    pub context_id: String,
+    pub payload: String,
+    pub runner: String, // name of the runner to execute this job
+    pub executor: String, // name of the executor the runner will use to execute this job
+    pub timeout: u64, // timeout in seconds
+    #[cfg_attr(target_arch = "wasm32", wasm_bindgen(skip))]
+    pub env_vars: HashMap<String, String>, // environment variables for script execution
+    #[cfg_attr(target_arch = "wasm32", wasm_bindgen(skip))]
+    pub created_at: chrono::DateTime<Utc>,
+    #[cfg_attr(target_arch = "wasm32", wasm_bindgen(skip))]
+    pub updated_at: chrono::DateTime<Utc>,
+
+    /// Signatures from authorized signatories (public keys are included in each signature)
+    #[cfg_attr(target_arch = "wasm32", wasm_bindgen(skip))]
+    pub signatures: Vec<JobSignature>,
+}
+
+/// Error types for job operations
+#[derive(Error, Debug)]
+pub enum JobError {
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+    #[error("Job not found: {0}")]
+    NotFound(String),
+    #[error("Invalid data: {0}")]
+    InvalidData(String),
+    #[error("Validation error: {0}")]
+    Validation(String),
+    #[error("Signature verification failed: {0}")]
+    SignatureVerification(String),
+}
+
+impl Job {
+    /// Create a new job with the given parameters
+    pub fn new(
+        caller_id: String,
+        context_id: String,
+        payload: String,
+        runner: String,
+        executor: String,
+    ) -> Self {
+        let now = Utc::now();
+        Self {
+            id: Uuid::new_v4().to_string(),
+            caller_id,
+            context_id,
+            payload,
+            runner,
+            executor,
+            timeout: 300, // 5 minutes default
+            env_vars: HashMap::new(),
+            created_at: now,
+            updated_at: now,
+            signatures: Vec::new(),
+        }
+    }
+
+    /// Get the canonical representation of the job for signing
+    /// This creates a deterministic string representation that can be hashed and signed
+    /// Note: Signatures are excluded from the canonical representation
+    pub fn canonical_representation(&self) -> String {
+        // Create a deterministic representation excluding signatures
+        // Sort env_vars keys for deterministic ordering
+        let mut env_vars_sorted: Vec<_> = self.env_vars.iter().collect();
+        env_vars_sorted.sort_by_key(|&(k, _)| k);
+
+        format!(
+            "{}:{}:{}:{}:{}:{}:{}:{:?}",
+            self.id,
+            self.caller_id,
+            self.context_id,
+            self.payload,
+            self.runner,
+            self.executor,
+            self.timeout,
+            env_vars_sorted
+        )
+    }
+
+    /// Get list of signatory public keys from signatures
+    pub fn signatories(&self) -> Vec<String> {
+        self.signatures.iter()
+            .map(|sig| sig.public_key.clone())
+            .collect()
+    }
+
+    /// Verify that all signatures are valid
+    /// Returns Ok(()) if verification passes, Err otherwise
+    /// Empty signatures list is allowed - loop simply won't execute
+    pub fn verify_signatures(&self) -> Result<(), JobError> {
+        use secp256k1::{Message, PublicKey, Secp256k1, ecdsa::Signature};
+        use sha2::{Sha256, Digest};
+
+        // Get the canonical representation and hash it
+        let canonical = self.canonical_representation();
+        let mut hasher = Sha256::new();
+        hasher.update(canonical.as_bytes());
+        let hash = hasher.finalize();
+
+        let secp = Secp256k1::verification_only();
+        let message = Message::from_digest_slice(&hash)
+            .map_err(|e| JobError::SignatureVerification(format!("Invalid message: {}", e)))?;
+
+        // Verify each signature (if any)
+        for sig_data in &self.signatures {
+            // Decode public key
+            let pubkey_bytes = hex::decode(&sig_data.public_key)
+                .map_err(|e| JobError::SignatureVerification(format!("Invalid public key hex: {}", e)))?;
+            let pubkey = PublicKey::from_slice(&pubkey_bytes)
+                .map_err(|e| JobError::SignatureVerification(format!("Invalid public key: {}", e)))?;
+
+            // Decode signature
+            let sig_bytes = hex::decode(&sig_data.signature)
+                .map_err(|e| JobError::SignatureVerification(format!("Invalid signature hex: {}", e)))?;
+            let signature = Signature::from_compact(&sig_bytes)
+                .map_err(|e| JobError::SignatureVerification(format!("Invalid signature: {}", e)))?;
+
+            // Verify signature
+            secp.verify_ecdsa(&message, &signature, &pubkey)
+                .map_err(|e| JobError::SignatureVerification(format!("Signature verification failed: {}", e)))?;
+        }
+
+        Ok(())
+    }
+}
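The native counterpart of the `sign_job_canonical` helper in `wasm.rs` can be written directly against `verify_signatures`: hash the canonical representation, sign it, and attach a `JobSignature`. A hypothetical helper sketch (the function name and throwaway key are ours; the digest and encodings match the code above):

```rust
use hero_job::{Job, JobSignature};
use secp256k1::{Message, PublicKey, Secp256k1, SecretKey};
use sha2::{Digest, Sha256};

/// Hypothetical helper: produce a JobSignature that Job::verify_signatures accepts.
fn sign_job(job: &Job, secret_key: &SecretKey) -> Result<JobSignature, secp256k1::Error> {
    let secp = Secp256k1::new();
    // Same digest verify_signatures computes: SHA-256 over the canonical representation.
    let hash = Sha256::digest(job.canonical_representation().as_bytes());
    let message = Message::from_digest_slice(&hash)?;
    let signature = secp.sign_ecdsa(&message, secret_key);
    let public_key = PublicKey::from_secret_key(&secp, secret_key);
    Ok(JobSignature {
        // Uncompressed public key encoding, matching sign_job_canonical in wasm.rs.
        public_key: hex::encode(public_key.serialize_uncompressed()),
        signature: hex::encode(signature.serialize_compact()),
    })
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut job = Job::new(
        "caller".into(), "context".into(), "40 + 2".into(),
        "example-runner".into(), "rhai".into(),
    );
    let sk = SecretKey::from_slice(&[0x11; 32])?; // throwaway key for the demo
    job.signatures.push(sign_job(&job, &sk)?);
    job.verify_signatures()?; // passes: the signature covers the canonical form
    Ok(())
}
```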
+/// Builder for constructing job execution requests.
+pub struct JobBuilder {
+    caller_id: String,
+    context_id: String,
+    payload: String,
+    runner: String,
+    executor: String,
+    timeout: u64, // timeout in seconds
+    env_vars: HashMap<String, String>,
+    signatures: Vec<JobSignature>,
+}
+
+impl JobBuilder {
+    pub fn new() -> Self {
+        Self {
+            caller_id: "".to_string(),
+            context_id: "".to_string(),
+            payload: "".to_string(),
+            runner: "".to_string(),
+            executor: "".to_string(),
+            timeout: 300, // 5 minutes default
+            env_vars: HashMap::new(),
+            signatures: Vec::new(),
+        }
+    }
+
+    /// Set the caller ID for this job
+    pub fn caller_id(mut self, caller_id: &str) -> Self {
+        self.caller_id = caller_id.to_string();
+        self
+    }
+
+    /// Set the context ID for this job
+    pub fn context_id(mut self, context_id: &str) -> Self {
+        self.context_id = context_id.to_string();
+        self
+    }
+
+    /// Set the payload (script content) for this job
+    pub fn payload(mut self, payload: &str) -> Self {
+        self.payload = payload.to_string();
+        self
+    }
+
+    /// Set the runner name for this job
+    pub fn runner(mut self, runner: &str) -> Self {
+        self.runner = runner.to_string();
+        self
+    }
+
+    /// Set the executor for this job
+    pub fn executor(mut self, executor: &str) -> Self {
+        self.executor = executor.to_string();
+        self
+    }
+
+    /// Set the timeout for job execution (in seconds)
+    pub fn timeout(mut self, timeout: u64) -> Self {
+        self.timeout = timeout;
+        self
+    }
+
+    /// Set a single environment variable
+    pub fn env_var(mut self, key: &str, value: &str) -> Self {
+        self.env_vars.insert(key.to_string(), value.to_string());
+        self
+    }
+
+    /// Set multiple environment variables from a HashMap
+    pub fn env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
+        self.env_vars = env_vars;
+        self
+    }
+
+    /// Clear all environment variables
+    pub fn clear_env_vars(mut self) -> Self {
+        self.env_vars.clear();
+        self
+    }
+
+    /// Add a signature (public key and signature)
+    pub fn signature(mut self, public_key: &str, signature: &str) -> Self {
+        self.signatures.push(JobSignature {
+            public_key: public_key.to_string(),
+            signature: signature.to_string(),
+        });
+        self
+    }
+
+    /// Set multiple signatures
+    pub fn signatures(mut self, signatures: Vec<JobSignature>) -> Self {
+        self.signatures = signatures;
+        self
+    }
+
+    /// Clear all signatures
+    pub fn clear_signatures(mut self) -> Self {
+        self.signatures.clear();
+        self
+    }
+
+    /// Build the job
+    pub fn build(self) -> Result<Job, JobError> {
+        if self.caller_id.is_empty() {
+            return Err(JobError::InvalidData("caller_id is required".to_string()));
+        }
+        if self.context_id.is_empty() {
+            return Err(JobError::InvalidData("context_id is required".to_string()));
+        }
+        if self.payload.is_empty() {
return Err(JobError::InvalidData("payload is required".to_string())); + } + if self.runner.is_empty() { + return Err(JobError::InvalidData("runner is required".to_string())); + } + if self.executor.is_empty() { + return Err(JobError::InvalidData("executor is required".to_string())); + } + + let mut job = Job::new( + self.caller_id, + self.context_id, + self.payload, + self.runner, + self.executor, + ); + + job.timeout = self.timeout; + job.env_vars = self.env_vars; + job.signatures = self.signatures; + + Ok(job) + } +} + +impl Default for JobBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/lib/osiris/core/Cargo.toml b/lib/osiris/core/Cargo.toml new file mode 100644 index 0000000..52d0760 --- /dev/null +++ b/lib/osiris/core/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "osiris-core" +version.workspace = true +edition = "2021" +description = "Osiris core - Object storage and indexing system" +license = "MIT OR Apache-2.0" + +[lib] +name = "osiris" +path = "lib.rs" + +[dependencies] +# Core dependencies +anyhow.workspace = true +redis.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +uuid.workspace = true +toml.workspace = true +thiserror.workspace = true +clap.workspace = true +env_logger.workspace = true +log.workspace = true + +# Time handling +time = { version = "0.3", features = ["serde", "formatting", "parsing", "macros"] } + +# Tracing +tracing.workspace = true +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +# Email +lettre = "0.11" + +# HTTP client +reqwest = { version = "0.11", features = ["json"] } + +# Rhai scripting +rhai = { version = "1.21.0", features = ["std", "sync", "serde"] } + +# Osiris derive macros +osiris-derive = { path = "../derive" } + +[dev-dependencies] +tempfile = "3.8" diff --git a/lib/osiris/core/config/mod.rs b/lib/osiris/core/config/mod.rs new file mode 100644 index 0000000..00bfe54 --- /dev/null +++ b/lib/osiris/core/config/mod.rs @@ -0,0 +1,60 @@ +pub mod model; + +pub use model::{Config, HeroDbConfig, NamespaceConfig}; + +use crate::error::{Error, Result}; +use std::collections::HashMap; +use std::fs; +use std::path::PathBuf; + +/// Load configuration from file +pub fn load_config(path: Option) -> Result { + let config_path = path.unwrap_or_else(default_config_path); + + if !config_path.exists() { + return Err(Error::Config(format!( + "Configuration file not found: {}", + config_path.display() + ))); + } + + let content = fs::read_to_string(&config_path)?; + let config: Config = toml::from_str(&content) + .map_err(|e| Error::Config(format!("Failed to parse config: {}", e)))?; + + Ok(config) +} + +/// Save configuration to file +pub fn save_config(config: &Config, path: Option) -> Result<()> { + let config_path = path.unwrap_or_else(default_config_path); + + // Create parent directory if it doesn't exist + if let Some(parent) = config_path.parent() { + fs::create_dir_all(parent)?; + } + + let content = toml::to_string_pretty(config) + .map_err(|e| Error::Config(format!("Failed to serialize config: {}", e)))?; + + fs::write(&config_path, content)?; + + Ok(()) +} + +/// Get the default configuration file path +pub fn default_config_path() -> PathBuf { + let home = std::env::var("HOME").unwrap_or_else(|_| ".".to_string()); + PathBuf::from(home) + .join(".config") + .join("osiris") + .join("config.toml") +} + +/// Create a default configuration +pub fn create_default_config(herodb_url: String) -> Config { + Config { + herodb: HeroDbConfig { url: herodb_url }, + namespaces: 
HashMap::new(),
+    }
+}
diff --git a/lib/osiris/core/config/model.rs b/lib/osiris/core/config/model.rs
new file mode 100644
index 0000000..8d19e13
--- /dev/null
+++ b/lib/osiris/core/config/model.rs
@@ -0,0 +1,55 @@
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+/// OSIRIS configuration
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct Config {
+    /// HeroDB connection configuration
+    pub herodb: HeroDbConfig,
+
+    /// Namespace configurations
+    #[serde(default)]
+    pub namespaces: HashMap<String, NamespaceConfig>,
+}
+
+/// HeroDB connection configuration
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct HeroDbConfig {
+    /// HeroDB URL (e.g., "redis://localhost:6379")
+    pub url: String,
+}
+
+/// Namespace configuration
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct NamespaceConfig {
+    /// HeroDB database ID for this namespace
+    pub db_id: u16,
+}
+
+impl Config {
+    /// Get namespace configuration by name
+    pub fn get_namespace(&self, name: &str) -> Option<&NamespaceConfig> {
+        self.namespaces.get(name)
+    }
+
+    /// Add or update a namespace
+    pub fn set_namespace(&mut self, name: String, config: NamespaceConfig) {
+        self.namespaces.insert(name, config);
+    }
+
+    /// Remove a namespace
+    pub fn remove_namespace(&mut self, name: &str) -> Option<NamespaceConfig> {
+        self.namespaces.remove(name)
+    }
+
+    /// Get the next available database ID
+    pub fn next_db_id(&self) -> u16 {
+        let max_id = self
+            .namespaces
+            .values()
+            .map(|ns| ns.db_id)
+            .max()
+            .unwrap_or(0);
+        max_id + 1
+    }
+}
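To make the namespace model concrete, here is a short sketch driving the `Config` API from `model.rs` together with `create_default_config` and `save_config` from `mod.rs` (module paths are assumed from the file layout and `[lib] name = "osiris"`; the URL is a placeholder):

```rust
use osiris::config::{create_default_config, save_config, NamespaceConfig};

fn main() -> osiris::error::Result<()> {
    // Allocate database ids for two namespaces, then persist the config.
    let mut config = create_default_config("redis://localhost:6379".to_string());

    let notes_id = config.next_db_id(); // 1 when no namespaces exist yet
    config.set_namespace("notes".to_string(), NamespaceConfig { db_id: notes_id });

    let jobs_id = config.next_db_id(); // now 2
    config.set_namespace("jobs".to_string(), NamespaceConfig { db_id: jobs_id });

    save_config(&config, None) // defaults to ~/.config/osiris/config.toml
}
```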
diff --git a/lib/osiris/core/context.rs b/lib/osiris/core/context.rs
new file mode 100644
index 0000000..d9e92e9
--- /dev/null
+++ b/lib/osiris/core/context.rs
@@ -0,0 +1,413 @@
+/// OSIRIS Context
+///
+/// A complete context with HeroDB storage and participant-based access.
+/// Each context is isolated with its own HeroDB connection.
+///
+/// Combines:
+/// - HeroDB storage (via GenericStore)
+/// - Participant list (public keys)
+/// - Generic CRUD operations for any data
+
+use crate::objects::Note;
+use crate::objects::heroledger::{
+    user::User,
+    group::Group,
+    money::Account,
+    dnsrecord::DNSZone,
+};
+use crate::store::{GenericStore, HeroDbClient};
+use rhai::{CustomType, EvalAltResult, TypeBuilder};
+use std::sync::Arc;
+
+/// Convert serde_json::Value to rhai::Dynamic
+fn json_to_rhai(value: serde_json::Value) -> Result<rhai::Dynamic, String> {
+    match value {
+        serde_json::Value::Null => Ok(rhai::Dynamic::UNIT),
+        serde_json::Value::Bool(b) => Ok(rhai::Dynamic::from(b)),
+        serde_json::Value::Number(n) => {
+            if let Some(i) = n.as_i64() {
+                Ok(rhai::Dynamic::from(i))
+            } else if let Some(f) = n.as_f64() {
+                Ok(rhai::Dynamic::from(f))
+            } else {
+                Err("Invalid number".to_string())
+            }
+        }
+        serde_json::Value::String(s) => Ok(rhai::Dynamic::from(s)),
+        serde_json::Value::Array(arr) => {
+            let rhai_arr: Result<Vec<rhai::Dynamic>, String> = arr
+                .into_iter()
+                .map(json_to_rhai)
+                .collect();
+            Ok(rhai::Dynamic::from(rhai_arr?))
+        }
+        serde_json::Value::Object(obj) => {
+            let mut rhai_map = rhai::Map::new();
+            for (k, v) in obj {
+                rhai_map.insert(k.into(), json_to_rhai(v)?);
+            }
+            Ok(rhai::Dynamic::from(rhai_map))
+        }
+    }
+}
+
+// ============================================================================
+// OsirisContext - Main Context Type
+// ============================================================================
+
+/// OSIRIS Context - combines storage with participant-based access
+///
+/// This is the main context object that provides:
+/// - HeroDB storage via GenericStore
+/// - Participant list (public keys)
+/// - Generic CRUD operations
+#[derive(Clone, Debug)]
+pub struct OsirisContext {
+    pub(crate) participants: Vec<String>, // Public keys of all participants in this context
+    pub(crate) store: Arc<GenericStore>,
+}
+
+// Keep OsirisInstance as an alias for backward compatibility
+pub type OsirisInstance = OsirisContext;
+
+impl OsirisContext {
+    /// Create a builder for OsirisContext
+    pub fn builder() -> OsirisContextBuilder {
+        OsirisContextBuilder::new()
+    }
+
+    /// Create a new OSIRIS context with minimal config (for backwards compatibility)
+    pub fn new(name: impl ToString, herodb_url: &str, db_id: u16) -> Result<Self, Box<dyn std::error::Error>> {
+        OsirisContextBuilder::new()
+            .name(name)
+            .herodb_url(herodb_url)
+            .db_id(db_id)
+            .build()
+    }
+
+    /// Get the context participants (public keys)
+    pub fn participants(&self) -> Vec<String> {
+        self.participants.clone()
+    }
+
+    /// Get the context ID (sorted, comma-separated participant keys)
+    pub fn context_id(&self) -> String {
+        let mut sorted = self.participants.clone();
+        sorted.sort();
+        sorted.join(",")
+    }
+
+    // ============================================================================
+    // Generic CRUD Operations
+    // ============================================================================
+    // These methods work with any Rhai Dynamic object and store in HeroDB
+
+    /// Generic save - saves any Rhai object to HeroDB
+    ///
+    /// Usage in Rhai:
+    /// ```rhai
+    /// let resident = digital_resident()
+    ///     .email("test@example.com")
+    ///     .first_name("John");
+    /// let id = ctx.save("residents", "resident_123", resident);
+    /// ```
+    pub fn save(&self, collection: String, id: String, data: rhai::Dynamic) -> Result<String, Box<EvalAltResult>> {
+        let store = self.store.clone();
+        let id_clone = id.clone();
+        let collection_clone = collection.clone();
+
+        // Serialize Rhai object to JSON
+        let json_content = format!("{:?}", data); // Simple serialization
+
+// ============================================================================
+// OsirisContext - Main Context Type
+// ============================================================================
+
+/// OSIRIS Context - combines storage with participant-based access
+///
+/// This is the main context object that provides:
+/// - HeroDB storage via GenericStore
+/// - Participant list (public keys)
+/// - Generic CRUD operations
+#[derive(Clone, Debug)]
+pub struct OsirisContext {
+    pub(crate) participants: Vec<String>, // Public keys of all participants in this context
+    pub(crate) store: Arc<GenericStore>,
+}
+
+// Keep OsirisInstance as an alias for backward compatibility
+pub type OsirisInstance = OsirisContext;
+
+impl OsirisContext {
+    /// Create a builder for OsirisContext
+    pub fn builder() -> OsirisContextBuilder {
+        OsirisContextBuilder::new()
+    }
+
+    /// Create a new OSIRIS context with minimal config (for backwards compatibility)
+    pub fn new(name: impl ToString, herodb_url: &str, db_id: u16) -> Result<Self, Box<EvalAltResult>> {
+        OsirisContextBuilder::new()
+            .name(name)
+            .herodb_url(herodb_url)
+            .db_id(db_id)
+            .build()
+    }
+
+    /// Get the context participants (public keys)
+    pub fn participants(&self) -> Vec<String> {
+        self.participants.clone()
+    }
+
+    /// Get the context ID (sorted, comma-separated participant keys)
+    pub fn context_id(&self) -> String {
+        let mut sorted = self.participants.clone();
+        sorted.sort();
+        sorted.join(",")
+    }
+
+    // ============================================================================
+    // Generic CRUD Operations
+    // ============================================================================
+    // These methods work with any Rhai Dynamic object and store in HeroDB
+
+    /// Generic save - saves any Rhai object to HeroDB
+    ///
+    /// Usage in Rhai:
+    /// ```rhai
+    /// let resident = digital_resident()
+    ///     .email("test@example.com")
+    ///     .first_name("John");
+    /// let id = ctx.save("residents", "resident_123", resident);
+    /// ```
+    pub fn save(&self, collection: String, id: String, data: rhai::Dynamic) -> Result<String, Box<EvalAltResult>> {
+        let store = self.store.clone();
+        let id_clone = id.clone();
+        let collection_clone = collection.clone();
+
+        // Serialize the Rhai object to JSON so that get() can parse it back.
+        // (The workspace enables rhai's "serde" feature, so Dynamic is Serialize.)
+        let json_content = serde_json::to_string(&data)
+            .map_err(|e| format!("Failed to serialize: {}", e))?;
+
+        // Save as Note
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                let mut note = Note::new(collection_clone);
+                // Parse string ID to u32, default to 0 if parsing fails
+                note.base_data.id = id_clone.parse::<u32>().unwrap_or(0);
+                note.content = Some(json_content);
+
+                store.put(&note).await
+                    .map_err(|e| format!("Failed to save: {}", e))?;
+
+                Ok(id_clone)
+            })
+        }).map_err(|e: String| e.into())
+    }
+
+    /// Generic get - retrieves data from HeroDB and returns as Rhai object
+    ///
+    /// Usage in Rhai:
+    /// ```rhai
+    /// let resident = ctx.get("residents", "resident_123");
+    /// print(resident); // Can use the data directly
+    /// ```
+    pub fn get(&self, collection: String, id: String) -> Result<rhai::Dynamic, Box<EvalAltResult>> {
+        let store = self.store.clone();
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                // Get raw JSON from HeroDB (generic)
+                let json_data = store.get_raw(&collection, &id).await
+                    .map_err(|e| format!("Failed to get from HeroDB: {}", e))?;
+
+                // Parse JSON to Rhai Map
+                let parsed: serde_json::Value = serde_json::from_str(&json_data)
+                    .map_err(|e| format!("Failed to parse JSON: {}", e))?;
+
+                // Convert serde_json::Value to rhai::Dynamic
+                json_to_rhai(parsed)
+            })
+        }).map_err(|e: String| e.into())
+    }
+
+    /// Generic delete - checks if exists in HeroDB and deletes
+    ///
+    /// Usage in Rhai:
+    /// ```rhai
+    /// let deleted = ctx.delete("residents", "resident_123");
+    /// if deleted {
+    ///     print("Deleted successfully");
+    /// }
+    /// ```
+    pub fn delete(&self, collection: String, id: String) -> Result<bool, Box<EvalAltResult>> {
+        let store = self.store.clone();
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                // Check if exists by trying to get it
+                match store.get::<Note>(&collection, &id).await {
+                    Ok(note) => {
+                        // Exists, now delete it
+                        store.delete(&note).await
+                            .map_err(|e| format!("Failed to delete from HeroDB: {}", e))
+                    }
+                    Err(_) => {
+                        // Doesn't exist
+                        Ok(false)
+                    }
+                }
+            })
+        }).map_err(|e: String| e.into())
+    }
+
+    /// Check if an object exists in the context
+    pub fn exists(&self, collection: String, id: String) -> Result<bool, Box<EvalAltResult>> {
+        let store = self.store.clone();
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                // Check if exists by trying to get it
+                match store.get::<Note>(&collection, &id).await {
+                    Ok(_) => Ok(true),
+                    Err(_) => Ok(false),
+                }
+            })
+        }).map_err(|e: String| e.into())
+    }
+
+    /// List all IDs in a collection
+    pub fn list(&self, collection: String) -> Result<Vec<rhai::Dynamic>, Box<EvalAltResult>> {
+        let store = self.store.clone();
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                store.get_all_ids(&collection).await
+                    .map(|ids| ids.into_iter().map(rhai::Dynamic::from).collect())
+                    .map_err(|e| format!("Failed to list: {}", e))
+            })
+        }).map_err(|e: String| e.into())
+    }
+
+    /// Query objects by field value
+    pub fn query(&self, collection: String, field: String, value: String) -> Result<Vec<rhai::Dynamic>, Box<EvalAltResult>> {
+        let store = self.store.clone();
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                store.get_ids_by_index(&collection, &field, &value).await
+                    .map(|ids| ids.into_iter().map(rhai::Dynamic::from).collect())
+                    .map_err(|e| format!("Failed to query: {}", e))
+            })
+        }).map_err(|e: String| e.into())
+    }
+}
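For orientation, a hedged sketch of driving this CRUD surface from Rust. The wiring is hypothetical and assumes a reachable HeroDB at the default URL plus a multi-threaded Tokio runtime, since `save` uses `block_in_place` internally (which panics on a current-thread runtime):

```rust
#[tokio::main(flavor = "multi_thread")]
async fn main() {
    // Illustrative participants, collection, and id values.
    let ctx = OsirisContext::builder()
        .participants(vec!["pk1".to_string(), "pk2".to_string()])
        .herodb_url("redis://localhost:6379")
        .db_id(1)
        .build()
        .expect("failed to build context");

    // Build a Rhai map by hand, as a script would.
    let mut note = rhai::Map::new();
    note.insert("title".into(), rhai::Dynamic::from("hello".to_string()));

    let id = ctx
        .save("notes".to_string(), "42".to_string(), rhai::Dynamic::from(note))
        .expect("save failed");
    let found = ctx.exists("notes".to_string(), id).expect("exists failed");
    assert!(found);
}
```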
+impl OsirisContext {
+    /// Generic save method for any Storable object
+    pub fn save_object<T>(&self, object: T) -> Result<String, Box<EvalAltResult>>
+    where
+        T: crate::store::Storable + Send + 'static,
+    {
+        let store = self.store.clone();
+        let id = object.base_data().id;
+
+        tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(async move {
+                store.put(&object).await
+                    .map_err(|e| format!("Failed to save object: {}", e))?;
+                Ok(id.to_string())
+            })
+        }).map_err(|e: String| e.into())
+    }
+}
+
+impl CustomType for OsirisContext {
+    fn build(mut builder: TypeBuilder<Self>) {
+        builder
+            .with_name("OsirisContext")
+            .with_fn("participants", |ctx: &mut OsirisContext| ctx.participants())
+            .with_fn("context_id", |ctx: &mut OsirisContext| ctx.context_id())
+            // Generic CRUD (with collection name)
+            .with_fn("save", |ctx: &mut OsirisContext, collection: String, id: String, data: rhai::Dynamic| ctx.save(collection, id, data))
+            // Typed save methods (no collection name needed - Rhai will pick the right one based on type)
+            .with_fn("save", |ctx: &mut OsirisContext, note: Note| ctx.save_object(note))
+            .with_fn("save", |ctx: &mut OsirisContext, event: crate::objects::Event| ctx.save_object(event))
+            .with_fn("save", |ctx: &mut OsirisContext, user: User| ctx.save_object(user))
+            .with_fn("save", |ctx: &mut OsirisContext, group: Group| ctx.save_object(group))
+            .with_fn("save", |ctx: &mut OsirisContext, account: Account| ctx.save_object(account))
+            .with_fn("save", |ctx: &mut OsirisContext, zone: DNSZone| ctx.save_object(zone))
+            .with_fn("get", |ctx: &mut OsirisContext, collection: String, id: String| ctx.get(collection, id))
+            .with_fn("delete", |ctx: &mut OsirisContext, collection: String, id: String| ctx.delete(collection, id))
+            .with_fn("list", |ctx: &mut OsirisContext, collection: String| ctx.list(collection))
+            .with_fn("query", |ctx: &mut OsirisContext, collection: String, field: String, value: String| ctx.query(collection, field, value));
+    }
+}
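With the `CustomType` implementation in place, an engine can expose the context to scripts via `build_type`. A hedged sketch; the `ctx` accessor function is hypothetical, not part of this commit:

```rust
use rhai::Engine;

fn engine_with_context(ctx: OsirisContext) -> Engine {
    let mut engine = Engine::new();
    // Registers OsirisContext plus the save/get/delete/list/query overloads above.
    engine.build_type::<OsirisContext>();
    // Hypothetical accessor so scripts can obtain the shared instance.
    engine.register_fn("ctx", move || ctx.clone());
    engine
}

// A script could then call the generic CRUD surface, e.g.:
//   let c = ctx();
//   let id = c.save("residents", "resident_123", #{ email: "test@example.com" });
```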
+
+// ============================================================================
+// OsirisContextBuilder
+// ============================================================================
+
+/// Builder for OsirisContext
+pub struct OsirisContextBuilder {
+    participants: Option<Vec<String>>,
+    herodb_url: Option<String>,
+    db_id: Option<u16>,
+}
+
+impl OsirisContextBuilder {
+    /// Create a new builder
+    pub fn new() -> Self {
+        Self {
+            participants: None,
+            herodb_url: None,
+            db_id: None,
+        }
+    }
+
+    /// Set the context participants (public keys)
+    pub fn participants(mut self, participants: Vec<String>) -> Self {
+        self.participants = Some(participants);
+        self
+    }
+
+    /// Set a single participant (for backwards compatibility)
+    pub fn name(mut self, name: impl ToString) -> Self {
+        self.participants = Some(vec![name.to_string()]);
+        self
+    }
+
+    /// Set owner (deprecated, use participants instead)
+    #[deprecated(note = "Use participants() instead")]
+    pub fn owner(mut self, owner_id: impl ToString) -> Self {
+        self.participants = Some(vec![owner_id.to_string()]);
+        self
+    }
+
+    /// Set the HeroDB URL
+    pub fn herodb_url(mut self, url: impl ToString) -> Self {
+        self.herodb_url = Some(url.to_string());
+        self
+    }
+
+    /// Set the HeroDB database ID
+    pub fn db_id(mut self, db_id: u16) -> Self {
+        self.db_id = Some(db_id);
+        self
+    }
+
+    /// Build the OsirisContext
+    pub fn build(self) -> Result<OsirisContext, Box<EvalAltResult>> {
+        let participants = self.participants.ok_or("Context participants are required")?;
+
+        // HeroDB URL and DB ID are now optional - context can work without storage
+        let herodb_url = self.herodb_url.unwrap_or_else(|| "redis://localhost:6379".to_string());
+        let db_id = self.db_id.unwrap_or(1);
+
+        if participants.is_empty() {
+            return Err("At least one participant is required".into());
+        }
+
+        // Create HeroDB client
+        let client = HeroDbClient::new(&herodb_url, db_id)?;
+
+        // Create store
+        let store = GenericStore::new(client);
+
+        Ok(OsirisContext {
+            participants,
+            store: Arc::new(store),
+        })
+    }
+}
+
+impl Default for OsirisContextBuilder {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_context_creation() {
+        let ctx = OsirisContext::new("test_ctx", "redis://localhost:6379", 1);
+        assert!(ctx.is_ok());
+
+        let ctx = ctx.unwrap();
+        assert_eq!(ctx.participants(), vec!["test_ctx".to_string()]);
+        assert_eq!(ctx.context_id(), "test_ctx");
+    }
+
+    #[test]
+    fn test_builder_basic() {
+        let ctx = OsirisContextBuilder::new()
+            .participants(vec!["pk1".to_string()])
+            .herodb_url("redis://localhost:6379")
+            .db_id(1)
+            .build();
+
+        assert!(ctx.is_ok());
+        let ctx = ctx.unwrap();
+        assert_eq!(ctx.participants(), vec!["pk1".to_string()]);
+        assert_eq!(ctx.context_id(), "pk1");
+    }
+
+    #[test]
+    fn test_builder_with_multiple_participants() {
+        let ctx = OsirisContextBuilder::new()
+            .participants(vec!["pk1".to_string(), "pk2".to_string(), "pk3".to_string()])
+            .herodb_url("redis://localhost:6379")
+            .db_id(1)
+            .build();
+
+        assert!(ctx.is_ok());
+        let ctx = ctx.unwrap();
+        assert_eq!(ctx.participants().len(), 3);
+        // Context ID should be sorted
+        assert_eq!(ctx.context_id(), "pk1,pk2,pk3");
+    }
+
+    #[test]
+    fn test_builder_missing_participants() {
+        let ctx = OsirisContextBuilder::new()
+            .herodb_url("redis://localhost:6379")
+            .db_id(1)
+            .build();
+
+        assert!(ctx.is_err());
+        assert!(ctx.unwrap_err().to_string().contains("participants are required"));
+    }
+}
diff --git a/lib/osiris/core/error.rs b/lib/osiris/core/error.rs
new file mode 100644
index 0000000..d835d17
--- /dev/null
+++ b/lib/osiris/core/error.rs
@@ -0,0 +1,46 @@
+use std::fmt;
+
+#[derive(Debug)]
+pub enum Error {
+    Redis(redis::RedisError),
+    Serialization(serde_json::Error),
+    NotFound(String),
+    InvalidInput(String),
+    Config(String),
+    Io(std::io::Error),
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Error::Redis(e) => write!(f, "Redis error: {}", e),
+            Error::Serialization(e) => write!(f, "Serialization error: {}", e),
+            Error::NotFound(msg) => write!(f, "Not found: {}", msg),
+            Error::InvalidInput(msg) => write!(f, "Invalid input: {}", msg),
+            Error::Config(msg) => write!(f, "Configuration error: {}", msg),
+            Error::Io(e) => write!(f, "IO error: {}", e),
+        }
+    }
+}
+
+impl std::error::Error for Error {}
+
+impl From<redis::RedisError> for Error {
+    fn from(e: redis::RedisError) -> Self {
+        Error::Redis(e)
+    }
+}
+
+impl From<serde_json::Error> for Error {
+    fn from(e: serde_json::Error) -> Self {
+        Error::Serialization(e)
+    }
+}
+
+impl From<std::io::Error> for Error {
+    fn from(e: std::io::Error) -> Self {
+        Error::Io(e)
+    }
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/lib/osiris/core/index/field_index.rs b/lib/osiris/core/index/field_index.rs
new file mode 100644
index 0000000..a9df1c0
--- /dev/null
+++ b/lib/osiris/core/index/field_index.rs
@@ -0,0 +1,140 @@
+use crate::error::Result;
+use crate::store::{HeroDbClient, OsirisObject};
+
+/// Field indexing for fast filtering by tags and metadata
+#[derive(Debug, Clone)]
+pub struct FieldIndex {
+    client: HeroDbClient,
+}
+
+impl FieldIndex {
+    /// Create a new field index
+    pub fn new(client: HeroDbClient) -> Self {
+        Self { client }
+    }
+
+    /// Index an object (add to field indexes)
+    pub async fn index_object(&self, obj: &OsirisObject) -> Result<()> {
+        // Index tags
+        for (key, value) in &obj.meta.tags {
+            let field_key = format!("field:tag:{}={}", key, value);
+            self.client.sadd(&field_key, &obj.id).await?;
+        }
+
+        // Index MIME type if present
+        if let Some(mime) = &obj.meta.mime {
+            let field_key = format!("field:mime:{}", mime);
+            self.client.sadd(&field_key, &obj.id).await?;
+        }
+
+        // Index title if present (for exact match)
+        if let Some(title) = &obj.meta.title {
+            let field_key = format!("field:title:{}", title);
+            self.client.sadd(&field_key, &obj.id).await?;
+        }
+
+        // Add to scan index for text search
+        self.client.sadd("scan:index", &obj.id).await?;
+
+        Ok(())
+    }
+
+    /// Remove an object from indexes
+    pub async fn deindex_object(&self, obj: &OsirisObject) -> Result<()> {
+        // Remove from tag indexes
+        for (key, value) in &obj.meta.tags {
+            let field_key = format!("field:tag:{}={}", key, value);
+            self.client.srem(&field_key, &obj.id).await?;
+        }
+
+        // Remove from MIME index
+        if let Some(mime) = &obj.meta.mime {
+            let field_key = format!("field:mime:{}", mime);
+            self.client.srem(&field_key, &obj.id).await?;
+        }
+
+        // Remove from title index
+        if let Some(title) = &obj.meta.title {
+            let field_key = format!("field:title:{}", title);
+            self.client.srem(&field_key, &obj.id).await?;
+        }
+
+        // Remove from scan index
+        self.client.srem("scan:index", &obj.id).await?;
+
+        Ok(())
+    }
+
+    /// Update object indexes (remove old, add new)
+    pub async fn reindex_object(&self, old_obj: &OsirisObject, new_obj: &OsirisObject) -> Result<()> {
+        self.deindex_object(old_obj).await?;
+        self.index_object(new_obj).await?;
+        Ok(())
+    }
+
+    /// Get all IDs matching a tag filter
+    pub async fn get_ids_by_tag(&self, key: &str, value: &str) -> Result<Vec<String>> {
+        let field_key = format!("field:tag:{}={}", key, value);
+        self.client.smembers(&field_key).await
+    }
+
+    /// Get all IDs matching a MIME type
+    pub async fn get_ids_by_mime(&self, mime: &str) -> Result<Vec<String>> {
+        let field_key = format!("field:mime:{}", mime);
+        self.client.smembers(&field_key).await
+    }
+
+    /// Get all IDs matching a title
+    pub async fn get_ids_by_title(&self, title: &str) -> Result<Vec<String>> {
+        let field_key = format!("field:title:{}", title);
+        self.client.smembers(&field_key).await
+    }
+
+    /// Get all IDs in the scan index
+    pub async fn get_all_ids(&self) -> Result<Vec<String>> {
+        self.client.smembers("scan:index").await
+    }
+
+    /// Get intersection of multiple field filters
+    pub async fn get_ids_by_filters(&self, filters: &[(String, String)]) -> Result<Vec<String>> {
+        if filters.is_empty() {
+            return self.get_all_ids().await;
+        }
+
+        let keys: Vec<String> = filters
+            .iter()
+            .map(|(k, v)| {
+                if k == "mime" {
+                    format!("field:mime:{}", v)
+                } else if k == "title" {
+                    format!("field:title:{}", v)
+                } else {
+                    format!("field:tag:{}={}", k, v)
+                }
+            })
+            .collect();
+
+        self.client.sinter(&keys).await
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    #[ignore]
+    async fn test_index_object() {
+        let client = HeroDbClient::new("redis://localhost:6379", 1).unwrap();
+        let index = FieldIndex::new(client);
+
+        let mut obj = OsirisObject::new("test".to_string(), Some("Hello".to_string()));
+        obj.set_tag("topic".to_string(), "rust".to_string());
+        obj.set_mime(Some("text/plain".to_string()));
+
+        index.index_object(&obj).await.unwrap();
+
+        let ids = index.get_ids_by_tag("topic", "rust").await.unwrap();
+        assert!(ids.contains(&obj.id));
+    }
+}
diff --git a/lib/osiris/core/index/mod.rs
b/lib/osiris/core/index/mod.rs new file mode 100644 index 0000000..cb8608b --- /dev/null +++ b/lib/osiris/core/index/mod.rs @@ -0,0 +1,3 @@ +pub mod field_index; + +pub use field_index::FieldIndex; diff --git a/lib/osiris/core/interfaces/cli.rs b/lib/osiris/core/interfaces/cli.rs new file mode 100644 index 0000000..c079d19 --- /dev/null +++ b/lib/osiris/core/interfaces/cli.rs @@ -0,0 +1,408 @@ +use crate::config::{self, NamespaceConfig}; +use crate::error::{Error, Result}; +use crate::index::FieldIndex; +use crate::retrieve::{RetrievalQuery, SearchEngine}; +use crate::store::{HeroDbClient, OsirisObject}; +use clap::{Parser, Subcommand}; +use std::collections::BTreeMap; +use std::fs; +use std::io::{self, Read}; +use std::path::PathBuf; + +#[derive(Parser, Debug)] +#[command(name = "osiris")] +#[command(about = "OSIRIS - Object Storage, Indexing & Retrieval Intelligent System", long_about = None)] +pub struct Cli { + #[command(subcommand)] + pub command: Commands, +} + +#[derive(Subcommand, Debug)] +pub enum Commands { + /// Initialize OSIRIS configuration + Init { + /// HeroDB URL + #[arg(long, default_value = "redis://localhost:6379")] + herodb: String, + }, + + /// Namespace management + Ns { + #[command(subcommand)] + command: NsCommands, + }, + + /// Put an object + Put { + /// Object path (namespace/name) + path: String, + + /// File to upload (use '-' for stdin) + file: String, + + /// Tags (key=value pairs, comma-separated) + #[arg(long)] + tags: Option, + + /// MIME type + #[arg(long)] + mime: Option, + + /// Title + #[arg(long)] + title: Option, + }, + + /// Get an object + Get { + /// Object path (namespace/name or namespace/id) + path: String, + + /// Output file (default: stdout) + #[arg(long)] + output: Option, + + /// Output raw content only (no metadata) + #[arg(long)] + raw: bool, + }, + + /// Delete an object + Del { + /// Object path (namespace/name or namespace/id) + path: String, + }, + + /// Search/find objects + Find { + /// Text query (optional) + query: Option, + + /// Namespace to search + #[arg(long)] + ns: String, + + /// Filters (key=value pairs, comma-separated) + #[arg(long)] + filter: Option, + + /// Maximum number of results + #[arg(long, default_value = "10")] + topk: usize, + + /// Output as JSON + #[arg(long)] + json: bool, + }, + + /// Show statistics + Stats { + /// Namespace (optional, shows all if not specified) + #[arg(long)] + ns: Option, + }, +} + +#[derive(Subcommand, Debug, Clone)] +pub enum NsCommands { + /// Create a new namespace + Create { + /// Namespace name + name: String, + }, + + /// List all namespaces + List, + + /// Delete a namespace + Delete { + /// Namespace name + name: String, + }, +} + +impl Cli { + pub async fn run(self) -> Result<()> { + match self.command { + Commands::Init { herodb } => { + let config = config::create_default_config(herodb); + config::save_config(&config, None)?; + println!("✓ OSIRIS initialized"); + println!(" Config: {}", config::default_config_path().display()); + Ok(()) + } + + Commands::Ns { ref command } => self.handle_ns_command(command.clone()).await, + Commands::Put { ref path, ref file, ref tags, ref mime, ref title } => { + self.handle_put(path.clone(), file.clone(), tags.clone(), mime.clone(), title.clone()).await + } + Commands::Get { ref path, ref output, raw } => { + self.handle_get(path.clone(), output.clone(), raw).await + } + Commands::Del { ref path } => self.handle_del(path.clone()).await, + Commands::Find { ref query, ref ns, ref filter, topk, json } => { + self.handle_find(query.clone(), 
ns.clone(), filter.clone(), topk, json).await + } + Commands::Stats { ref ns } => self.handle_stats(ns.clone()).await, + } + } + + async fn handle_ns_command(&self, command: NsCommands) -> Result<()> { + let mut config = config::load_config(None)?; + + match command { + NsCommands::Create { name } => { + if config.get_namespace(&name).is_some() { + return Err(Error::InvalidInput(format!( + "Namespace '{}' already exists", + name + ))); + } + + let db_id = config.next_db_id(); + let ns_config = NamespaceConfig { db_id }; + + config.set_namespace(name.clone(), ns_config); + config::save_config(&config, None)?; + + println!("✓ Created namespace '{}' (DB {})", name, db_id); + Ok(()) + } + + NsCommands::List => { + if config.namespaces.is_empty() { + println!("No namespaces configured"); + } else { + println!("Namespaces:"); + for (name, ns_config) in &config.namespaces { + println!(" {} → DB {}", name, ns_config.db_id); + } + } + Ok(()) + } + + NsCommands::Delete { name } => { + if config.remove_namespace(&name).is_none() { + return Err(Error::NotFound(format!("Namespace '{}'", name))); + } + + config::save_config(&config, None)?; + println!("✓ Deleted namespace '{}'", name); + Ok(()) + } + } + } + + async fn handle_put( + &self, + path: String, + file: String, + tags: Option, + mime: Option, + title: Option, + ) -> Result<()> { + let (ns, name) = parse_path(&path)?; + let config = config::load_config(None)?; + let ns_config = config.get_namespace(&ns) + .ok_or_else(|| Error::NotFound(format!("Namespace '{}'", ns)))?; + + // Read content + let content = if file == "-" { + let mut buffer = String::new(); + io::stdin().read_to_string(&mut buffer)?; + buffer + } else { + fs::read_to_string(&file)? + }; + + // Create object + let mut obj = OsirisObject::with_id(name.clone(), ns.clone(), Some(content)); + + if let Some(title) = title { + obj.set_title(Some(title)); + } + + if let Some(mime) = mime { + obj.set_mime(Some(mime)); + } + + // Parse tags + if let Some(tags_str) = tags { + let tag_map = parse_tags(&tags_str)?; + for (key, value) in tag_map { + obj.set_tag(key, value); + } + } + + // Store object + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let index = FieldIndex::new(client.clone()); + + client.put_object(&obj).await?; + index.index_object(&obj).await?; + + println!("✓ Stored {}/{}", ns, obj.id); + Ok(()) + } + + async fn handle_get(&self, path: String, output: Option, raw: bool) -> Result<()> { + let (ns, id) = parse_path(&path)?; + let config = config::load_config(None)?; + let ns_config = config.get_namespace(&ns) + .ok_or_else(|| Error::NotFound(format!("Namespace '{}'", ns)))?; + + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let obj = client.get_object(&id).await?; + + if raw { + // Output raw content only + let content = obj.text.unwrap_or_default(); + if let Some(output_path) = output { + fs::write(output_path, content)?; + } else { + print!("{}", content); + } + } else { + // Output full object as JSON + let json = serde_json::to_string_pretty(&obj)?; + if let Some(output_path) = output { + fs::write(output_path, json)?; + } else { + println!("{}", json); + } + } + + Ok(()) + } + + async fn handle_del(&self, path: String) -> Result<()> { + let (ns, id) = parse_path(&path)?; + let config = config::load_config(None)?; + let ns_config = config.get_namespace(&ns) + .ok_or_else(|| Error::NotFound(format!("Namespace '{}'", ns)))?; + + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let index = 
FieldIndex::new(client.clone()); + + // Get object first to deindex it + let obj = client.get_object(&id).await?; + index.deindex_object(&obj).await?; + + let deleted = client.delete_object(&id).await?; + + if deleted { + println!("✓ Deleted {}/{}", ns, id); + Ok(()) + } else { + Err(Error::NotFound(format!("{}/{}", ns, id))) + } + } + + async fn handle_find( + &self, + query: Option, + ns: String, + filter: Option, + topk: usize, + json: bool, + ) -> Result<()> { + let config = config::load_config(None)?; + let ns_config = config.get_namespace(&ns) + .ok_or_else(|| Error::NotFound(format!("Namespace '{}'", ns)))?; + + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let engine = SearchEngine::new(client.clone()); + + // Build query + let mut retrieval_query = RetrievalQuery::new(ns.clone()).with_top_k(topk); + + if let Some(text) = query { + retrieval_query = retrieval_query.with_text(text); + } + + if let Some(filter_str) = filter { + let filters = parse_tags(&filter_str)?; + for (key, value) in filters { + retrieval_query = retrieval_query.with_filter(key, value); + } + } + + // Execute search + let results = engine.search(&retrieval_query).await?; + + if json { + println!("{}", serde_json::to_string_pretty(&results)?); + } else { + if results.is_empty() { + println!("No results found"); + } else { + println!("Found {} result(s):\n", results.len()); + for (i, result) in results.iter().enumerate() { + println!("{}. {} (score: {:.2})", i + 1, result.id, result.score); + if let Some(snippet) = &result.snippet { + println!(" {}", snippet); + } + println!(); + } + } + } + + Ok(()) + } + + async fn handle_stats(&self, ns: Option) -> Result<()> { + let config = config::load_config(None)?; + + if let Some(ns_name) = ns { + let ns_config = config.get_namespace(&ns_name) + .ok_or_else(|| Error::NotFound(format!("Namespace '{}'", ns_name)))?; + + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let size = client.dbsize().await?; + + println!("Namespace: {}", ns_name); + println!(" DB ID: {}", ns_config.db_id); + println!(" Keys: {}", size); + } else { + println!("OSIRIS Statistics\n"); + println!("Namespaces: {}", config.namespaces.len()); + for (name, ns_config) in &config.namespaces { + let client = HeroDbClient::new(&config.herodb.url, ns_config.db_id)?; + let size = client.dbsize().await?; + println!(" {} (DB {}) → {} keys", name, ns_config.db_id, size); + } + } + + Ok(()) + } +} + +/// Parse a path into namespace and name/id +fn parse_path(path: &str) -> Result<(String, String)> { + let parts: Vec<&str> = path.splitn(2, '/').collect(); + if parts.len() != 2 { + return Err(Error::InvalidInput(format!( + "Invalid path format. Expected 'namespace/name', got '{}'", + path + ))); + } + Ok((parts[0].to_string(), parts[1].to_string())) +} + +/// Parse tags from comma-separated key=value pairs +fn parse_tags(tags_str: &str) -> Result> { + let mut tags = BTreeMap::new(); + + for pair in tags_str.split(',') { + let parts: Vec<&str> = pair.trim().splitn(2, '=').collect(); + if parts.len() != 2 { + return Err(Error::InvalidInput(format!( + "Invalid tag format. 
Expected 'key=value', got '{}'", + pair + ))); + } + tags.insert(parts[0].to_string(), parts[1].to_string()); + } + + Ok(tags) +} diff --git a/lib/osiris/core/interfaces/mod.rs b/lib/osiris/core/interfaces/mod.rs new file mode 100644 index 0000000..3277100 --- /dev/null +++ b/lib/osiris/core/interfaces/mod.rs @@ -0,0 +1,3 @@ +pub mod cli; + +pub use cli::Cli; diff --git a/lib/osiris/core/lib.rs b/lib/osiris/core/lib.rs new file mode 100644 index 0000000..126994e --- /dev/null +++ b/lib/osiris/core/lib.rs @@ -0,0 +1,23 @@ +// Allow the crate to reference itself as ::osiris for the derive macro +extern crate self as osiris; + +pub mod config; +pub mod error; +pub mod index; +pub mod interfaces; +pub mod objects; +pub mod retrieve; +pub mod store; + +// Rhai integration modules (top-level) +pub mod context; + +pub use error::{Error, Result}; +pub use store::{BaseData, IndexKey, Object, Storable}; +pub use objects::{Event, Note}; + +// OsirisContext is the main type for Rhai integration +pub use context::{OsirisContext, OsirisInstance, OsirisContextBuilder}; + +// Re-export the derive macro +pub use osiris_derive::Object as DeriveObject; diff --git a/lib/osiris/core/main.rs b/lib/osiris/core/main.rs new file mode 100644 index 0000000..18905eb --- /dev/null +++ b/lib/osiris/core/main.rs @@ -0,0 +1,22 @@ +use clap::Parser; +use osiris::interfaces::Cli; +use tracing_subscriber::{fmt, EnvFilter}; + +#[tokio::main] +async fn main() { + // Initialize tracing + fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .init(); + + // Parse CLI arguments + let cli = Cli::parse(); + + // Run the command + if let Err(e) = cli.run().await { + eprintln!("Error: {}", e); + std::process::exit(1); + } +} diff --git a/lib/osiris/core/objects/accounting/expense.rs b/lib/osiris/core/objects/accounting/expense.rs new file mode 100644 index 0000000..505fa8a --- /dev/null +++ b/lib/osiris/core/objects/accounting/expense.rs @@ -0,0 +1,151 @@ +/// Expense Object for Accounting + +use crate::store::BaseData; +use serde::{Deserialize, Serialize}; + +/// Expense category +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum ExpenseCategory { + Registration, + Subscription, + Service, + Product, + Other, +} + +impl Default for ExpenseCategory { + fn default() -> Self { + ExpenseCategory::Other + } +} + +/// Expense status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum ExpenseStatus { + Pending, + Approved, + Paid, + Rejected, +} + +impl Default for ExpenseStatus { + fn default() -> Self { + ExpenseStatus::Pending + } +} + +/// Expense record +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, crate::DeriveObject)] +pub struct Expense { + /// Base data for object storage + pub base_data: BaseData, + + /// User/entity ID who incurred the expense + pub user_id: u32, + + /// Amount + pub amount: f64, + + /// Currency + pub currency: String, + + /// Description + pub description: String, + + /// Category + pub category: ExpenseCategory, + + /// Status + pub status: ExpenseStatus, + + /// Date incurred (unix timestamp) + pub expense_date: u64, + + /// Related invoice ID (if any) + pub invoice_id: Option, +} + +impl Expense { + /// Create a new expense + pub fn new(id: u32) -> Self { + let base_data = BaseData::with_id(id, String::new()); + let now = time::OffsetDateTime::now_utc().unix_timestamp() as u64; + Self { + base_data, + user_id: 0, + amount: 0.0, + currency: String::from("USD"), + description: String::new(), + 
category: ExpenseCategory::default(), + status: ExpenseStatus::default(), + expense_date: now, + invoice_id: None, + } + } + + /// Set user ID (fluent) + pub fn user_id(mut self, id: u32) -> Self { + self.user_id = id; + self + } + + /// Set amount (fluent) + pub fn amount(mut self, amount: f64) -> Self { + self.amount = amount; + self + } + + /// Set currency (fluent) + pub fn currency(mut self, currency: impl ToString) -> Self { + self.currency = currency.to_string(); + self + } + + /// Set description (fluent) + pub fn description(mut self, description: impl ToString) -> Self { + self.description = description.to_string(); + self + } + + /// Set category (fluent) + pub fn category(mut self, category: ExpenseCategory) -> Self { + self.category = category; + self + } + + /// Set category from string (fluent) + pub fn category_str(mut self, category: &str) -> Self { + self.category = match category.to_lowercase().as_str() { + "registration" => ExpenseCategory::Registration, + "subscription" => ExpenseCategory::Subscription, + "service" => ExpenseCategory::Service, + "product" => ExpenseCategory::Product, + _ => ExpenseCategory::Other, + }; + self + } + + /// Set invoice ID (fluent) + pub fn invoice_id(mut self, id: u32) -> Self { + self.invoice_id = Some(id); + self + } + + /// Approve expense + pub fn approve(mut self) -> Self { + self.status = ExpenseStatus::Approved; + self + } + + /// Mark as paid + pub fn mark_paid(mut self) -> Self { + self.status = ExpenseStatus::Paid; + self + } + + /// Reject expense + pub fn reject(mut self) -> Self { + self.status = ExpenseStatus::Rejected; + self + } +} diff --git a/lib/osiris/core/objects/accounting/invoice.rs b/lib/osiris/core/objects/accounting/invoice.rs new file mode 100644 index 0000000..0e2f770 --- /dev/null +++ b/lib/osiris/core/objects/accounting/invoice.rs @@ -0,0 +1,130 @@ +/// Invoice Object for Accounting + +use crate::store::BaseData; +use serde::{Deserialize, Serialize}; + +/// Invoice status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum InvoiceStatus { + Draft, + Sent, + Paid, + Overdue, + Cancelled, +} + +impl Default for InvoiceStatus { + fn default() -> Self { + InvoiceStatus::Draft + } +} + +/// Invoice for billing +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, crate::DeriveObject)] +pub struct Invoice { + /// Base data for object storage + pub base_data: BaseData, + + /// Invoice number + pub invoice_number: String, + + /// Customer/payer ID + pub customer_id: u32, + + /// Amount + pub amount: f64, + + /// Currency + pub currency: String, + + /// Description + pub description: String, + + /// Status + pub status: InvoiceStatus, + + /// Due date (unix timestamp) + pub due_date: Option, + + /// Payment date (unix timestamp) + pub paid_date: Option, +} + +impl Invoice { + /// Create a new invoice + pub fn new(id: u32) -> Self { + let base_data = BaseData::with_id(id, String::new()); + Self { + base_data, + invoice_number: String::new(), + customer_id: 0, + amount: 0.0, + currency: String::from("USD"), + description: String::new(), + status: InvoiceStatus::default(), + due_date: None, + paid_date: None, + } + } + + /// Set invoice number (fluent) + pub fn invoice_number(mut self, number: impl ToString) -> Self { + self.invoice_number = number.to_string(); + self + } + + /// Set customer ID (fluent) + pub fn customer_id(mut self, id: u32) -> Self { + self.customer_id = id; + self + } + + /// Set amount (fluent) + pub fn amount(mut self, amount: f64) -> Self { + self.amount = amount; + self + } 
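Taken together, the fluent setters (including `currency`, `description`, `due_date`, and the status transitions defined just below) compose into one chain. An illustrative sketch with made-up values:

```rust
// Sketch: composing an invoice via the fluent API, then marking it sent.
let invoice = Invoice::new(1001)
    .invoice_number("INV-2025-001")
    .customer_id(42)
    .amount(99.50)
    .currency("EUR")
    .description("Monthly subscription")
    .due_date(1_767_225_600) // illustrative unix timestamp
    .send();

assert_eq!(invoice.status, InvoiceStatus::Sent);
```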
+ + /// Set currency (fluent) + pub fn currency(mut self, currency: impl ToString) -> Self { + self.currency = currency.to_string(); + self + } + + /// Set description (fluent) + pub fn description(mut self, description: impl ToString) -> Self { + self.description = description.to_string(); + self + } + + /// Set due date (fluent) + pub fn due_date(mut self, date: u64) -> Self { + self.due_date = Some(date); + self + } + + /// Mark as sent + pub fn send(mut self) -> Self { + self.status = InvoiceStatus::Sent; + self + } + + /// Mark as paid + pub fn mark_paid(mut self) -> Self { + self.status = InvoiceStatus::Paid; + self.paid_date = Some(time::OffsetDateTime::now_utc().unix_timestamp() as u64); + self + } + + /// Mark as overdue + pub fn mark_overdue(mut self) -> Self { + self.status = InvoiceStatus::Overdue; + self + } + + /// Cancel invoice + pub fn cancel(mut self) -> Self { + self.status = InvoiceStatus::Cancelled; + self + } +} diff --git a/lib/osiris/core/objects/accounting/mod.rs b/lib/osiris/core/objects/accounting/mod.rs new file mode 100644 index 0000000..fdd6e93 --- /dev/null +++ b/lib/osiris/core/objects/accounting/mod.rs @@ -0,0 +1,11 @@ +/// Accounting Module +/// +/// Provides Invoice and Expense objects for financial tracking + +pub mod invoice; +pub mod expense; +pub mod rhai; + +pub use invoice::{Invoice, InvoiceStatus}; +pub use expense::{Expense, ExpenseCategory, ExpenseStatus}; +// pub use rhai::register_accounting_modules; // TODO: Implement when needed diff --git a/lib/osiris/core/objects/accounting/rhai.rs b/lib/osiris/core/objects/accounting/rhai.rs new file mode 100644 index 0000000..e69de29 diff --git a/lib/osiris/core/objects/communication/email.rs b/lib/osiris/core/objects/communication/email.rs new file mode 100644 index 0000000..42b35b2 --- /dev/null +++ b/lib/osiris/core/objects/communication/email.rs @@ -0,0 +1,489 @@ +/// Email Client +/// +/// Real SMTP email client for sending emails including verification emails. 
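Once the client defined below is configured, sending a plain-text message looks like this. A sketch with placeholder SMTP host and credentials; delivery obviously requires a reachable server:

```rust
let client = EmailClient::new()
    .smtp_host("smtp.example.com".to_string())
    .smtp_port(587)
    .username("mailer@example.com".to_string())
    .password("app-password".to_string())
    .from_address("noreply@example.com".to_string())
    .from_name("Hero".to_string())
    .use_tls(true);

// Errors surface as descriptive Strings (bad address, unreachable SMTP, ...).
client
    .send_email("user@example.com", "Welcome", "Hello from Horus!")
    .expect("send failed");
```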
+ +use serde::{Deserialize, Serialize}; +use super::verification::Verification; +use crate::store::{BaseData, Object, Storable}; +use lettre::{ + Message, SmtpTransport, Transport, + message::{header::ContentType, MultiPart, SinglePart}, + transport::smtp::authentication::Credentials, +}; + +/// Email client with SMTP configuration +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct EmailClient { + #[serde(flatten)] + pub base_data: BaseData, + + /// SMTP server hostname + pub smtp_host: String, + + /// SMTP port + pub smtp_port: u16, + + /// Username for SMTP auth + pub username: String, + + /// Password for SMTP auth + pub password: String, + + /// From address + pub from_address: String, + + /// From name + pub from_name: String, + + /// Use TLS + pub use_tls: bool, +} + +/// Mail template with placeholders +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct MailTemplate { + #[serde(flatten)] + pub base_data: BaseData, + + /// Template ID + pub id: String, + + /// Template name + pub name: String, + + /// Email subject (can contain placeholders like ${name}) + pub subject: String, + + /// Email body (can contain placeholders like ${code}, ${url}) + pub body: String, + + /// HTML body (optional, can contain placeholders) + pub html_body: Option, +} + +impl Default for MailTemplate { + fn default() -> Self { + Self { + base_data: BaseData::new(), + id: String::new(), + name: String::new(), + subject: String::new(), + body: String::new(), + html_body: None, + } + } +} + +/// Email message created from a template +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct Mail { + /// Recipient email address + pub to: String, + + /// Template ID to use + pub template_id: Option, + + /// Parameters to replace in template + pub parameters: std::collections::HashMap, + + /// Direct subject (if not using template) + pub subject: Option, + + /// Direct body (if not using template) + pub body: Option, +} + +impl Default for EmailClient { + fn default() -> Self { + Self { + base_data: BaseData::new(), + smtp_host: "localhost".to_string(), + smtp_port: 587, + username: String::new(), + password: String::new(), + from_address: "noreply@example.com".to_string(), + from_name: "No Reply".to_string(), + use_tls: true, + } + } +} + +impl MailTemplate { + /// Create a new mail template + pub fn new() -> Self { + Self::default() + } + + /// Builder: Set template ID + pub fn id(mut self, id: String) -> Self { + self.id = id; + self + } + + /// Builder: Set template name + pub fn name(mut self, name: String) -> Self { + self.name = name; + self + } + + /// Builder: Set subject + pub fn subject(mut self, subject: String) -> Self { + self.subject = subject; + self + } + + /// Builder: Set body + pub fn body(mut self, body: String) -> Self { + self.body = body; + self + } + + /// Builder: Set HTML body + pub fn html_body(mut self, html_body: String) -> Self { + self.html_body = Some(html_body); + self + } + + /// Replace placeholders in text + fn replace_placeholders(&self, text: &str, parameters: &std::collections::HashMap) -> String { + let mut result = text.to_string(); + for (key, value) in parameters { + let placeholder = format!("${{{}}}", key); + result = result.replace(&placeholder, value); + } + result + } + + /// Render subject with parameters + pub fn render_subject(&self, parameters: &std::collections::HashMap) -> String { + self.replace_placeholders(&self.subject, parameters) + } + + /// Render body with parameters + pub fn 
render_body(&self, parameters: &std::collections::HashMap) -> String { + self.replace_placeholders(&self.body, parameters) + } + + /// Render HTML body with parameters + pub fn render_html_body(&self, parameters: &std::collections::HashMap) -> Option { + self.html_body.as_ref().map(|html| self.replace_placeholders(html, parameters)) + } +} + +impl Mail { + /// Create a new mail + pub fn new() -> Self { + Self::default() + } + + /// Builder: Set recipient + pub fn to(mut self, to: String) -> Self { + self.to = to; + self + } + + /// Builder: Set template ID + pub fn template(mut self, template_id: String) -> Self { + self.template_id = Some(template_id); + self + } + + /// Builder: Add a parameter + pub fn parameter(mut self, key: String, value: String) -> Self { + self.parameters.insert(key, value); + self + } + + /// Builder: Set subject (for non-template emails) + pub fn subject(mut self, subject: String) -> Self { + self.subject = Some(subject); + self + } + + /// Builder: Set body (for non-template emails) + pub fn body(mut self, body: String) -> Self { + self.body = Some(body); + self + } +} + +impl EmailClient { + /// Create a new email client + pub fn new() -> Self { + Self::default() + } + + /// Builder: Set SMTP host + pub fn smtp_host(mut self, host: String) -> Self { + self.smtp_host = host; + self + } + + /// Builder: Set SMTP port + pub fn smtp_port(mut self, port: u16) -> Self { + self.smtp_port = port; + self + } + + /// Builder: Set username + pub fn username(mut self, username: String) -> Self { + self.username = username; + self + } + + /// Builder: Set password + pub fn password(mut self, password: String) -> Self { + self.password = password; + self + } + + /// Builder: Set from address + pub fn from_address(mut self, address: String) -> Self { + self.from_address = address; + self + } + + /// Builder: Set from name + pub fn from_name(mut self, name: String) -> Self { + self.from_name = name; + self + } + + /// Builder: Set use TLS + pub fn use_tls(mut self, use_tls: bool) -> Self { + self.use_tls = use_tls; + self + } + + /// Build SMTP transport + fn build_transport(&self) -> Result { + let creds = Credentials::new( + self.username.clone(), + self.password.clone(), + ); + + let transport = if self.use_tls { + SmtpTransport::starttls_relay(&self.smtp_host) + .map_err(|e| format!("Failed to create SMTP transport: {}", e))? 
+ .credentials(creds) + .port(self.smtp_port) + .build() + } else { + SmtpTransport::builder_dangerous(&self.smtp_host) + .credentials(creds) + .port(self.smtp_port) + .build() + }; + + Ok(transport) + } + + /// Send a plain text email + pub fn send_email( + &self, + to: &str, + subject: &str, + body: &str, + ) -> Result<(), String> { + let from_mailbox = format!("{} <{}>", self.from_name, self.from_address) + .parse() + .map_err(|e| format!("Invalid from address: {}", e))?; + + let to_mailbox = to.parse() + .map_err(|e| format!("Invalid to address: {}", e))?; + + let email = Message::builder() + .from(from_mailbox) + .to(to_mailbox) + .subject(subject) + .body(body.to_string()) + .map_err(|e| format!("Failed to build email: {}", e))?; + + let transport = self.build_transport()?; + + transport.send(&email) + .map_err(|e| format!("Failed to send email: {}", e))?; + + Ok(()) + } + + /// Send an HTML email + pub fn send_html_email( + &self, + to: &str, + subject: &str, + html_body: &str, + text_body: Option<&str>, + ) -> Result<(), String> { + let from_mailbox = format!("{} <{}>", self.from_name, self.from_address) + .parse() + .map_err(|e| format!("Invalid from address: {}", e))?; + + let to_mailbox = to.parse() + .map_err(|e| format!("Invalid to address: {}", e))?; + + // Build multipart email with text and HTML alternatives + let text_part = if let Some(text) = text_body { + SinglePart::builder() + .header(ContentType::TEXT_PLAIN) + .body(text.to_string()) + } else { + SinglePart::builder() + .header(ContentType::TEXT_PLAIN) + .body(String::new()) + }; + + let html_part = SinglePart::builder() + .header(ContentType::TEXT_HTML) + .body(html_body.to_string()); + + let multipart = MultiPart::alternative() + .singlepart(text_part) + .singlepart(html_part); + + let email = Message::builder() + .from(from_mailbox) + .to(to_mailbox) + .subject(subject) + .multipart(multipart) + .map_err(|e| format!("Failed to build email: {}", e))?; + + let transport = self.build_transport()?; + + transport.send(&email) + .map_err(|e| format!("Failed to send email: {}", e))?; + + Ok(()) + } + + /// Send a mail using a template + pub fn send_mail(&self, mail: &Mail, template: &MailTemplate) -> Result<(), String> { + // Render subject and body with parameters + let subject = template.render_subject(&mail.parameters); + let body_text = template.render_body(&mail.parameters); + let html_body = template.render_html_body(&mail.parameters); + + // Send email + if let Some(html) = html_body { + self.send_html_email(&mail.to, &subject, &html, Some(&body_text)) + } else { + self.send_email(&mail.to, &subject, &body_text) + } + } + + /// Send a verification email with code + pub fn send_verification_code_email( + &self, + verification: &Verification, + ) -> Result<(), String> { + let subject = "Verify your email address"; + let body = format!( + "Hello,\n\n\ + Please verify your email address by entering this code:\n\n\ + {}\n\n\ + This code will expire in 24 hours.\n\n\ + If you didn't request this, please ignore this email.", + verification.verification_code + ); + + self.send_email(&verification.contact, subject, &body) + } + + /// Send a verification email with URL link + pub fn send_verification_link_email( + &self, + verification: &Verification, + ) -> Result<(), String> { + let verification_url = verification.get_verification_url() + .ok_or_else(|| "No callback URL configured".to_string())?; + + let subject = "Verify your email address"; + + let html_body = format!( + r#" + + + + + +
+<html>
+<body>
+    <h2>Verify your email address</h2>
+    <p>Hello,</p>
+    <p>Please verify your email address by clicking the button below:</p>
+    <p><a href="{}">Verify Email</a></p>
+    <p>Or enter this verification code:</p>
+    <div><strong>{}</strong></div>
+    <p>This link and code will expire in 24 hours.</p>
+    <p>If you didn't request this, please ignore this email.</p>
+</body>
+</html>
+ +"#, + verification_url, verification.verification_code + ); + + let text_body = format!( + "Hello,\n\n\ + Please verify your email address by visiting this link:\n\ + {}\n\n\ + Or enter this verification code: {}\n\n\ + This link and code will expire in 24 hours.\n\n\ + If you didn't request this, please ignore this email.", + verification_url, verification.verification_code + ); + + self.send_html_email( + &verification.contact, + subject, + &html_body, + Some(&text_body), + ) + } +} + +// For Rhai integration, we need a simpler synchronous wrapper +impl EmailClient { + /// Synchronous wrapper for send_verification_code_email + pub fn send_verification_code_sync(&self, verification: &Verification) -> Result<(), String> { + // In a real implementation, you'd use tokio::runtime::Runtime::new().block_on() + // For now, just simulate + println!("=== VERIFICATION CODE EMAIL ==="); + println!("To: {}", verification.contact); + println!("Code: {}", verification.verification_code); + println!("==============================="); + Ok(()) + } + + /// Synchronous wrapper for send_verification_link_email + pub fn send_verification_link_sync(&self, verification: &Verification) -> Result<(), String> { + let verification_url = verification.get_verification_url() + .ok_or_else(|| "No callback URL configured".to_string())?; + + println!("=== VERIFICATION LINK EMAIL ==="); + println!("To: {}", verification.contact); + println!("Code: {}", verification.verification_code); + println!("Link: {}", verification_url); + println!("==============================="); + Ok(()) + } +} diff --git a/lib/osiris/core/objects/communication/mod.rs b/lib/osiris/core/objects/communication/mod.rs new file mode 100644 index 0000000..a982720 --- /dev/null +++ b/lib/osiris/core/objects/communication/mod.rs @@ -0,0 +1,10 @@ +/// Communication Module +/// +/// Transport-agnostic verification and email client. 
+ +pub mod verification; +pub mod email; +pub mod rhai; + +pub use verification::{Verification, VerificationStatus, VerificationTransport}; +pub use email::EmailClient; diff --git a/lib/osiris/core/objects/communication/rhai.rs b/lib/osiris/core/objects/communication/rhai.rs new file mode 100644 index 0000000..e8d2f4d --- /dev/null +++ b/lib/osiris/core/objects/communication/rhai.rs @@ -0,0 +1,407 @@ +/// Rhai bindings for Communication (Verification and Email) + +use ::rhai::plugin::*; +use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder}; + +use super::verification::{Verification, VerificationStatus, VerificationTransport}; +use super::email::{EmailClient, MailTemplate, Mail}; + +// ============================================================================ +// Verification Module +// ============================================================================ + +type RhaiVerification = Verification; + +#[export_module] +mod rhai_verification_module { + use super::RhaiVerification; + use super::super::verification::{Verification, VerificationTransport}; + + #[rhai_fn(name = "new_verification", return_raw)] + pub fn new_verification( + entity_id: String, + contact: String, + ) -> Result> { + // Default to email transport + Ok(Verification::new(0, entity_id, contact, VerificationTransport::Email)) + } + + #[rhai_fn(name = "callback_url", return_raw)] + pub fn set_callback_url( + verification: &mut RhaiVerification, + url: String, + ) -> Result> { + let owned = std::mem::take(verification); + *verification = owned.callback_url(url); + Ok(verification.clone()) + } + + #[rhai_fn(name = "mark_sent", return_raw)] + pub fn mark_sent( + verification: &mut RhaiVerification, + ) -> Result<(), Box> { + verification.mark_sent(); + Ok(()) + } + + #[rhai_fn(name = "verify_code", return_raw)] + pub fn verify_code( + verification: &mut RhaiVerification, + code: String, + ) -> Result<(), Box> { + verification.verify_code(&code) + .map_err(|e| e.into()) + } + + #[rhai_fn(name = "verify_nonce", return_raw)] + pub fn verify_nonce( + verification: &mut RhaiVerification, + nonce: String, + ) -> Result<(), Box> { + verification.verify_nonce(&nonce) + .map_err(|e| e.into()) + } + + #[rhai_fn(name = "resend", return_raw)] + pub fn resend( + verification: &mut RhaiVerification, + ) -> Result<(), Box> { + verification.resend(); + Ok(()) + } + + // Getters + #[rhai_fn(name = "get_entity_id")] + pub fn get_entity_id(verification: &mut RhaiVerification) -> String { + verification.entity_id.clone() + } + + #[rhai_fn(name = "get_contact")] + pub fn get_contact(verification: &mut RhaiVerification) -> String { + verification.contact.clone() + } + + #[rhai_fn(name = "get_code")] + pub fn get_code(verification: &mut RhaiVerification) -> String { + verification.verification_code.clone() + } + + #[rhai_fn(name = "get_nonce")] + pub fn get_nonce(verification: &mut RhaiVerification) -> String { + verification.verification_nonce.clone() + } + + #[rhai_fn(name = "get_verification_url")] + pub fn get_verification_url(verification: &mut RhaiVerification) -> String { + verification.get_verification_url().unwrap_or_default() + } + + #[rhai_fn(name = "get_status")] + pub fn get_status(verification: &mut RhaiVerification) -> String { + format!("{:?}", verification.status) + } + + #[rhai_fn(name = "get_attempts")] + pub fn get_attempts(verification: &mut RhaiVerification) -> i64 { + verification.attempts as i64 + } +} + +// ============================================================================ +// Mail 
Template Module +// ============================================================================ + +type RhaiMailTemplate = MailTemplate; + +#[export_module] +mod rhai_mail_template_module { + use super::RhaiMailTemplate; + use super::super::email::MailTemplate; + use ::rhai::EvalAltResult; + + #[rhai_fn(name = "new_mail_template", return_raw)] + pub fn new_mail_template() -> Result> { + Ok(MailTemplate::new()) + } + + #[rhai_fn(name = "id", return_raw)] + pub fn set_id( + template: &mut RhaiMailTemplate, + id: String, + ) -> Result> { + let owned = std::mem::take(template); + *template = owned.id(id); + Ok(template.clone()) + } + + #[rhai_fn(name = "name", return_raw)] + pub fn set_name( + template: &mut RhaiMailTemplate, + name: String, + ) -> Result> { + let owned = std::mem::take(template); + *template = owned.name(name); + Ok(template.clone()) + } + + #[rhai_fn(name = "subject", return_raw)] + pub fn set_subject( + template: &mut RhaiMailTemplate, + subject: String, + ) -> Result> { + let owned = std::mem::take(template); + *template = owned.subject(subject); + Ok(template.clone()) + } + + #[rhai_fn(name = "body", return_raw)] + pub fn set_body( + template: &mut RhaiMailTemplate, + body: String, + ) -> Result> { + let owned = std::mem::take(template); + *template = owned.body(body); + Ok(template.clone()) + } + + #[rhai_fn(name = "html_body", return_raw)] + pub fn set_html_body( + template: &mut RhaiMailTemplate, + html_body: String, + ) -> Result> { + let owned = std::mem::take(template); + *template = owned.html_body(html_body); + Ok(template.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(template: &mut RhaiMailTemplate) -> String { + template.id.clone() + } +} + +// ============================================================================ +// Mail Module +// ============================================================================ + +type RhaiMail = Mail; + +#[export_module] +mod rhai_mail_module { + use super::RhaiMail; + use super::super::email::Mail; + use ::rhai::EvalAltResult; + + #[rhai_fn(name = "new_mail", return_raw)] + pub fn new_mail() -> Result> { + Ok(Mail::new()) + } + + #[rhai_fn(name = "to", return_raw)] + pub fn set_to( + mail: &mut RhaiMail, + to: String, + ) -> Result> { + let owned = std::mem::take(mail); + *mail = owned.to(to); + Ok(mail.clone()) + } + + #[rhai_fn(name = "template", return_raw)] + pub fn set_template( + mail: &mut RhaiMail, + template_id: String, + ) -> Result> { + let owned = std::mem::take(mail); + *mail = owned.template(template_id); + Ok(mail.clone()) + } + + #[rhai_fn(name = "parameter", return_raw)] + pub fn add_parameter( + mail: &mut RhaiMail, + key: String, + value: String, + ) -> Result> { + let owned = std::mem::take(mail); + *mail = owned.parameter(key, value); + Ok(mail.clone()) + } +} + +// ============================================================================ +// Email Client Module +// ============================================================================ + +type RhaiEmailClient = EmailClient; + +#[export_module] +mod rhai_email_module { + use super::RhaiEmailClient; + use super::RhaiMail; + use super::RhaiMailTemplate; + use super::super::email::{EmailClient, Mail, MailTemplate}; + use super::super::verification::Verification; + use ::rhai::EvalAltResult; + + #[rhai_fn(name = "new_email_client", return_raw)] + pub fn new_email_client() -> Result> { + Ok(EmailClient::new()) + } + + #[rhai_fn(name = "smtp_host", return_raw)] + pub fn set_smtp_host( + client: &mut RhaiEmailClient, + 
host: String, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.smtp_host(host); + Ok(client.clone()) + } + + #[rhai_fn(name = "smtp_port", return_raw)] + pub fn set_smtp_port( + client: &mut RhaiEmailClient, + port: i64, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.smtp_port(port as u16); + Ok(client.clone()) + } + + #[rhai_fn(name = "username", return_raw)] + pub fn set_username( + client: &mut RhaiEmailClient, + username: String, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.username(username); + Ok(client.clone()) + } + + #[rhai_fn(name = "password", return_raw)] + pub fn set_password( + client: &mut RhaiEmailClient, + password: String, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.password(password); + Ok(client.clone()) + } + + #[rhai_fn(name = "from_email", return_raw)] + pub fn set_from_email( + client: &mut RhaiEmailClient, + email: String, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.from_address(email); + Ok(client.clone()) + } + + #[rhai_fn(name = "from_name", return_raw)] + pub fn set_from_name( + client: &mut RhaiEmailClient, + name: String, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.from_name(name); + Ok(client.clone()) + } + + #[rhai_fn(name = "use_tls", return_raw)] + pub fn set_use_tls( + client: &mut RhaiEmailClient, + use_tls: bool, + ) -> Result> { + let owned = std::mem::take(client); + *client = owned.use_tls(use_tls); + Ok(client.clone()) + } + + #[rhai_fn(name = "send_mail", return_raw)] + pub fn send_mail( + client: &mut RhaiEmailClient, + mail: RhaiMail, + template: RhaiMailTemplate, + ) -> Result<(), Box> { + client.send_mail(&mail, &template) + .map_err(|e| e.into()) + } + + #[rhai_fn(name = "send_verification_code", return_raw)] + pub fn send_verification_code( + client: &mut RhaiEmailClient, + verification: Verification, + ) -> Result<(), Box> { + client.send_verification_code_sync(&verification) + .map_err(|e| e.into()) + } + + #[rhai_fn(name = "send_verification_link", return_raw)] + pub fn send_verification_link( + client: &mut RhaiEmailClient, + verification: Verification, + ) -> Result<(), Box> { + client.send_verification_link_sync(&verification) + .map_err(|e| e.into()) + } +} + +// ============================================================================ +// Registration Functions +// ============================================================================ + +/// Register Communication modules into a Rhai Module +pub fn register_communication_modules(parent_module: &mut Module) { + // Register custom types + parent_module.set_custom_type::("Verification"); + parent_module.set_custom_type::("MailTemplate"); + parent_module.set_custom_type::("Mail"); + parent_module.set_custom_type::("EmailClient"); + + // Merge verification functions + let verification_module = exported_module!(rhai_verification_module); + parent_module.combine_flatten(verification_module); + + // Merge mail template functions + let mail_template_module = exported_module!(rhai_mail_template_module); + parent_module.combine_flatten(mail_template_module); + + // Merge mail functions + let mail_module = exported_module!(rhai_mail_module); + parent_module.combine_flatten(mail_module); + + // Merge email client functions + let email_module = exported_module!(rhai_email_module); + parent_module.combine_flatten(email_module); +} + +// ============================================================================ +// CustomType 
Implementations +// ============================================================================ + +impl CustomType for Verification { + fn build(mut builder: TypeBuilder) { + builder.with_name("Verification"); + } +} + +impl CustomType for MailTemplate { + fn build(mut builder: TypeBuilder) { + builder.with_name("MailTemplate"); + } +} + +impl CustomType for Mail { + fn build(mut builder: TypeBuilder) { + builder.with_name("Mail"); + } +} + +impl CustomType for EmailClient { + fn build(mut builder: TypeBuilder) { + builder.with_name("EmailClient"); + } +} diff --git a/lib/osiris/core/objects/communication/verification.rs b/lib/osiris/core/objects/communication/verification.rs new file mode 100644 index 0000000..1de6ffd --- /dev/null +++ b/lib/osiris/core/objects/communication/verification.rs @@ -0,0 +1,239 @@ +/// Transport-Agnostic Verification +/// +/// Manages verification sessions with codes and nonces for email, SMS, etc. + +use crate::store::{BaseData, Object, Storable}; +use serde::{Deserialize, Serialize}; + +/// Verification transport type +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum VerificationTransport { + Email, + Sms, + WhatsApp, + Telegram, + Other(String), +} + +impl Default for VerificationTransport { + fn default() -> Self { + VerificationTransport::Email + } +} + +/// Verification status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum VerificationStatus { + #[default] + Pending, + Sent, + Verified, + Expired, + Failed, +} + +/// Verification Session +/// +/// Transport-agnostic verification that can be used for email, SMS, etc. +/// Supports both code-based verification and URL-based (nonce) verification. +#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)] +pub struct Verification { + #[serde(flatten)] + pub base_data: BaseData, + + /// User/entity ID this verification is for + pub entity_id: String, + + /// Contact address (email, phone, etc.) 
+    pub contact: String,
+
+    /// Transport type
+    pub transport: VerificationTransport,
+
+    /// Verification code (6 digits for user entry)
+    pub verification_code: String,
+
+    /// Verification nonce (for URL-based verification)
+    pub verification_nonce: String,
+
+    /// Current status
+    pub status: VerificationStatus,
+
+    /// When verification was sent
+    pub sent_at: Option<u64>,
+
+    /// When verification was completed
+    pub verified_at: Option<u64>,
+
+    /// When verification expires
+    pub expires_at: Option<u64>,
+
+    /// Number of attempts
+    pub attempts: u32,
+
+    /// Maximum attempts allowed
+    pub max_attempts: u32,
+
+    /// Callback URL (for server to construct verification link)
+    pub callback_url: Option<String>,
+
+    /// Additional metadata
+    #[serde(default)]
+    pub metadata: std::collections::HashMap<String, String>,
+}
+
+impl Verification {
+    /// Create a new verification
+    pub fn new(id: u32, entity_id: String, contact: String, transport: VerificationTransport) -> Self {
+        let mut base_data = BaseData::new();
+        base_data.id = id;
+
+        // Generate verification code (6 digits)
+        let code = Self::generate_code();
+
+        // Generate verification nonce (32 char hex)
+        let nonce = Self::generate_nonce();
+
+        // Set expiry to 24 hours from now
+        let expires_at = Self::now() + (24 * 60 * 60);
+
+        Self {
+            base_data,
+            entity_id,
+            contact,
+            transport,
+            verification_code: code,
+            verification_nonce: nonce,
+            status: VerificationStatus::Pending,
+            sent_at: None,
+            verified_at: None,
+            expires_at: Some(expires_at),
+            attempts: 0,
+            max_attempts: 3,
+            callback_url: None,
+            metadata: std::collections::HashMap::new(),
+        }
+    }
+
+    /// Generate a 6-digit verification code
+    fn generate_code() -> String {
+        use std::time::{SystemTime, UNIX_EPOCH};
+        let timestamp = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_nanos();
+        format!("{:06}", (timestamp % 1_000_000) as u32)
+    }
+
+    /// Generate a verification nonce (32 char hex string)
+    fn generate_nonce() -> String {
+        use std::time::{SystemTime, UNIX_EPOCH};
+        let timestamp = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_nanos();
+        format!("{:032x}", timestamp)
+    }
+
+    /// Set callback URL
+    pub fn callback_url(mut self, url: String) -> Self {
+        self.callback_url = Some(url);
+        self
+    }
+
+    /// Get verification URL (callback_url + nonce)
+    pub fn get_verification_url(&self) -> Option<String> {
+        self.callback_url.as_ref().map(|base_url| {
+            if base_url.contains('?') {
+                format!("{}&nonce={}", base_url, self.verification_nonce)
+            } else {
+                format!("{}?nonce={}", base_url, self.verification_nonce)
+            }
+        })
+    }
+
+    /// Mark as sent
+    pub fn mark_sent(&mut self) {
+        self.status = VerificationStatus::Sent;
+        self.sent_at = Some(Self::now());
+        self.base_data.update_modified();
+    }
+
+    /// Verify with code
+    pub fn verify_code(&mut self, code: &str) -> Result<(), String> {
+        // Check if expired
+        if let Some(expires_at) = self.expires_at {
+            if Self::now() > expires_at {
+                self.status = VerificationStatus::Expired;
+                self.base_data.update_modified();
+                return Err("Verification code expired".to_string());
+            }
+        }
+
+        // Check attempts
+        self.attempts += 1;
+        if self.attempts > self.max_attempts {
+            self.status = VerificationStatus::Failed;
+            self.base_data.update_modified();
+            return Err("Maximum attempts exceeded".to_string());
+        }
+
+        // Check code
+        if code != self.verification_code {
+            self.base_data.update_modified();
+            return Err("Invalid verification code".to_string());
+        }
+
+        // Success
+        self.status = VerificationStatus::Verified;
+        self.verified_at = Some(Self::now());
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Verify with nonce (for URL-based verification)
+    pub fn verify_nonce(&mut self, nonce: &str) -> Result<(), String> {
+        // Check if expired
+        if let Some(expires_at) = self.expires_at {
+            if Self::now() > expires_at {
+                self.status = VerificationStatus::Expired;
+                self.base_data.update_modified();
+                return Err("Verification link expired".to_string());
+            }
+        }
+
+        // Check nonce
+        if nonce != self.verification_nonce {
+            self.base_data.update_modified();
+            return Err("Invalid verification link".to_string());
+        }
+
+        // Success
+        self.status = VerificationStatus::Verified;
+        self.verified_at = Some(Self::now());
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Resend verification (generate new code and nonce)
+    pub fn resend(&mut self) {
+        self.verification_code = Self::generate_code();
+        self.verification_nonce = Self::generate_nonce();
+        self.status = VerificationStatus::Pending;
+        self.attempts = 0;
+
+        // Extend expiry
+        self.expires_at = Some(Self::now() + (24 * 60 * 60));
+        self.base_data.update_modified();
+    }
+
+    /// Helper to get current timestamp
+    fn now() -> u64 {
+        std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs()
+    }
+}
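A sketch (editor's addition, not part of the commit) of the verification lifecycle using only the methods defined above; the IDs and addresses are illustrative.

```rust
// Sketch: code- and nonce-based verification against the same session.
fn example_verification() {
    let mut v = Verification::new(
        1,
        "user-42".to_string(),
        "alice@example.com".to_string(),
        VerificationTransport::Email,
    )
    .callback_url("https://portal.example.com/verify".to_string());

    // The server would send the code and/or link, then record the send.
    let _link = v.get_verification_url(); // Some(".../verify?nonce=<hex>")
    v.mark_sent();

    // Entering the correct code (within 24h and 3 attempts) verifies the session.
    let code = v.verification_code.clone();
    assert!(v.verify_code(&code).is_ok());
}
```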
diff --git a/lib/osiris/core/objects/communication/verification_old.rs b/lib/osiris/core/objects/communication/verification_old.rs
new file mode 100644
index 0000000..cb4be5a
--- /dev/null
+++ b/lib/osiris/core/objects/communication/verification_old.rs
@@ -0,0 +1,155 @@
+/// Email Verification
+///
+/// Manages email verification sessions and status.
+
+use crate::store::{BaseData, Object, Storable};
+use serde::{Deserialize, Serialize};
+
+/// Email verification status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum VerificationStatus {
+    #[default]
+    Pending,
+    Sent,
+    Verified,
+    Expired,
+    Failed,
+}
+
+/// Email Verification Session
+#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)]
+pub struct EmailVerification {
+    #[serde(flatten)]
+    pub base_data: BaseData,
+
+    /// User/entity ID this verification is for
+    pub entity_id: String,
+
+    /// Email address to verify
+    pub email: String,
+
+    /// Verification code/token
+    pub verification_code: String,
+
+    /// Current status
+    pub status: VerificationStatus,
+
+    /// When verification was sent
+    pub sent_at: Option<u64>,
+
+    /// When verification was completed
+    pub verified_at: Option<u64>,
+
+    /// When verification expires
+    pub expires_at: Option<u64>,
+
+    /// Number of attempts
+    pub attempts: u32,
+
+    /// Maximum attempts allowed
+    pub max_attempts: u32,
+
+    /// Additional metadata
+    #[serde(default)]
+    pub metadata: std::collections::HashMap<String, String>,
+}
+
+impl EmailVerification {
+    /// Create a new email verification
+    pub fn new(id: u32, entity_id: String, email: String) -> Self {
+        let mut base_data = BaseData::new();
+        base_data.id = id;
+
+        // Generate verification code (6 digits)
+        let code = Self::generate_code();
+
+        // Set expiry to 24 hours from now
+        let expires_at = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs() + (24 * 60 * 60);
+
+        Self {
+            base_data,
+            entity_id,
+            email,
+            verification_code: code,
+            status: VerificationStatus::Pending,
+            sent_at: None,
+            verified_at: None,
+            expires_at: Some(expires_at),
+            attempts: 0,
+            max_attempts: 3,
+            metadata: std::collections::HashMap::new(),
+        }
+    }
+
+    /// Generate a 6-digit verification code
+    fn generate_code() -> String {
+        use std::time::{SystemTime, UNIX_EPOCH};
+        let timestamp = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_nanos();
+        format!("{:06}", (timestamp % 1_000_000) as u32)
+    }
+
+    /// Mark as sent
+    pub fn mark_sent(&mut self) {
+        self.status = VerificationStatus::Sent;
+        self.sent_at = Some(Self::now());
+        self.base_data.update_modified();
+    }
+
+    /// Verify with code
+    pub fn verify(&mut self, code: &str) -> Result<(), String> {
+        // Check if expired
+        if let Some(expires_at) = self.expires_at {
+            if Self::now() > expires_at {
+                self.status = VerificationStatus::Expired;
+                self.base_data.update_modified();
+                return Err("Verification code expired".to_string());
+            }
+        }
+
+        // Check attempts
+        self.attempts += 1;
+        if self.attempts > self.max_attempts {
+            self.status = VerificationStatus::Failed;
+            self.base_data.update_modified();
+            return Err("Maximum attempts exceeded".to_string());
+        }
+
+        // Check code
+        if code != self.verification_code {
+            self.base_data.update_modified();
+            return Err("Invalid verification code".to_string());
+        }
+
+        // Success
+        self.status = VerificationStatus::Verified;
+        self.verified_at = Some(Self::now());
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Resend verification (generate new code)
+    pub fn resend(&mut self) {
+        self.verification_code = Self::generate_code();
+        self.status = VerificationStatus::Pending;
+        self.attempts = 0;
+
+        // Extend expiry
+        self.expires_at = Some(Self::now() + (24 * 60 * 60));
+        self.base_data.update_modified();
+    }
+
+    /// Helper to get current timestamp
+    fn now() -> u64 {
+        std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs()
+    }
+}
diff --git a/lib/osiris/core/objects/event/mod.rs b/lib/osiris/core/objects/event/mod.rs
new file mode 100644
index 0000000..5d8e328
--- /dev/null
+++ b/lib/osiris/core/objects/event/mod.rs
@@ -0,0 +1,139 @@
+use crate::store::BaseData;
+use serde::{Deserialize, Serialize};
+use time::OffsetDateTime;
+
+pub mod rhai;
+
+/// Event status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub enum EventStatus {
+    #[default]
+    Draft,
+    Published,
+    Cancelled,
+}
+
+/// A calendar event object
+#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)]
+pub struct Event {
+    /// Base data
+    pub base_data: BaseData,
+
+    /// Title of the event
+    #[index]
+    pub title: String,
+
+    /// Optional description
+    pub description: Option<String>,
+
+    /// Start time
+    #[index]
+    #[serde(with = "time::serde::timestamp")]
+    pub start_time: OffsetDateTime,
+
+    /// End time
+    #[serde(with = "time::serde::timestamp")]
+    pub end_time: OffsetDateTime,
+
+    /// Optional location
+    #[index]
+    pub location: Option<String>,
+
+    /// Event status
+    #[index]
+    pub status: EventStatus,
+
+    /// Whether this is an all-day event
+    pub all_day: bool,
+
+    /// Optional category
+    #[index]
+    pub category: Option<String>,
+}
+
+impl Event {
+    /// Create a new event
+    pub fn new(ns: String, title: impl ToString) -> Self {
+        let now = OffsetDateTime::now_utc();
+        Self {
+            base_data: BaseData::with_ns(ns),
+            title: title.to_string(),
+            description: None,
+            start_time: now,
+            end_time: now,
+            location: None,
+            status: EventStatus::default(),
+            all_day: false,
+            category: None,
+        }
+    }
+
+    /// Create an event with specific ID
+    pub fn with_id(id: String, ns: String, title: impl ToString) -> Self {
+        let now = OffsetDateTime::now_utc();
+        let id_u32 = id.parse::<u32>().unwrap_or(0);
+        Self {
+            base_data: BaseData::with_id(id_u32, ns),
+            title: title.to_string(),
+            description: None,
+            start_time: now,
+            end_time: now,
+            location: None,
+            status: EventStatus::default(),
+            all_day: false,
+            category: None,
+        }
+    }
+
+    /// Set the description
+    pub fn set_description(mut self, description: impl ToString) -> Self {
+        self.description = Some(description.to_string());
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set the start time
+    pub fn set_start_time(mut self, start_time: OffsetDateTime) -> Self {
+        self.start_time = start_time;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set the end time
+    pub fn set_end_time(mut self, end_time: OffsetDateTime) -> Self {
+        self.end_time = end_time;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set the location
+    pub fn set_location(mut self, location: impl ToString) -> Self {
+        self.location = Some(location.to_string());
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set the status
+    pub fn set_status(mut self, status: EventStatus) -> Self {
+        self.status = status;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set as all-day event
+    pub fn set_all_day(mut self, all_day: bool) -> Self {
+        self.all_day = all_day;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Set the category
+    pub fn set_category(mut self, category: impl ToString) -> Self {
+        self.category = Some(category.to_string());
+        self.base_data.update_modified();
+        self
+    }
+}
+
+// Object trait implementation is auto-generated by #[derive(DeriveObject)]
+// The derive macro generates: object_type(), base_data(), base_data_mut(), index_keys(), indexed_fields()
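A sketch (editor's addition, not part of the commit) of the consuming-builder style `Event` exposes; every setter returns `Self` and bumps the modified timestamp. Values are illustrative.

```rust
// Sketch: build an event through the chainable setters above.
fn example_event() -> Event {
    Event::new("calendar".to_string(), "Standup")
        .set_description("Daily sync")
        .set_location("Room 1")
        .set_start_time(time::OffsetDateTime::now_utc())
        .set_all_day(false)
}
```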
diff --git a/lib/osiris/core/objects/event/rhai.rs b/lib/osiris/core/objects/event/rhai.rs
new file mode 100644
index 0000000..72ea780
--- /dev/null
+++ b/lib/osiris/core/objects/event/rhai.rs
@@ -0,0 +1,89 @@
+use crate::objects::Event;
+use rhai::{CustomType, Engine, TypeBuilder, Module, FuncRegistration};
+
+impl CustomType for Event {
+    fn build(mut builder: TypeBuilder<Self>) {
+        builder
+            .with_name("Event")
+            .with_fn("new", |ns: String, title: String| Event::new(ns, title))
+            .with_fn("set_description", |event: &mut Event, desc: String| {
+                event.description = Some(desc);
+                event.base_data.update_modified();
+            })
+            .with_fn("set_location", |event: &mut Event, location: String| {
+                event.location = Some(location);
+                event.base_data.update_modified();
+            })
+            .with_fn("set_category", |event: &mut Event, category: String| {
+                event.category = Some(category);
+                event.base_data.update_modified();
+            })
+            .with_fn("set_all_day", |event: &mut Event, all_day: bool| {
+                event.all_day = all_day;
+                event.base_data.update_modified();
+            })
+            .with_fn("get_id", |event: &mut Event| event.base_data.id.clone())
+            .with_fn("get_title", |event: &mut Event| event.title.clone())
+            .with_fn("to_json", |event: &mut Event| {
+                serde_json::to_string_pretty(event).unwrap_or_default()
+            });
+    }
+}
+
+/// Register Event API in Rhai engine
+pub fn register_event_api(engine: &mut Engine) {
+    engine.build_type::<Event>();
+
+    // Register builder-style constructor (namespace only, like note())
+    engine.register_fn("event", |ns: String| Event::new(ns, String::new()));
+
+    // Register title as a chainable method
+    engine.register_fn("title", |mut event: Event, title: String| {
+        event.title = title;
+        event.base_data.update_modified();
+        event
+    });
+
+    // Register chainable methods that return Self
+    engine.register_fn("description", |mut event: Event, desc: String| {
+        event.description = Some(desc);
+        event.base_data.update_modified();
+        event
+    });
+
+    engine.register_fn("location", |mut event: Event, location: String| {
+        event.location = Some(location);
+        event.base_data.update_modified();
+        event
+    });
+
+    engine.register_fn("category", |mut event: Event, category: String| {
+        event.category = Some(category);
+        event.base_data.update_modified();
+        event
+    });
+
+    engine.register_fn("all_day", |mut event: Event, all_day: bool| {
+        event.all_day = all_day;
+        event.base_data.update_modified();
+        event
+    });
+}
+
+/// Register Event functions into a module (for use in packages)
+pub fn register_event_functions(module: &mut Module) {
+    // Register Event type
+    module.set_custom_type::<Event>("Event");
+
+    // Register builder-style constructor
+    FuncRegistration::new("event")
+        .set_into_module(module, |ns: String, title: String| Event::new(ns, title));
+
+    // Register chainable methods
+    FuncRegistration::new("description")
+        .set_into_module(module, |mut event: Event, desc: String| {
+            event.description = Some(desc);
+            event.base_data.update_modified();
+            event
+        });
+}
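A sketch (editor's addition, not part of the commit) of driving the chainable script API registered above. The by-value functions mean Rhai clones the event between calls, so the final expression yields the fully built object.

```rust
// Sketch: evaluate a script using the constructor and chainable methods.
fn example_script() {
    let mut engine = rhai::Engine::new();
    register_event_api(&mut engine);

    let event: Event = engine
        .eval(r#" event("calendar").title("Standup").location("Room 1") "#)
        .expect("script evaluates");
    assert_eq!(event.title, "Standup");
}
```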
diff --git a/lib/osiris/core/objects/flow/instance.rs b/lib/osiris/core/objects/flow/instance.rs
new file mode 100644
index 0000000..fcc74bb
--- /dev/null
+++ b/lib/osiris/core/objects/flow/instance.rs
@@ -0,0 +1,241 @@
+/// Flow Instance
+///
+/// Represents an active instance of a flow template for a specific entity (e.g., user).
+
+use crate::store::{BaseData, Object, Storable};
+use serde::{Deserialize, Serialize};
+
+/// Status of a step in a flow instance
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum StepStatus {
+    #[default]
+    Pending,
+    Active,
+    Completed,
+    Skipped,
+    Failed,
+}
+
+/// A step instance in a flow
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct StepInstance {
+    /// Step name (from template)
+    pub name: String,
+
+    /// Current status
+    pub status: StepStatus,
+
+    /// When step was started
+    pub started_at: Option<u64>,
+
+    /// When step was completed
+    pub completed_at: Option<u64>,
+
+    /// Step result data
+    #[serde(default)]
+    pub result: std::collections::HashMap<String, String>,
+
+    /// Error message if failed
+    pub error: Option<String>,
+}
+
+impl StepInstance {
+    pub fn new(name: String) -> Self {
+        Self {
+            name,
+            status: StepStatus::Pending,
+            started_at: None,
+            completed_at: None,
+            result: std::collections::HashMap::new(),
+            error: None,
+        }
+    }
+}
+
+/// Overall flow status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum FlowStatus {
+    #[default]
+    Created,
+    Running,
+    Completed,
+    Failed,
+    Cancelled,
+}
+
+/// Flow Instance - an active execution of a flow template
+#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)]
+pub struct FlowInstance {
+    #[serde(flatten)]
+    pub base_data: BaseData,
+
+    /// Instance name (typically entity_id or unique identifier)
+    pub name: String,
+
+    /// Template name this instance is based on
+    pub template_name: String,
+
+    /// Entity ID this flow is for (e.g., user_id)
+    pub entity_id: String,
+
+    /// Current flow status
+    pub status: FlowStatus,
+
+    /// Step instances
+    pub steps: Vec<StepInstance>,
+
+    /// Current step index
+    pub current_step: usize,
+
+    /// When flow was started
+    pub started_at: Option<u64>,
+
+    /// When flow was completed
+    pub completed_at: Option<u64>,
+
+    /// Instance metadata
+    #[serde(default)]
+    pub metadata: std::collections::HashMap<String, String>,
+}
+
+impl FlowInstance {
+    /// Create a new flow instance
+    pub fn new(id: u32, name: String, template_name: String, entity_id: String) -> Self {
+        let mut base_data = BaseData::new();
+        base_data.id = id;
+        Self {
+            base_data,
+            name,
+            template_name,
+            entity_id,
+            status: FlowStatus::Created,
+            steps: Vec::new(),
+            current_step: 0,
+            started_at: None,
+            completed_at: None,
+            metadata: std::collections::HashMap::new(),
+        }
+    }
+
+    /// Initialize steps from template
+    pub fn init_steps(&mut self, step_names: Vec<String>) {
+        self.steps = step_names.into_iter().map(StepInstance::new).collect();
+        self.base_data.update_modified();
+    }
+
+    /// Start the flow
+    pub fn start(&mut self) {
+        // Initialize default steps if none exist
+        if self.steps.is_empty() {
+            // Create default steps based on common workflow
+            self.steps = vec![
+                StepInstance::new("registration".to_string()),
+                StepInstance::new("kyc".to_string()),
+                StepInstance::new("email".to_string()),
+            ];
+        }
+
+        self.status = FlowStatus::Running;
+        self.started_at = Some(Self::now());
+
+        // Start first step if exists
+        if let Some(step) = self.steps.first_mut() {
+            step.status = StepStatus::Active;
+            step.started_at = Some(Self::now());
+        }
+
+        self.base_data.update_modified();
+    }
+
+    /// Complete a step by name
+    pub fn complete_step(&mut self, step_name: &str) -> Result<(), String> {
+        let step_idx = self.steps.iter().position(|s| s.name == step_name)
+            .ok_or_else(|| format!("Step '{}' not found", step_name))?;
+
+        let step = &mut self.steps[step_idx];
+        step.status = StepStatus::Completed;
+        step.completed_at = Some(Self::now());
+
+        // Move to next step if this was the current step
+        if step_idx == self.current_step {
+            self.current_step += 1;
+
+            // Start next step if exists
+            if let Some(next_step) = self.steps.get_mut(self.current_step) {
+                next_step.status = StepStatus::Active;
+                next_step.started_at = Some(Self::now());
+            } else {
+                // All steps completed
+                self.status = FlowStatus::Completed;
+                self.completed_at = Some(Self::now());
+            }
+        }
+
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Fail a step
+    pub fn fail_step(&mut self, step_name: &str, error: String) -> Result<(), String> {
+        let step = self.steps.iter_mut()
+            .find(|s| s.name == step_name)
+            .ok_or_else(|| format!("Step '{}' not found", step_name))?;
+
+        step.status = StepStatus::Failed;
+        step.error = Some(error);
+        step.completed_at = Some(Self::now());
+
+        self.status = FlowStatus::Failed;
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Skip a step
+    pub fn skip_step(&mut self, step_name: &str) -> Result<(), String> {
+        let step = self.steps.iter_mut()
+            .find(|s| s.name == step_name)
+            .ok_or_else(|| format!("Step '{}' not found", step_name))?;
+
+        step.status = StepStatus::Skipped;
+        step.completed_at = Some(Self::now());
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Get current step
+    pub fn get_current_step(&self) -> Option<&StepInstance> {
+        self.steps.get(self.current_step)
+    }
+
+    /// Get step by name
+    pub fn get_step(&self, name: &str) -> Option<&StepInstance> {
+        self.steps.iter().find(|s| s.name == name)
+    }
+
+    /// Set step result data
+    pub fn set_step_result(&mut self, step_name: &str, key: String, value: String) -> Result<(), String> {
+        let step = self.steps.iter_mut()
+            .find(|s| s.name == step_name)
+            .ok_or_else(|| format!("Step '{}' not found", step_name))?;
+
+        step.result.insert(key, value);
+        self.base_data.update_modified();
+        Ok(())
+    }
+
+    /// Add metadata
+    pub fn add_metadata(&mut self, key: String, value: String) {
+        self.metadata.insert(key, value);
+        self.base_data.update_modified();
+    }
+
+    /// Helper to get current timestamp
+    fn now() -> u64 {
+        std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs()
+    }
+}
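A sketch (editor's addition, not part of the commit) of the instance lifecycle defined above: initialize steps, start, then complete them in order.

```rust
// Sketch: happy path through a two-step flow.
fn example_flow() {
    let mut flow = FlowInstance::new(
        1,
        "signup-42".to_string(),
        "signup".to_string(),
        "user-42".to_string(),
    );
    flow.init_steps(vec!["registration".to_string(), "email".to_string()]);
    flow.start(); // "registration" becomes Active

    flow.complete_step("registration").unwrap(); // advances and activates "email"
    flow.complete_step("email").unwrap();        // last step -> FlowStatus::Completed
    assert_eq!(flow.status, FlowStatus::Completed);
}
```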
diff --git a/lib/osiris/core/objects/flow/mod.rs b/lib/osiris/core/objects/flow/mod.rs
new file mode 100644
index 0000000..313079b
--- /dev/null
+++ b/lib/osiris/core/objects/flow/mod.rs
@@ -0,0 +1,10 @@
+/// Flow Module
+///
+/// Provides workflow/flow management with templates and instances.
+
+pub mod template;
+pub mod instance;
+pub mod rhai;
+
+pub use template::{FlowTemplate, FlowStep};
+pub use instance::{FlowInstance, FlowStatus, StepStatus, StepInstance};
diff --git a/lib/osiris/core/objects/flow/rhai.rs b/lib/osiris/core/objects/flow/rhai.rs
new file mode 100644
index 0000000..cf5f163
--- /dev/null
+++ b/lib/osiris/core/objects/flow/rhai.rs
@@ -0,0 +1,183 @@
+/// Rhai bindings for Flow objects
+
+use ::rhai::plugin::*;
+use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder};
+
+use super::template::{FlowTemplate, FlowStep};
+use super::instance::{FlowInstance, FlowStatus, StepStatus};
+
+// ============================================================================
+// Flow Template Module
+// ============================================================================
+
+type RhaiFlowTemplate = FlowTemplate;
+
+#[export_module]
+mod rhai_flow_template_module {
+    use super::RhaiFlowTemplate;
+
+    #[rhai_fn(name = "new_flow", return_raw)]
+    pub fn new_flow() -> Result<RhaiFlowTemplate, Box<EvalAltResult>> {
+        Ok(FlowTemplate::new(0))
+    }
+
+    #[rhai_fn(name = "name", return_raw)]
+    pub fn set_name(
+        template: &mut RhaiFlowTemplate,
+        name: String,
+    ) -> Result<RhaiFlowTemplate, Box<EvalAltResult>> {
+        let owned = std::mem::take(template);
+        *template = owned.name(name);
+        Ok(template.clone())
+    }
+
+    #[rhai_fn(name = "description", return_raw)]
+    pub fn set_description(
+        template: &mut RhaiFlowTemplate,
+        description: String,
+    ) -> Result<RhaiFlowTemplate, Box<EvalAltResult>> {
+        let owned = std::mem::take(template);
+        *template = owned.description(description);
+        Ok(template.clone())
+    }
+
+    #[rhai_fn(name = "add_step", return_raw)]
+    pub fn add_step(
+        template: &mut RhaiFlowTemplate,
+        name: String,
+        description: String,
+    ) -> Result<(), Box<EvalAltResult>> {
+        template.add_step(name, description);
+        Ok(())
+    }
+
+    #[rhai_fn(name = "build", return_raw)]
+    pub fn build(
+        template: &mut RhaiFlowTemplate,
+    ) -> Result<RhaiFlowTemplate, Box<EvalAltResult>> {
+        Ok(template.clone())
+    }
+
+    // Getters
+    #[rhai_fn(name = "get_name")]
+    pub fn get_name(template: &mut RhaiFlowTemplate) -> String {
+        template.name.clone()
+    }
+
+    #[rhai_fn(name = "get_description")]
+    pub fn get_description(template: &mut RhaiFlowTemplate) -> String {
+        template.description.clone()
+    }
+}
+
+// ============================================================================
+// Flow Instance Module
+// ============================================================================
+
+type RhaiFlowInstance = FlowInstance;
+
+#[export_module]
+mod rhai_flow_instance_module {
+    use super::RhaiFlowInstance;
+
+    #[rhai_fn(name = "new_flow_instance", return_raw)]
+    pub fn new_instance(
+        name: String,
+        template_name: String,
+        entity_id: String,
+    ) -> Result<RhaiFlowInstance, Box<EvalAltResult>> {
+        Ok(FlowInstance::new(0, name, template_name, entity_id))
+    }
+
+    #[rhai_fn(name = "start", return_raw)]
+    pub fn start(
+        instance: &mut RhaiFlowInstance,
+    ) -> Result<(), Box<EvalAltResult>> {
+        instance.start();
+        Ok(())
+    }
+
+    #[rhai_fn(name = "complete_step", return_raw)]
+    pub fn complete_step(
+        instance: &mut RhaiFlowInstance,
+        step_name: String,
+    ) -> Result<(), Box<EvalAltResult>> {
+        instance.complete_step(&step_name)
+            .map_err(|e| e.into())
+    }
+
+    #[rhai_fn(name = "fail_step", return_raw)]
+    pub fn fail_step(
+        instance: &mut RhaiFlowInstance,
+        step_name: String,
+        error: String,
+    ) -> Result<(), Box<EvalAltResult>> {
+        instance.fail_step(&step_name, error)
+            .map_err(|e| e.into())
+    }
+
+    #[rhai_fn(name = "skip_step", return_raw)]
+    pub fn skip_step(
+        instance: &mut RhaiFlowInstance,
+        step_name: String,
+    ) -> Result<(), Box<EvalAltResult>> {
+        instance.skip_step(&step_name)
+            .map_err(|e| e.into())
+    }
+
+    // Getters
+    #[rhai_fn(name = "get_name")]
+    pub fn get_name(instance: &mut RhaiFlowInstance) -> String {
+        instance.name.clone()
+    }
+
+    #[rhai_fn(name = "get_template_name")]
+    pub fn get_template_name(instance: &mut RhaiFlowInstance) -> String {
+        instance.template_name.clone()
+    }
+
+    #[rhai_fn(name = "get_entity_id")]
+    pub fn get_entity_id(instance: &mut RhaiFlowInstance) -> String {
+        instance.entity_id.clone()
+    }
+
+    #[rhai_fn(name = "get_status")]
+    pub fn get_status(instance: &mut RhaiFlowInstance) -> String {
+        format!("{:?}", instance.status)
+    }
+}
+
+// ============================================================================
+// Registration Functions
+// ============================================================================
+
+/// Register Flow modules into a Rhai Module (for use in packages)
+pub fn register_flow_modules(parent_module: &mut Module) {
+    // Register custom types
+    parent_module.set_custom_type::<FlowTemplate>("FlowTemplate");
+    parent_module.set_custom_type::<FlowInstance>("FlowInstance");
+
+    // Merge flow template functions
+    let template_module = exported_module!(rhai_flow_template_module);
+    parent_module.merge(&template_module);
+
+    // Merge flow instance functions
+    let instance_module = exported_module!(rhai_flow_instance_module);
+    parent_module.merge(&instance_module);
+}
+
+// ============================================================================
+// CustomType Implementations
+// ============================================================================
+
+impl CustomType for FlowTemplate {
+    fn build(mut builder: TypeBuilder<Self>) {
+        builder.with_name("FlowTemplate");
+    }
+}
+
+impl CustomType for FlowInstance {
+    fn build(mut builder: TypeBuilder<Self>) {
+        builder.with_name("FlowInstance");
+    }
+}
diff --git a/lib/osiris/core/objects/flow/template.rs b/lib/osiris/core/objects/flow/template.rs
new file mode 100644
index 0000000..9259664
--- /dev/null
+++ b/lib/osiris/core/objects/flow/template.rs
@@ -0,0 +1,117 @@
+/// Flow Template
+///
+/// Defines a reusable workflow template with steps that can be instantiated multiple times.
+
+use crate::store::{BaseData, Object, Storable};
+use serde::{Deserialize, Serialize};
+
+/// A step in a flow template
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct FlowStep {
+    /// Step name/identifier
+    pub name: String,
+
+    /// Step description
+    pub description: String,
+
+    /// Steps that must be completed before this step can start
+    #[serde(default)]
+    pub dependencies: Vec<String>,
+}
+
+impl FlowStep {
+    pub fn new(name: String, description: String) -> Self {
+        Self {
+            name,
+            description,
+            dependencies: Vec::new(),
+        }
+    }
+
+    pub fn with_dependencies(mut self, dependencies: Vec<String>) -> Self {
+        self.dependencies = dependencies;
+        self
+    }
+
+    pub fn add_dependency(&mut self, dependency: String) {
+        self.dependencies.push(dependency);
+    }
+}
+
+/// Flow Template - defines a reusable workflow
+#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)]
+pub struct FlowTemplate {
+    #[serde(flatten)]
+    pub base_data: BaseData,
+
+    /// Template name
+    pub name: String,
+
+    /// Template description
+    pub description: String,
+
+    /// Ordered list of steps
+    pub steps: Vec<FlowStep>,
+
+    /// Template metadata
+    #[serde(default)]
+    pub metadata: std::collections::HashMap<String, String>,
+}
+
+impl FlowTemplate {
+    /// Create a new flow template
+    pub fn new(id: u32) -> Self {
+        let mut base_data = BaseData::new();
+        base_data.id = id;
+        Self {
+            base_data,
+            name: String::new(),
+            description: String::new(),
+            steps: Vec::new(),
+            metadata: std::collections::HashMap::new(),
+        }
+    }
+
+    /// Builder: Set name
+    pub fn name(mut self, name: String) -> Self {
+        self.name = name;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Builder: Set description
+    pub fn description(mut self, description: String) -> Self {
+        self.description = description;
+        self.base_data.update_modified();
+        self
+    }
+
+    /// Add a step to the template
+    pub fn add_step(&mut self, name: String, description: String) {
+        self.steps.push(FlowStep::new(name, description));
+        self.base_data.update_modified();
+    }
+
+    /// Add a step with dependencies
+    pub fn add_step_with_dependencies(&mut self, name: String, description: String, dependencies: Vec<String>) {
+        let step = FlowStep::new(name, description).with_dependencies(dependencies);
+        self.steps.push(step);
+        self.base_data.update_modified();
+    }
+
+    /// Get step by name
+    pub fn get_step(&self, name: &str) -> Option<&FlowStep> {
+        self.steps.iter().find(|s| s.name == name)
+    }
+
+    /// Add metadata
+    pub fn add_metadata(&mut self, key: String, value: String) {
+        self.metadata.insert(key, value);
+        self.base_data.update_modified();
+    }
+
+    /// Build (for fluent API compatibility)
+    pub fn build(self) -> Self {
+        self
+    }
+}
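A sketch (editor's addition, not part of the commit) of assembling a template with a dependent step, using only the builders defined above.

```rust
// Sketch: a two-step template where "email" depends on "registration".
fn example_template() -> FlowTemplate {
    let mut template = FlowTemplate::new(1)
        .name("signup".to_string())
        .description("New user onboarding".to_string());

    template.add_step("registration".to_string(), "Create the account".to_string());
    template.add_step_with_dependencies(
        "email".to_string(),
        "Verify the contact address".to_string(),
        vec!["registration".to_string()],
    );
    template
}
```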
diff --git a/lib/osiris/core/objects/grid4/bid.rs b/lib/osiris/core/objects/grid4/bid.rs
new file mode 100644
index 0000000..6b8fc1d
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/bid.rs
@@ -0,0 +1,126 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+
+/// Bid status enumeration
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub enum BidStatus {
+    #[default]
+    Pending,
+    Confirmed,
+    Assigned,
+    Cancelled,
+    Done,
+}
+
+/// Billing period enumeration
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub enum BillingPeriod {
+    #[default]
+    Hourly,
+    Monthly,
+    Yearly,
+    Biannually,
+    Triannually,
+}
+
+/// A bid for infrastructure capacity that may later be accepted
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct Bid {
+    pub base_data: BaseData,
+    /// links back to customer for this capacity (user on ledger)
+    #[index]
+    pub customer_id: u32,
+    /// nr of slices I need in 1 machine
+    pub compute_slices_nr: i32,
+    /// price per 1 GB slice I want to accept
+    pub compute_slice_price: f64,
+    /// nr of storage slices needed
+    pub storage_slices_nr: i32,
+    /// price per 1 GB storage slice I want to accept
+    pub storage_slice_price: f64,
+    pub status: BidStatus,
+    /// if obligation then will be charged and money needs to be in escrow, otherwise it's an intent
+    pub obligation: bool,
+    /// epoch timestamp
+    pub start_date: u32,
+    /// epoch timestamp
+    pub end_date: u32,
+    /// signature as done by a user/consumer to validate their identity and intent
+    pub signature_user: String,
+    pub billing_period: BillingPeriod,
+}
+
+impl Bid {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            customer_id: 0,
+            compute_slices_nr: 0,
+            compute_slice_price: 0.0,
+            storage_slices_nr: 0,
+            storage_slice_price: 0.0,
+            status: BidStatus::default(),
+            obligation: false,
+            start_date: 0,
+            end_date: 0,
+            signature_user: String::new(),
+            billing_period: BillingPeriod::default(),
+        }
+    }
+
+    pub fn customer_id(mut self, v: u32) -> Self {
+        self.customer_id = v;
+        self
+    }
+
+    pub fn compute_slices_nr(mut self, v: i32) -> Self {
+        self.compute_slices_nr = v;
+        self
+    }
+
+    pub fn compute_slice_price(mut self, v: f64) -> Self {
+        self.compute_slice_price = v;
+        self
+    }
+
+    pub fn storage_slices_nr(mut self, v: i32) -> Self {
+        self.storage_slices_nr = v;
+        self
+    }
+
+    pub fn storage_slice_price(mut self, v: f64) -> Self {
+        self.storage_slice_price = v;
+        self
+    }
+
+    pub fn status(mut self, v: BidStatus) -> Self {
+        self.status = v;
+        self
+    }
+
+    pub fn obligation(mut self, v: bool) -> Self {
+        self.obligation = v;
+        self
+    }
+
+    pub fn start_date(mut self, v: u32) -> Self {
+        self.start_date = v;
+        self
+    }
+
+    pub fn end_date(mut self, v: u32) -> Self {
+        self.end_date = v;
+        self
+    }
+
+    pub fn signature_user(mut self, v: impl ToString) -> Self {
+        self.signature_user = v.to_string();
+        self
+    }
+
+    pub fn billing_period(mut self, v: BillingPeriod) -> Self {
+        self.billing_period = v;
+        self
+    }
+}
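A sketch (editor's addition, not part of the commit) of a `Bid` built with the fluent setters above; prices are illustrative per-GB cloud-credit amounts.

```rust
// Sketch: a monthly, escrow-backed bid for 4 compute and 100 storage slices.
fn example_bid() -> Bid {
    Bid::new()
        .customer_id(42)
        .compute_slices_nr(4)
        .compute_slice_price(0.5)
        .storage_slices_nr(100)
        .storage_slice_price(0.05)
        .obligation(true) // funds must sit in escrow
        .billing_period(BillingPeriod::Monthly)
}
```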
diff --git a/lib/osiris/core/objects/grid4/common.rs b/lib/osiris/core/objects/grid4/common.rs
new file mode 100644
index 0000000..2014aff
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/common.rs
@@ -0,0 +1,39 @@
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+
+/// SLA policy matching the V spec `SLAPolicy`
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct SLAPolicy {
+    /// should be 90 or higher
+    pub sla_uptime: i32,
+    /// minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
+    pub sla_bandwidth_mbit: i32,
+    /// percent of money given back in relation to a month if the SLA is breached,
+    /// e.g. 200 means we return 2 months' worth of revenue if the SLA is missed
+    pub sla_penalty: i32,
+}
+
+impl SLAPolicy {
+    pub fn new() -> Self { Self::default() }
+    pub fn sla_uptime(mut self, v: i32) -> Self { self.sla_uptime = v; self }
+    pub fn sla_bandwidth_mbit(mut self, v: i32) -> Self { self.sla_bandwidth_mbit = v; self }
+    pub fn sla_penalty(mut self, v: i32) -> Self { self.sla_penalty = v; self }
+    pub fn build(self) -> Self { self }
+}
+
+/// Pricing policy matching the V spec `PricingPolicy`
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct PricingPolicy {
+    /// e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization
+    /// then this provider gives 30%, 2Y 40%, ...
+    pub marketplace_year_discounts: Vec<i32>,
+    /// e.g. 10,20,30
+    pub volume_discounts: Vec<i32>,
+}
+
+impl PricingPolicy {
+    pub fn new() -> Self { Self { marketplace_year_discounts: vec![30, 40, 50], volume_discounts: vec![10, 20, 30] } }
+    pub fn marketplace_year_discounts(mut self, v: Vec<i32>) -> Self { self.marketplace_year_discounts = v; self }
+    pub fn volume_discounts(mut self, v: Vec<i32>) -> Self { self.volume_discounts = v; self }
+    pub fn build(self) -> Self { self }
+}
diff --git a/lib/osiris/core/objects/grid4/contract.rs b/lib/osiris/core/objects/grid4/contract.rs
new file mode 100644
index 0000000..98f2439
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/contract.rs
@@ -0,0 +1,217 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+use super::bid::BillingPeriod;
+
+/// Contract status enumeration
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub enum ContractStatus {
+    #[default]
+    Active,
+    Cancelled,
+    Error,
+    Paused,
+}
+
+/// Compute slice provisioned for a contract
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct ComputeSliceProvisioned {
+    pub node_id: u32,
+    /// the id of the slice in the node
+    pub id: u16,
+    pub mem_gb: f64,
+    pub storage_gb: f64,
+    pub passmark: i32,
+    pub vcores: i32,
+    pub cpu_oversubscription: i32,
+    pub tags: String,
+}
+
+/// Storage slice provisioned for a contract
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct StorageSliceProvisioned {
+    pub node_id: u32,
+    /// the id of the slice in the node, are tracked in the node itself
+    pub id: u16,
+    pub storage_size_gb: i32,
+    pub tags: String,
+}
+
+/// Contract for provisioned infrastructure
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct Contract {
+    pub base_data: BaseData,
+    /// links back to customer for this capacity (user on ledger)
+    #[index]
+    pub customer_id: u32,
+    pub compute_slices: Vec<ComputeSliceProvisioned>,
+    pub storage_slices: Vec<StorageSliceProvisioned>,
+    /// price per 1 GB agreed upon
+    pub compute_slice_price: f64,
+    /// price per 1 GB agreed upon
+    pub storage_slice_price: f64,
+    /// price per 1 GB agreed upon (transfer)
+    pub network_slice_price: f64,
+    pub status: ContractStatus,
+    /// epoch timestamp
+    pub start_date: u32,
+    /// epoch timestamp
+    pub end_date: u32,
+    /// signature as done by a user/consumer to validate their identity and intent
+    pub signature_user: String,
+    /// signature as done by the hoster
+    pub signature_hoster: String,
+    pub billing_period: BillingPeriod,
+}
+
+impl Contract {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            customer_id: 0,
+            compute_slices: Vec::new(),
+            storage_slices: Vec::new(),
+            compute_slice_price: 0.0,
+            storage_slice_price: 0.0,
+            network_slice_price: 0.0,
+            status: ContractStatus::default(),
+            start_date: 0,
+            end_date: 0,
+            signature_user: String::new(),
+            signature_hoster: String::new(),
+            billing_period: BillingPeriod::default(),
+        }
+    }
+
+    pub fn customer_id(mut self, v: u32) -> Self {
+        self.customer_id = v;
+        self
+    }
+
+    pub fn add_compute_slice(mut self, slice: ComputeSliceProvisioned) -> Self {
+        self.compute_slices.push(slice);
+        self
+    }
+
+    pub fn add_storage_slice(mut self, slice: StorageSliceProvisioned) -> Self {
+        self.storage_slices.push(slice);
+        self
+    }
+
+    pub fn compute_slice_price(mut self, v: f64) -> Self {
+        self.compute_slice_price = v;
+        self
+    }
+
+    pub fn storage_slice_price(mut self, v: f64) -> Self {
+        self.storage_slice_price = v;
+        self
+    }
+
+    pub fn network_slice_price(mut self, v: f64) -> Self {
+        self.network_slice_price = v;
+        self
+    }
+
+    pub fn status(mut self, v: ContractStatus) -> Self {
+        self.status = v;
+        self
+    }
+
+    pub fn start_date(mut self, v: u32) -> Self {
+        self.start_date = v;
+        self
+    }
+
+    pub fn end_date(mut self, v: u32) -> Self {
+        self.end_date = v;
+        self
+    }
+
+    pub fn signature_user(mut self, v: impl ToString) -> Self {
+        self.signature_user = v.to_string();
+        self
+    }
+
+    pub fn signature_hoster(mut self, v: impl ToString) -> Self {
+        self.signature_hoster = v.to_string();
+        self
+    }
+
+    pub fn billing_period(mut self, v: BillingPeriod) -> Self {
+        self.billing_period = v;
+        self
+    }
+}
+
+impl ComputeSliceProvisioned {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn node_id(mut self, v: u32) -> Self {
+        self.node_id = v;
+        self
+    }
+
+    pub fn id(mut self, v: u16) -> Self {
+        self.id = v;
+        self
+    }
+
+    pub fn mem_gb(mut self, v: f64) -> Self {
+        self.mem_gb = v;
+        self
+    }
+
+    pub fn storage_gb(mut self, v: f64) -> Self {
+        self.storage_gb = v;
+        self
+    }
+
+    pub fn passmark(mut self, v: i32) -> Self {
+        self.passmark = v;
+        self
+    }
+
+    pub fn vcores(mut self, v: i32) -> Self {
+        self.vcores = v;
+        self
+    }
+
+    pub fn cpu_oversubscription(mut self, v: i32) -> Self {
+        self.cpu_oversubscription = v;
+        self
+    }
+
+    pub fn tags(mut self, v: impl ToString) -> Self {
+        self.tags = v.to_string();
+        self
+    }
+}
+
+impl StorageSliceProvisioned {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn node_id(mut self, v: u32) -> Self {
+        self.node_id = v;
+        self
+    }
+
+    pub fn id(mut self, v: u16) -> Self {
+        self.id = v;
+        self
+    }
+
+    pub fn storage_size_gb(mut self, v: i32) -> Self {
+        self.storage_size_gb = v;
+        self
+    }
+
+    pub fn tags(mut self, v: impl ToString) -> Self {
+        self.tags = v.to_string();
+        self
+    }
+}
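A sketch (editor's addition, not part of the commit) of a contract carrying one provisioned compute slice, mirroring the bid fields it would be settled from.

```rust
// Sketch: one provisioned slice plus agreed per-GB prices.
fn example_contract() -> Contract {
    Contract::new()
        .customer_id(42)
        .add_compute_slice(
            ComputeSliceProvisioned::new()
                .node_id(7)
                .id(1)
                .mem_gb(2.0)
                .vcores(2),
        )
        .compute_slice_price(0.5)
        .status(ContractStatus::Active)
        .billing_period(BillingPeriod::Monthly)
}
```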
diff --git a/lib/osiris/core/objects/grid4/mod.rs b/lib/osiris/core/objects/grid4/mod.rs
new file mode 100644
index 0000000..f5d808c
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/mod.rs
@@ -0,0 +1,18 @@
+pub mod bid;
+pub mod common;
+pub mod contract;
+pub mod node;
+pub mod nodegroup;
+pub mod reputation;
+pub mod reservation;
+
+pub use bid::{Bid, BidStatus, BillingPeriod};
+pub use common::{PricingPolicy, SLAPolicy};
+pub use contract::{Contract, ContractStatus, ComputeSliceProvisioned, StorageSliceProvisioned};
+pub use node::{
+    CPUDevice, ComputeSlice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, Node,
+    NodeCapacity, StorageDevice, StorageSlice,
+};
+pub use nodegroup::NodeGroup;
+pub use reputation::{NodeGroupReputation, NodeReputation};
+pub use reservation::{Reservation, ReservationStatus};
diff --git a/lib/osiris/core/objects/grid4/node.rs b/lib/osiris/core/objects/grid4/node.rs
new file mode 100644
index 0000000..b079b99
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/node.rs
@@ -0,0 +1,279 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+use super::common::{PricingPolicy, SLAPolicy};
+
+/// Storage device information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct StorageDevice {
+    /// can be used in node
+    pub id: String,
+    /// Size of the storage device in gigabytes
+    pub size_gb: f64,
+    /// Description of the storage device
+    pub description: String,
+}
+
+/// Memory device information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct MemoryDevice {
+    /// can be used in node
+    pub id: String,
+    /// Size of the memory device in gigabytes
+    pub size_gb: f64,
+    /// Description of the memory device
+    pub description: String,
+}
+
+/// CPU device information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct CPUDevice {
+    /// can be used in node
+    pub id: String,
+    /// Number of CPU cores
+    pub cores: i32,
+    /// Passmark score
+    pub passmark: i32,
+    /// Description of the CPU
+    pub description: String,
+    /// Brand of the CPU
+    pub cpu_brand: String,
+    /// Version of the CPU
+    pub cpu_version: String,
+}
+
+/// GPU device information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct GPUDevice {
+    /// can be used in node
+    pub id: String,
+    /// Number of GPU cores
+    pub cores: i32,
+    /// Size of the GPU memory in gigabytes
+    pub memory_gb: f64,
+    /// Description of the GPU
+    pub description: String,
+    pub gpu_brand: String,
+    pub gpu_version: String,
+}
+
+/// Network device information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct NetworkDevice {
+    /// can be used in node
+    pub id: String,
+    /// Network speed in Mbps
+    pub speed_mbps: i32,
+    /// Description of the network device
+    pub description: String,
+}
+
+/// Aggregated device info for a node
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct DeviceInfo {
+    pub vendor: String,
+    pub storage: Vec<StorageDevice>,
+    pub memory: Vec<MemoryDevice>,
+    pub cpu: Vec<CPUDevice>,
+    pub gpu: Vec<GPUDevice>,
+    pub network: Vec<NetworkDevice>,
+}
+
+/// NodeCapacity represents the hardware capacity details of a node.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct NodeCapacity {
+    /// Total storage in gigabytes
+    pub storage_gb: f64,
+    /// Total memory in gigabytes
+    pub mem_gb: f64,
+    /// Total GPU memory in gigabytes
+    pub mem_gb_gpu: f64,
+    /// Passmark score for the node
+    pub passmark: i32,
+    /// Total virtual cores
+    pub vcores: i32,
+}
+
+// PricingPolicy and SLAPolicy moved to `common.rs` to be shared across models.
+
+/// Compute slice (typically represents a base unit of compute)
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct ComputeSlice {
+    /// the id of the slice in the node
+    pub id: u16,
+    pub mem_gb: f64,
+    pub storage_gb: f64,
+    pub passmark: i32,
+    pub vcores: i32,
+    pub cpu_oversubscription: i32,
+    pub storage_oversubscription: i32,
+    /// nr of GPU's see node to know what GPU's are
+    pub gpus: u8,
+}
+
+impl ComputeSlice {
+    pub fn new() -> Self {
+        Self {
+            id: 0,
+            mem_gb: 0.0,
+            storage_gb: 0.0,
+            passmark: 0,
+            vcores: 0,
+            cpu_oversubscription: 0,
+            storage_oversubscription: 0,
+            gpus: 0,
+        }
+    }
+
+    pub fn id(mut self, id: u16) -> Self {
+        self.id = id;
+        self
+    }
+    pub fn mem_gb(mut self, v: f64) -> Self {
+        self.mem_gb = v;
+        self
+    }
+    pub fn storage_gb(mut self, v: f64) -> Self {
+        self.storage_gb = v;
+        self
+    }
+    pub fn passmark(mut self, v: i32) -> Self {
+        self.passmark = v;
+        self
+    }
+    pub fn vcores(mut self, v: i32) -> Self {
+        self.vcores = v;
+        self
+    }
+    pub fn cpu_oversubscription(mut self, v: i32) -> Self {
+        self.cpu_oversubscription = v;
+        self
+    }
+    pub fn storage_oversubscription(mut self, v: i32) -> Self {
+        self.storage_oversubscription = v;
+        self
+    }
+    pub fn gpus(mut self, v: u8) -> Self {
+        self.gpus = v;
+        self
+    }
+}
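A sketch (editor's addition, not part of the commit) of a `ComputeSlice` built with the setters above.

```rust
// Sketch: a 2 GB slice with 2x CPU oversubscription.
fn example_slice() -> ComputeSlice {
    ComputeSlice::new()
        .id(1)
        .mem_gb(2.0)
        .storage_gb(20.0)
        .vcores(2)
        .cpu_oversubscription(2)
}
```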
+
+/// Storage slice (typically 1GB of storage)
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct StorageSlice {
+    /// the id of the slice in the node, are tracked in the node itself
+    pub id: u16,
+}
+
+impl StorageSlice {
+    pub fn new() -> Self {
+        Self {
+            id: 0,
+        }
+    }
+
+    pub fn id(mut self, id: u16) -> Self {
+        self.id = id;
+        self
+    }
+}
+
+/// Grid4 Node model
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct Node {
+    pub base_data: BaseData,
+    /// Link to node group
+    #[index]
+    pub nodegroupid: i32,
+    /// Uptime percentage 0..100
+    pub uptime: i32,
+    pub computeslices: Vec<ComputeSlice>,
+    pub storageslices: Vec<StorageSlice>,
+    pub devices: DeviceInfo,
+    /// 2 letter code as specified in lib/data/countries/data/countryInfo.txt
+    #[index]
+    pub country: String,
+    /// Hardware capacity details
+    pub capacity: NodeCapacity,
+    /// first time node was active
+    pub birthtime: u32,
+    /// node public key
+    #[index]
+    pub pubkey: String,
+    /// signature done on node to validate pubkey with privkey
+    pub signature_node: String,
+    /// signature as done by farmers to validate their identity
+    pub signature_farmer: String,
+}
+
+impl Node {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            nodegroupid: 0,
+            uptime: 0,
+            computeslices: Vec::new(),
+            storageslices: Vec::new(),
+            devices: DeviceInfo::default(),
+            country: String::new(),
+            capacity: NodeCapacity::default(),
+            birthtime: 0,
+            pubkey: String::new(),
+            signature_node: String::new(),
+            signature_farmer: String::new(),
+        }
+    }
+
+    pub fn nodegroupid(mut self, v: i32) -> Self {
+        self.nodegroupid = v;
+        self
+    }
+    pub fn uptime(mut self, v: i32) -> Self {
+        self.uptime = v;
+        self
+    }
+    pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self {
+        self.computeslices.push(s);
+        self
+    }
+    pub fn add_storage_slice(mut self, s: StorageSlice) -> Self {
+        self.storageslices.push(s);
+        self
+    }
+    pub fn devices(mut self, d: DeviceInfo) -> Self {
+        self.devices = d;
+        self
+    }
+    pub fn country(mut self, c: impl ToString) -> Self {
+        self.country = c.to_string();
+        self
+    }
+    pub fn capacity(mut self, c: NodeCapacity) -> Self {
+        self.capacity = c;
+        self
+    }
+    pub fn birthtime(mut self, t: u32) -> Self {
+        self.birthtime = t;
+        self
+    }
+
+    pub fn pubkey(mut self, v: impl ToString) -> Self {
+        self.pubkey = v.to_string();
+        self
+    }
+    pub fn signature_node(mut self, v: impl ToString) -> Self {
+        self.signature_node = v.to_string();
+        self
+    }
+    pub fn signature_farmer(mut self, v: impl ToString) -> Self {
+        self.signature_farmer = v.to_string();
+        self
+    }
+
+    /// Placeholder for capacity recalculation out of the devices on the Node
+    pub fn check(self) -> Self {
+        // TODO: calculate NodeCapacity out of the devices on the Node
+        self
+    }
+}
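A sketch (editor's addition, not part of the commit) of assembling a `Node`; the public key is a placeholder, and `check()` is still the TODO pass-through defined above.

```rust
// Sketch: a node in a group with one compute and one storage slice.
fn example_node() -> Node {
    Node::new()
        .nodegroupid(10)
        .country("BE")
        .add_compute_slice(ComputeSlice::new().id(1).mem_gb(2.0).vcores(2))
        .add_storage_slice(StorageSlice::new().id(1))
        .pubkey("<node-pubkey>") // placeholder value
        .check() // currently a no-op; capacity aggregation is TODO
}
```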
diff --git a/lib/osiris/core/objects/grid4/nodegroup.rs b/lib/osiris/core/objects/grid4/nodegroup.rs
new file mode 100644
index 0000000..96789c3
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/nodegroup.rs
@@ -0,0 +1,50 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+
+use super::common::{PricingPolicy, SLAPolicy};
+
+/// Grid4 NodeGroup model (root object for farmer configuration)
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct NodeGroup {
+    pub base_data: BaseData,
+    /// link back to the farmer (a user) who owns the nodegroup
+    #[index]
+    pub farmerid: u32,
+    /// only visible by farmer, in future encrypted, used to boot a node
+    pub secret: String,
+    pub description: String,
+    pub slapolicy: SLAPolicy,
+    pub pricingpolicy: PricingPolicy,
+    /// pricing in CC - cloud credit, per 2GB node slice
+    pub compute_slice_normalized_pricing_cc: f64,
+    /// pricing in CC - cloud credit, per 1GB storage slice
+    pub storage_slice_normalized_pricing_cc: f64,
+    /// signature as done by farmers to validate that they created this group
+    pub signature_farmer: String,
+}
+
+impl NodeGroup {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            farmerid: 0,
+            secret: String::new(),
+            description: String::new(),
+            slapolicy: SLAPolicy::default(),
+            pricingpolicy: PricingPolicy::new(),
+            compute_slice_normalized_pricing_cc: 0.0,
+            storage_slice_normalized_pricing_cc: 0.0,
+            signature_farmer: String::new(),
+        }
+    }
+
+    pub fn farmerid(mut self, v: u32) -> Self { self.farmerid = v; self }
+    pub fn secret(mut self, v: impl ToString) -> Self { self.secret = v.to_string(); self }
+    pub fn description(mut self, v: impl ToString) -> Self { self.description = v.to_string(); self }
+    pub fn slapolicy(mut self, v: SLAPolicy) -> Self { self.slapolicy = v; self }
+    pub fn pricingpolicy(mut self, v: PricingPolicy) -> Self { self.pricingpolicy = v; self }
+    pub fn compute_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.compute_slice_normalized_pricing_cc = v; self }
+    pub fn storage_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.storage_slice_normalized_pricing_cc = v; self }
+    pub fn signature_farmer(mut self, v: impl ToString) -> Self { self.signature_farmer = v.to_string(); self }
+}
diff --git a/lib/osiris/core/objects/grid4/reputation.rs b/lib/osiris/core/objects/grid4/reputation.rs
new file mode 100644
index 0000000..d6242c9
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/reputation.rs
@@ -0,0 +1,83 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+
+/// Node reputation information
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub struct NodeReputation {
+    pub node_id: u32,
+    /// between 0 and 100, earned over time
+    pub reputation: i32,
+    /// between 0 and 100, set by system, farmer has no ability to set this
+    pub uptime: i32,
+}
+
+/// NodeGroup reputation model
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct NodeGroupReputation {
+    pub base_data: BaseData,
+    #[index]
+    pub nodegroup_id: u32,
+    /// between 0 and 100, earned over time
+    pub reputation: i32,
+    /// between 0 and 100, set by system, farmer has no ability to set this
+    pub uptime: i32,
+    pub nodes: Vec<NodeReputation>,
+}
+
+impl NodeGroupReputation {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            nodegroup_id: 0,
+            reputation: 50, // default as per spec
+            uptime: 0,
+            nodes: Vec::new(),
+        }
+    }
+
+    pub fn nodegroup_id(mut self, v: u32) -> Self {
+        self.nodegroup_id = v;
+        self
+    }
+
+    pub fn reputation(mut self, v: i32) -> Self {
+        self.reputation = v;
+        self
+    }
+
+    pub fn uptime(mut self, v: i32) -> Self {
+        self.uptime = v;
+        self
+    }
+
+    pub fn add_node_reputation(mut self, node_rep: NodeReputation) -> Self {
+        self.nodes.push(node_rep);
+        self
+    }
+}
+
+impl NodeReputation {
+    pub fn new() -> Self {
+        Self {
+            node_id: 0,
+            reputation: 50, // default as per spec
+            uptime: 0,
+        }
+    }
+
+    pub fn node_id(mut self, v: u32) -> Self {
+        self.node_id = v;
+        self
+    }
+
+    pub fn reputation(mut self, v: i32) -> Self {
+        self.reputation = v;
+        self
+    }
+
+    pub fn uptime(mut self, v: i32) -> Self {
+        self.uptime = v;
+        self
+    }
+}
diff --git a/lib/osiris/core/objects/grid4/reservation.rs b/lib/osiris/core/objects/grid4/reservation.rs
new file mode 100644
index 0000000..2db13f1
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/reservation.rs
@@ -0,0 +1,56 @@
+use crate::store::BaseData;
+use rhai::{CustomType, TypeBuilder};
+use serde::{Deserialize, Serialize};
+
+/// Reservation status as per V spec
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+pub enum ReservationStatus {
+    #[default]
+    Pending,
+    Confirmed,
+    Assigned,
+    Cancelled,
+    Done,
+}
+
+/// Grid4 Reservation model
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct Reservation {
+    pub base_data: BaseData,
+    /// links back to customer for this capacity
+    #[index]
+    pub customer_id: u32,
+    pub compute_slices: Vec<u32>,
+    pub storage_slices: Vec<u32>,
+    pub status: ReservationStatus,
+    /// if obligation then will be charged and money needs to be in escrow, otherwise it's an intent
+    pub obligation: bool,
+    /// epoch
+    pub start_date: u32,
+    pub end_date: u32,
+}
+
+impl Reservation {
+    pub fn new() -> Self {
+        Self {
+            base_data: BaseData::new(),
+            customer_id: 0,
+            compute_slices: Vec::new(),
+            storage_slices: Vec::new(),
+            status: ReservationStatus::Pending,
+            obligation: false,
+            start_date: 0,
+            end_date: 0,
+        }
+    }
+
+    pub fn customer_id(mut self, v: u32) -> Self { self.customer_id = v; self }
+    pub fn add_compute_slice(mut self, id: u32) -> Self { self.compute_slices.push(id); self }
+    pub fn compute_slices(mut self, v: Vec<u32>) -> Self { self.compute_slices = v; self }
+    pub fn add_storage_slice(mut self, id: u32) -> Self { self.storage_slices.push(id); self }
+    pub fn storage_slices(mut self, v: Vec<u32>) -> Self { self.storage_slices = v; self }
+    pub fn status(mut self, v: ReservationStatus) -> Self { self.status = v; self }
+    pub fn obligation(mut self, v: bool) -> Self { self.obligation = v; self }
+    pub fn start_date(mut self, v: u32) -> Self { self.start_date = v; self }
+    pub fn end_date(mut self, v: u32) -> Self { self.end_date = v; self }
+}
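A sketch (editor's addition, not part of the commit) of a `Reservation` over two existing compute-slice IDs; the timestamps are illustrative epochs.

```rust
// Sketch: a firm (escrowed) reservation over slice IDs 1 and 2.
fn example_reservation() -> Reservation {
    Reservation::new()
        .customer_id(42)
        .add_compute_slice(1)
        .add_compute_slice(2)
        .obligation(true)
        .start_date(1_700_000_000)
        .end_date(1_731_536_000)
}
```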
diff --git a/lib/osiris/core/objects/grid4/specs/README.md b/lib/osiris/core/objects/grid4/specs/README.md
new file mode 100644
index 0000000..5aa351e
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/README.md
@@ -0,0 +1,194 @@
+
+# Grid4 Data Model
+
+This module defines data models for nodes, groups, and slices in a cloud/grid infrastructure. Each root object is marked with `@[heap]` and can be indexed for efficient querying.
+
+## Root Objects Overview
+
+| Object      | Description                                    | Index Fields                   |
+| ----------- | ---------------------------------------------- | ------------------------------ |
+| `Node`      | Represents a single node in the grid           | `id`, `nodegroupid`, `country` |
+| `NodeGroup` | Represents a group of nodes owned by a farmer  | `id`, `farmerid`               |
+
+---
+
+## Node
+
+Represents a single node in the grid with slices, devices, and capacity.
+
+| Field           | Type             | Description                                  | Indexed |
+| --------------- | ---------------- | -------------------------------------------- | ------- |
+| `id`            | `int`            | Unique node ID                               | ✅      |
+| `nodegroupid`   | `int`            | ID of the owning node group                  | ✅      |
+| `uptime`        | `int`            | Uptime percentage (0-100)                    | ✅      |
+| `computeslices` | `[]ComputeSlice` | List of compute slices                       | ❌      |
+| `storageslices` | `[]StorageSlice` | List of storage slices                       | ❌      |
+| `devices`       | `DeviceInfo`     | Hardware device info (storage, memory, etc.) | ❌      |
+| `country`       | `string`         | 2-letter country code                        | ✅      |
+| `capacity`      | `NodeCapacity`   | Aggregated hardware capacity                 | ❌      |
+| `provisiontime` | `u32`            | Provisioning time (simple/compatible format) | ✅      |
+
+---
+
+## NodeGroup
+
+Represents a group of nodes owned by a farmer, with policies.
+
+| Field                                 | Type            | Description                                    | Indexed |
+| ------------------------------------- | --------------- | ---------------------------------------------- | ------- |
+| `id`                                  | `u32`           | Unique group ID                                | ✅      |
+| `farmerid`                            | `u32`           | Farmer/user ID                                 | ✅      |
+| `secret`                              | `string`        | Encrypted secret for booting nodes             | ❌      |
+| `description`                         | `string`        | Group description                              | ❌      |
+| `slapolicy`                           | `SLAPolicy`     | SLA policy details                             | ❌      |
+| `pricingpolicy`                       | `PricingPolicy` | Pricing policy details                         | ❌      |
+| `compute_slice_normalized_pricing_cc` | `f64`           | Pricing per 2GB compute slice in cloud credits | ❌      |
+| `storage_slice_normalized_pricing_cc` | `f64`           | Pricing per 1GB storage slice in cloud credits | ❌      |
+| `reputation`                          | `int`           | Reputation (0-100)                             | ✅      |
+| `uptime`                              | `int`           | Uptime (0-100)                                 | ✅      |
+
+---
+
+## ComputeSlice
+
+Represents a compute slice (e.g., 1GB memory unit).
+
+| Field                      | Type            | Description                      |
+| -------------------------- | --------------- | -------------------------------- |
+| `nodeid`                   | `u32`           | Owning node ID                   |
+| `id`                       | `int`           | Slice ID in node                 |
+| `mem_gb`                   | `f64`           | Memory in GB                     |
+| `storage_gb`               | `f64`           | Storage in GB                    |
+| `passmark`                 | `int`           | Passmark score                   |
+| `vcores`                   | `int`           | Virtual cores                    |
+| `cpu_oversubscription`     | `int`           | CPU oversubscription ratio       |
+| `storage_oversubscription` | `int`           | Storage oversubscription ratio   |
+| `price_range`              | `[]f64`         | Price range [min, max]           |
+| `gpus`                     | `u8`            | Number of GPUs                   |
+| `price_cc`                 | `f64`           | Price per slice in cloud credits |
+| `pricing_policy`           | `PricingPolicy` | Pricing policy                   |
+| `sla_policy`               | `SLAPolicy`     | SLA policy                       |
+
+---
+
+## StorageSlice
+
+Represents a 1GB storage slice.
+ +| Field | Type | Description | +| ---------------- | --------------- | -------------------------------- | +| `nodeid` | `u32` | Owning node ID | +| `id` | `int` | Slice ID in node | +| `price_cc` | `f64` | Price per slice in cloud credits | +| `pricing_policy` | `PricingPolicy` | Pricing policy | +| `sla_policy` | `SLAPolicy` | SLA policy | + +--- + +## DeviceInfo + +Hardware device information for a node. + +| Field | Type | Description | +| --------- | ----------------- | ----------------------- | +| `vendor` | `string` | Vendor of the node | +| `storage` | `[]StorageDevice` | List of storage devices | +| `memory` | `[]MemoryDevice` | List of memory devices | +| `cpu` | `[]CPUDevice` | List of CPU devices | +| `gpu` | `[]GPUDevice` | List of GPU devices | +| `network` | `[]NetworkDevice` | List of network devices | + +--- + +## StorageDevice + +| Field | Type | Description | +| ------------- | -------- | --------------------- | +| `id` | `string` | Unique ID for device | +| `size_gb` | `f64` | Size in GB | +| `description` | `string` | Description of device | + +--- + +## MemoryDevice + +| Field | Type | Description | +| ------------- | -------- | --------------------- | +| `id` | `string` | Unique ID for device | +| `size_gb` | `f64` | Size in GB | +| `description` | `string` | Description of device | + +--- + +## CPUDevice + +| Field | Type | Description | +| ------------- | -------- | ------------------------ | +| `id` | `string` | Unique ID for device | +| `cores` | `int` | Number of CPU cores | +| `passmark` | `int` | Passmark benchmark score | +| `description` | `string` | Description of device | +| `cpu_brand` | `string` | Brand of the CPU | +| `cpu_version` | `string` | Version of the CPU | + +--- + +## GPUDevice + +| Field | Type | Description | +| ------------- | -------- | --------------------- | +| `id` | `string` | Unique ID for device | +| `cores` | `int` | Number of GPU cores | +| `memory_gb` | `f64` | GPU memory in GB | +| `description` | `string` | Description of device | +| `gpu_brand` | `string` | Brand of the GPU | +| `gpu_version` | `string` | Version of the GPU | + +--- + +## NetworkDevice + +| Field | Type | Description | +| ------------- | -------- | --------------------- | +| `id` | `string` | Unique ID for device | +| `speed_mbps` | `int` | Network speed in Mbps | +| `description` | `string` | Description of device | + +--- + +## NodeCapacity + +Aggregated hardware capacity for a node. + +| Field | Type | Description | +| ------------ | ----- | ---------------------- | +| `storage_gb` | `f64` | Total storage in GB | +| `mem_gb` | `f64` | Total memory in GB | +| `mem_gb_gpu` | `f64` | Total GPU memory in GB | +| `passmark` | `int` | Total passmark score | +| `vcores` | `int` | Total virtual cores | + +--- + +## SLAPolicy + +Service Level Agreement policy for slices or node groups. + +| Field | Type | Description | +| -------------------- | ----- | --------------------------------------- | +| `sla_uptime` | `int` | Required uptime % (e.g., 90) | +| `sla_bandwidth_mbit` | `int` | Guaranteed bandwidth in Mbps (0 = none) | +| `sla_penalty` | `int` | Penalty % if SLA is breached (0-100) | + +--- + +## PricingPolicy + +Pricing policy for slices or node groups. + +| Field | Type | Description | +| ---------------------------- | ------- | --------------------------------------------------------- | +| `marketplace_year_discounts` | `[]int` | Discounts for 1Y, 2Y, 3Y prepaid usage (e.g. 
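To make the pricing fields concrete, here is a small worked example in Rust (the workspace language). All numbers are illustrative, not from the spec; the function simply combines a NodeGroup's normalized slice prices with one of its `marketplace_year_discounts` percentages:

```rust
/// Effective cloud-credit price for a set of slices, applying one of the
/// NodeGroup's `marketplace_year_discounts` percentages.
fn effective_price_cc(
    compute_slices: u32,    // nr of 2GB compute slices
    storage_slices: u32,    // nr of 1GB storage slices
    compute_price_cc: f64,  // compute_slice_normalized_pricing_cc
    storage_price_cc: f64,  // storage_slice_normalized_pricing_cc
    year_discount_pct: u32, // e.g. 30 for 1Y prepaid, per [30, 40, 50]
) -> f64 {
    let base = compute_slices as f64 * compute_price_cc
        + storage_slices as f64 * storage_price_cc;
    base * (1.0 - year_discount_pct as f64 / 100.0)
}

fn main() {
    // 4 compute slices at 0.5 CC, 100 storage slices at 0.02 CC, 1Y discount 30%:
    // (4 * 0.5 + 100 * 0.02) * 0.7 = 2.8 CC
    assert!((effective_price_cc(4, 100, 0.5, 0.02, 30) - 2.8).abs() < 1e-9);
}
```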
diff --git a/lib/osiris/core/objects/grid4/specs/model_bid.v b/lib/osiris/core/objects/grid4/specs/model_bid.v
new file mode 100644
index 0000000..0ca7b3f
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/model_bid.v
@@ -0,0 +1,37 @@
+module datamodel
+
+// I can bid for infra, and optionally get accepted
+@[heap]
+pub struct Bid {
+pub mut:
+	id u32
+	customer_id u32 // links back to customer for this capacity (user on ledger)
+	compute_slices_nr int // nr of slices I need in 1 machine
+	compute_slice_price f64 // price per 1 GB slice I want to accept
+	storage_slices_nr int
+	storage_slice_price f64 // price per 1 GB storage slice I want to accept
+	status BidStatus
+	obligation bool // if obligation then will be charged and money needs to be in escrow, otherwise it's an intent
+	start_date u32 // epoch
+	end_date u32
+	signature_user string // signature as done by a user/consumer to validate their identity and intent
+	billing_period BillingPeriod
+}
+
+pub enum BidStatus {
+	pending
+	confirmed
+	assigned
+	cancelled
+	done
+}
+
+pub enum BillingPeriod {
+	hourly
+	monthly
+	yearly
+	biannually
+	triannually
+}
diff --git a/lib/osiris/core/objects/grid4/specs/model_contract.v b/lib/osiris/core/objects/grid4/specs/model_contract.v
new file mode 100644
index 0000000..f9fc26b
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/model_contract.v
@@ -0,0 +1,52 @@
+module datamodel
+
+// the agreed contract for infra, the result of an accepted bid
+@[heap]
+pub struct Contract {
+pub mut:
+	id u32
+	customer_id u32 // links back to customer for this capacity (user on ledger)
+	compute_slices []ComputeSliceProvisioned
+	storage_slices []StorageSliceProvisioned
+	compute_slice_price f64 // price per 1 GB agreed upon
+	storage_slice_price f64 // price per 1 GB agreed upon
+	network_slice_price f64 // price per 1 GB agreed upon (transfer)
+	status ContractStatus
+	start_date u32 // epoch
+	end_date u32
+	signature_user string // signature as done by a user/consumer to validate their identity and intent
+	signature_hoster string // signature as done by the hoster
+	billing_period BillingPeriod
+}
+
+pub enum ContractStatus {
+	active
+	cancelled
+	error
+	paused
+}
+
+// typically 1GB of memory, but can be adjusted based on the size of the machine
+pub struct ComputeSliceProvisioned {
+pub mut:
+	node_id u32
+	id u16 // the id of the slice in the node
+	mem_gb f64
+	storage_gb f64
+	passmark int
+	vcores int
+	cpu_oversubscription int
+	tags string
+}
+
+// 1GB of storage
+pub struct StorageSliceProvisioned {
+pub mut:
+	node_id u32
+	id u16 // the id of the slice in the node; slice ids are tracked in the node itself
+	storage_size_gb int
+	tags string
+}
+
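reservation.rs earlier in this commit shows how these V specs are being ported to Rust. A hypothetical `bid.rs` following the same conventions (serde enums with a `#[default]` on the first variant, matching V's default-to-first-variant behavior) might begin like this; it is a sketch, not part of this commit:

```rust
// Hypothetical Rust mirror of the enums in model_bid.v, following the
// conventions used in reservation.rs above.
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BidStatus {
    #[default] // V enums default to their first variant
    Pending,
    Confirmed,
    Assigned,
    Cancelled,
    Done,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BillingPeriod {
    #[default]
    Hourly,
    Monthly,
    Yearly,
    Biannually,
    Triannually,
}
```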
diff --git a/lib/osiris/core/objects/grid4/specs/model_node.v b/lib/osiris/core/objects/grid4/specs/model_node.v
new file mode 100644
index 0000000..b451fa6
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/model_node.v
@@ -0,0 +1,104 @@
+module datamodel
+
+// ACCESS ONLY TF
+
+@[heap]
+pub struct Node {
+pub mut:
+	id int
+	nodegroupid int
+	uptime int // 0..100
+	computeslices []ComputeSlice
+	storageslices []StorageSlice
+	devices DeviceInfo
+	country string // 2 letter code as specified in lib/data/countries/data/countryInfo.txt, use that library for validation
+	capacity NodeCapacity // Hardware capacity details
+	birthtime u32 // first time node was active
+	pubkey string
+	signature_node string // signature done on node to validate pubkey with privkey
+	signature_farmer string // signature as done by farmers to validate their identity
+}
+
+pub struct DeviceInfo {
+pub mut:
+	vendor string
+	storage []StorageDevice
+	memory []MemoryDevice
+	cpu []CPUDevice
+	gpu []GPUDevice
+	network []NetworkDevice
+}
+
+pub struct StorageDevice {
+pub mut:
+	id string // can be used in node
+	size_gb f64 // Size of the storage device in gigabytes
+	description string // Description of the storage device
+}
+
+pub struct MemoryDevice {
+pub mut:
+	id string // can be used in node
+	size_gb f64 // Size of the memory device in gigabytes
+	description string // Description of the memory device
+}
+
+pub struct CPUDevice {
+pub mut:
+	id string // can be used in node
+	cores int // Number of CPU cores
+	passmark int
+	description string // Description of the CPU
+	cpu_brand string // Brand of the CPU
+	cpu_version string // Version of the CPU
+}
+
+pub struct GPUDevice {
+pub mut:
+	id string // can be used in node
+	cores int // Number of GPU cores
+	memory_gb f64 // Size of the GPU memory in gigabytes
+	description string // Description of the GPU
+	gpu_brand string
+	gpu_version string
+}
+
+pub struct NetworkDevice {
+pub mut:
+	id string // can be used in node
+	speed_mbps int // Network speed in Mbps
+	description string // Description of the network device
+}
+
+// NodeCapacity represents the hardware capacity details of a node.
+pub struct NodeCapacity {
+pub mut:
+	storage_gb f64 // Total storage in gigabytes
+	mem_gb f64 // Total memory in gigabytes
+	mem_gb_gpu f64 // Total GPU memory in gigabytes
+	passmark int // Passmark score for the node
+	vcores int // Total virtual cores
+}
+
+// typically 1GB of memory, but can be adjusted based on the size of the machine
+pub struct ComputeSlice {
+pub mut:
+	id u16 // the id of the slice in the node
+	mem_gb f64
+	storage_gb f64
+	passmark int
+	vcores int
+	cpu_oversubscription int
+	storage_oversubscription int
+	gpus u8 // nr of GPUs; see the node to know which GPUs they are
+}
+
+// 1GB of storage
+pub struct StorageSlice {
+pub mut:
+	id u16 // the id of the slice in the node; slice ids are tracked in the node itself
+}
+
+fn (mut n Node) check() ! {
+	// todo: calculate NodeCapacity out of the devices on the Node
+}
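The `check()` hook above leaves the capacity roll-up as a TODO. A sketch of that aggregation, written against trimmed Rust mirrors of the device structs (hypothetical; this commit ships only the V spec, and fields not needed for the sums are omitted), could look like:

```rust
// Trimmed Rust mirrors of the V device structs above (illustrative only).
struct StorageDevice { size_gb: f64 }
struct MemoryDevice { size_gb: f64 }
struct CPUDevice { cores: i32, passmark: i32 }
struct GPUDevice { memory_gb: f64 }
struct DeviceInfo {
    storage: Vec<StorageDevice>,
    memory: Vec<MemoryDevice>,
    cpu: Vec<CPUDevice>,
    gpu: Vec<GPUDevice>,
}
struct NodeCapacity { storage_gb: f64, mem_gb: f64, mem_gb_gpu: f64, passmark: i32, vcores: i32 }

// Derive NodeCapacity by summing over the node's devices.
fn aggregate_capacity(d: &DeviceInfo) -> NodeCapacity {
    NodeCapacity {
        storage_gb: d.storage.iter().map(|s| s.size_gb).sum(),
        mem_gb: d.memory.iter().map(|m| m.size_gb).sum(),
        mem_gb_gpu: d.gpu.iter().map(|g| g.memory_gb).sum(),
        passmark: d.cpu.iter().map(|c| c.passmark).sum(),
        // CPUDevice carries no vcore count; assume one vcore per physical core.
        vcores: d.cpu.iter().map(|c| c.cores).sum(),
    }
}

fn main() {
    let d = DeviceInfo {
        storage: vec![StorageDevice { size_gb: 2000.0 }],
        memory: vec![MemoryDevice { size_gb: 64.0 }],
        cpu: vec![CPUDevice { cores: 16, passmark: 25_000 }],
        gpu: vec![],
    };
    let cap = aggregate_capacity(&d);
    assert_eq!(cap.vcores, 16);
    assert!((cap.storage_gb - 2000.0).abs() < f64::EPSILON);
}
```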
diff --git a/lib/osiris/core/objects/grid4/specs/model_nodegroup.v b/lib/osiris/core/objects/grid4/specs/model_nodegroup.v
new file mode 100644
index 0000000..ae4858b
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/model_nodegroup.v
@@ -0,0 +1,33 @@
+module datamodel
+
+// is a root object, is the only obj the farmer needs to configure in the UI, this defines how slices will be created
+@[heap]
+pub struct NodeGroup {
+pub mut:
+	id u32
+	farmerid u32 // link back to farmer who owns the nodegroup, is a user?
+	secret string // only visible by farmer, in future encrypted, used to boot a node
+	description string
+	slapolicy SLAPolicy
+	pricingpolicy PricingPolicy
+	compute_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 2GB node slice
+	storage_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 1GB storage slice
+	signature_farmer string // signature as done by farmers to validate that they created this group
+}
+
+pub struct SLAPolicy {
+pub mut:
+	sla_uptime int // should be >90
+	sla_bandwidth_mbit int // minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
+	sla_penalty int // percent of a month's revenue given back if the SLA is breached, e.g. 200 means we return 2 months' worth of revenue if the SLA is missed
+}
+
+pub struct PricingPolicy {
+pub mut:
+	marketplace_year_discounts []int = [30, 40, 50] // e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization on all his purchases then this provider gives 30%, 2Y 40%, ...
+	// volume_discounts []int = [10, 20, 30] // e.g. 10,20,30
+}
diff --git a/lib/osiris/core/objects/grid4/specs/model_reputation.v b/lib/osiris/core/objects/grid4/specs/model_reputation.v
new file mode 100644
index 0000000..0d65749
--- /dev/null
+++ b/lib/osiris/core/objects/grid4/specs/model_reputation.v
@@ -0,0 +1,19 @@
+
+@[heap]
+pub struct NodeGroupReputation {
+pub mut:
+	nodegroup_id u32
+	reputation int = 50 // between 0 and 100, earned over time
+	uptime int // between 0 and 100, set by system, farmer has no ability to set this
+	nodes []NodeReputation
+}
+
+pub struct NodeReputation {
+pub mut:
+	node_id u32
+	reputation int = 50 // between 0 and 100, earned over time
+	uptime int // between 0 and 100, set by system, farmer has no ability to set this
+}
diff --git a/lib/osiris/core/objects/heroledger/dnsrecord.rs b/lib/osiris/core/objects/heroledger/dnsrecord.rs
new file mode 100644
index 0000000..92fb122
--- /dev/null
+++ b/lib/osiris/core/objects/heroledger/dnsrecord.rs
@@ -0,0 +1,311 @@
+use crate::store::{BaseData, IndexKey, Object};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+/// Defines the supported DNS record types
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum NameType {
+    A,
+    AAAA,
+    CNAME,
+    MX,
+    TXT,
+    SRV,
+    PTR,
+    NS,
+}
+
+impl Default for NameType {
+    fn default() -> Self {
+        NameType::A
+    }
+}
+
+/// Category of the DNS record
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum NameCat {
+    IPv4,
+    IPv6,
+    Mycelium,
+}
+
+impl Default for NameCat {
+    fn default() -> Self {
+        NameCat::IPv4
+    }
+}
+
+/// Status of a DNS zone
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum DNSZoneStatus {
+    Active,
+    Suspended,
+    Archived,
+}
+
+impl Default for DNSZoneStatus {
+    fn default() -> Self {
+        DNSZoneStatus::Active
+    }
+}
+
+/// Represents a DNS record configuration
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct DNSRecord {
+    pub subdomain: String,
+    pub record_type: NameType,
+    pub value: String,
+    pub priority: u32,
+    pub ttl: u32,
+    pub is_active: bool,
+    pub cat: NameCat,
+    pub is_wildcard: bool,
+}
+
+impl DNSRecord {
+    pub fn new() -> Self {
+        Self {
+            subdomain: String::new(),
+            record_type: NameType::default(),
+            value: String::new(),
+            priority: 0,
+            ttl: 3600,
+            is_active: true,
+            cat: NameCat::default(),
+            is_wildcard: false,
+        }
+    }
+
+    pub fn subdomain(mut self, subdomain: impl ToString) -> Self {
+        self.subdomain = subdomain.to_string();
+        self
+    }
+
+    pub fn record_type(mut self, record_type: NameType) -> Self {
+        self.record_type = record_type;
+        self
+    }
+
+    pub fn value(mut self, value: impl ToString) -> Self {
+        self.value = value.to_string();
+        self
+    }
+
+    pub fn priority(mut self, priority: u32) -> Self {
+        self.priority = priority;
+        self
+    }
+
+    pub fn ttl(mut self, ttl: u32) -> Self {
+        self.ttl = ttl;
+        self
+    }
+
+    pub fn is_active(mut self, is_active: bool) -> Self {
+        self.is_active = is_active;
+        self
+    }
+
+    pub fn cat(mut self, cat: NameCat) -> Self {
+        self.cat = cat;
+        self
+    }
+
+    pub fn is_wildcard(mut self, is_wildcard: bool) -> Self {
+        self.is_wildcard = is_wildcard;
+        self
+    }
+
+    pub fn build(self) -> Self {
+        self
+} + +impl std::fmt::Display for DNSRecord { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}:{:?}", self.subdomain, self.record_type) + } +} + +/// SOA (Start of Authority) record for a DNS zone +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SOARecord { + pub zone_id: u32, + pub primary_ns: String, + pub admin_email: String, + pub serial: u64, + pub refresh: u32, + pub retry: u32, + pub expire: u32, + pub minimum_ttl: u32, + pub is_active: bool, +} + +impl SOARecord { + pub fn new() -> Self { + Self { + zone_id: 0, + primary_ns: String::new(), + admin_email: String::new(), + serial: 0, + refresh: 3600, + retry: 600, + expire: 604800, + minimum_ttl: 3600, + is_active: true, + } + } + + pub fn zone_id(mut self, zone_id: u32) -> Self { + self.zone_id = zone_id; + self + } + + pub fn primary_ns(mut self, primary_ns: impl ToString) -> Self { + self.primary_ns = primary_ns.to_string(); + self + } + + pub fn admin_email(mut self, admin_email: impl ToString) -> Self { + self.admin_email = admin_email.to_string(); + self + } + + pub fn serial(mut self, serial: u64) -> Self { + self.serial = serial; + self + } + + pub fn refresh(mut self, refresh: u32) -> Self { + self.refresh = refresh; + self + } + + pub fn retry(mut self, retry: u32) -> Self { + self.retry = retry; + self + } + + pub fn expire(mut self, expire: u32) -> Self { + self.expire = expire; + self + } + + pub fn minimum_ttl(mut self, minimum_ttl: u32) -> Self { + self.minimum_ttl = minimum_ttl; + self + } + + pub fn is_active(mut self, is_active: bool) -> Self { + self.is_active = is_active; + self + } + + pub fn build(self) -> Self { + self + } +} + +impl std::fmt::Display for SOARecord { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.primary_ns) + } +} + +/// Represents a DNS zone with its configuration and records +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct DNSZone { + /// Base model data + pub base_data: BaseData, + #[index] + pub domain: String, + #[index(path = "subdomain")] + #[index(path = "record_type")] + pub dnsrecords: Vec, + pub administrators: Vec, + pub status: DNSZoneStatus, + pub metadata: HashMap, + #[index(path = "primary_ns")] + pub soarecord: Vec, +} + +impl DNSZone { + /// Create a new DNS zone instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + domain: String::new(), + dnsrecords: Vec::new(), + administrators: Vec::new(), + status: DNSZoneStatus::default(), + metadata: HashMap::new(), + soarecord: Vec::new(), + } + } + + /// Set the domain name (fluent) + pub fn domain(mut self, domain: impl ToString) -> Self { + self.domain = domain.to_string(); + self + } + + /// Add a DNS record (fluent) + pub fn add_dnsrecord(mut self, record: DNSRecord) -> Self { + self.dnsrecords.push(record); + self + } + + /// Set all DNS records (fluent) + pub fn dnsrecords(mut self, dnsrecords: Vec) -> Self { + self.dnsrecords = dnsrecords; + self + } + + /// Add an administrator (fluent) + pub fn add_administrator(mut self, admin_id: u32) -> Self { + self.administrators.push(admin_id); + self + } + + /// Set all administrators (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set the zone status (fluent) + pub fn status(mut self, status: DNSZoneStatus) -> Self { + self.status = status; + self + } + + /// Add metadata entry (fluent) + pub 
fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self + } + + /// Set all metadata (fluent) + pub fn metadata(mut self, metadata: HashMap) -> Self { + self.metadata = metadata; + self + } + + /// Add an SOA record (fluent) + pub fn add_soarecord(mut self, soa: SOARecord) -> Self { + self.soarecord.push(soa); + self + } + + /// Set all SOA records (fluent) + pub fn soarecord(mut self, soarecord: Vec) -> Self { + self.soarecord = soarecord; + self + } + + /// Build the final DNS zone instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/group.rs b/lib/osiris/core/objects/heroledger/group.rs new file mode 100644 index 0000000..5bb941f --- /dev/null +++ b/lib/osiris/core/objects/heroledger/group.rs @@ -0,0 +1,227 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; + +/// Defines the lifecycle of a group +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum GroupStatus { + Active, + Inactive, + Suspended, + Archived, +} + +impl Default for GroupStatus { + fn default() -> Self { + GroupStatus::Active + } +} + +/// Visibility controls who can discover or view the group +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum Visibility { + Public, // Anyone can see and request to join + Private, // Only invited users can see the group + Unlisted, // Not visible in search; only accessible by direct link or DNS +} + +impl Default for Visibility { + fn default() -> Self { + Visibility::Public + } +} + +/// GroupConfig holds rules that govern group membership and behavior +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +pub struct GroupConfig { + pub max_members: u32, + pub allow_guests: bool, + pub auto_approve: bool, + pub require_invite: bool, +} + +impl GroupConfig { + pub fn new() -> Self { + Self { + max_members: 0, + allow_guests: false, + auto_approve: false, + require_invite: false, + } + } + + pub fn max_members(mut self, max_members: u32) -> Self { + self.max_members = max_members; + self + } + + pub fn allow_guests(mut self, allow_guests: bool) -> Self { + self.allow_guests = allow_guests; + self + } + + pub fn auto_approve(mut self, auto_approve: bool) -> Self { + self.auto_approve = auto_approve; + self + } + + pub fn require_invite(mut self, require_invite: bool) -> Self { + self.require_invite = require_invite; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents a collaborative or access-controlled unit within the system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Group { + /// Base model data + pub base_data: BaseData, + #[index] + pub name: String, + pub description: String, + pub dnsrecords: Vec, + pub administrators: Vec, + pub config: GroupConfig, + pub status: GroupStatus, + pub visibility: Visibility, + pub created: u64, + pub updated: u64, +} + +impl Group { + /// Create a new group instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + name: String::new(), + description: String::new(), + dnsrecords: Vec::new(), + administrators: Vec::new(), + config: GroupConfig::new(), + status: GroupStatus::default(), + visibility: Visibility::default(), + created: 0, + updated: 0, + } + } + + /// Set the group name (fluent) + pub fn name(mut self, name: impl ToString) -> Self { + self.name = name.to_string(); + self + } + + /// Set the 
group description (fluent) + pub fn description(mut self, description: impl ToString) -> Self { + self.description = description.to_string(); + self + } + + /// Add a DNS record ID (fluent) + pub fn add_dnsrecord(mut self, dnsrecord_id: u32) -> Self { + self.dnsrecords.push(dnsrecord_id); + self + } + + /// Set all DNS record IDs (fluent) + pub fn dnsrecords(mut self, dnsrecords: Vec) -> Self { + self.dnsrecords = dnsrecords; + self + } + + /// Add an administrator user ID (fluent) + pub fn add_administrator(mut self, user_id: u32) -> Self { + self.administrators.push(user_id); + self + } + + /// Set all administrator user IDs (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set the group configuration (fluent) + pub fn config(mut self, config: GroupConfig) -> Self { + self.config = config; + self + } + + /// Set the group status (fluent) + pub fn status(mut self, status: GroupStatus) -> Self { + self.status = status; + self + } + + /// Set the group visibility (fluent) + pub fn visibility(mut self, visibility: Visibility) -> Self { + self.visibility = visibility; + self + } + + /// Set the created timestamp (fluent) + pub fn created(mut self, created: u64) -> Self { + self.created = created; + self + } + + /// Set the updated timestamp (fluent) + pub fn updated(mut self, updated: u64) -> Self { + self.updated = updated; + self + } + + /// Build the final group instance + pub fn build(self) -> Self { + self + } +} + +/// Represents the membership relationship between users and groups +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct UserGroupMembership { + /// Base model data + pub base_data: BaseData, + #[index] + pub user_id: u32, + pub group_ids: Vec, +} + +impl UserGroupMembership { + /// Create a new user group membership instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + user_id: 0, + group_ids: Vec::new(), + } + } + + /// Set the user ID (fluent) + pub fn user_id(mut self, user_id: u32) -> Self { + self.user_id = user_id; + self + } + + /// Add a group ID (fluent) + pub fn add_group_id(mut self, group_id: u32) -> Self { + self.group_ids.push(group_id); + self + } + + /// Set all group IDs (fluent) + pub fn group_ids(mut self, group_ids: Vec) -> Self { + self.group_ids = group_ids; + self + } + + /// Build the final membership instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/membership.rs b/lib/osiris/core/objects/heroledger/membership.rs new file mode 100644 index 0000000..8b24f54 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/membership.rs @@ -0,0 +1,110 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; + +/// Defines the possible roles a member can have +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum MemberRole { + Owner, + Admin, + Moderator, + Member, + Guest, +} + +impl Default for MemberRole { + fn default() -> Self { + MemberRole::Member + } +} + +/// Represents the current status of membership +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum MemberStatus { + Active, + Pending, + Suspended, + Removed, +} + +impl Default for MemberStatus { + fn default() -> Self { + MemberStatus::Pending + } +} + +/// Represents a member within a circle +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Member { + 
/// Base model data + pub base_data: BaseData, + #[index] + pub user_id: u32, + pub role: MemberRole, + pub status: MemberStatus, + pub joined_at: u64, + pub invited_by: u32, + pub permissions: Vec, +} + +impl Member { + /// Create a new member instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + user_id: 0, + role: MemberRole::default(), + status: MemberStatus::default(), + joined_at: 0, + invited_by: 0, + permissions: Vec::new(), + } + } + + /// Set the user ID (fluent) + pub fn user_id(mut self, user_id: u32) -> Self { + self.user_id = user_id; + self + } + + /// Set the member role (fluent) + pub fn role(mut self, role: MemberRole) -> Self { + self.role = role; + self + } + + /// Set the member status (fluent) + pub fn status(mut self, status: MemberStatus) -> Self { + self.status = status; + self + } + + /// Set the joined timestamp (fluent) + pub fn joined_at(mut self, joined_at: u64) -> Self { + self.joined_at = joined_at; + self + } + + /// Set who invited this member (fluent) + pub fn invited_by(mut self, invited_by: u32) -> Self { + self.invited_by = invited_by; + self + } + + /// Add a permission (fluent) + pub fn add_permission(mut self, permission: impl ToString) -> Self { + self.permissions.push(permission.to_string()); + self + } + + /// Set all permissions (fluent) + pub fn permissions(mut self, permissions: Vec) -> Self { + self.permissions = permissions; + self + } + + /// Build the final member instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/mod.rs b/lib/osiris/core/objects/heroledger/mod.rs new file mode 100644 index 0000000..4882238 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/mod.rs @@ -0,0 +1,10 @@ +// Export all heroledger model modules +pub mod dnsrecord; +pub mod group; +pub mod membership; +pub mod money; +pub mod rhai; +pub mod secretbox; +pub mod signature; +pub mod user; +pub mod user_kvs; diff --git a/lib/osiris/core/objects/heroledger/money.rs b/lib/osiris/core/objects/heroledger/money.rs new file mode 100644 index 0000000..55c4499 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/money.rs @@ -0,0 +1,498 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Represents the status of an account +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum AccountStatus { + Active, + Inactive, + Suspended, + Archived, +} + +impl Default for AccountStatus { + fn default() -> Self { + AccountStatus::Active + } +} + +/// Represents the type of transaction +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum TransactionType { + Transfer, + Clawback, + Freeze, + Unfreeze, + Issue, + Burn, +} + +impl Default for TransactionType { + fn default() -> Self { + TransactionType::Transfer + } +} + +/// Represents a signature for transactions +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct Signature { + pub signer_id: u32, + pub signature: String, + pub timestamp: u64, +} + +impl Signature { + pub fn new() -> Self { + Self { + signer_id: 0, + signature: String::new(), + timestamp: 0, + } + } + + pub fn signer_id(mut self, signer_id: u32) -> Self { + self.signer_id = signer_id; + self + } + + pub fn signature(mut self, signature: impl ToString) -> Self { + self.signature = signature.to_string(); + self + } + + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + pub fn build(self) -> Self { + 
self + } +} + +/// Policy item for account operations +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +pub struct AccountPolicyItem { + pub signers: Vec, + pub min_signatures: u32, + pub enabled: bool, + pub threshold: f64, + pub recipient: u32, +} + +impl AccountPolicyItem { + pub fn new() -> Self { + Self { + signers: Vec::new(), + min_signatures: 0, + enabled: false, + threshold: 0.0, + recipient: 0, + } + } + + pub fn add_signer(mut self, signer_id: u32) -> Self { + self.signers.push(signer_id); + self + } + + pub fn signers(mut self, signers: Vec) -> Self { + self.signers = signers; + self + } + + pub fn min_signatures(mut self, min_signatures: u32) -> Self { + self.min_signatures = min_signatures; + self + } + + pub fn enabled(mut self, enabled: bool) -> Self { + self.enabled = enabled; + self + } + + pub fn threshold(mut self, threshold: f64) -> Self { + self.threshold = threshold; + self + } + + pub fn recipient(mut self, recipient: u32) -> Self { + self.recipient = recipient; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents an account in the financial system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Account { + /// Base model data + pub base_data: BaseData, + pub owner_id: u32, + #[index] + pub address: String, + pub balance: f64, + pub currency: String, + pub assetid: u32, + pub last_activity: u64, + pub administrators: Vec, + pub accountpolicy: u32, +} + +impl Account { + /// Create a new account instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + owner_id: 0, + address: String::new(), + balance: 0.0, + currency: String::new(), + assetid: 0, + last_activity: 0, + administrators: Vec::new(), + accountpolicy: 0, + } + } + + /// Set the owner ID (fluent) + pub fn owner_id(mut self, owner_id: u32) -> Self { + self.owner_id = owner_id; + self + } + + /// Set the blockchain address (fluent) + pub fn address(mut self, address: impl ToString) -> Self { + self.address = address.to_string(); + self + } + + /// Set the balance (fluent) + pub fn balance(mut self, balance: f64) -> Self { + self.balance = balance; + self + } + + /// Set the currency (fluent) + pub fn currency(mut self, currency: impl ToString) -> Self { + self.currency = currency.to_string(); + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the last activity timestamp (fluent) + pub fn last_activity(mut self, last_activity: u64) -> Self { + self.last_activity = last_activity; + self + } + + /// Add an administrator (fluent) + pub fn add_administrator(mut self, admin_id: u32) -> Self { + self.administrators.push(admin_id); + self + } + + /// Set all administrators (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set the account policy ID (fluent) + pub fn accountpolicy(mut self, accountpolicy: u32) -> Self { + self.accountpolicy = accountpolicy; + self + } + + /// Build the final account instance + pub fn build(self) -> Self { + self + } +} + +/// Represents an asset in the financial system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Asset { + /// Base model data + pub base_data: BaseData, + #[index] + pub address: String, + pub assetid: u32, + pub asset_type: String, + pub issuer: u32, + pub supply: f64, + pub decimals: u8, + pub 
is_frozen: bool, + pub metadata: HashMap, + pub administrators: Vec, + pub min_signatures: u32, +} + +impl Asset { + /// Create a new asset instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + address: String::new(), + assetid: 0, + asset_type: String::new(), + issuer: 0, + supply: 0.0, + decimals: 0, + is_frozen: false, + metadata: HashMap::new(), + administrators: Vec::new(), + min_signatures: 0, + } + } + + /// Set the blockchain address (fluent) + pub fn address(mut self, address: impl ToString) -> Self { + self.address = address.to_string(); + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the asset type (fluent) + pub fn asset_type(mut self, asset_type: impl ToString) -> Self { + self.asset_type = asset_type.to_string(); + self + } + + /// Set the issuer (fluent) + pub fn issuer(mut self, issuer: u32) -> Self { + self.issuer = issuer; + self + } + + /// Set the supply (fluent) + pub fn supply(mut self, supply: f64) -> Self { + self.supply = supply; + self + } + + /// Set the decimals (fluent) + pub fn decimals(mut self, decimals: u8) -> Self { + self.decimals = decimals; + self + } + + /// Set the frozen status (fluent) + pub fn is_frozen(mut self, is_frozen: bool) -> Self { + self.is_frozen = is_frozen; + self + } + + /// Add metadata entry (fluent) + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self + } + + /// Set all metadata (fluent) + pub fn metadata(mut self, metadata: HashMap) -> Self { + self.metadata = metadata; + self + } + + /// Add an administrator (fluent) + pub fn add_administrator(mut self, admin_id: u32) -> Self { + self.administrators.push(admin_id); + self + } + + /// Set all administrators (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set minimum signatures required (fluent) + pub fn min_signatures(mut self, min_signatures: u32) -> Self { + self.min_signatures = min_signatures; + self + } + + /// Build the final asset instance + pub fn build(self) -> Self { + self + } +} + +/// Represents account policies for various operations +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct AccountPolicy { + /// Base model data + pub base_data: BaseData, + pub transferpolicy: AccountPolicyItem, + pub adminpolicy: AccountPolicyItem, + pub clawbackpolicy: AccountPolicyItem, + pub freezepolicy: AccountPolicyItem, +} + +impl AccountPolicy { + /// Create a new account policy instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + transferpolicy: AccountPolicyItem::new(), + adminpolicy: AccountPolicyItem::new(), + clawbackpolicy: AccountPolicyItem::new(), + freezepolicy: AccountPolicyItem::new(), + } + } + + /// Set the transfer policy (fluent) + pub fn transferpolicy(mut self, transferpolicy: AccountPolicyItem) -> Self { + self.transferpolicy = transferpolicy; + self + } + + /// Set the admin policy (fluent) + pub fn adminpolicy(mut self, adminpolicy: AccountPolicyItem) -> Self { + self.adminpolicy = adminpolicy; + self + } + + /// Set the clawback policy (fluent) + pub fn clawbackpolicy(mut self, clawbackpolicy: AccountPolicyItem) -> Self { + self.clawbackpolicy = clawbackpolicy; + self + } + + /// Set the freeze policy (fluent) + pub fn freezepolicy(mut 
self, freezepolicy: AccountPolicyItem) -> Self { + self.freezepolicy = freezepolicy; + self + } + + /// Build the final account policy instance + pub fn build(self) -> Self { + self + } +} + +/// Represents a financial transaction +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Transaction { + /// Base model data + pub base_data: BaseData, + pub txid: u32, + pub source: u32, + pub destination: u32, + pub assetid: u32, + pub amount: f64, + pub timestamp: u64, + pub status: String, + pub memo: String, + pub tx_type: TransactionType, + pub signatures: Vec, +} + +impl Transaction { + /// Create a new transaction instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + txid: 0, + source: 0, + destination: 0, + assetid: 0, + amount: 0.0, + timestamp: 0, + status: String::new(), + memo: String::new(), + tx_type: TransactionType::default(), + signatures: Vec::new(), + } + } + + /// Set the transaction ID (fluent) + pub fn txid(mut self, txid: u32) -> Self { + self.txid = txid; + self + } + + /// Set the source account (fluent) + pub fn source(mut self, source: u32) -> Self { + self.source = source; + self + } + + /// Set the destination account (fluent) + pub fn destination(mut self, destination: u32) -> Self { + self.destination = destination; + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the amount (fluent) + pub fn amount(mut self, amount: f64) -> Self { + self.amount = amount; + self + } + + /// Set the timestamp (fluent) + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + /// Set the status (fluent) + pub fn status(mut self, status: impl ToString) -> Self { + self.status = status.to_string(); + self + } + + /// Set the memo (fluent) + pub fn memo(mut self, memo: impl ToString) -> Self { + self.memo = memo.to_string(); + self + } + + /// Set the transaction type (fluent) + pub fn tx_type(mut self, tx_type: TransactionType) -> Self { + self.tx_type = tx_type; + self + } + + /// Add a signature (fluent) + pub fn add_signature(mut self, signature: Signature) -> Self { + self.signatures.push(signature); + self + } + + /// Set all signatures (fluent) + pub fn signatures(mut self, signatures: Vec) -> Self { + self.signatures = signatures; + self + } + + /// Build the final transaction instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/rhai.rs b/lib/osiris/core/objects/heroledger/rhai.rs new file mode 100644 index 0000000..15aecc6 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/rhai.rs @@ -0,0 +1,364 @@ +use ::rhai::plugin::*; +use ::rhai::{Dynamic, Engine, EvalAltResult, Module, CustomType, TypeBuilder}; +use std::mem; + +use crate::objects::heroledger::{ + dnsrecord::DNSZone, + group::{Group, Visibility}, + money::Account, + user::{User, UserStatus}, +}; + +// ============================================================================ +// User Module +// ============================================================================ + +type RhaiUser = User; + +#[export_module] +mod rhai_user_module { + use crate::objects::heroledger::user::User; + + use super::RhaiUser; + + #[rhai_fn(name = "new_user", return_raw)] + pub fn new_user() -> Result> { + Ok(User::new(0)) + } + + #[rhai_fn(name = "username", return_raw)] + pub fn set_username( + user: &mut RhaiUser, + username: String, + ) -> Result> { + 
let owned = std::mem::take(user); + *user = owned.username(username); + Ok(user.clone()) + } + + #[rhai_fn(name = "add_email", return_raw)] + pub fn add_email(user: &mut RhaiUser, email: String) -> Result> { + let owned = std::mem::take(user); + *user = owned.add_email(email); + Ok(user.clone()) + } + + #[rhai_fn(name = "pubkey", return_raw)] + pub fn set_pubkey(user: &mut RhaiUser, pubkey: String) -> Result> { + let owned = std::mem::take(user); + *user = owned.pubkey(pubkey); + Ok(user.clone()) + } + + #[rhai_fn(name = "status", return_raw)] + pub fn set_status(user: &mut RhaiUser, status: String) -> Result> { + let status_enum = match status.as_str() { + "Active" => UserStatus::Active, + "Inactive" => UserStatus::Inactive, + "Suspended" => UserStatus::Suspended, + "Archived" => UserStatus::Archived, + _ => return Err(format!("Invalid user status: {}", status).into()), + }; + let owned = std::mem::take(user); + *user = owned.status(status_enum); + Ok(user.clone()) + } + + #[rhai_fn(name = "save_user", return_raw)] + pub fn save_user(user: &mut RhaiUser) -> Result> { + // This would integrate with the database save functionality + // For now, just return the user as-is + Ok(user.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(user: &mut RhaiUser) -> u32 { + user.base_data.id + } + + #[rhai_fn(name = "get_username")] + pub fn get_username(user: &mut RhaiUser) -> String { + user.username.clone() + } + + #[rhai_fn(name = "get_email")] + pub fn get_email(user: &mut RhaiUser) -> String { + if let Some(first_email) = user.email.first() { + first_email.clone() + } else { + String::new() + } + } + + #[rhai_fn(name = "get_pubkey")] + pub fn get_pubkey(user: &mut RhaiUser) -> String { + user.pubkey.clone() + } +} + +// ============================================================================ +// Group Module +// ============================================================================ + +type RhaiGroup = Group; + +#[export_module] +mod rhai_group_module { + use super::RhaiGroup; + + #[rhai_fn(name = "new_group", return_raw)] + pub fn new_group() -> Result> { + Ok(Group::new(0)) + } + + #[rhai_fn(name = "name", return_raw)] + pub fn set_name(group: &mut RhaiGroup, name: String) -> Result> { + let owned = std::mem::take(group); + *group = owned.name(name); + Ok(group.clone()) + } + + #[rhai_fn(name = "description", return_raw)] + pub fn set_description( + group: &mut RhaiGroup, + description: String, + ) -> Result> { + let owned = std::mem::take(group); + *group = owned.description(description); + Ok(group.clone()) + } + + #[rhai_fn(name = "visibility", return_raw)] + pub fn set_visibility( + group: &mut RhaiGroup, + visibility: String, + ) -> Result> { + let visibility_enum = match visibility.as_str() { + "Public" => Visibility::Public, + "Private" => Visibility::Private, + _ => return Err(format!("Invalid visibility: {}", visibility).into()), + }; + let owned = std::mem::take(group); + *group = owned.visibility(visibility_enum); + Ok(group.clone()) + } + + #[rhai_fn(name = "save_group", return_raw)] + pub fn save_group(group: &mut RhaiGroup) -> Result> { + Ok(group.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(group: &mut RhaiGroup) -> u32 { + group.base_data.id + } + + #[rhai_fn(name = "get_name")] + pub fn get_name(group: &mut RhaiGroup) -> String { + group.name.clone() + } + + #[rhai_fn(name = "get_description")] + pub fn get_description(group: &mut RhaiGroup) -> String { + group.description.clone() + } +} + +// 
============================================================================ +// Account Module (from money.rs) +// ============================================================================ + +type RhaiAccount = Account; + +#[export_module] +mod rhai_account_module { + use super::RhaiAccount; + + #[rhai_fn(name = "new_account", return_raw)] + pub fn new_account() -> Result> { + Ok(Account::new(0)) + } + + #[rhai_fn(name = "owner_id", return_raw)] + pub fn set_owner_id( + account: &mut RhaiAccount, + owner_id: i64, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.owner_id(owner_id as u32); + Ok(account.clone()) + } + + #[rhai_fn(name = "address", return_raw)] + pub fn set_address( + account: &mut RhaiAccount, + address: String, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.address(address); + Ok(account.clone()) + } + + #[rhai_fn(name = "currency", return_raw)] + pub fn set_currency( + account: &mut RhaiAccount, + currency: String, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.currency(currency); + Ok(account.clone()) + } + + #[rhai_fn(name = "save_account", return_raw)] + pub fn save_account(account: &mut RhaiAccount) -> Result> { + Ok(account.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(account: &mut RhaiAccount) -> u32 { + account.base_data.id + } + + #[rhai_fn(name = "get_address")] + pub fn get_address(account: &mut RhaiAccount) -> String { + account.address.clone() + } + + #[rhai_fn(name = "get_currency")] + pub fn get_currency(account: &mut RhaiAccount) -> String { + account.currency.clone() + } +} + +// ============================================================================ +// DNS Zone Module +// ============================================================================ + +type RhaiDNSZone = DNSZone; + +#[export_module] +mod rhai_dns_zone_module { + use super::RhaiDNSZone; + + #[rhai_fn(name = "new_dns_zone", return_raw)] + pub fn new_dns_zone() -> Result> { + Ok(DNSZone::new(0)) + } + + #[rhai_fn(name = "domain", return_raw)] + pub fn set_domain( + zone: &mut RhaiDNSZone, + domain: String, + ) -> Result> { + let owned = std::mem::take(zone); + *zone = owned.domain(domain); + Ok(zone.clone()) + } + + #[rhai_fn(name = "save_dns_zone", return_raw)] + pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result> { + Ok(zone.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(zone: &mut RhaiDNSZone) -> u32 { + zone.base_data.id + } + + #[rhai_fn(name = "get_domain")] + pub fn get_domain(zone: &mut RhaiDNSZone) -> String { + zone.domain.clone() + } +} + +// ============================================================================ +// Registration Functions +// ============================================================================ +// Registration functions + +/// Register heroledger modules into a Rhai Module (for use in packages) +/// This flattens all functions into the parent module +pub fn register_heroledger_modules(parent_module: &mut Module) { + // Register custom types + parent_module.set_custom_type::("User"); + parent_module.set_custom_type::("Group"); + parent_module.set_custom_type::("Account"); + parent_module.set_custom_type::("DNSZone"); + + // Merge user functions into parent module + let user_module = exported_module!(rhai_user_module); + parent_module.merge(&user_module); + + // Merge group functions into parent module + let group_module = exported_module!(rhai_group_module); + parent_module.merge(&group_module); + + // 
Merge account functions into parent module + let account_module = exported_module!(rhai_account_module); + parent_module.merge(&account_module); + + // Merge dnszone functions into parent module + let dnszone_module = exported_module!(rhai_dns_zone_module); + parent_module.merge(&dnszone_module); +} + +/// Register heroledger modules into a Rhai Engine (for standalone use) +pub fn register_user_functions(engine: &mut Engine) { + let module = exported_module!(rhai_user_module); + engine.register_static_module("user", module.into()); +} + +pub fn register_group_functions(engine: &mut Engine) { + let module = exported_module!(rhai_group_module); + engine.register_static_module("group", module.into()); +} + +pub fn register_account_functions(engine: &mut Engine) { + let module = exported_module!(rhai_account_module); + engine.register_static_module("account", module.into()); +} + +pub fn register_dnszone_functions(engine: &mut Engine) { + let module = exported_module!(rhai_dns_zone_module); + engine.register_static_module("dnszone", module.into()); +} + +/// Register all heroledger Rhai modules with the engine +pub fn register_heroledger_rhai_modules(engine: &mut Engine) { + register_user_functions(engine); + register_group_functions(engine); + register_account_functions(engine); + register_dnszone_functions(engine); +} + +// ============================================================================ +// CustomType Implementations (for type registration in Rhai) +// ============================================================================ + +impl CustomType for User { + fn build(mut builder: TypeBuilder) { + builder.with_name("User"); + } +} + +impl CustomType for Group { + fn build(mut builder: TypeBuilder) { + builder.with_name("Group"); + } +} + +impl CustomType for Account { + fn build(mut builder: TypeBuilder) { + builder.with_name("Account"); + } +} + +impl CustomType for DNSZone { + fn build(mut builder: TypeBuilder) { + builder.with_name("DNSZone"); + } +} diff --git a/lib/osiris/core/objects/heroledger/secretbox.rs b/lib/osiris/core/objects/heroledger/secretbox.rs new file mode 100644 index 0000000..19dd1e7 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/secretbox.rs @@ -0,0 +1,137 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; + +/// Category of the secret box +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum SecretBoxCategory { + Profile, +} + +impl Default for SecretBoxCategory { + fn default() -> Self { + SecretBoxCategory::Profile + } +} + +/// Status of a notary +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum NotaryStatus { + Active, + Inactive, + Suspended, + Archived, + Error, +} + +impl Default for NotaryStatus { + fn default() -> Self { + NotaryStatus::Active + } +} + +/// Represents an encrypted secret box for storing sensitive data +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SecretBox { + pub notary_id: u32, + pub value: String, + pub version: u16, + pub timestamp: u64, + pub cat: SecretBoxCategory, +} + +impl SecretBox { + pub fn new() -> Self { + Self { + notary_id: 0, + value: String::new(), + version: 1, + timestamp: 0, + cat: SecretBoxCategory::default(), + } + } + + pub fn notary_id(mut self, notary_id: u32) -> Self { + self.notary_id = notary_id; + self + } + + pub fn value(mut self, value: impl ToString) -> Self { + self.value = value.to_string(); + self + } + + pub fn version(mut self, version: u16) -> Self { + self.version = version; + self 
+ } + + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + pub fn cat(mut self, cat: SecretBoxCategory) -> Self { + self.cat = cat; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents a notary who can decrypt secret boxes +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Notary { + /// Base model data + pub base_data: BaseData, + #[index] + pub userid: u32, + pub status: NotaryStatus, + pub myceliumaddress: String, + #[index] + pub pubkey: String, +} + +impl Notary { + /// Create a new notary instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + userid: 0, + status: NotaryStatus::default(), + myceliumaddress: String::new(), + pubkey: String::new(), + } + } + + /// Set the user ID (fluent) + pub fn userid(mut self, userid: u32) -> Self { + self.userid = userid; + self + } + + /// Set the notary status (fluent) + pub fn status(mut self, status: NotaryStatus) -> Self { + self.status = status; + self + } + + /// Set the mycelium address (fluent) + pub fn myceliumaddress(mut self, myceliumaddress: impl ToString) -> Self { + self.myceliumaddress = myceliumaddress.to_string(); + self + } + + /// Set the public key (fluent) + pub fn pubkey(mut self, pubkey: impl ToString) -> Self { + self.pubkey = pubkey.to_string(); + self + } + + /// Build the final notary instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/signature.rs b/lib/osiris/core/objects/heroledger/signature.rs new file mode 100644 index 0000000..5411e94 --- /dev/null +++ b/lib/osiris/core/objects/heroledger/signature.rs @@ -0,0 +1,115 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; + +/// Status of a signature +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum SignatureStatus { + Active, + Inactive, + Pending, + Revoked, +} + +impl Default for SignatureStatus { + fn default() -> Self { + SignatureStatus::Pending + } +} + +/// Type of object being signed +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum ObjectType { + Account, + DNSRecord, + Membership, + User, + Transaction, + KYC, +} + +impl Default for ObjectType { + fn default() -> Self { + ObjectType::User + } +} + +/// Represents a cryptographic signature for various objects +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Signature { + /// Base model data + pub base_data: BaseData, + #[index] + pub signature_id: u32, + #[index] + pub user_id: u32, + pub value: String, + #[index] + pub objectid: u32, + pub objecttype: ObjectType, + pub status: SignatureStatus, + pub timestamp: u64, +} + +impl Signature { + /// Create a new signature instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + signature_id: 0, + user_id: 0, + value: String::new(), + objectid: 0, + objecttype: ObjectType::default(), + status: SignatureStatus::default(), + timestamp: 0, + } + } + + /// Set the signature ID (fluent) + pub fn signature_id(mut self, signature_id: u32) -> Self { + self.signature_id = signature_id; + self + } + + /// Set the user ID (fluent) + pub fn user_id(mut self, user_id: u32) -> Self { + self.user_id = user_id; + self + } + + /// Set the signature value (fluent) + pub fn value(mut self, value: impl ToString) -> Self { + self.value = value.to_string(); + self + } + + /// Set the object ID 
(fluent) + pub fn objectid(mut self, objectid: u32) -> Self { + self.objectid = objectid; + self + } + + /// Set the object type (fluent) + pub fn objecttype(mut self, objecttype: ObjectType) -> Self { + self.objecttype = objecttype; + self + } + + /// Set the signature status (fluent) + pub fn status(mut self, status: SignatureStatus) -> Self { + self.status = status; + self + } + + /// Set the timestamp (fluent) + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + /// Build the final signature instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/heroledger/user.rs b/lib/osiris/core/objects/heroledger/user.rs new file mode 100644 index 0000000..4bb06ac --- /dev/null +++ b/lib/osiris/core/objects/heroledger/user.rs @@ -0,0 +1,365 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Represents the status of a user in the system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum UserStatus { + Active, + Inactive, + Suspended, + Archived, +} + +impl Default for UserStatus { + fn default() -> Self { + UserStatus::Active + } +} + +/// Represents the KYC status of a user +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum KYCStatus { + Pending, + Approved, + Rejected, +} + +impl Default for KYCStatus { + fn default() -> Self { + KYCStatus::Pending + } +} + +/// User profile information +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct UserProfile { + pub user_id: u32, + pub full_name: String, + pub bio: String, + pub profile_pic: String, + pub links: HashMap, + pub metadata: HashMap, +} + +impl UserProfile { + pub fn new() -> Self { + Self { + user_id: 0, + full_name: String::new(), + bio: String::new(), + profile_pic: String::new(), + links: HashMap::new(), + metadata: HashMap::new(), + } + } + + pub fn user_id(mut self, user_id: u32) -> Self { + self.user_id = user_id; + self + } + + pub fn full_name(mut self, full_name: impl ToString) -> Self { + self.full_name = full_name.to_string(); + self + } + + pub fn bio(mut self, bio: impl ToString) -> Self { + self.bio = bio.to_string(); + self + } + + pub fn profile_pic(mut self, profile_pic: impl ToString) -> Self { + self.profile_pic = profile_pic.to_string(); + self + } + + pub fn add_link(mut self, key: impl ToString, value: impl ToString) -> Self { + self.links.insert(key.to_string(), value.to_string()); + self + } + + pub fn links(mut self, links: HashMap) -> Self { + self.links = links; + self + } + + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self + } + + pub fn metadata(mut self, metadata: HashMap) -> Self { + self.metadata = metadata; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// KYC (Know Your Customer) information for a user +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct KYCInfo { + pub user_id: u32, + pub full_name: String, + pub date_of_birth: u64, + pub address: String, + pub phone_number: String, + pub id_number: String, + pub id_type: String, + pub id_expiry: u64, + pub kyc_status: KYCStatus, + pub kyc_verified: bool, + pub kyc_verified_by: u32, + pub kyc_verified_at: u64, + pub kyc_rejected_reason: String, + pub kyc_signature: u32, + pub metadata: HashMap, +} + +impl KYCInfo { + pub fn new() -> Self { + Self { + user_id: 0, + full_name: String::new(), + date_of_birth: 0, + 
address: String::new(), + phone_number: String::new(), + id_number: String::new(), + id_type: String::new(), + id_expiry: 0, + kyc_status: KYCStatus::default(), + kyc_verified: false, + kyc_verified_by: 0, + kyc_verified_at: 0, + kyc_rejected_reason: String::new(), + kyc_signature: 0, + metadata: HashMap::new(), + } + } + + pub fn user_id(mut self, user_id: u32) -> Self { + self.user_id = user_id; + self + } + + pub fn full_name(mut self, full_name: impl ToString) -> Self { + self.full_name = full_name.to_string(); + self + } + + pub fn date_of_birth(mut self, date_of_birth: u64) -> Self { + self.date_of_birth = date_of_birth; + self + } + + pub fn address(mut self, address: impl ToString) -> Self { + self.address = address.to_string(); + self + } + + pub fn phone_number(mut self, phone_number: impl ToString) -> Self { + self.phone_number = phone_number.to_string(); + self + } + + pub fn id_number(mut self, id_number: impl ToString) -> Self { + self.id_number = id_number.to_string(); + self + } + + pub fn id_type(mut self, id_type: impl ToString) -> Self { + self.id_type = id_type.to_string(); + self + } + + pub fn id_expiry(mut self, id_expiry: u64) -> Self { + self.id_expiry = id_expiry; + self + } + + pub fn kyc_status(mut self, kyc_status: KYCStatus) -> Self { + self.kyc_status = kyc_status; + self + } + + pub fn kyc_verified(mut self, kyc_verified: bool) -> Self { + self.kyc_verified = kyc_verified; + self + } + + pub fn kyc_verified_by(mut self, kyc_verified_by: u32) -> Self { + self.kyc_verified_by = kyc_verified_by; + self + } + + pub fn kyc_verified_at(mut self, kyc_verified_at: u64) -> Self { + self.kyc_verified_at = kyc_verified_at; + self + } + + pub fn kyc_rejected_reason(mut self, kyc_rejected_reason: impl ToString) -> Self { + self.kyc_rejected_reason = kyc_rejected_reason.to_string(); + self + } + + pub fn kyc_signature(mut self, kyc_signature: u32) -> Self { + self.kyc_signature = kyc_signature; + self + } + + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self + } + + pub fn metadata(mut self, metadata: HashMap) -> Self { + self.metadata = metadata; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents a secret box for storing encrypted data +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SecretBox { + pub data: Vec, + pub nonce: Vec, +} + +impl SecretBox { + pub fn new() -> Self { + Self { + data: Vec::new(), + nonce: Vec::new(), + } + } + + pub fn data(mut self, data: Vec) -> Self { + self.data = data; + self + } + + pub fn nonce(mut self, nonce: Vec) -> Self { + self.nonce = nonce; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents a user in the heroledger system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, crate::DeriveObject)] +pub struct User { + /// Base model data + pub base_data: BaseData, + #[index] + pub username: String, + #[index] + pub pubkey: String, + pub email: Vec, + pub status: UserStatus, + pub userprofile: Vec, + pub kyc: Vec, +} + +impl Default for User { + fn default() -> Self { + Self { + base_data: BaseData::new(), + username: String::new(), + pubkey: String::new(), + email: Vec::new(), + status: UserStatus::default(), + userprofile: Vec::new(), + kyc: Vec::new(), + } + } +} + +impl User { + /// Create a new user instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + username: String::new(), + pubkey: String::new(), + 
email: Vec::new(),
+            status: UserStatus::default(),
+            userprofile: Vec::new(),
+            kyc: Vec::new(),
+        }
+    }
+
+    /// Get the user ID
+    pub fn id(&self) -> u32 {
+        self.base_data.id
+    }
+
+    /// Set the username (fluent)
+    pub fn username(mut self, username: impl ToString) -> Self {
+        self.username = username.to_string();
+        self
+    }
+
+    /// Set the public key (fluent)
+    pub fn pubkey(mut self, pubkey: impl ToString) -> Self {
+        self.pubkey = pubkey.to_string();
+        self
+    }
+
+    /// Add an email address (fluent)
+    pub fn add_email(mut self, email: impl ToString) -> Self {
+        self.email.push(email.to_string());
+        self
+    }
+
+    /// Set all email addresses (fluent)
+    pub fn email(mut self, email: Vec<String>) -> Self {
+        self.email = email;
+        self
+    }
+
+    /// Set the user status (fluent)
+    pub fn status(mut self, status: UserStatus) -> Self {
+        self.status = status;
+        self
+    }
+
+    /// Add a user profile secret box (fluent)
+    pub fn add_userprofile(mut self, profile: SecretBox) -> Self {
+        self.userprofile.push(profile);
+        self
+    }
+
+    /// Set all user profile secret boxes (fluent)
+    pub fn userprofile(mut self, userprofile: Vec<SecretBox>) -> Self {
+        self.userprofile = userprofile;
+        self
+    }
+
+    /// Add a KYC secret box (fluent)
+    pub fn add_kyc(mut self, kyc: SecretBox) -> Self {
+        self.kyc.push(kyc);
+        self
+    }
+
+    /// Set all KYC secret boxes (fluent)
+    pub fn kyc(mut self, kyc: Vec<SecretBox>) -> Self {
+        self.kyc = kyc;
+        self
+    }
+
+    /// Build the final user instance
+    pub fn build(self) -> Self {
+        self
+    }
+}
diff --git a/lib/osiris/core/objects/heroledger/user_kvs.rs b/lib/osiris/core/objects/heroledger/user_kvs.rs
new file mode 100644
index 0000000..582e8b7
--- /dev/null
+++ b/lib/osiris/core/objects/heroledger/user_kvs.rs
@@ -0,0 +1,111 @@
+use super::secretbox::SecretBox;
+use crate::store::{BaseData, IndexKey, Object};
+use serde::{Deserialize, Serialize};
+
+/// Represents a per-user key-value store
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct UserKVS {
+    /// Base model data
+    pub base_data: BaseData,
+    #[index]
+    pub userid: u32,
+    pub name: String,
+}
+
+impl UserKVS {
+    /// Create a new user KVS instance
+    pub fn new(id: u32) -> Self {
+        let mut base_data = BaseData::new();
+        // Record the caller-supplied id (same pattern as KycInfo::new).
+        base_data.id = id;
+        Self {
+            base_data,
+            userid: 0,
+            name: String::new(),
+        }
+    }
+
+    /// Set the user ID (fluent)
+    pub fn userid(mut self, userid: u32) -> Self {
+        self.userid = userid;
+        self
+    }
+
+    /// Set the KVS name (fluent)
+    pub fn name(mut self, name: impl ToString) -> Self {
+        self.name = name.to_string();
+        self
+    }
+
+    /// Build the final user KVS instance
+    pub fn build(self) -> Self {
+        self
+    }
+}
+
+/// Represents an item in a user's key-value store
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)]
+pub struct UserKVSItem {
+    /// Base model data
+    pub base_data: BaseData,
+    #[index]
+    pub userkvs_id: u32,
+    pub key: String,
+    pub value: String,
+    pub secretbox: Vec<SecretBox>,
+    pub timestamp: u64,
+}
+
+impl UserKVSItem {
+    /// Create a new user KVS item instance
+    pub fn new(id: u32) -> Self {
+        let mut base_data = BaseData::new();
+        // Record the caller-supplied id (same pattern as KycInfo::new).
+        base_data.id = id;
+        Self {
+            base_data,
+            userkvs_id: 0,
+            key: String::new(),
+            value: String::new(),
+            secretbox: Vec::new(),
+            timestamp: 0,
+        }
+    }
+
+    /// Set the user KVS ID (fluent)
+    pub fn userkvs_id(mut self, userkvs_id: u32) -> Self {
+        self.userkvs_id = userkvs_id;
+        self
+    }
+
+    /// Set the key (fluent)
+    pub fn key(mut self, key: impl ToString) -> Self {
+        self.key = key.to_string();
+        self
+    }
+
+    /// Set the value (fluent)
+    pub
fn value(mut self, value: impl ToString) -> Self { + self.value = value.to_string(); + self + } + + /// Add a secret box (fluent) + pub fn add_secretbox(mut self, secretbox: SecretBox) -> Self { + self.secretbox.push(secretbox); + self + } + + /// Set all secret boxes (fluent) + pub fn secretbox(mut self, secretbox: Vec) -> Self { + self.secretbox = secretbox; + self + } + + /// Set the timestamp (fluent) + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + /// Build the final user KVS item instance + pub fn build(self) -> Self { + self + } +} diff --git a/lib/osiris/core/objects/kyc/client.rs b/lib/osiris/core/objects/kyc/client.rs new file mode 100644 index 0000000..22bac7f --- /dev/null +++ b/lib/osiris/core/objects/kyc/client.rs @@ -0,0 +1,238 @@ +/// KYC Client +/// +/// Actual API client for making KYC provider API calls. +/// Currently implements Idenfy API but designed to be extensible for other providers. + +use serde::{Deserialize, Serialize}; +use super::{KycInfo, KycSession, session::SessionStatus}; + +/// KYC Client for making API calls to KYC providers +#[derive(Debug, Clone)] +pub struct KycClient { + /// Provider name (e.g., "idenfy", "sumsub", "onfido") + pub provider: String, + + /// API key + pub api_key: String, + + /// API secret + pub api_secret: String, + + /// Base URL for API (optional, uses provider default if not set) + pub base_url: Option, +} + +/// Idenfy-specific API request/response structures +#[derive(Debug, Serialize, Deserialize)] +pub struct IdenfyTokenRequest { + #[serde(rename = "clientId")] + pub client_id: String, + + #[serde(rename = "firstName")] + pub first_name: String, + + #[serde(rename = "lastName")] + pub last_name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub email: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub phone: Option, + + #[serde(rename = "dateOfBirth", skip_serializing_if = "Option::is_none")] + pub date_of_birth: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub nationality: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub address: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub city: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub country: Option, + + #[serde(rename = "zipCode", skip_serializing_if = "Option::is_none")] + pub zip_code: Option, + + #[serde(rename = "successUrl", skip_serializing_if = "Option::is_none")] + pub success_url: Option, + + #[serde(rename = "errorUrl", skip_serializing_if = "Option::is_none")] + pub error_url: Option, + + #[serde(rename = "callbackUrl", skip_serializing_if = "Option::is_none")] + pub callback_url: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub locale: Option, +} + +#[derive(Debug, Deserialize)] +pub struct IdenfyTokenResponse { + #[serde(rename = "authToken")] + pub auth_token: String, + + #[serde(rename = "scanRef")] + pub scan_ref: String, + + #[serde(rename = "clientId")] + pub client_id: String, +} + +#[derive(Debug, Deserialize)] +pub struct IdenfyVerificationStatus { + pub status: String, + + #[serde(rename = "scanRef")] + pub scan_ref: String, + + #[serde(rename = "clientId")] + pub client_id: String, +} + +impl KycClient { + /// Create a new KYC client + pub fn new(provider: String, api_key: String, api_secret: String) -> Self { + Self { + provider, + api_key, + api_secret, + base_url: None, + } + } + + /// Create an Idenfy client + pub fn idenfy(api_key: String, api_secret: String) -> Self { 
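+        // Convenience constructor: pins the provider name and the Idenfy
+        // production base URL; use with_base_url() below to point the client
+        // at a different environment instead.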
+ Self { + provider: "idenfy".to_string(), + api_key, + api_secret, + base_url: Some("https://ivs.idenfy.com/api/v2".to_string()), + } + } + + /// Set custom base URL + pub fn with_base_url(mut self, base_url: String) -> Self { + self.base_url = Some(base_url); + self + } + + /// Get the base URL for the provider + fn get_base_url(&self) -> String { + if let Some(url) = &self.base_url { + return url.clone(); + } + + match self.provider.as_str() { + "idenfy" => "https://ivs.idenfy.com/api/v2".to_string(), + "sumsub" => "https://api.sumsub.com".to_string(), + "onfido" => "https://api.onfido.com/v3".to_string(), + _ => panic!("Unknown provider: {}", self.provider), + } + } + + /// Create a verification session (Idenfy implementation) + pub async fn create_verification_session( + &self, + kyc_info: &KycInfo, + session: &mut KycSession, + ) -> Result> { + match self.provider.as_str() { + "idenfy" => self.create_idenfy_session(kyc_info, session).await, + _ => Err(format!("Provider {} not yet implemented", self.provider).into()), + } + } + + /// Create an Idenfy verification session + async fn create_idenfy_session( + &self, + kyc_info: &KycInfo, + session: &mut KycSession, + ) -> Result> { + let url = format!("{}/token", self.get_base_url()); + + let request = IdenfyTokenRequest { + client_id: kyc_info.client_id.clone(), + first_name: kyc_info.first_name.clone(), + last_name: kyc_info.last_name.clone(), + email: kyc_info.email.clone(), + phone: kyc_info.phone.clone(), + date_of_birth: kyc_info.date_of_birth.clone(), + nationality: kyc_info.nationality.clone(), + address: kyc_info.address.clone(), + city: kyc_info.city.clone(), + country: kyc_info.country.clone(), + zip_code: kyc_info.postal_code.clone(), + success_url: session.success_url.clone(), + error_url: session.error_url.clone(), + callback_url: session.callback_url.clone(), + locale: session.locale.clone(), + }; + + let client = reqwest::Client::new(); + let response = client + .post(&url) + .basic_auth(&self.api_key, Some(&self.api_secret)) + .json(&request) + .send() + .await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + return Err(format!("Idenfy API error: {}", error_text).into()); + } + + let token_response: IdenfyTokenResponse = response.json().await?; + + // Update session with token and URL + session.set_session_token(token_response.auth_token.clone()); + + // Construct verification URL + let verification_url = format!( + "https://ivs.idenfy.com/api/v2/redirect?authToken={}", + token_response.auth_token + ); + session.set_verification_url(verification_url.clone()); + session.set_status(SessionStatus::Active); + + Ok(verification_url) + } + + /// Get verification status (Idenfy implementation) + pub async fn get_verification_status( + &self, + scan_ref: &str, + ) -> Result> { + match self.provider.as_str() { + "idenfy" => self.get_idenfy_status(scan_ref).await, + _ => Err(format!("Provider {} not yet implemented", self.provider).into()), + } + } + + /// Get Idenfy verification status + async fn get_idenfy_status( + &self, + scan_ref: &str, + ) -> Result> { + let url = format!("{}/status/{}", self.get_base_url(), scan_ref); + + let client = reqwest::Client::new(); + let response = client + .get(&url) + .basic_auth(&self.api_key, Some(&self.api_secret)) + .send() + .await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + return Err(format!("Idenfy API error: {}", error_text).into()); + } + + let status: IdenfyVerificationStatus = response.json().await?; 
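+        // Usage sketch (assumption: caller is in an async context and
+        // `scan_ref` came from an earlier create_verification_session call):
+        //
+        //     let client = KycClient::idenfy(api_key, api_secret);
+        //     let status = client.get_verification_status(&scan_ref).await?;
+        //     println!("verification status: {}", status.status);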
+        Ok(status)
+    }
+}
diff --git a/lib/osiris/core/objects/kyc/info.rs b/lib/osiris/core/objects/kyc/info.rs
new file mode 100644
index 0000000..f2ece62
--- /dev/null
+++ b/lib/osiris/core/objects/kyc/info.rs
@@ -0,0 +1,319 @@
+/// KYC Info Object
+///
+/// Represents customer/person information for KYC verification.
+/// Designed to be provider-agnostic but follows Idenfy API patterns.
+
+use crate::store::{BaseData, Object, Storable};
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)]
+pub struct KycInfo {
+    #[serde(flatten)]
+    pub base_data: BaseData,
+
+    /// External client ID (from your system) - links to User
+    pub client_id: String,
+
+    /// Full name (or separate first/last)
+    pub full_name: String,
+
+    /// First name
+    pub first_name: String,
+
+    /// Last name
+    pub last_name: String,
+
+    /// Email address
+    pub email: Option<String>,
+
+    /// Phone number
+    pub phone: Option<String>,
+
+    /// Date of birth (YYYY-MM-DD string)
+    pub date_of_birth: Option<String>,
+
+    /// Date of birth as unix timestamp
+    pub date_of_birth_timestamp: Option<u64>,
+
+    /// Nationality (ISO 3166-1 alpha-2 code)
+    pub nationality: Option<String>,
+
+    /// Address
+    pub address: Option<String>,
+
+    /// City
+    pub city: Option<String>,
+
+    /// Country (ISO 3166-1 alpha-2 code)
+    pub country: Option<String>,
+
+    /// Postal code
+    pub postal_code: Option<String>,
+
+    /// ID document number
+    pub id_number: Option<String>,
+
+    /// ID document type (passport, drivers_license, national_id, etc.)
+    pub id_type: Option<String>,
+
+    /// ID document expiry (unix timestamp)
+    pub id_expiry: Option<u64>,
+
+    /// KYC provider (e.g., "idenfy", "sumsub", "onfido")
+    pub provider: String,
+
+    /// Provider-specific client ID (assigned by KYC provider)
+    pub provider_client_id: Option<String>,
+
+    /// Current verification status
+    pub verification_status: VerificationStatus,
+
+    /// Whether KYC is verified
+    pub kyc_verified: bool,
+
+    /// User ID who verified this KYC
+    pub kyc_verified_by: Option<u32>,
+
+    /// Timestamp when KYC was verified
+    pub kyc_verified_at: Option<u64>,
+
+    /// Reason for rejection if denied
+    pub kyc_rejected_reason: Option<String>,
+
+    /// Signature ID for verification record
+    pub kyc_signature: Option<u32>,
+
+    /// Additional metadata
+    #[serde(default)]
+    pub metadata: std::collections::HashMap<String, String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "UPPERCASE")]
+pub enum VerificationStatus {
+    /// Not yet started
+    Pending,
+    /// Verification in progress
+    Processing,
+    /// Successfully verified
+    Approved,
+    /// Verification failed
+    Denied,
+    /// Verification expired
+    Expired,
+    /// Requires manual review
+    Review,
+}
+
+impl Default for VerificationStatus {
+    fn default() -> Self {
+        VerificationStatus::Pending
+    }
+}
+
+impl KycInfo {
+    /// Create a new KYC info object
+    pub fn new(id: u32) -> Self {
+        let mut base_data = BaseData::new();
+        base_data.id = id;
+        Self {
+            base_data,
+            client_id: String::new(),
+            full_name: String::new(),
+            first_name: String::new(),
+            last_name: String::new(),
+            email: None,
+            phone: None,
+            date_of_birth: None,
+            date_of_birth_timestamp: None,
+            nationality: None,
+            address: None,
+            city: None,
+            country: None,
+            postal_code: None,
+            id_number: None,
+            id_type: None,
+            id_expiry: None,
+            provider: "idenfy".to_string(), // Default to Idenfy
+            provider_client_id: None,
+            verification_status: VerificationStatus::default(),
+            kyc_verified: false,
+            kyc_verified_by: None,
+            kyc_verified_at: None,
+            kyc_rejected_reason: None,
+            kyc_signature: None,
+            metadata:
std::collections::HashMap::new(), + } + } + + /// Builder: Set client ID + pub fn client_id(mut self, client_id: String) -> Self { + self.client_id = client_id; + self.base_data.update_modified(); + self + } + + /// Builder: Set full name + pub fn full_name(mut self, full_name: String) -> Self { + self.full_name = full_name.clone(); + // Try to split into first/last if not already set + if self.first_name.is_empty() && self.last_name.is_empty() { + let parts: Vec<&str> = full_name.split_whitespace().collect(); + if parts.len() >= 2 { + self.first_name = parts[0].to_string(); + self.last_name = parts[1..].join(" "); + } else if parts.len() == 1 { + self.first_name = parts[0].to_string(); + } + } + self.base_data.update_modified(); + self + } + + /// Builder: Set first name + pub fn first_name(mut self, first_name: String) -> Self { + self.first_name = first_name.clone(); + // Update full_name if last_name exists + if !self.last_name.is_empty() { + self.full_name = format!("{} {}", first_name, self.last_name); + } else { + self.full_name = first_name; + } + self.base_data.update_modified(); + self + } + + /// Builder: Set last name + pub fn last_name(mut self, last_name: String) -> Self { + self.last_name = last_name.clone(); + // Update full_name if first_name exists + if !self.first_name.is_empty() { + self.full_name = format!("{} {}", self.first_name, last_name); + } else { + self.full_name = last_name; + } + self.base_data.update_modified(); + self + } + + /// Builder: Set email + pub fn email(mut self, email: String) -> Self { + self.email = Some(email); + self.base_data.update_modified(); + self + } + + /// Builder: Set phone + pub fn phone(mut self, phone: String) -> Self { + self.phone = Some(phone); + self.base_data.update_modified(); + self + } + + /// Builder: Set date of birth + pub fn date_of_birth(mut self, dob: String) -> Self { + self.date_of_birth = Some(dob); + self.base_data.update_modified(); + self + } + + /// Builder: Set nationality + pub fn nationality(mut self, nationality: String) -> Self { + self.nationality = Some(nationality); + self.base_data.update_modified(); + self + } + + /// Builder: Set address + pub fn address(mut self, address: String) -> Self { + self.address = Some(address); + self.base_data.update_modified(); + self + } + + /// Builder: Set city + pub fn city(mut self, city: String) -> Self { + self.city = Some(city); + self.base_data.update_modified(); + self + } + + /// Builder: Set country + pub fn country(mut self, country: String) -> Self { + self.country = Some(country); + self.base_data.update_modified(); + self + } + + /// Builder: Set postal code + pub fn postal_code(mut self, postal_code: String) -> Self { + self.postal_code = Some(postal_code); + self.base_data.update_modified(); + self + } + + /// Builder: Set ID number + pub fn id_number(mut self, id_number: String) -> Self { + self.id_number = Some(id_number); + self.base_data.update_modified(); + self + } + + /// Builder: Set ID type + pub fn id_type(mut self, id_type: String) -> Self { + self.id_type = Some(id_type); + self.base_data.update_modified(); + self + } + + /// Builder: Set ID expiry + pub fn id_expiry(mut self, id_expiry: u64) -> Self { + self.id_expiry = Some(id_expiry); + self.base_data.update_modified(); + self + } + + /// Builder: Set KYC provider + pub fn provider(mut self, provider: String) -> Self { + self.provider = provider; + self.base_data.update_modified(); + self + } + + /// Set provider client ID (assigned by KYC provider) + pub fn set_provider_client_id(&mut 
self, provider_client_id: String) { + self.provider_client_id = Some(provider_client_id); + self.base_data.update_modified(); + } + + /// Set verification status + pub fn set_verification_status(&mut self, status: VerificationStatus) { + self.verification_status = status; + self.base_data.update_modified(); + } + + /// Set KYC verified + pub fn set_kyc_verified(&mut self, verified: bool, verified_by: Option) { + self.kyc_verified = verified; + self.kyc_verified_by = verified_by; + self.kyc_verified_at = Some(std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs()); + self.base_data.update_modified(); + } + + /// Set KYC rejected + pub fn set_kyc_rejected(&mut self, reason: String) { + self.kyc_verified = false; + self.kyc_rejected_reason = Some(reason); + self.verification_status = VerificationStatus::Denied; + self.base_data.update_modified(); + } + + /// Add metadata + pub fn add_metadata(&mut self, key: String, value: String) { + self.metadata.insert(key, value); + self.base_data.update_modified(); + } +} diff --git a/lib/osiris/core/objects/kyc/mod.rs b/lib/osiris/core/objects/kyc/mod.rs new file mode 100644 index 0000000..91364b8 --- /dev/null +++ b/lib/osiris/core/objects/kyc/mod.rs @@ -0,0 +1,13 @@ +/// KYC (Know Your Customer) Module +/// +/// Provides generic KYC client and session management. +/// Designed to work with multiple KYC providers (Idenfy, Sumsub, Onfido, etc.) + +pub mod info; +pub mod client; +pub mod session; +pub mod rhai; + +pub use info::{KycInfo, VerificationStatus}; +pub use client::KycClient; +pub use session::{KycSession, SessionStatus, SessionResult}; diff --git a/lib/osiris/core/objects/kyc/rhai.rs b/lib/osiris/core/objects/kyc/rhai.rs new file mode 100644 index 0000000..72fff60 --- /dev/null +++ b/lib/osiris/core/objects/kyc/rhai.rs @@ -0,0 +1,376 @@ +/// Rhai bindings for KYC objects + +use ::rhai::plugin::*; +use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder}; + +use super::info::{KycInfo, VerificationStatus}; +use super::session::{KycSession, SessionStatus}; +use super::client::KycClient; + +// ============================================================================ +// KYC Info Module +// ============================================================================ + +type RhaiKycInfo = KycInfo; + +#[export_module] +mod rhai_kyc_info_module { + use super::RhaiKycInfo; + + #[rhai_fn(name = "new_kyc_info", return_raw)] + pub fn new_kyc_info() -> Result> { + Ok(KycInfo::new(0)) + } + + #[rhai_fn(name = "client_id", return_raw)] + pub fn set_client_id( + info: &mut RhaiKycInfo, + client_id: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.client_id(client_id); + Ok(info.clone()) + } + + #[rhai_fn(name = "first_name", return_raw)] + pub fn set_first_name( + info: &mut RhaiKycInfo, + first_name: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.first_name(first_name); + Ok(info.clone()) + } + + #[rhai_fn(name = "last_name", return_raw)] + pub fn set_last_name( + info: &mut RhaiKycInfo, + last_name: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.last_name(last_name); + Ok(info.clone()) + } + + #[rhai_fn(name = "email", return_raw)] + pub fn set_email( + info: &mut RhaiKycInfo, + email: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.email(email); + Ok(info.clone()) + } + + #[rhai_fn(name = "phone", return_raw)] + pub fn set_phone( + info: &mut RhaiKycInfo, + phone: String, + ) -> 
Result> { + let owned = std::mem::take(info); + *info = owned.phone(phone); + Ok(info.clone()) + } + + #[rhai_fn(name = "date_of_birth", return_raw)] + pub fn set_date_of_birth( + info: &mut RhaiKycInfo, + dob: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.date_of_birth(dob); + Ok(info.clone()) + } + + #[rhai_fn(name = "nationality", return_raw)] + pub fn set_nationality( + info: &mut RhaiKycInfo, + nationality: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.nationality(nationality); + Ok(info.clone()) + } + + #[rhai_fn(name = "address", return_raw)] + pub fn set_address( + info: &mut RhaiKycInfo, + address: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.address(address); + Ok(info.clone()) + } + + #[rhai_fn(name = "city", return_raw)] + pub fn set_city( + info: &mut RhaiKycInfo, + city: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.city(city); + Ok(info.clone()) + } + + #[rhai_fn(name = "country", return_raw)] + pub fn set_country( + info: &mut RhaiKycInfo, + country: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.country(country); + Ok(info.clone()) + } + + #[rhai_fn(name = "postal_code", return_raw)] + pub fn set_postal_code( + info: &mut RhaiKycInfo, + postal_code: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.postal_code(postal_code); + Ok(info.clone()) + } + + #[rhai_fn(name = "provider", return_raw)] + pub fn set_provider( + info: &mut RhaiKycInfo, + provider: String, + ) -> Result> { + let owned = std::mem::take(info); + *info = owned.provider(provider); + Ok(info.clone()) + } + + #[rhai_fn(name = "document_type", return_raw)] + pub fn set_document_type( + info: &mut RhaiKycInfo, + doc_type: String, + ) -> Result> { + // Store in provider field for now (or add to KycInfo struct) + let provider = info.provider.clone(); + let owned = std::mem::take(info); + *info = owned.provider(format!("{}|doc_type:{}", provider, doc_type)); + Ok(info.clone()) + } + + #[rhai_fn(name = "document_number", return_raw)] + pub fn set_document_number( + info: &mut RhaiKycInfo, + doc_number: String, + ) -> Result> { + // Store in provider field for now (or add to KycInfo struct) + let provider = info.provider.clone(); + let owned = std::mem::take(info); + *info = owned.provider(format!("{}|doc_num:{}", provider, doc_number)); + Ok(info.clone()) + } + + #[rhai_fn(name = "verified", return_raw)] + pub fn set_verified( + info: &mut RhaiKycInfo, + _verified: bool, + ) -> Result> { + // Mark as verified in provider field + let provider = info.provider.clone(); + let owned = std::mem::take(info); + *info = owned.provider(format!("{}|verified", provider)); + Ok(info.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(info: &mut RhaiKycInfo) -> u32 { + info.base_data.id + } + + #[rhai_fn(name = "get_client_id")] + pub fn get_client_id(info: &mut RhaiKycInfo) -> String { + info.client_id.clone() + } + + #[rhai_fn(name = "get_first_name")] + pub fn get_first_name(info: &mut RhaiKycInfo) -> String { + info.first_name.clone() + } + + #[rhai_fn(name = "get_last_name")] + pub fn get_last_name(info: &mut RhaiKycInfo) -> String { + info.last_name.clone() + } + + #[rhai_fn(name = "get_email")] + pub fn get_email(info: &mut RhaiKycInfo) -> String { + info.email.clone().unwrap_or_default() + } + + #[rhai_fn(name = "get_provider")] + pub fn get_provider(info: &mut RhaiKycInfo) -> String { + info.provider.clone() + } +} + 
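+// Example (hedged sketch): once these modules are registered on an engine,
+// a Rhai script can build a KYC record fluently; the field values below are
+// illustrative only:
+//
+//     let info = new_kyc_info()
+//         .client_id("user-42")
+//         .first_name("Ada")
+//         .last_name("Lovelace")
+//         .email("ada@example.com")
+//         .country("GB");
+//     print(info.get_client_id());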
+// ============================================================================ +// KYC Session Module +// ============================================================================ + +type RhaiKycSession = KycSession; + +#[export_module] +mod rhai_kyc_session_module { + use super::RhaiKycSession; + + #[rhai_fn(name = "new_kyc_session", return_raw)] + pub fn new_kyc_session( + client_id: String, + provider: String, + ) -> Result> { + Ok(KycSession::new(0, client_id, provider)) + } + + #[rhai_fn(name = "callback_url", return_raw)] + pub fn set_callback_url( + session: &mut RhaiKycSession, + url: String, + ) -> Result> { + let owned = std::mem::take(session); + *session = owned.callback_url(url); + Ok(session.clone()) + } + + #[rhai_fn(name = "success_url", return_raw)] + pub fn set_success_url( + session: &mut RhaiKycSession, + url: String, + ) -> Result> { + let owned = std::mem::take(session); + *session = owned.success_url(url); + Ok(session.clone()) + } + + #[rhai_fn(name = "error_url", return_raw)] + pub fn set_error_url( + session: &mut RhaiKycSession, + url: String, + ) -> Result> { + let owned = std::mem::take(session); + *session = owned.error_url(url); + Ok(session.clone()) + } + + #[rhai_fn(name = "locale", return_raw)] + pub fn set_locale( + session: &mut RhaiKycSession, + locale: String, + ) -> Result> { + let owned = std::mem::take(session); + *session = owned.locale(locale); + Ok(session.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(session: &mut RhaiKycSession) -> u32 { + session.base_data.id + } + + #[rhai_fn(name = "get_client_id")] + pub fn get_client_id(session: &mut RhaiKycSession) -> String { + session.client_id.clone() + } + + #[rhai_fn(name = "get_provider")] + pub fn get_provider(session: &mut RhaiKycSession) -> String { + session.provider.clone() + } + + #[rhai_fn(name = "get_verification_url")] + pub fn get_verification_url(session: &mut RhaiKycSession) -> String { + session.verification_url.clone().unwrap_or_default() + } +} + +// ============================================================================ +// KYC Client Module +// ============================================================================ + +type RhaiKycClient = KycClient; + +#[export_module] +mod rhai_kyc_client_module { + use super::RhaiKycClient; + use super::RhaiKycInfo; + use super::RhaiKycSession; + use ::rhai::EvalAltResult; + + #[rhai_fn(name = "new_kyc_client_idenfy", return_raw)] + pub fn new_idenfy_client( + api_key: String, + api_secret: String, + ) -> Result> { + Ok(KycClient::idenfy(api_key, api_secret)) + } + + #[rhai_fn(name = "create_verification_session", return_raw)] + pub fn create_verification_session( + client: &mut RhaiKycClient, + kyc_info: RhaiKycInfo, + session: RhaiKycSession, + ) -> Result> { + // Need to use tokio runtime for async call + let rt = tokio::runtime::Runtime::new() + .map_err(|e| format!("Failed to create runtime: {}", e))?; + + let mut session_mut = session.clone(); + let url = rt.block_on(client.create_verification_session(&kyc_info, &mut session_mut)) + .map_err(|e| format!("Failed to create verification session: {}", e))?; + + Ok(url) + } +} + +// ============================================================================ +// Registration Functions +// ============================================================================ + +/// Register KYC modules into a Rhai Module (for use in packages) +pub fn register_kyc_modules(parent_module: &mut Module) { + // Register custom types + 
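+    // (Registration is additive: the exported_module! blocks below are merged
+    // into the caller-supplied parent module, so this can safely be combined
+    // with other object registrations.)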
parent_module.set_custom_type::("KycInfo"); + parent_module.set_custom_type::("KycSession"); + parent_module.set_custom_type::("KycClient"); + + // Merge KYC info functions + let info_module = exported_module!(rhai_kyc_info_module); + parent_module.merge(&info_module); + + // Merge KYC session functions + let session_module = exported_module!(rhai_kyc_session_module); + parent_module.merge(&session_module); + + // Merge KYC client functions + let client_module = exported_module!(rhai_kyc_client_module); + parent_module.merge(&client_module); +} + +// ============================================================================ +// CustomType Implementations +// ============================================================================ + +impl CustomType for KycInfo { + fn build(mut builder: TypeBuilder) { + builder.with_name("KycInfo"); + } +} + +impl CustomType for KycSession { + fn build(mut builder: TypeBuilder) { + builder.with_name("KycSession"); + } +} + +impl CustomType for KycClient { + fn build(mut builder: TypeBuilder) { + builder.with_name("KycClient"); + } +} diff --git a/lib/osiris/core/objects/kyc/session.rs b/lib/osiris/core/objects/kyc/session.rs new file mode 100644 index 0000000..ac8fd93 --- /dev/null +++ b/lib/osiris/core/objects/kyc/session.rs @@ -0,0 +1,186 @@ +/// KYC Verification Session +/// +/// Represents a verification session for a KYC client. +/// Follows Idenfy API patterns but is provider-agnostic. + +use crate::store::{BaseData, Object, Storable}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, Default, crate::DeriveObject)] +pub struct KycSession { + #[serde(flatten)] + pub base_data: BaseData, + + /// Reference to the KYC client + pub client_id: String, + + /// KYC provider + pub provider: String, + + /// Session token/ID from provider + pub session_token: Option, + + /// Verification URL for the client + pub verification_url: Option, + + /// Session status + pub status: SessionStatus, + + /// Session expiration timestamp + pub expires_at: Option, + + /// Callback URL for webhook notifications + pub callback_url: Option, + + /// Success redirect URL + pub success_url: Option, + + /// Error redirect URL + pub error_url: Option, + + /// Locale (e.g., "en", "de", "fr") + pub locale: Option, + + /// Provider-specific configuration + #[serde(default)] + pub provider_config: std::collections::HashMap, + + /// Session result data + pub result: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +#[serde(rename_all = "UPPERCASE")] +pub enum SessionStatus { + /// Session created but not started + #[default] + Created, + /// Client is currently verifying + Active, + /// Session completed successfully + Completed, + /// Session failed + Failed, + /// Session expired + Expired, + /// Session cancelled + Cancelled, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SessionResult { + /// Overall verification status + pub status: String, + + /// Verification score (0-100) + pub score: Option, + + /// Reason for denial (if denied) + pub denial_reason: Option, + + /// Document type verified + pub document_type: Option, + + /// Document number + pub document_number: Option, + + /// Document issuing country + pub document_country: Option, + + /// Face match result + pub face_match: Option, + + /// Liveness check result + pub liveness_check: Option, + + /// Additional provider-specific data + #[serde(default)] + pub provider_data: std::collections::HashMap, +} + +impl KycSession { + /// 
Create a new KYC session + pub fn new(id: u32, client_id: String, provider: String) -> Self { + let mut base_data = BaseData::new(); + base_data.id = id; + Self { + base_data, + client_id, + provider, + session_token: None, + verification_url: None, + status: SessionStatus::Created, + expires_at: None, + callback_url: None, + success_url: None, + error_url: None, + locale: None, + provider_config: std::collections::HashMap::new(), + result: None, + } + } + + /// Builder: Set callback URL + pub fn callback_url(mut self, url: String) -> Self { + self.callback_url = Some(url); + self.base_data.update_modified(); + self + } + + /// Builder: Set success URL + pub fn success_url(mut self, url: String) -> Self { + self.success_url = Some(url); + self.base_data.update_modified(); + self + } + + /// Builder: Set error URL + pub fn error_url(mut self, url: String) -> Self { + self.error_url = Some(url); + self.base_data.update_modified(); + self + } + + /// Builder: Set locale + pub fn locale(mut self, locale: String) -> Self { + self.locale = Some(locale); + self.base_data.update_modified(); + self + } + + /// Set session token from provider + pub fn set_session_token(&mut self, token: String) { + self.session_token = Some(token); + self.base_data.update_modified(); + } + + /// Set verification URL + pub fn set_verification_url(&mut self, url: String) { + self.verification_url = Some(url); + self.base_data.update_modified(); + } + + /// Set session status + pub fn set_status(&mut self, status: SessionStatus) { + self.status = status; + self.base_data.update_modified(); + } + + /// Set expiration timestamp + pub fn set_expires_at(&mut self, timestamp: i64) { + self.expires_at = Some(timestamp); + self.base_data.update_modified(); + } + + /// Set session result + pub fn set_result(&mut self, result: SessionResult) { + self.result = Some(result); + self.base_data.update_modified(); + } + + /// Add provider-specific configuration + pub fn add_provider_config(&mut self, key: String, value: String) { + self.provider_config.insert(key, value); + self.base_data.update_modified(); + } +} diff --git a/lib/osiris/core/objects/legal/contract.rs b/lib/osiris/core/objects/legal/contract.rs new file mode 100644 index 0000000..5c1a0c1 --- /dev/null +++ b/lib/osiris/core/objects/legal/contract.rs @@ -0,0 +1,129 @@ +/// Legal Contract Object +/// +/// Simple contract object with signatures for legal agreements + +use crate::store::{BaseData, Object}; +use serde::{Deserialize, Serialize}; + +/// Contract status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum ContractStatus { + Draft, + Active, + Completed, + Cancelled, +} + +impl Default for ContractStatus { + fn default() -> Self { + ContractStatus::Draft + } +} + +/// Legal contract with signatures +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, crate::DeriveObject)] +pub struct Contract { + /// Base data for object storage + pub base_data: BaseData, + + /// Contract title + pub title: String, + + /// Contract content/terms + pub content: String, + + /// Contract status + pub status: ContractStatus, + + /// List of signature IDs (references to Signature objects) + pub signatures: Vec, + + /// Creator user ID + pub creator_id: u32, + + /// Expiry timestamp (optional) + pub expires_at: Option, +} + +impl Contract { + /// Create a new contract + pub fn new(id: u32) -> Self { + let base_data = BaseData::with_id(id, String::new()); + Self { + base_data, + title: String::new(), + content: String::new(), + status: 
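+            // New contracts start in Draft (see `impl Default for ContractStatus`).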
ContractStatus::default(), + signatures: Vec::new(), + creator_id: 0, + expires_at: None, + } + } + + /// Set the title (fluent) + pub fn title(mut self, title: impl ToString) -> Self { + self.title = title.to_string(); + self + } + + /// Set the content (fluent) + pub fn content(mut self, content: impl ToString) -> Self { + self.content = content.to_string(); + self + } + + /// Set the status (fluent) + pub fn status(mut self, status: ContractStatus) -> Self { + self.status = status; + self + } + + /// Set the creator ID (fluent) + pub fn creator_id(mut self, creator_id: u32) -> Self { + self.creator_id = creator_id; + self + } + + /// Set the expiry timestamp (fluent) + pub fn expires_at(mut self, expires_at: u64) -> Self { + self.expires_at = Some(expires_at); + self + } + + /// Add a signature (fluent) + pub fn add_signature(mut self, signature_id: u32) -> Self { + if !self.signatures.contains(&signature_id) { + self.signatures.push(signature_id); + } + self + } + + /// Remove a signature (fluent) + pub fn remove_signature(mut self, signature_id: u32) -> Self { + self.signatures.retain(|&id| id != signature_id); + self + } + + /// Check if all required signatures are present + pub fn is_fully_signed(&self, required_count: usize) -> bool { + self.signatures.len() >= required_count + } + + /// Activate the contract + pub fn activate(mut self) -> Self { + self.status = ContractStatus::Active; + self + } + + /// Complete the contract + pub fn complete(mut self) -> Self { + self.status = ContractStatus::Completed; + self + } + + /// Cancel the contract + pub fn cancel(mut self) -> Self { + self.status = ContractStatus::Cancelled; + self + } +} diff --git a/lib/osiris/core/objects/legal/mod.rs b/lib/osiris/core/objects/legal/mod.rs new file mode 100644 index 0000000..323fe5f --- /dev/null +++ b/lib/osiris/core/objects/legal/mod.rs @@ -0,0 +1,7 @@ +/// Legal module for contracts and legal documents + +pub mod contract; +pub mod rhai; + +pub use contract::{Contract, ContractStatus}; +pub use rhai::register_legal_modules; diff --git a/lib/osiris/core/objects/legal/rhai.rs b/lib/osiris/core/objects/legal/rhai.rs new file mode 100644 index 0000000..f17774d --- /dev/null +++ b/lib/osiris/core/objects/legal/rhai.rs @@ -0,0 +1,150 @@ +/// Rhai bindings for Legal objects (Contract) + +use ::rhai::plugin::*; +use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder}; + +use super::{Contract, ContractStatus}; + +/// Register legal modules with the Rhai engine +pub fn register_legal_modules(parent_module: &mut Module) { + // Register custom types + parent_module.set_custom_type::("Contract"); + parent_module.set_custom_type::("ContractStatus"); + + // Merge contract functions + let contract_module = exported_module!(rhai_contract_module); + parent_module.merge(&contract_module); +} + +// ============================================================================ +// Contract Module +// ============================================================================ + +type RhaiContract = Contract; +type RhaiContractStatus = ContractStatus; + +#[export_module] +mod rhai_contract_module { + use super::{RhaiContract, RhaiContractStatus}; + use super::super::{Contract, ContractStatus}; + use ::rhai::EvalAltResult; + + // Contract constructor + #[rhai_fn(name = "new_contract", return_raw)] + pub fn new_contract(id: i64) -> Result> { + Ok(Contract::new(id as u32)) + } + + // Builder methods + #[rhai_fn(name = "title", return_raw)] + pub fn set_title( + contract: RhaiContract, + title: String, 
+ ) -> Result> { + Ok(contract.title(title)) + } + + #[rhai_fn(name = "content", return_raw)] + pub fn set_content( + contract: RhaiContract, + content: String, + ) -> Result> { + Ok(contract.content(content)) + } + + #[rhai_fn(name = "creator_id", return_raw)] + pub fn set_creator_id( + contract: RhaiContract, + creator_id: i64, + ) -> Result> { + Ok(contract.creator_id(creator_id as u32)) + } + + #[rhai_fn(name = "expires_at", return_raw)] + pub fn set_expires_at( + contract: RhaiContract, + expires_at: i64, + ) -> Result> { + Ok(contract.expires_at(expires_at as u64)) + } + + #[rhai_fn(name = "add_signature", return_raw)] + pub fn add_signature( + contract: RhaiContract, + signature_id: i64, + ) -> Result> { + Ok(contract.add_signature(signature_id as u32)) + } + + #[rhai_fn(name = "remove_signature", return_raw)] + pub fn remove_signature( + contract: RhaiContract, + signature_id: i64, + ) -> Result> { + Ok(contract.remove_signature(signature_id as u32)) + } + + // State management methods + #[rhai_fn(name = "activate", return_raw)] + pub fn activate(contract: RhaiContract) -> Result> { + Ok(contract.activate()) + } + + #[rhai_fn(name = "complete", return_raw)] + pub fn complete(contract: RhaiContract) -> Result> { + Ok(contract.complete()) + } + + #[rhai_fn(name = "cancel", return_raw)] + pub fn cancel(contract: RhaiContract) -> Result> { + Ok(contract.cancel()) + } + + // Query methods + #[rhai_fn(name = "is_fully_signed", pure)] + pub fn is_fully_signed(contract: &mut RhaiContract, required_count: i64) -> bool { + contract.is_fully_signed(required_count as usize) + } + + // Getters + #[rhai_fn(name = "title", pure)] + pub fn get_title(contract: &mut RhaiContract) -> String { + contract.title.clone() + } + + #[rhai_fn(name = "content", pure)] + pub fn get_content(contract: &mut RhaiContract) -> String { + contract.content.clone() + } + + #[rhai_fn(name = "status", pure)] + pub fn get_status(contract: &mut RhaiContract) -> String { + format!("{:?}", contract.status) + } + + #[rhai_fn(name = "creator_id", pure)] + pub fn get_creator_id(contract: &mut RhaiContract) -> i64 { + contract.creator_id as i64 + } + + #[rhai_fn(name = "signature_count", pure)] + pub fn get_signature_count(contract: &mut RhaiContract) -> i64 { + contract.signatures.len() as i64 + } +} + +// ============================================================================ +// CustomType Implementations +// ============================================================================ + +impl CustomType for Contract { + fn build(mut builder: TypeBuilder) { + builder.with_name("Contract"); + } +} + +impl CustomType for ContractStatus { + fn build(mut builder: TypeBuilder) { + builder.with_name("ContractStatus"); + } +} diff --git a/lib/osiris/core/objects/mod.rs b/lib/osiris/core/objects/mod.rs new file mode 100644 index 0000000..bfd73dc --- /dev/null +++ b/lib/osiris/core/objects/mod.rs @@ -0,0 +1,19 @@ +pub mod accounting; +pub mod communication; +pub mod event; +pub mod flow; +pub mod grid4; +pub mod heroledger; +pub mod kyc; +pub mod legal; +pub mod money; +pub mod note; +pub mod supervisor; + +pub use note::Note; +pub use event::Event; +pub use kyc::{KycInfo, KycSession}; +pub use flow::{FlowTemplate, FlowInstance}; +pub use communication::{Verification, EmailClient}; +pub use money::{Account, Asset, Transaction, PaymentClient}; +pub use legal::{Contract, ContractStatus}; diff --git a/lib/osiris/core/objects/money/mod.rs b/lib/osiris/core/objects/money/mod.rs new file mode 100644 index 0000000..b0d63ee --- /dev/null 
+++ b/lib/osiris/core/objects/money/mod.rs @@ -0,0 +1,10 @@ +/// Money Module +/// +/// Financial objects including accounts, assets, transactions, and payment providers. + +pub mod models; +pub mod rhai; +pub mod payments; + +pub use models::{Account, Asset, Transaction, AccountStatus, TransactionType, Signature, AccountPolicyItem}; +pub use payments::{PaymentClient, PaymentRequest, PaymentResponse, PaymentStatus}; diff --git a/lib/osiris/core/objects/money/models.rs b/lib/osiris/core/objects/money/models.rs new file mode 100644 index 0000000..55c4499 --- /dev/null +++ b/lib/osiris/core/objects/money/models.rs @@ -0,0 +1,498 @@ +use crate::store::{BaseData, IndexKey, Object}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Represents the status of an account +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum AccountStatus { + Active, + Inactive, + Suspended, + Archived, +} + +impl Default for AccountStatus { + fn default() -> Self { + AccountStatus::Active + } +} + +/// Represents the type of transaction +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum TransactionType { + Transfer, + Clawback, + Freeze, + Unfreeze, + Issue, + Burn, +} + +impl Default for TransactionType { + fn default() -> Self { + TransactionType::Transfer + } +} + +/// Represents a signature for transactions +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct Signature { + pub signer_id: u32, + pub signature: String, + pub timestamp: u64, +} + +impl Signature { + pub fn new() -> Self { + Self { + signer_id: 0, + signature: String::new(), + timestamp: 0, + } + } + + pub fn signer_id(mut self, signer_id: u32) -> Self { + self.signer_id = signer_id; + self + } + + pub fn signature(mut self, signature: impl ToString) -> Self { + self.signature = signature.to_string(); + self + } + + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Policy item for account operations +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +pub struct AccountPolicyItem { + pub signers: Vec, + pub min_signatures: u32, + pub enabled: bool, + pub threshold: f64, + pub recipient: u32, +} + +impl AccountPolicyItem { + pub fn new() -> Self { + Self { + signers: Vec::new(), + min_signatures: 0, + enabled: false, + threshold: 0.0, + recipient: 0, + } + } + + pub fn add_signer(mut self, signer_id: u32) -> Self { + self.signers.push(signer_id); + self + } + + pub fn signers(mut self, signers: Vec) -> Self { + self.signers = signers; + self + } + + pub fn min_signatures(mut self, min_signatures: u32) -> Self { + self.min_signatures = min_signatures; + self + } + + pub fn enabled(mut self, enabled: bool) -> Self { + self.enabled = enabled; + self + } + + pub fn threshold(mut self, threshold: f64) -> Self { + self.threshold = threshold; + self + } + + pub fn recipient(mut self, recipient: u32) -> Self { + self.recipient = recipient; + self + } + + pub fn build(self) -> Self { + self + } +} + +/// Represents an account in the financial system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Account { + /// Base model data + pub base_data: BaseData, + pub owner_id: u32, + #[index] + pub address: String, + pub balance: f64, + pub currency: String, + pub assetid: u32, + pub last_activity: u64, + pub administrators: Vec, + pub accountpolicy: u32, +} + +impl Account { + /// Create a new account instance + pub fn 
new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + owner_id: 0, + address: String::new(), + balance: 0.0, + currency: String::new(), + assetid: 0, + last_activity: 0, + administrators: Vec::new(), + accountpolicy: 0, + } + } + + /// Set the owner ID (fluent) + pub fn owner_id(mut self, owner_id: u32) -> Self { + self.owner_id = owner_id; + self + } + + /// Set the blockchain address (fluent) + pub fn address(mut self, address: impl ToString) -> Self { + self.address = address.to_string(); + self + } + + /// Set the balance (fluent) + pub fn balance(mut self, balance: f64) -> Self { + self.balance = balance; + self + } + + /// Set the currency (fluent) + pub fn currency(mut self, currency: impl ToString) -> Self { + self.currency = currency.to_string(); + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the last activity timestamp (fluent) + pub fn last_activity(mut self, last_activity: u64) -> Self { + self.last_activity = last_activity; + self + } + + /// Add an administrator (fluent) + pub fn add_administrator(mut self, admin_id: u32) -> Self { + self.administrators.push(admin_id); + self + } + + /// Set all administrators (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set the account policy ID (fluent) + pub fn accountpolicy(mut self, accountpolicy: u32) -> Self { + self.accountpolicy = accountpolicy; + self + } + + /// Build the final account instance + pub fn build(self) -> Self { + self + } +} + +/// Represents an asset in the financial system +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Asset { + /// Base model data + pub base_data: BaseData, + #[index] + pub address: String, + pub assetid: u32, + pub asset_type: String, + pub issuer: u32, + pub supply: f64, + pub decimals: u8, + pub is_frozen: bool, + pub metadata: HashMap, + pub administrators: Vec, + pub min_signatures: u32, +} + +impl Asset { + /// Create a new asset instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + address: String::new(), + assetid: 0, + asset_type: String::new(), + issuer: 0, + supply: 0.0, + decimals: 0, + is_frozen: false, + metadata: HashMap::new(), + administrators: Vec::new(), + min_signatures: 0, + } + } + + /// Set the blockchain address (fluent) + pub fn address(mut self, address: impl ToString) -> Self { + self.address = address.to_string(); + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the asset type (fluent) + pub fn asset_type(mut self, asset_type: impl ToString) -> Self { + self.asset_type = asset_type.to_string(); + self + } + + /// Set the issuer (fluent) + pub fn issuer(mut self, issuer: u32) -> Self { + self.issuer = issuer; + self + } + + /// Set the supply (fluent) + pub fn supply(mut self, supply: f64) -> Self { + self.supply = supply; + self + } + + /// Set the decimals (fluent) + pub fn decimals(mut self, decimals: u8) -> Self { + self.decimals = decimals; + self + } + + /// Set the frozen status (fluent) + pub fn is_frozen(mut self, is_frozen: bool) -> Self { + self.is_frozen = is_frozen; + self + } + + /// Add metadata entry (fluent) + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), 
value.to_string()); + self + } + + /// Set all metadata (fluent) + pub fn metadata(mut self, metadata: HashMap) -> Self { + self.metadata = metadata; + self + } + + /// Add an administrator (fluent) + pub fn add_administrator(mut self, admin_id: u32) -> Self { + self.administrators.push(admin_id); + self + } + + /// Set all administrators (fluent) + pub fn administrators(mut self, administrators: Vec) -> Self { + self.administrators = administrators; + self + } + + /// Set minimum signatures required (fluent) + pub fn min_signatures(mut self, min_signatures: u32) -> Self { + self.min_signatures = min_signatures; + self + } + + /// Build the final asset instance + pub fn build(self) -> Self { + self + } +} + +/// Represents account policies for various operations +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct AccountPolicy { + /// Base model data + pub base_data: BaseData, + pub transferpolicy: AccountPolicyItem, + pub adminpolicy: AccountPolicyItem, + pub clawbackpolicy: AccountPolicyItem, + pub freezepolicy: AccountPolicyItem, +} + +impl AccountPolicy { + /// Create a new account policy instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + transferpolicy: AccountPolicyItem::new(), + adminpolicy: AccountPolicyItem::new(), + clawbackpolicy: AccountPolicyItem::new(), + freezepolicy: AccountPolicyItem::new(), + } + } + + /// Set the transfer policy (fluent) + pub fn transferpolicy(mut self, transferpolicy: AccountPolicyItem) -> Self { + self.transferpolicy = transferpolicy; + self + } + + /// Set the admin policy (fluent) + pub fn adminpolicy(mut self, adminpolicy: AccountPolicyItem) -> Self { + self.adminpolicy = adminpolicy; + self + } + + /// Set the clawback policy (fluent) + pub fn clawbackpolicy(mut self, clawbackpolicy: AccountPolicyItem) -> Self { + self.clawbackpolicy = clawbackpolicy; + self + } + + /// Set the freeze policy (fluent) + pub fn freezepolicy(mut self, freezepolicy: AccountPolicyItem) -> Self { + self.freezepolicy = freezepolicy; + self + } + + /// Build the final account policy instance + pub fn build(self) -> Self { + self + } +} + +/// Represents a financial transaction +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, crate::DeriveObject)] +pub struct Transaction { + /// Base model data + pub base_data: BaseData, + pub txid: u32, + pub source: u32, + pub destination: u32, + pub assetid: u32, + pub amount: f64, + pub timestamp: u64, + pub status: String, + pub memo: String, + pub tx_type: TransactionType, + pub signatures: Vec, +} + +impl Transaction { + /// Create a new transaction instance + pub fn new(id: u32) -> Self { + let mut base_data = BaseData::new(); + Self { + base_data, + txid: 0, + source: 0, + destination: 0, + assetid: 0, + amount: 0.0, + timestamp: 0, + status: String::new(), + memo: String::new(), + tx_type: TransactionType::default(), + signatures: Vec::new(), + } + } + + /// Set the transaction ID (fluent) + pub fn txid(mut self, txid: u32) -> Self { + self.txid = txid; + self + } + + /// Set the source account (fluent) + pub fn source(mut self, source: u32) -> Self { + self.source = source; + self + } + + /// Set the destination account (fluent) + pub fn destination(mut self, destination: u32) -> Self { + self.destination = destination; + self + } + + /// Set the asset ID (fluent) + pub fn assetid(mut self, assetid: u32) -> Self { + self.assetid = assetid; + self + } + + /// Set the amount (fluent) + pub fn amount(mut self, 
amount: f64) -> Self {
+        self.amount = amount;
+        self
+    }
+
+    /// Set the timestamp (fluent)
+    pub fn timestamp(mut self, timestamp: u64) -> Self {
+        self.timestamp = timestamp;
+        self
+    }
+
+    /// Set the status (fluent)
+    pub fn status(mut self, status: impl ToString) -> Self {
+        self.status = status.to_string();
+        self
+    }
+
+    /// Set the memo (fluent)
+    pub fn memo(mut self, memo: impl ToString) -> Self {
+        self.memo = memo.to_string();
+        self
+    }
+
+    /// Set the transaction type (fluent)
+    pub fn tx_type(mut self, tx_type: TransactionType) -> Self {
+        self.tx_type = tx_type;
+        self
+    }
+
+    /// Add a signature (fluent)
+    pub fn add_signature(mut self, signature: Signature) -> Self {
+        self.signatures.push(signature);
+        self
+    }
+
+    /// Set all signatures (fluent)
+    pub fn signatures(mut self, signatures: Vec<Signature>) -> Self {
+        self.signatures = signatures;
+        self
+    }
+
+    /// Build the final transaction instance
+    pub fn build(self) -> Self {
+        self
+    }
+}
diff --git a/lib/osiris/core/objects/money/payments.rs b/lib/osiris/core/objects/money/payments.rs
new file mode 100644
index 0000000..aa6cfa1
--- /dev/null
+++ b/lib/osiris/core/objects/money/payments.rs
@@ -0,0 +1,457 @@
+/// Payment Provider Client
+///
+/// Generic payment provider API client supporting multiple payment gateways.
+/// Currently implements Pesapal API but designed to be extensible for other providers.
+
+use serde::{Deserialize, Serialize};
+use crate::store::{BaseData, IndexKey, Object};
+
+// Helper to run async code synchronously
+fn run_async<F, T>(future: F) -> T
+where
+    F: std::future::Future<Output = T> + Send + 'static,
+    T: Send + 'static,
+{
+    // Try to use current runtime handle if available
+    if tokio::runtime::Handle::try_current().is_ok() {
+        // We're already inside a runtime, where block_on would panic; run the
+        // future on a fresh runtime owned by a scoped helper thread instead.
+        std::thread::scope(|s| {
+            s.spawn(|| {
+                let rt = tokio::runtime::Runtime::new().unwrap();
+                rt.block_on(future)
+            }).join().unwrap()
+        })
+    } else {
+        // No runtime, create one
+        let rt = tokio::runtime::Runtime::new().unwrap();
+        rt.block_on(future)
+    }
+}
+
+/// Payment Provider Client for making API calls to payment gateways
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PaymentClient {
+    /// Base data for object storage
+    pub base_data: BaseData,
+
+    /// Provider name (e.g., "pesapal", "stripe", "paypal", "flutterwave")
+    pub provider: String,
+
+    /// Consumer key / API key
+    pub consumer_key: String,
+
+    /// Consumer secret / API secret
+    pub consumer_secret: String,
+
+    /// Base URL for API (optional, uses provider default if not set)
+    pub base_url: Option<String>,
+
+    /// Sandbox mode (for testing)
+    pub sandbox: bool,
+}
+
+/// Payment request details
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PaymentRequest {
+    /// Unique merchant reference
+    pub merchant_reference: String,
+
+    /// Amount to charge
+    pub amount: f64,
+
+    /// Currency code (e.g., "USD", "KES", "UGX")
+    pub currency: String,
+
+    /// Description of the payment
+    pub description: String,
+
+    /// Callback URL for payment notifications
+    pub callback_url: String,
+
+    /// Redirect URL after payment (optional)
+    pub redirect_url: Option<String>,
+
+    /// Cancel URL (optional)
+    pub cancel_url: Option<String>,
+
+    /// Customer email
+    pub customer_email: Option<String>,
+
+    /// Customer phone
+    pub customer_phone: Option<String>,
+
+    /// Customer first name
+    pub customer_first_name: Option<String>,
+
+    /// Customer last name
+    pub customer_last_name: Option<String>,
+}
+
+/// Payment response from provider
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct
PaymentResponse { + /// Payment link URL + pub payment_url: String, + + /// Order tracking ID from provider + pub order_tracking_id: String, + + /// Merchant reference + pub merchant_reference: String, + + /// Status message + pub status: String, +} + +/// Payment status query result +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaymentStatus { + /// Order tracking ID + pub order_tracking_id: String, + + /// Merchant reference + pub merchant_reference: String, + + /// Payment status (e.g., "PENDING", "COMPLETED", "FAILED") + pub status: String, + + /// Amount + pub amount: f64, + + /// Currency + pub currency: String, + + /// Payment method used + pub payment_method: Option, + + /// Transaction ID + pub transaction_id: Option, +} + +// Pesapal-specific structures +#[derive(Debug, Serialize)] +struct PesapalAuthRequest { + consumer_key: String, + consumer_secret: String, +} + +#[derive(Debug, Deserialize)] +struct PesapalAuthResponse { + token: String, + #[serde(rename = "expiryDate")] + expiry_date: Option, + error: Option, + status: Option, + message: Option, +} + +#[derive(Debug, Serialize)] +struct PesapalSubmitOrderRequest { + id: String, + currency: String, + amount: f64, + description: String, + callback_url: String, + redirect_mode: String, + notification_id: String, + billing_address: Option, +} + +#[derive(Debug, Serialize)] +struct PesapalBillingAddress { + email_address: Option, + phone_number: Option, + first_name: Option, + last_name: Option, +} + +#[derive(Debug, Deserialize)] +struct PesapalSubmitOrderResponse { + order_tracking_id: Option, + merchant_reference: Option, + redirect_url: Option, + error: Option, + status: Option, +} + +#[derive(Debug, Deserialize)] +struct PesapalTransactionStatusResponse { + payment_method: Option, + amount: f64, + created_date: String, + confirmation_code: Option, + payment_status_description: String, + description: String, + message: String, + payment_account: Option, + call_back_url: String, + status_code: i32, + merchant_reference: String, + payment_status_code: String, + currency: String, + error: Option, + status: String, +} + +impl PaymentClient { + /// Create a new payment client + pub fn new(id: u32, provider: String, consumer_key: String, consumer_secret: String) -> Self { + let base_data = BaseData::with_id(id, String::new()); + Self { + base_data, + provider, + consumer_key, + consumer_secret, + base_url: None, + sandbox: false, + } + } + + /// Create a Pesapal client + pub fn pesapal(id: u32, consumer_key: String, consumer_secret: String) -> Self { + let base_data = BaseData::with_id(id, String::new()); + Self { + base_data, + provider: "pesapal".to_string(), + consumer_key, + consumer_secret, + base_url: Some("https://pay.pesapal.com/v3".to_string()), + sandbox: false, + } + } + + /// Create a Pesapal sandbox client + pub fn pesapal_sandbox(id: u32, consumer_key: String, consumer_secret: String) -> Self { + let base_data = BaseData::with_id(id, String::new()); + Self { + base_data, + provider: "pesapal".to_string(), + consumer_key, + consumer_secret, + base_url: Some("https://cybqa.pesapal.com/pesapalv3".to_string()), + sandbox: true, + } + } + + /// Set custom base URL + pub fn with_base_url(mut self, base_url: String) -> Self { + self.base_url = Some(base_url); + self + } + + /// Enable sandbox mode + pub fn with_sandbox(mut self, sandbox: bool) -> Self { + self.sandbox = sandbox; + self + } + + /// Get the base URL for the provider + fn get_base_url(&self) -> String { + if let Some(url) = &self.base_url { + 
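+            // An explicitly configured base_url (e.g. set via with_base_url,
+            // typically for tests) always wins over the per-provider defaults
+            // matched below.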
return url.clone(); + } + + match self.provider.as_str() { + "pesapal" => { + if self.sandbox { + "https://cybqa.pesapal.com/pesapalv3".to_string() + } else { + "https://pay.pesapal.com/v3".to_string() + } + } + "stripe" => "https://api.stripe.com/v1".to_string(), + "paypal" => "https://api.paypal.com/v2".to_string(), + "flutterwave" => "https://api.flutterwave.com/v3".to_string(), + _ => panic!("Unknown provider: {}", self.provider), + } + } + + /// Create a payment link + pub fn create_payment_link( + &self, + request: &PaymentRequest, + ) -> Result { + match self.provider.as_str() { + "pesapal" => self.create_pesapal_payment(request), + _ => Err(format!("Provider {} not yet implemented", self.provider)), + } + } + + /// Get payment status + pub fn get_payment_status( + &self, + order_tracking_id: &str, + ) -> Result { + match self.provider.as_str() { + "pesapal" => self.get_pesapal_status(order_tracking_id), + _ => Err(format!("Provider {} not yet implemented", self.provider)), + } + } + + /// Authenticate with Pesapal and get access token + fn pesapal_authenticate(&self) -> Result { + let url = format!("{}/api/Auth/RequestToken", self.get_base_url()); + + let auth_request = PesapalAuthRequest { + consumer_key: self.consumer_key.clone(), + consumer_secret: self.consumer_secret.clone(), + }; + + run_async(async move { + let client = reqwest::Client::new(); + let response = client + .post(&url) + .header("Content-Type", "application/json") + .header("Accept", "application/json") + .json(&auth_request) + .send() + .await + .map_err(|e| format!("Failed to send auth request: {}", e))?; + + if !response.status().is_success() { + let status = response.status(); + let error_text = response.text().await.unwrap_or_default(); + return Err(format!("Pesapal auth failed ({}): {}", status, error_text)); + } + + // Debug: print raw response + let response_text = response.text().await + .map_err(|e| format!("Failed to read auth response: {}", e))?; + println!("=== PESAPAL AUTH RESPONSE ==="); + println!("{}", response_text); + println!("=============================="); + + let auth_response: PesapalAuthResponse = serde_json::from_str(&response_text) + .map_err(|e| format!("Failed to parse auth response: {}", e))?; + + if let Some(error) = auth_response.error { + return Err(format!("Pesapal auth error: {}", error)); + } + + Ok(auth_response.token) + }) + } + + /// Create a Pesapal payment + fn create_pesapal_payment( + &self, + request: &PaymentRequest, + ) -> Result { + // Get auth token + let token = self.pesapal_authenticate()?; + + let url = format!("{}/api/Transactions/SubmitOrderRequest", self.get_base_url()); + + let pesapal_request = PesapalSubmitOrderRequest { + id: request.merchant_reference.clone(), + currency: request.currency.clone(), + amount: request.amount, + description: request.description.clone(), + callback_url: request.callback_url.clone(), + redirect_mode: String::new(), + notification_id: String::new(), + billing_address: Some(PesapalBillingAddress { + email_address: request.customer_email.clone(), + phone_number: request.customer_phone.clone(), + first_name: request.customer_first_name.clone(), + last_name: request.customer_last_name.clone(), + }), + }; + + run_async(async move { + let client = reqwest::Client::new(); + let response = client + .post(&url) + .header("Content-Type", "application/json") + .header("Accept", "application/json") + .bearer_auth(&token) + .json(&pesapal_request) + .send() + .await + .map_err(|e| format!("Failed to send payment request: {}", e))?; + + if 
!response.status().is_success() { + let status = response.status(); + let error_text = response.text().await.unwrap_or_default(); + return Err(format!("Pesapal payment request failed ({}): {}", status, error_text)); + } + + // Debug: print raw response + let response_text = response.text().await + .map_err(|e| format!("Failed to read payment response: {}", e))?; + println!("=== PESAPAL PAYMENT RESPONSE ==="); + println!("{}", response_text); + println!("================================="); + + let pesapal_response: PesapalSubmitOrderResponse = serde_json::from_str(&response_text) + .map_err(|e| format!("Failed to parse payment response: {}", e))?; + + if let Some(error) = pesapal_response.error { + return Err(format!("Pesapal payment error: {}", error)); + } + + Ok(PaymentResponse { + payment_url: pesapal_response.redirect_url.unwrap_or_default(), + order_tracking_id: pesapal_response.order_tracking_id.unwrap_or_default(), + merchant_reference: pesapal_response.merchant_reference.unwrap_or_default(), + status: pesapal_response.status.unwrap_or_default(), + }) + }) + } + + /// Get Pesapal payment status + fn get_pesapal_status( + &self, + order_tracking_id: &str, + ) -> Result { + let token = self.pesapal_authenticate()?; + let order_tracking_id = order_tracking_id.to_string(); + + let url = format!( + "{}/api/Transactions/GetTransactionStatus?orderTrackingId={}", + self.get_base_url(), + order_tracking_id + ); + + run_async(async move { + let client = reqwest::Client::new(); + let response = client + .get(&url) + .header("Accept", "application/json") + .bearer_auth(&token) + .send() + .await + .map_err(|e| format!("Failed to send status request: {}", e))?; + + if !response.status().is_success() { + let status = response.status(); + let error_text = response.text().await.unwrap_or_default(); + return Err(format!("Pesapal status request failed ({}): {}", status, error_text)); + } + + // Debug: print raw response + let response_text = response.text().await + .map_err(|e| format!("Failed to read status response: {}", e))?; + println!("=== PESAPAL STATUS RESPONSE ==="); + println!("{}", response_text); + println!("================================"); + + let status_response: PesapalTransactionStatusResponse = serde_json::from_str(&response_text) + .map_err(|e| format!("Failed to parse status response: {}", e))?; + + if let Some(error) = status_response.error { + return Err(format!("Pesapal status error: {}", error)); + } + + Ok(PaymentStatus { + order_tracking_id: order_tracking_id.to_string(), + merchant_reference: status_response.merchant_reference, + status: status_response.payment_status_description, + amount: status_response.amount, + currency: status_response.currency, + payment_method: status_response.payment_method, + transaction_id: status_response.confirmation_code, + }) + }) + } +} diff --git a/lib/osiris/core/objects/money/rhai.rs b/lib/osiris/core/objects/money/rhai.rs new file mode 100644 index 0000000..bf673d5 --- /dev/null +++ b/lib/osiris/core/objects/money/rhai.rs @@ -0,0 +1,630 @@ +/// Rhai bindings for Money objects (Account, Asset, Transaction, PaymentClient) + +use ::rhai::plugin::*; +use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder}; + +use super::models::{Account, Asset, Transaction}; +use super::payments::{PaymentClient, PaymentRequest, PaymentResponse, PaymentStatus}; + +// ============================================================================ +// Account Module +// 
============================================================================ + +type RhaiAccount = Account; + +#[export_module] +mod rhai_account_module { + use super::RhaiAccount; + + #[rhai_fn(name = "new_account", return_raw)] + pub fn new_account() -> Result> { + Ok(Account::new(0)) + } + + #[rhai_fn(name = "owner_id", return_raw)] + pub fn set_owner_id( + account: &mut RhaiAccount, + owner_id: i64, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.owner_id(owner_id as u32); + Ok(account.clone()) + } + + #[rhai_fn(name = "address", return_raw)] + pub fn set_address( + account: &mut RhaiAccount, + address: String, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.address(address); + Ok(account.clone()) + } + + #[rhai_fn(name = "balance", return_raw)] + pub fn set_balance( + account: &mut RhaiAccount, + balance: f64, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.balance(balance); + Ok(account.clone()) + } + + #[rhai_fn(name = "currency", return_raw)] + pub fn set_currency( + account: &mut RhaiAccount, + currency: String, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.currency(currency); + Ok(account.clone()) + } + + #[rhai_fn(name = "assetid", return_raw)] + pub fn set_assetid( + account: &mut RhaiAccount, + assetid: i64, + ) -> Result> { + let owned = std::mem::take(account); + *account = owned.assetid(assetid as u32); + Ok(account.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(account: &mut RhaiAccount) -> i64 { + account.base_data.id as i64 + } + + #[rhai_fn(name = "get_owner_id")] + pub fn get_owner_id(account: &mut RhaiAccount) -> i64 { + account.owner_id as i64 + } + + #[rhai_fn(name = "get_address")] + pub fn get_address(account: &mut RhaiAccount) -> String { + account.address.clone() + } + + #[rhai_fn(name = "get_balance")] + pub fn get_balance(account: &mut RhaiAccount) -> f64 { + account.balance + } + + #[rhai_fn(name = "get_currency")] + pub fn get_currency(account: &mut RhaiAccount) -> String { + account.currency.clone() + } +} + +// ============================================================================ +// Asset Module +// ============================================================================ + +type RhaiAsset = Asset; + +#[export_module] +mod rhai_asset_module { + use super::RhaiAsset; + + #[rhai_fn(name = "new_asset", return_raw)] + pub fn new_asset() -> Result> { + Ok(Asset::new(0)) + } + + #[rhai_fn(name = "address", return_raw)] + pub fn set_address( + asset: &mut RhaiAsset, + address: String, + ) -> Result> { + let owned = std::mem::take(asset); + *asset = owned.address(address); + Ok(asset.clone()) + } + + #[rhai_fn(name = "asset_type", return_raw)] + pub fn set_asset_type( + asset: &mut RhaiAsset, + asset_type: String, + ) -> Result> { + let owned = std::mem::take(asset); + *asset = owned.asset_type(asset_type); + Ok(asset.clone()) + } + + #[rhai_fn(name = "issuer", return_raw)] + pub fn set_issuer( + asset: &mut RhaiAsset, + issuer: i64, + ) -> Result> { + let owned = std::mem::take(asset); + *asset = owned.issuer(issuer as u32); + Ok(asset.clone()) + } + + #[rhai_fn(name = "supply", return_raw)] + pub fn set_supply( + asset: &mut RhaiAsset, + supply: f64, + ) -> Result> { + let owned = std::mem::take(asset); + *asset = owned.supply(supply); + Ok(asset.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(asset: &mut RhaiAsset) -> i64 { + asset.base_data.id as i64 + } + + #[rhai_fn(name = 
"get_address")] + pub fn get_address(asset: &mut RhaiAsset) -> String { + asset.address.clone() + } + + #[rhai_fn(name = "get_asset_type")] + pub fn get_asset_type(asset: &mut RhaiAsset) -> String { + asset.asset_type.clone() + } + + #[rhai_fn(name = "get_supply")] + pub fn get_supply(asset: &mut RhaiAsset) -> f64 { + asset.supply + } +} + +// ============================================================================ +// Transaction Module +// ============================================================================ + +type RhaiTransaction = Transaction; + +#[export_module] +mod rhai_transaction_module { + use super::RhaiTransaction; + + #[rhai_fn(name = "new_transaction", return_raw)] + pub fn new_transaction() -> Result> { + Ok(Transaction::new(0)) + } + + #[rhai_fn(name = "source", return_raw)] + pub fn set_source( + tx: &mut RhaiTransaction, + source: i64, + ) -> Result> { + let owned = std::mem::take(tx); + *tx = owned.source(source as u32); + Ok(tx.clone()) + } + + #[rhai_fn(name = "destination", return_raw)] + pub fn set_destination( + tx: &mut RhaiTransaction, + destination: i64, + ) -> Result> { + let owned = std::mem::take(tx); + *tx = owned.destination(destination as u32); + Ok(tx.clone()) + } + + #[rhai_fn(name = "amount", return_raw)] + pub fn set_amount( + tx: &mut RhaiTransaction, + amount: f64, + ) -> Result> { + let owned = std::mem::take(tx); + *tx = owned.amount(amount); + Ok(tx.clone()) + } + + #[rhai_fn(name = "assetid", return_raw)] + pub fn set_assetid( + tx: &mut RhaiTransaction, + assetid: i64, + ) -> Result> { + let owned = std::mem::take(tx); + *tx = owned.assetid(assetid as u32); + Ok(tx.clone()) + } + + // Getters + #[rhai_fn(name = "get_id")] + pub fn get_id(tx: &mut RhaiTransaction) -> i64 { + tx.base_data.id as i64 + } + + #[rhai_fn(name = "get_source")] + pub fn get_source(tx: &mut RhaiTransaction) -> i64 { + tx.source as i64 + } + + #[rhai_fn(name = "get_destination")] + pub fn get_destination(tx: &mut RhaiTransaction) -> i64 { + tx.destination as i64 + } + + #[rhai_fn(name = "get_amount")] + pub fn get_amount(tx: &mut RhaiTransaction) -> f64 { + tx.amount + } + + #[rhai_fn(name = "get_assetid")] + pub fn get_assetid(tx: &mut RhaiTransaction) -> i64 { + tx.assetid as i64 + } +} + +// ============================================================================ +// Registration Functions +// ============================================================================ + +/// Register money modules with the Rhai engine +pub fn register_money_modules(parent_module: &mut Module) { + // Register custom types + parent_module.set_custom_type::("Account"); + parent_module.set_custom_type::("Asset"); + parent_module.set_custom_type::("Transaction"); + parent_module.set_custom_type::("PaymentClient"); + parent_module.set_custom_type::("PaymentRequest"); + parent_module.set_custom_type::("PaymentResponse"); + parent_module.set_custom_type::("PaymentStatus"); + + // Merge account functions + let account_module = exported_module!(rhai_account_module); + parent_module.merge(&account_module); + + // Merge asset functions + let asset_module = exported_module!(rhai_asset_module); + parent_module.merge(&asset_module); + + // Merge transaction functions + let transaction_module = exported_module!(rhai_transaction_module); + parent_module.merge(&transaction_module); + + // Merge payment client functions + let payment_module = exported_module!(rhai_payment_module); + parent_module.merge(&payment_module); + + // Merge ethereum wallet functions + let eth_module = 
exported_module!(rhai_ethereum_module); + parent_module.merge(ð_module); +} + +// ============================================================================ +// Payment Provider Module +// ============================================================================ + +type RhaiPaymentClient = PaymentClient; +type RhaiPaymentRequest = PaymentRequest; +type RhaiPaymentResponse = PaymentResponse; +type RhaiPaymentStatus = PaymentStatus; + +#[export_module] +mod rhai_payment_module { + use super::{RhaiPaymentClient, RhaiPaymentRequest, RhaiPaymentResponse, RhaiPaymentStatus}; + use super::super::payments::{PaymentClient, PaymentRequest, PaymentResponse, PaymentStatus}; + use ::rhai::EvalAltResult; + + // PaymentClient constructors + #[rhai_fn(name = "new_payment_client_pesapal", return_raw)] + pub fn new_pesapal_client( + id: i64, + consumer_key: String, + consumer_secret: String, + ) -> Result> { + Ok(PaymentClient::pesapal(id as u32, consumer_key, consumer_secret)) + } + + #[rhai_fn(name = "new_payment_client_pesapal_sandbox", return_raw)] + pub fn new_pesapal_sandbox_client( + id: i64, + consumer_key: String, + consumer_secret: String, + ) -> Result> { + Ok(PaymentClient::pesapal_sandbox(id as u32, consumer_key, consumer_secret)) + } + + // PaymentRequest constructor and builder methods + #[rhai_fn(name = "new_payment_request", return_raw)] + pub fn new_payment_request() -> Result> { + Ok(PaymentRequest { + merchant_reference: String::new(), + amount: 0.0, + currency: String::from("USD"), + description: String::new(), + callback_url: String::new(), + redirect_url: None, + cancel_url: None, + customer_email: None, + customer_phone: None, + customer_first_name: None, + customer_last_name: None, + }) + } + + #[rhai_fn(name = "amount", return_raw)] + pub fn set_amount( + request: &mut RhaiPaymentRequest, + amount: f64, + ) -> Result> { + request.amount = amount; + Ok(request.clone()) + } + + #[rhai_fn(name = "currency", return_raw)] + pub fn set_currency( + request: &mut RhaiPaymentRequest, + currency: String, + ) -> Result> { + request.currency = currency; + Ok(request.clone()) + } + + #[rhai_fn(name = "description", return_raw)] + pub fn set_description( + request: &mut RhaiPaymentRequest, + description: String, + ) -> Result> { + request.description = description; + Ok(request.clone()) + } + + #[rhai_fn(name = "callback_url", return_raw)] + pub fn set_callback_url( + request: &mut RhaiPaymentRequest, + url: String, + ) -> Result> { + request.callback_url = url; + Ok(request.clone()) + } + + #[rhai_fn(name = "merchant_reference", return_raw)] + pub fn set_merchant_reference( + request: &mut RhaiPaymentRequest, + reference: String, + ) -> Result> { + request.merchant_reference = reference; + Ok(request.clone()) + } + + #[rhai_fn(name = "customer_email", return_raw)] + pub fn set_customer_email( + request: &mut RhaiPaymentRequest, + email: String, + ) -> Result> { + request.customer_email = Some(email); + Ok(request.clone()) + } + + #[rhai_fn(name = "customer_phone", return_raw)] + pub fn set_customer_phone( + request: &mut RhaiPaymentRequest, + phone: String, + ) -> Result> { + request.customer_phone = Some(phone); + Ok(request.clone()) + } + + #[rhai_fn(name = "customer_name", return_raw)] + pub fn set_customer_name( + request: &mut RhaiPaymentRequest, + first_name: String, + last_name: String, + ) -> Result> { + request.customer_first_name = Some(first_name); + request.customer_last_name = Some(last_name); + Ok(request.clone()) + } + + #[rhai_fn(name = "redirect_url", return_raw)] + pub fn 
set_redirect_url( + request: &mut RhaiPaymentRequest, + url: String, + ) -> Result> { + request.redirect_url = Some(url); + Ok(request.clone()) + } + + // PaymentClient methods + #[rhai_fn(name = "create_payment_link", return_raw)] + pub fn create_payment_link( + client: &mut RhaiPaymentClient, + request: RhaiPaymentRequest, + ) -> Result> { + client.create_payment_link(&request) + .map_err(|e| e.into()) + } + + #[rhai_fn(name = "get_payment_status", return_raw)] + pub fn get_payment_status( + client: &mut RhaiPaymentClient, + order_tracking_id: String, + ) -> Result> { + client.get_payment_status(&order_tracking_id) + .map_err(|e| e.into()) + } + + // PaymentResponse getters + #[rhai_fn(name = "get_payment_url", pure)] + pub fn get_payment_url(response: &mut RhaiPaymentResponse) -> String { + response.payment_url.clone() + } + + #[rhai_fn(name = "get_order_tracking_id", pure)] + pub fn get_order_tracking_id(response: &mut RhaiPaymentResponse) -> String { + response.order_tracking_id.clone() + } + + #[rhai_fn(name = "get_merchant_reference", pure)] + pub fn get_merchant_reference(response: &mut RhaiPaymentResponse) -> String { + response.merchant_reference.clone() + } + + #[rhai_fn(name = "get_status", pure)] + pub fn get_response_status(response: &mut RhaiPaymentResponse) -> String { + response.status.clone() + } + + // PaymentStatus getters + #[rhai_fn(name = "get_status", pure)] + pub fn get_payment_status_value(status: &mut RhaiPaymentStatus) -> String { + status.status.clone() + } + + #[rhai_fn(name = "get_amount", pure)] + pub fn get_amount(status: &mut RhaiPaymentStatus) -> f64 { + status.amount + } + + #[rhai_fn(name = "get_currency", pure)] + pub fn get_currency(status: &mut RhaiPaymentStatus) -> String { + status.currency.clone() + } + + #[rhai_fn(name = "get_payment_method", pure)] + pub fn get_payment_method(status: &mut RhaiPaymentStatus) -> String { + status.payment_method.clone().unwrap_or_default() + } + + #[rhai_fn(name = "get_transaction_id", pure)] + pub fn get_transaction_id(status: &mut RhaiPaymentStatus) -> String { + status.transaction_id.clone().unwrap_or_default() + } +} + +// ============================================================================ +// CustomType Implementations +// ============================================================================ + +impl CustomType for Account { + fn build(mut builder: TypeBuilder) { + builder.with_name("Account"); + } +} + +impl CustomType for Asset { + fn build(mut builder: TypeBuilder) { + builder.with_name("Asset"); + } +} + +impl CustomType for Transaction { + fn build(mut builder: TypeBuilder) { + builder.with_name("Transaction"); + } +} + +impl CustomType for PaymentClient { + fn build(mut builder: TypeBuilder) { + builder.with_name("PaymentClient"); + } +} + +impl CustomType for PaymentRequest { + fn build(mut builder: TypeBuilder) { + builder.with_name("PaymentRequest"); + } +} + +impl CustomType for PaymentResponse { + fn build(mut builder: TypeBuilder) { + builder.with_name("PaymentResponse"); + } +} + +impl CustomType for PaymentStatus { + fn build(mut builder: TypeBuilder) { + builder.with_name("PaymentStatus"); + } +} + +// ============================================================================ +// Ethereum Wallet Module (Stub Implementation) +// ============================================================================ + +/// Simple Ethereum wallet representation +#[derive(Debug, Clone, Default)] +pub struct EthereumWallet { + pub owner_id: u32, + pub address: String, + pub network: String, +} 
+ +impl EthereumWallet { + pub fn new() -> Self { + // Generate a mock Ethereum address (in production, use ethers-rs or similar) + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); + let mock_address = format!("0x{:040x}", timestamp as u128); + Self { + owner_id: 0, + address: mock_address, + network: String::from("mainnet"), + } + } + + pub fn owner_id(mut self, id: u32) -> Self { + self.owner_id = id; + self + } + + pub fn network(mut self, network: impl ToString) -> Self { + self.network = network.to_string(); + self + } +} + +type RhaiEthereumWallet = EthereumWallet; + +#[export_module] +mod rhai_ethereum_module { + use super::{EthereumWallet, RhaiEthereumWallet}; + use ::rhai::EvalAltResult; + + #[rhai_fn(name = "new_ethereum_wallet", return_raw)] + pub fn new_ethereum_wallet() -> Result<RhaiEthereumWallet, Box<EvalAltResult>> { + Ok(EthereumWallet::new()) + } + + #[rhai_fn(name = "owner_id", return_raw)] + pub fn set_owner_id( + wallet: &mut RhaiEthereumWallet, + owner_id: i64, + ) -> Result<RhaiEthereumWallet, Box<EvalAltResult>> { + let owned = std::mem::take(wallet); + *wallet = owned.owner_id(owner_id as u32); + Ok(wallet.clone()) + } + + #[rhai_fn(name = "network", return_raw)] + pub fn set_network( + wallet: &mut RhaiEthereumWallet, + network: String, + ) -> Result<RhaiEthereumWallet, Box<EvalAltResult>> { + let owned = std::mem::take(wallet); + *wallet = owned.network(network); + Ok(wallet.clone()) + } + + #[rhai_fn(name = "get_address")] + pub fn get_address(wallet: &mut RhaiEthereumWallet) -> String { + wallet.address.clone() + } + + #[rhai_fn(name = "get_network")] + pub fn get_network(wallet: &mut RhaiEthereumWallet) -> String { + wallet.network.clone() + } +} + +impl CustomType for EthereumWallet { + fn build(mut builder: TypeBuilder) { + builder.with_name("EthereumWallet"); + } +}
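
The bindings above are plain free functions collected into Rhai modules; nothing reaches a script until a host calls `register_money_modules`. Below is a minimal sketch of that wiring. It assumes the crate is named `osiris` (consistent with the `::osiris` path used by the derive macro later in this patch) and that these items are reachable at the module paths shown; the script body and values are illustrative only.

```rust
use rhai::{Engine, Module};
// Assumed re-export paths; adjust to wherever these land in the crate.
use osiris::objects::money::payments::PaymentRequest;
use osiris::objects::money::rhai::register_money_modules;

fn main() {
    let mut engine = Engine::new();

    // Collect the money bindings into one module and expose it globally.
    let mut module = Module::new();
    register_money_modules(&mut module);
    engine.register_global_module(module.into());

    // Function names come from the #[rhai_fn(name = ...)] attributes above.
    let request: PaymentRequest = engine
        .eval(
            r#"
            new_payment_request()
                .merchant_reference("order-1001")
                .amount(25.0)
                .currency("KES")
                .description("Monthly subscription")
                .callback_url("https://example.com/ipn")
            "#,
        )
        .unwrap();

    assert_eq!(request.currency, "KES");
}
```

Because the setters take `&mut` and also return the updated value, scripts can either chain calls as above or mutate a variable in place (`req.amount(25.0);`).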
diff --git a/lib/osiris/core/objects/note/mod.rs b/lib/osiris/core/objects/note/mod.rs new file mode 100644 index 0000000..b5f1fe9 --- /dev/null +++ b/lib/osiris/core/objects/note/mod.rs @@ -0,0 +1,78 @@ +use crate::store::BaseData; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; + +pub mod rhai; + +/// A simple note object +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct Note { + /// Base data + pub base_data: BaseData, + + /// Title of the note + #[index] + pub title: Option<String>, + + /// Content of the note (searchable but not indexed) + pub content: Option<String>, + + /// Tags for categorization + #[index] + pub tags: BTreeMap<String, String>, +} + +impl Note { + /// Create a new note + pub fn new(ns: String) -> Self { + Self { + base_data: BaseData::with_ns(ns), + title: None, + content: None, + tags: BTreeMap::new(), + } + } + + /// Create a note with a specific ID + pub fn with_id(id: String, ns: String) -> Self { + let id_u32 = id.parse::<u32>().unwrap_or(0); + Self { + base_data: BaseData::with_id(id_u32, ns), + title: None, + content: None, + tags: BTreeMap::new(), + } + } + + /// Set the title + pub fn set_title(mut self, title: impl ToString) -> Self { + self.title = Some(title.to_string()); + self.base_data.update_modified(); + self + } + + /// Set the content + pub fn set_content(mut self, content: impl ToString) -> Self { + let content_str = content.to_string(); + self.base_data.set_size(Some(content_str.len() as u64)); + self.content = Some(content_str); + self.base_data.update_modified(); + self + } + + /// Add a tag + pub fn add_tag(mut self, key: impl ToString, value: impl ToString) -> Self { + self.tags.insert(key.to_string(), value.to_string()); + self.base_data.update_modified(); + self + } + + /// Set the MIME type + pub fn set_mime(mut self, mime: impl ToString) -> Self { + self.base_data.set_mime(Some(mime.to_string())); + self + } +} + +// Object trait implementation is auto-generated by #[derive(DeriveObject)] +// The derive macro generates: object_type(), base_data(), base_data_mut(), index_keys(), indexed_fields() diff --git a/lib/osiris/core/objects/note/rhai.rs b/lib/osiris/core/objects/note/rhai.rs new file mode 100644 index 0000000..fb01709 --- /dev/null +++ b/lib/osiris/core/objects/note/rhai.rs @@ -0,0 +1,107 @@ +use crate::objects::Note; +use rhai::{CustomType, Engine, TypeBuilder, Module, FuncRegistration}; + +impl CustomType for Note { + fn build(mut builder: TypeBuilder) { + builder + .with_name("Note") + .with_fn("new", |ns: String| Note::new(ns)) + .with_fn("set_title", |note: &mut Note, title: String| { + note.title = Some(title); + note.base_data.update_modified(); + }) + .with_fn("set_content", |note: &mut Note, content: String| { + let size = content.len() as u64; + note.content = Some(content); + note.base_data.set_size(Some(size)); + note.base_data.update_modified(); + }) + .with_fn("add_tag", |note: &mut Note, key: String, value: String| { + note.tags.insert(key, value); + note.base_data.update_modified(); + }) + .with_fn("set_mime", |note: &mut Note, mime: String| { + note.base_data.set_mime(Some(mime)); + }) + .with_fn("get_id", |note: &mut Note| note.base_data.id.clone()) + .with_fn("get_title", |note: &mut Note| note.title.clone().unwrap_or_default()) + .with_fn("get_content", |note: &mut Note| note.content.clone().unwrap_or_default()) + .with_fn("to_json", |note: &mut Note| { + serde_json::to_string_pretty(note).unwrap_or_default() + }); + } +} + +/// Register Note API in Rhai engine +pub fn register_note_api(engine: &mut Engine) { + engine.build_type::<Note>(); + + // Register builder-style constructor + engine.register_fn("note", |ns: String| Note::new(ns)); + + // Register chainable methods that return Self + engine.register_fn("title", |mut note: Note, title: String| { + note.title = Some(title); + note.base_data.update_modified(); + note + }); + + engine.register_fn("content", |mut note: Note, content: String| { + let size = content.len() as u64; + note.content = Some(content); + note.base_data.set_size(Some(size)); + note.base_data.update_modified(); + note + }); + + engine.register_fn("tag", |mut note: Note, key: String, value: String| { + note.tags.insert(key, value); + note.base_data.update_modified(); + note + }); + + engine.register_fn("mime", |mut note: Note, mime: String| { + note.base_data.set_mime(Some(mime)); + note + }); +} + +/// Register Note functions into a module (for use in packages) +pub fn register_note_functions(module: &mut Module) { + // Register Note type + module.set_custom_type::<Note>("Note"); + + // Register builder-style constructor + FuncRegistration::new("note") + .set_into_module(module, |ns: String| Note::new(ns)); + + // Register chainable methods that return Self + FuncRegistration::new("title") + .set_into_module(module, |mut note: Note, title: String| { + note.title = Some(title); + note.base_data.update_modified(); + note + }); + + FuncRegistration::new("content") + .set_into_module(module, |mut note: Note, content: String| { + let size = content.len() as u64; + note.content = Some(content); + note.base_data.set_size(Some(size)); + note.base_data.update_modified(); + note + }); + + FuncRegistration::new("tag") + .set_into_module(module, |mut note: Note, key: String, value: String| { + note.tags.insert(key, value); + 
note.base_data.update_modified(); + note + }); + + FuncRegistration::new("mime") + .set_into_module(module, |mut note: Note, mime: String| { + note.base_data.set_mime(Some(mime)); + note + }); +} diff --git a/lib/osiris/core/objects/supervisor/mod.rs b/lib/osiris/core/objects/supervisor/mod.rs new file mode 100644 index 0000000..04c1a3b --- /dev/null +++ b/lib/osiris/core/objects/supervisor/mod.rs @@ -0,0 +1,183 @@ +use crate::store::BaseData; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; + +pub mod rhai; + +/// API Key scopes for authorization +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum ApiKeyScope { + Admin, + User, + Registrar, +} + +/// API Key for supervisor authentication +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct ApiKey { + /// Base data + pub base_data: BaseData, + + /// The actual key value (hashed in production) + #[index] + pub key: String, + + /// Human-readable name for the key + #[index] + pub name: String, + + /// Scope/permission level + #[index] + pub scope: ApiKeyScope, + + /// Optional expiration timestamp + pub expires_at: Option, + + /// Metadata + pub metadata: BTreeMap, +} + +impl ApiKey { + /// Create a new API key + pub fn new(ns: String, key: String, name: String, scope: ApiKeyScope) -> Self { + Self { + base_data: BaseData::with_ns(ns), + key, + name, + scope, + expires_at: None, + metadata: BTreeMap::new(), + } + } + + /// Set expiration + pub fn set_expires_at(mut self, expires_at: impl ToString) -> Self { + self.expires_at = Some(expires_at.to_string()); + self.base_data.update_modified(); + self + } + + /// Add metadata + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self.base_data.update_modified(); + self + } +} + +/// Runner metadata for supervisor +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct Runner { + /// Base data + pub base_data: BaseData, + + /// Runner ID (same as base_data.id but as string) + #[index] + pub runner_id: String, + + /// Runner name + #[index] + pub name: String, + + /// Queue name + pub queue: String, + + /// Registered by (API key name) + #[index] + pub registered_by: String, + + /// Metadata + pub metadata: BTreeMap, +} + +impl Runner { + /// Create a new runner + pub fn new(ns: String, runner_id: String, name: String, queue: String, registered_by: String) -> Self { + Self { + base_data: BaseData::with_ns(ns), + runner_id, + name, + queue, + registered_by, + metadata: BTreeMap::new(), + } + } + + /// Add metadata + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self.base_data.update_modified(); + self + } +} + +/// Job metadata for supervisor +#[derive(Debug, Clone, Serialize, Deserialize, crate::DeriveObject)] +pub struct JobMetadata { + /// Base data + pub base_data: BaseData, + + /// Job ID + #[index] + pub job_id: String, + + /// Runner name + #[index] + pub runner: String, + + /// Created by (API key name) + #[index] + pub created_by: String, + + /// Job status + #[index] + pub status: String, + + /// Job payload (Rhai script or data) + pub payload: String, + + /// Result (if completed) + pub result: Option, + + /// Metadata + pub metadata: BTreeMap, +} + +impl JobMetadata { + /// Create new job metadata + pub fn new(ns: String, job_id: String, runner: String, created_by: String, payload: String) -> Self { + 
Self { + base_data: BaseData::with_ns(ns), + job_id, + runner, + created_by, + status: "created".to_string(), + payload, + result: None, + metadata: BTreeMap::new(), + } + } + + /// Set status + pub fn set_status(mut self, status: impl ToString) -> Self { + self.status = status.to_string(); + self.base_data.update_modified(); + self + } + + /// Set result + pub fn set_result(mut self, result: impl ToString) -> Self { + self.result = Some(result.to_string()); + self.base_data.update_modified(); + self + } + + /// Add metadata + pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self { + self.metadata.insert(key.to_string(), value.to_string()); + self.base_data.update_modified(); + self + } +} + +// Object trait implementations are auto-generated by #[derive(DeriveObject)] diff --git a/lib/osiris/core/objects/supervisor/rhai.rs b/lib/osiris/core/objects/supervisor/rhai.rs new file mode 100644 index 0000000..a57bb82 --- /dev/null +++ b/lib/osiris/core/objects/supervisor/rhai.rs @@ -0,0 +1,238 @@ +/// Rhai bindings for Supervisor objects (ApiKey, Runner, JobMetadata) + +use ::rhai::plugin::*; +use ::rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, TypeBuilder}; + +use super::{ApiKey, ApiKeyScope, Runner, JobMetadata}; + +/// Register supervisor modules with the Rhai engine +pub fn register_supervisor_modules(parent_module: &mut Module) { + // Register custom types + parent_module.set_custom_type::("ApiKey"); + parent_module.set_custom_type::("Runner"); + parent_module.set_custom_type::("JobMetadata"); + + // Merge function modules + let api_key_module = exported_module!(rhai_api_key_module); + parent_module.merge(&api_key_module); + + let runner_module = exported_module!(rhai_runner_module); + parent_module.merge(&runner_module); + + let job_module = exported_module!(rhai_job_metadata_module); + parent_module.merge(&job_module); +} + +// ============================================================================ +// ApiKey Module +// ============================================================================ + +type RhaiApiKey = ApiKey; + +#[export_module] +mod rhai_api_key_module { + use super::RhaiApiKey; + use super::super::{ApiKey, ApiKeyScope}; + use ::rhai::EvalAltResult; + + // ApiKey constructor + #[rhai_fn(name = "new_api_key", return_raw)] + pub fn new_api_key( + ns: String, + key: String, + name: String, + scope: String, + ) -> Result> { + let scope_enum = match scope.as_str() { + "admin" => ApiKeyScope::Admin, + "user" => ApiKeyScope::User, + "registrar" => ApiKeyScope::Registrar, + _ => return Err(format!("Invalid scope: {}", scope).into()), + }; + Ok(ApiKey::new(ns, key, name, scope_enum)) + } + + // Builder methods + #[rhai_fn(name = "expires_at", return_raw)] + pub fn set_expires_at( + api_key: RhaiApiKey, + expires_at: String, + ) -> Result> { + Ok(api_key.set_expires_at(expires_at)) + } + + #[rhai_fn(name = "add_metadata", return_raw)] + pub fn add_metadata( + api_key: RhaiApiKey, + key: String, + value: String, + ) -> Result> { + Ok(api_key.add_metadata(key, value)) + } + + // Getters + #[rhai_fn(name = "key", pure)] + pub fn get_key(api_key: &mut RhaiApiKey) -> String { + api_key.key.clone() + } + + #[rhai_fn(name = "name", pure)] + pub fn get_name(api_key: &mut RhaiApiKey) -> String { + api_key.name.clone() + } + + #[rhai_fn(name = "scope", pure)] + pub fn get_scope(api_key: &mut RhaiApiKey) -> String { + format!("{:?}", api_key.scope) + } +} + +// ============================================================================ +// Runner 
Module +// ============================================================================ + +type RhaiRunner = Runner; + +#[export_module] +mod rhai_runner_module { + use super::RhaiRunner; + use super::super::Runner; + use ::rhai::EvalAltResult; + + // Runner constructor + #[rhai_fn(name = "new_runner", return_raw)] + pub fn new_runner( + ns: String, + runner_id: String, + name: String, + queue: String, + registered_by: String, + ) -> Result<RhaiRunner, Box<EvalAltResult>> { + Ok(Runner::new(ns, runner_id, name, queue, registered_by)) + } + + // Builder methods + #[rhai_fn(name = "add_metadata", return_raw)] + pub fn add_metadata( + runner: RhaiRunner, + key: String, + value: String, + ) -> Result<RhaiRunner, Box<EvalAltResult>> { + Ok(runner.add_metadata(key, value)) + } + + // Getters + #[rhai_fn(name = "runner_id", pure)] + pub fn get_runner_id(runner: &mut RhaiRunner) -> String { + runner.runner_id.clone() + } + + #[rhai_fn(name = "name", pure)] + pub fn get_name(runner: &mut RhaiRunner) -> String { + runner.name.clone() + } + + #[rhai_fn(name = "queue", pure)] + pub fn get_queue(runner: &mut RhaiRunner) -> String { + runner.queue.clone() + } + + #[rhai_fn(name = "registered_by", pure)] + pub fn get_registered_by(runner: &mut RhaiRunner) -> String { + runner.registered_by.clone() + } +} + +// ============================================================================ +// JobMetadata Module +// ============================================================================ + +type RhaiJobMetadata = JobMetadata; + +#[export_module] +mod rhai_job_metadata_module { + use super::RhaiJobMetadata; + use super::super::JobMetadata; + use ::rhai::EvalAltResult; + + // JobMetadata constructor + #[rhai_fn(name = "new_job_metadata", return_raw)] + pub fn new_job_metadata( + ns: String, + job_id: String, + runner: String, + created_by: String, + payload: String, + ) -> Result<RhaiJobMetadata, Box<EvalAltResult>> { + Ok(JobMetadata::new(ns, job_id, runner, created_by, payload)) + } + + // Builder methods + #[rhai_fn(name = "set_status", return_raw)] + pub fn set_status( + job: RhaiJobMetadata, + status: String, + ) -> Result<RhaiJobMetadata, Box<EvalAltResult>> { + Ok(job.set_status(status)) + } + + #[rhai_fn(name = "set_result", return_raw)] + pub fn set_result( + job: RhaiJobMetadata, + result: String, + ) -> Result<RhaiJobMetadata, Box<EvalAltResult>> { + Ok(job.set_result(result)) + } + + #[rhai_fn(name = "add_metadata", return_raw)] + pub fn add_metadata( + job: RhaiJobMetadata, + key: String, + value: String, + ) -> Result<RhaiJobMetadata, Box<EvalAltResult>> { + Ok(job.add_metadata(key, value)) + } + + // Getters + #[rhai_fn(name = "job_id", pure)] + pub fn get_job_id(job: &mut RhaiJobMetadata) -> String { + job.job_id.clone() + } + + #[rhai_fn(name = "runner", pure)] + pub fn get_runner(job: &mut RhaiJobMetadata) -> String { + job.runner.clone() + } + + #[rhai_fn(name = "status", pure)] + pub fn get_status(job: &mut RhaiJobMetadata) -> String { + job.status.clone() + } + + #[rhai_fn(name = "created_by", pure)] + pub fn get_created_by(job: &mut RhaiJobMetadata) -> String { + job.created_by.clone() + } +} + +// ============================================================================ +// CustomType Implementations +// ============================================================================ + +impl CustomType for ApiKey { + fn build(mut builder: TypeBuilder) { + builder.with_name("ApiKey"); + } +} + +impl CustomType for Runner { + fn build(mut builder: TypeBuilder) { + builder.with_name("Runner"); + } +} + +impl CustomType for JobMetadata { + fn build(mut builder: TypeBuilder) { + builder.with_name("JobMetadata"); + } +}
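
Underneath these bindings the supervisor objects are ordinary Rust builders, so they can also be exercised directly from the host. A minimal sketch; the `osiris::objects::supervisor` path is an assumption based on this file's location, and all values are placeholders.

```rust
// Direct Rust usage of the supervisor objects wrapped above.
use osiris::objects::supervisor::{ApiKey, ApiKeyScope, JobMetadata};

fn main() {
    let key = ApiKey::new(
        "supervisor".to_string(),     // namespace
        "k-123".to_string(),          // key value (hash this in production)
        "ci-admin".to_string(),       // human-readable name
        ApiKeyScope::Admin,
    )
    .set_expires_at("2026-01-01T00:00:00Z")
    .add_metadata("created_by", "bootstrap");

    // Job records carry the API key *name* as provenance, not the secret.
    let job = JobMetadata::new(
        "supervisor".to_string(),
        "job-42".to_string(),
        "runner-eu-1".to_string(),
        key.name.clone(),
        "print(40 + 2);".to_string(), // Rhai payload
    )
    .set_status("dispatched");

    assert_eq!(job.status, "dispatched");
}
```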
diff --git a/lib/osiris/core/retrieve/mod.rs b/lib/osiris/core/retrieve/mod.rs new file mode 100644 index 0000000..ea9f7c0 --- /dev/null +++ b/lib/osiris/core/retrieve/mod.rs @@ -0,0 +1,5 @@ +pub mod query; +pub mod search; + +pub use query::RetrievalQuery; +pub use search::SearchEngine; diff --git a/lib/osiris/core/retrieve/query.rs b/lib/osiris/core/retrieve/query.rs new file mode 100644 index 0000000..81fd4d0 --- /dev/null +++ b/lib/osiris/core/retrieve/query.rs @@ -0,0 +1,74 @@ +/// Retrieval query structure +#[derive(Clone, Debug)] +pub struct RetrievalQuery { + /// Optional text query for keyword substring matching + pub text: Option<String>, + + /// Namespace to search in + pub ns: String, + + /// Field filters (key=value pairs) + pub filters: Vec<(String, String)>, + + /// Maximum number of results to return + pub top_k: usize, +} + +impl RetrievalQuery { + /// Create a new retrieval query + pub fn new(ns: String) -> Self { + Self { + text: None, + ns, + filters: Vec::new(), + top_k: 10, + } + } + + /// Set the text query + pub fn with_text(mut self, text: String) -> Self { + self.text = Some(text); + self + } + + /// Add a filter + pub fn with_filter(mut self, key: String, value: String) -> Self { + self.filters.push((key, value)); + self + } + + /// Set the maximum number of results + pub fn with_top_k(mut self, top_k: usize) -> Self { + self.top_k = top_k; + self + } +} + +/// Search result +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct SearchResult { + /// Object ID + pub id: String, + + /// Match score (0.0 to 1.0) + pub score: f32, + + /// Matched text snippet (if applicable) + #[serde(skip_serializing_if = "Option::is_none")] + pub snippet: Option<String>, +} + +impl SearchResult { + pub fn new(id: String, score: f32) -> Self { + Self { + id, + score, + snippet: None, + } + } + + pub fn with_snippet(mut self, snippet: String) -> Self { + self.snippet = Some(snippet); + self + } +}
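
A quick sketch of composing a query with this fluent API, assuming the crate name `osiris` so that the re-exports in `retrieve/mod.rs` above are reachable:

```rust
use osiris::retrieve::RetrievalQuery;

fn main() {
    // Search the "notes" namespace for objects tagged topic=rust whose
    // text mentions "tokio", keeping the ten best hits (the default).
    let query = RetrievalQuery::new("notes".to_string())
        .with_text("tokio".to_string())
        .with_filter("topic".to_string(), "rust".to_string());

    assert_eq!(query.top_k, 10);
    assert_eq!(query.filters.len(), 1);
}
```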
diff --git a/lib/osiris/core/retrieve/search.rs b/lib/osiris/core/retrieve/search.rs new file mode 100644 index 0000000..f254fcb --- /dev/null +++ b/lib/osiris/core/retrieve/search.rs @@ -0,0 +1,150 @@ +use crate::error::Result; +use crate::index::FieldIndex; +use crate::retrieve::query::{RetrievalQuery, SearchResult}; +use crate::store::{HeroDbClient, OsirisObject}; + +/// Search engine for OSIRIS +pub struct SearchEngine { + client: HeroDbClient, + index: FieldIndex, +} + +impl SearchEngine { + /// Create a new search engine + pub fn new(client: HeroDbClient) -> Self { + let index = FieldIndex::new(client.clone()); + Self { client, index } + } + + /// Execute a search query + pub async fn search(&self, query: &RetrievalQuery) -> Result<Vec<SearchResult>> { + // Step 1: Get candidate IDs from field filters + let candidate_ids = if query.filters.is_empty() { + self.index.get_all_ids().await? + } else { + self.index.get_ids_by_filters(&query.filters).await? + }; + + // Step 2: If a text query is provided, filter by substring match + let mut results = Vec::new(); + + if let Some(text_query) = &query.text { + let text_query_lower = text_query.to_lowercase(); + + for id in candidate_ids { + // Fetch the object + if let Ok(obj) = self.client.get_object(&id).await { + // Check if the text matches + let score = self.compute_text_score(&obj, &text_query_lower); + + if score > 0.0 { + let snippet = self.extract_snippet(&obj, &text_query_lower); + results.push(SearchResult::new(id, score).with_snippet(snippet)); + } + } + } + } else { + // No text query, return all candidates with score 1.0 + for id in candidate_ids { + results.push(SearchResult::new(id, 1.0)); + } + } + + // Step 3: Sort by score (descending) and limit + results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); + results.truncate(query.top_k); + + Ok(results) + } + + /// Compute text match score (simple substring matching) + fn compute_text_score(&self, obj: &OsirisObject, query: &str) -> f32 { + let mut score = 0.0; + + // Check title + if let Some(title) = &obj.meta.title { + if title.to_lowercase().contains(query) { + score += 0.5; + } + } + + // Check text content + if let Some(text) = &obj.text { + if text.to_lowercase().contains(query) { + score += 0.5; + + // Bonus for multiple occurrences + let count = text.to_lowercase().matches(query).count(); + score += (count as f32 - 1.0) * 0.1; + } + } + + // Check tags + for (key, value) in &obj.meta.tags { + if key.to_lowercase().contains(query) || value.to_lowercase().contains(query) { + score += 0.2; + } + } + + score.min(1.0) + } + + /// Extract a snippet around the matched text + fn extract_snippet(&self, obj: &OsirisObject, query: &str) -> String { + const SNIPPET_LENGTH: usize = 100; + + // Try to find a snippet in the text + if let Some(text) = &obj.text { + let text_lower = text.to_lowercase(); + if let Some(pos) = text_lower.find(query) { + let start = pos.saturating_sub(SNIPPET_LENGTH / 2); + let end = (pos + query.len() + SNIPPET_LENGTH / 2).min(text.len()); + + let mut snippet = text[start..end].to_string(); + if start > 0 { + snippet = format!("...{}", snippet); + } + if end < text.len() { + snippet = format!("{}...", snippet); + } + + return snippet; + } + } + + // Fall back to the title or the first N chars + if let Some(title) = &obj.meta.title { + return title.clone(); + } + + if let Some(text) = &obj.text { + let end = SNIPPET_LENGTH.min(text.len()); + let mut snippet = text[..end].to_string(); + if end < text.len() { + snippet = format!("{}...", snippet); + } + return snippet; + } + + String::from("[No content]") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + #[ignore] + async fn test_search() { + let client = HeroDbClient::new("redis://localhost:6379", 1).unwrap(); + let engine = SearchEngine::new(client); + + let query = RetrievalQuery::new("test".to_string()) + .with_text("rust".to_string()) + .with_top_k(10); + + let results = engine.search(&query).await.unwrap(); + assert!(results.len() <= 10); + } +}
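
The additive scoring in `compute_text_score` is easiest to see with concrete numbers. Here is a standalone restatement of the same rules (0.5 for a title hit, 0.5 for the first text occurrence plus 0.1 per repeat, 0.2 per matching tag, capped at 1.0); the function below is illustrative, not part of the patch:

```rust
// Restatement of the scoring rules above for quick mental checks.
fn score(title_hit: bool, text_occurrences: usize, tag_hits: usize) -> f32 {
    let mut s = 0.0;
    if title_hit {
        s += 0.5; // title substring match
    }
    if text_occurrences > 0 {
        s += 0.5; // first occurrence in the text body
        s += (text_occurrences as f32 - 1.0) * 0.1; // bonus per repeat
    }
    s += tag_hits as f32 * 0.2; // each matching tag key or value
    s.min(1.0) // capped at 1.0
}

fn main() {
    // Title match plus three text occurrences: 0.5 + 0.5 + 0.2, capped to 1.0.
    assert_eq!(score(true, 3, 0), 1.0);
    // A single text-only occurrence scores 0.5.
    assert_eq!(score(false, 1, 0), 0.5);
}
```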
diff --git a/lib/osiris/core/store/base_data.rs b/lib/osiris/core/store/base_data.rs new file mode 100644 index 0000000..b94e7b9 --- /dev/null +++ b/lib/osiris/core/store/base_data.rs @@ -0,0 +1,91 @@ +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; + +/// Base data that all OSIRIS objects must include +/// Similar to the heromodels BaseModelData but adapted for OSIRIS +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct BaseData { + /// Unique ID (auto-generated or user-assigned) + pub id: u32, + + /// Namespace this object belongs to + pub ns: String, + + /// Unix timestamp for creation time + #[serde(with = "time::serde::timestamp")] + pub created_at: OffsetDateTime, + + /// Unix timestamp for last modification time + #[serde(with = "time::serde::timestamp")] + pub modified_at: OffsetDateTime, + + /// Optional MIME type + pub mime: Option<String>, + + /// Content size in bytes + pub size: Option<u64>, +} + +impl BaseData { + /// Create new base data with ID 0 (no namespace required) + pub fn new() -> Self { + let now = OffsetDateTime::now_utc(); + Self { + id: 0, + ns: String::new(), + created_at: now, + modified_at: now, + mime: None, + size: None, + } + } + + /// Create new base data with a namespace + pub fn with_ns(ns: impl ToString) -> Self { + let now = OffsetDateTime::now_utc(); + Self { + id: 0, + ns: ns.to_string(), + created_at: now, + modified_at: now, + mime: None, + size: None, + } + } + + /// Create new base data with a specific ID + pub fn with_id(id: u32, ns: String) -> Self { + let now = OffsetDateTime::now_utc(); + Self { + id, + ns, + created_at: now, + modified_at: now, + mime: None, + size: None, + } + } + + /// Update the modified timestamp + pub fn update_modified(&mut self) { + self.modified_at = OffsetDateTime::now_utc(); + } + + /// Set the MIME type + pub fn set_mime(&mut self, mime: Option<String>) { + self.mime = mime; + self.update_modified(); + } + + /// Set the size + pub fn set_size(&mut self, size: Option<u64>) { + self.size = size; + self.update_modified(); + } +} + +impl Default for BaseData { + fn default() -> Self { + Self::new() + } +} diff --git a/lib/osiris/core/store/generic_store.rs b/lib/osiris/core/store/generic_store.rs new file mode 100644 index 0000000..36e0a0b --- /dev/null +++ b/lib/osiris/core/store/generic_store.rs @@ -0,0 +1,135 @@ +use crate::error::Result; +use crate::index::FieldIndex; +use crate::store::{HeroDbClient, Object}; + +/// Generic storage layer for OSIRIS objects +#[derive(Debug, Clone)] +pub struct GenericStore { + client: HeroDbClient, + index: FieldIndex, +} + +impl GenericStore { + /// Create a new generic store + pub fn new(client: HeroDbClient) -> Self { + let index = FieldIndex::new(client.clone()); + Self { + client, + index, + } + } + + /// Store an object + pub async fn put<T: Object>(&self, obj: &T) -> Result<()> { + // Serialize the object to JSON + let json = obj.to_json()?; + let key = format!("obj:{}:{}", obj.namespace(), obj.id()); + + // Store in HeroDB + self.client.set(&key, &json).await?; + + // Index the object + self.index_object(obj).await?; + + Ok(()) + } + + /// Get an object by ID + pub async fn get<T: Object>(&self, ns: &str, id: &str) -> Result<T> { + let key = format!("obj:{}:{}", ns, id); + let json = self.client.get(&key).await? + .ok_or_else(|| crate::error::Error::NotFound(format!("Object {}:{}", ns, id)))?; + + T::from_json(&json) + } + + /// Get raw JSON data by ID (for generic access without a concrete type) + pub async fn get_raw(&self, ns: &str, id: &str) -> Result<String> { + let key = format!("obj:{}:{}", ns, id); + self.client.get(&key).await? 
+ .ok_or_else(|| crate::error::Error::NotFound(format!("Object {}:{}", ns, id))) + } + + /// Delete an object + pub async fn delete(&self, obj: &T) -> Result { + let key = format!("obj:{}:{}", obj.namespace(), obj.id()); + + // Deindex first + self.deindex_object(obj).await?; + + // Delete from HeroDB + self.client.del(&key).await + } + + /// Check if an object exists + pub async fn exists(&self, ns: &str, id: &str) -> Result { + let key = format!("obj:{}:{}", ns, id); + self.client.exists(&key).await + } + + /// Index an object + async fn index_object(&self, obj: &T) -> Result<()> { + let index_keys = obj.index_keys(); + + for key in index_keys { + let field_key = format!("idx:{}:{}:{}", obj.namespace(), key.name, key.value); + self.client.sadd(&field_key, &obj.id().to_string()).await?; + } + + // Add to scan index for full-text search + let scan_key = format!("scan:{}", obj.namespace()); + self.client.sadd(&scan_key, &obj.id().to_string()).await?; + + Ok(()) + } + + /// Deindex an object + async fn deindex_object(&self, obj: &T) -> Result<()> { + let index_keys = obj.index_keys(); + + for key in index_keys { + let field_key = format!("idx:{}:{}:{}", obj.namespace(), key.name, key.value); + self.client.srem(&field_key, &obj.id().to_string()).await?; + } + + // Remove from scan index + let scan_key = format!("scan:{}", obj.namespace()); + self.client.srem(&scan_key, &obj.id().to_string()).await?; + + Ok(()) + } + + /// Get all IDs matching an index key + pub async fn get_ids_by_index(&self, ns: &str, field: &str, value: &str) -> Result> { + let field_key = format!("idx:{}:{}:{}", ns, field, value); + self.client.smembers(&field_key).await + } + + /// Get all IDs in a namespace + pub async fn get_all_ids(&self, ns: &str) -> Result> { + let scan_key = format!("scan:{}", ns); + self.client.smembers(&scan_key).await + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::objects::Note; + + #[tokio::test] + #[ignore] + async fn test_generic_store() { + let client = HeroDbClient::new("redis://localhost:6379", 1).unwrap(); + let store = GenericStore::new(client); + + let note = Note::new("test".to_string()) + .set_title("Test Note") + .set_content("This is a test"); + + store.put(¬e).await.unwrap(); + + let retrieved: Note = store.get("test", note.id()).await.unwrap(); + assert_eq!(retrieved.title, note.title); + } +} diff --git a/lib/osiris/core/store/herodb_client.rs b/lib/osiris/core/store/herodb_client.rs new file mode 100644 index 0000000..5220437 --- /dev/null +++ b/lib/osiris/core/store/herodb_client.rs @@ -0,0 +1,161 @@ +use crate::error::{Error, Result}; +use crate::store::OsirisObject; +use redis::aio::MultiplexedConnection; +use redis::{AsyncCommands, Client}; + +/// HeroDB client wrapper for OSIRIS operations +#[derive(Clone, Debug)] +pub struct HeroDbClient { + client: Client, + pub db_id: u16, +} + +impl HeroDbClient { + /// Create a new HeroDB client + pub fn new(url: &str, db_id: u16) -> Result { + let client = Client::open(url)?; + Ok(Self { client, db_id }) + } + + /// Get a connection to the database + pub async fn get_connection(&self) -> Result { + let mut conn = self.client.get_multiplexed_async_connection().await?; + + // Select the appropriate database + if self.db_id > 0 { + redis::cmd("SELECT") + .arg(self.db_id) + .query_async(&mut conn) + .await?; + } + + Ok(conn) + } + + /// Store an object in HeroDB + pub async fn put_object(&self, obj: &OsirisObject) -> Result<()> { + let mut conn = self.get_connection().await?; + let key = format!("meta:{}", obj.id); + let 
value = serde_json::to_string(obj)?; + + conn.set(&key, value).await?; + Ok(()) + } + + /// Retrieve an object from HeroDB + pub async fn get_object(&self, id: &str) -> Result<OsirisObject> { + let mut conn = self.get_connection().await?; + let key = format!("meta:{}", id); + + let value: Option<String> = conn.get(&key).await?; + match value { + Some(v) => { + let obj: OsirisObject = serde_json::from_str(&v)?; + Ok(obj) + } + None => Err(Error::NotFound(format!("Object not found: {}", id))), + } + } + + /// Delete an object from HeroDB + pub async fn delete_object(&self, id: &str) -> Result<bool> { + let mut conn = self.get_connection().await?; + let key = format!("meta:{}", id); + + let deleted: i32 = conn.del(&key).await?; + Ok(deleted > 0) + } + + /// Check if an object exists + pub async fn exists(&self, id: &str) -> Result<bool> { + let mut conn = self.get_connection().await?; + let key = format!("meta:{}", id); + + let exists: bool = conn.exists(&key).await?; + Ok(exists) + } + + /// Add an ID to a set (for field indexing) + pub async fn sadd(&self, set_key: &str, member: &str) -> Result<()> { + let mut conn = self.get_connection().await?; + conn.sadd(set_key, member).await?; + Ok(()) + } + + /// Remove an ID from a set + pub async fn srem(&self, set_key: &str, member: &str) -> Result<()> { + let mut conn = self.get_connection().await?; + conn.srem(set_key, member).await?; + Ok(()) + } + + /// Get all members of a set + pub async fn smembers(&self, set_key: &str) -> Result<Vec<String>> { + let mut conn = self.get_connection().await?; + let members: Vec<String> = conn.smembers(set_key).await?; + Ok(members) + } + + /// Get the intersection of multiple sets + pub async fn sinter(&self, keys: &[String]) -> Result<Vec<String>> { + let mut conn = self.get_connection().await?; + let members: Vec<String> = conn.sinter(keys).await?; + Ok(members) + } + + /// Get all keys matching a pattern + pub async fn keys(&self, pattern: &str) -> Result<Vec<String>> { + let mut conn = self.get_connection().await?; + let keys: Vec<String> = conn.keys(pattern).await?; + Ok(keys) + } + + /// Set a key-value pair + pub async fn set(&self, key: &str, value: &str) -> Result<()> { + let mut conn = self.get_connection().await?; + conn.set(key, value).await?; + Ok(()) + } + + /// Get a value by key + pub async fn get(&self, key: &str) -> Result<Option<String>> { + let mut conn = self.get_connection().await?; + let value: Option<String> = conn.get(key).await?; + Ok(value) + } + + /// Delete a key + pub async fn del(&self, key: &str) -> Result<bool> { + let mut conn = self.get_connection().await?; + let deleted: i32 = conn.del(key).await?; + Ok(deleted > 0) + } + + /// Get database size (number of keys) + pub async fn dbsize(&self) -> Result<usize> { + let mut conn = self.get_connection().await?; + let size: usize = redis::cmd("DBSIZE").query_async(&mut conn).await?; + Ok(size) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // Note: These tests require a running HeroDB instance + // They are ignored by default + + #[tokio::test] + #[ignore] + async fn test_put_get_object() { + let client = HeroDbClient::new("redis://localhost:6379", 1).unwrap(); + let obj = OsirisObject::new("test".to_string(), Some("Hello".to_string())); + + client.put_object(&obj).await.unwrap(); + let retrieved = client.get_object(&obj.id).await.unwrap(); + + assert_eq!(obj.id, retrieved.id); + assert_eq!(obj.text, retrieved.text); + } +}
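
These low-level primitives are exactly what `GenericStore` composes its key layout from. A minimal sketch of that layout, assuming a reachable HeroDB/Redis instance (like the ignored tests above) and that the store module declared next re-exports `HeroDbClient` under the crate name `osiris`:

```rust
use osiris::store::HeroDbClient;

#[tokio::main]
async fn main() {
    let client = HeroDbClient::new("redis://localhost:6379", 1).unwrap();

    // GenericStore builds its whole layout from these primitives:
    //   obj:{ns}:{id}             -> JSON body         (set / get)
    //   idx:{ns}:{field}:{value}  -> set of object IDs (sadd / smembers)
    //   scan:{ns}                 -> set of all IDs in the namespace
    client.set("obj:notes:42", r#"{"title":"hello"}"#).await.unwrap();
    client.sadd("idx:notes:topic:rust", "42").await.unwrap();
    client.sadd("scan:notes", "42").await.unwrap();

    let ids = client.smembers("idx:notes:topic:rust").await.unwrap();
    assert_eq!(ids, vec!["42".to_string()]);
}
```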
diff --git a/lib/osiris/core/store/mod.rs b/lib/osiris/core/store/mod.rs new file mode 100644 index 0000000..b149724 --- /dev/null +++ b/lib/osiris/core/store/mod.rs @@ -0,0 +1,11 @@ +pub mod base_data; +pub mod object_trait; +pub mod herodb_client; +pub mod generic_store; +pub mod object; // Keep old implementation for backwards compat temporarily + +pub use base_data::BaseData; +pub use object_trait::{IndexKey, Object, Storable}; +pub use herodb_client::HeroDbClient; +pub use generic_store::GenericStore; +pub use object::{Metadata, OsirisObject}; // Old implementation diff --git a/lib/osiris/core/store/object.rs b/lib/osiris/core/store/object.rs new file mode 100644 index 0000000..b4fa9fb --- /dev/null +++ b/lib/osiris/core/store/object.rs @@ -0,0 +1,160 @@ +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use time::OffsetDateTime; + +/// Core OSIRIS object structure +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct OsirisObject { + /// Unique identifier (UUID or user-assigned) + pub id: String, + + /// Namespace (e.g., "notes", "calendar") + pub ns: String, + + /// Metadata + pub meta: Metadata, + + /// Optional plain text content + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option<String>, +} + +/// Object metadata +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Metadata { + /// Optional human-readable title + #[serde(skip_serializing_if = "Option::is_none")] + pub title: Option<String>, + + /// MIME type + #[serde(skip_serializing_if = "Option::is_none")] + pub mime: Option<String>, + + /// Key-value tags for categorization + #[serde(default)] + pub tags: BTreeMap<String, String>, + + /// Creation timestamp + #[serde(with = "time::serde::rfc3339")] + pub created: OffsetDateTime, + + /// Last update timestamp + #[serde(with = "time::serde::rfc3339")] + pub updated: OffsetDateTime, + + /// Content size in bytes + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option<u64>, +} + +impl OsirisObject { + /// Create a new object with a generated UUID + pub fn new(ns: String, text: Option<String>) -> Self { + let now = OffsetDateTime::now_utc(); + Self { + id: uuid::Uuid::new_v4().to_string(), + ns, + meta: Metadata { + title: None, + mime: None, + tags: BTreeMap::new(), + created: now, + updated: now, + size: text.as_ref().map(|t| t.len() as u64), + }, + text, + } + } + + /// Create a new object with a specific ID + pub fn with_id(id: String, ns: String, text: Option<String>) -> Self { + let now = OffsetDateTime::now_utc(); + Self { + id, + ns, + meta: Metadata { + title: None, + mime: None, + tags: BTreeMap::new(), + created: now, + updated: now, + size: text.as_ref().map(|t| t.len() as u64), + }, + text, + } + } + + /// Update the object's text content + pub fn update_text(&mut self, text: Option<String>) { + self.meta.updated = OffsetDateTime::now_utc(); + self.meta.size = text.as_ref().map(|t| t.len() as u64); + self.text = text; + } + + /// Add or update a tag + pub fn set_tag(&mut self, key: String, value: String) { + self.meta.tags.insert(key, value); + self.meta.updated = OffsetDateTime::now_utc(); + } + + /// Remove a tag + pub fn remove_tag(&mut self, key: &str) -> Option<String> { + let result = self.meta.tags.remove(key); + if result.is_some() { + self.meta.updated = OffsetDateTime::now_utc(); + } + result + } + + /// Set the title + pub fn set_title(&mut self, title: Option<String>) { + self.meta.title = title; + self.meta.updated = OffsetDateTime::now_utc(); + } + + /// Set the MIME type + pub fn set_mime(&mut self, mime: Option<String>) { + self.meta.mime = mime; + self.meta.updated = OffsetDateTime::now_utc(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_new_object() { + let obj = OsirisObject::new("notes".to_string(), Some("Hello, world!".to_string())); + assert_eq!(obj.ns, "notes"); + 
assert_eq!(obj.text, Some("Hello, world!".to_string())); + assert_eq!(obj.meta.size, Some(13)); + } + + #[test] + fn test_update_text() { + let mut obj = OsirisObject::new("notes".to_string(), Some("Initial".to_string())); + let initial_updated = obj.meta.updated; + + std::thread::sleep(std::time::Duration::from_millis(10)); + obj.update_text(Some("Updated".to_string())); + + assert_eq!(obj.text, Some("Updated".to_string())); + assert_eq!(obj.meta.size, Some(7)); + assert!(obj.meta.updated > initial_updated); + } + + #[test] + fn test_tags() { + let mut obj = OsirisObject::new("notes".to_string(), None); + obj.set_tag("topic".to_string(), "rust".to_string()); + obj.set_tag("project".to_string(), "osiris".to_string()); + + assert_eq!(obj.meta.tags.get("topic"), Some(&"rust".to_string())); + assert_eq!(obj.meta.tags.get("project"), Some(&"osiris".to_string())); + + let removed = obj.remove_tag("topic"); + assert_eq!(removed, Some("rust".to_string())); + assert_eq!(obj.meta.tags.get("topic"), None); + } +} diff --git a/lib/osiris/core/store/object_trait.rs b/lib/osiris/core/store/object_trait.rs new file mode 100644 index 0000000..fd75ed3 --- /dev/null +++ b/lib/osiris/core/store/object_trait.rs @@ -0,0 +1,113 @@ +use crate::error::Result; +use crate::store::BaseData; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +/// Represents an index key for an object field +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IndexKey { + /// The name of the index key (field name) + pub name: &'static str, + + /// The value of the index key for this object instance + pub value: String, +} + +impl IndexKey { + pub fn new(name: &'static str, value: impl ToString) -> Self { + Self { + name, + value: value.to_string(), + } + } +} + +/// Core trait that all OSIRIS objects must implement +/// Similar to heromodels Model trait but adapted for OSIRIS +pub trait Object: Debug + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync { + /// Get the object type name (used for routing/identification) + fn object_type() -> &'static str + where + Self: Sized; + + /// Get a reference to the base data + fn base_data(&self) -> &BaseData; + + /// Get a mutable reference to the base data + fn base_data_mut(&mut self) -> &mut BaseData; + + /// Get the unique ID for this object + fn id(&self) -> u32 { + self.base_data().id + } + + /// Set the unique ID for this object + fn set_id(&mut self, id: u32) { + self.base_data_mut().id = id; + } + + /// Get the namespace for this object + fn namespace(&self) -> &str { + &self.base_data().ns + } + + /// Returns a list of index keys for this object instance + /// These are generated from fields marked with #[index] + /// The default implementation returns base_data indexes only + fn index_keys(&self) -> Vec<IndexKey> { + let base = self.base_data(); + let mut keys = Vec::new(); + + // Index MIME type if present + if let Some(mime) = &base.mime { + keys.push(IndexKey::new("mime", mime)); + } + + keys + } + + /// Return a list of field names which have an index applied + /// This should be implemented by the derive macro + fn indexed_fields() -> Vec<&'static str> + where + Self: Sized, + { + Vec::new() + } + + /// Get the full-text searchable content for this object + /// Override this to provide custom searchable text + fn searchable_text(&self) -> Option<String> { + None + } + + /// Serialize the object to JSON + fn to_json(&self) -> Result<String> { + serde_json::to_string(self).map_err(Into::into) + } + + /// Deserialize the object from JSON + fn from_json(json: &str) -> Result<Self> + where +
Self: Sized, + { + serde_json::from_str(json).map_err(Into::into) + } + + /// Update the modified timestamp + fn touch(&mut self) { + self.base_data_mut().update_modified(); + } +} + +/// Trait for objects that can be stored in OSIRIS +/// This is automatically implemented for all types that implement Object +pub trait Storable: Object { + /// Prepare the object for storage (update timestamps, etc.) + fn prepare_for_storage(&mut self) { + self.touch(); + } +} + +// Blanket implementation +impl<T: Object> Storable for T {} diff --git a/lib/osiris/derive/Cargo.toml b/lib/osiris/derive/Cargo.toml new file mode 100644 index 0000000..1befbc7 --- /dev/null +++ b/lib/osiris/derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "osiris-derive" +version.workspace = true +edition.workspace = true +description = "Derive macros for Osiris" +license = "MIT OR Apache-2.0" + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "2.0", features = ["full", "extra-traits"] } +quote = "1.0" +proc-macro2 = "1.0" diff --git a/lib/osiris/derive/src/lib.rs b/lib/osiris/derive/src/lib.rs new file mode 100644 index 0000000..933c33d --- /dev/null +++ b/lib/osiris/derive/src/lib.rs @@ -0,0 +1,202 @@ +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, Data, DeriveInput, Fields, Type}; + +/// Derive macro for the Object trait +/// +/// Automatically implements `index_keys()` and `indexed_fields()` based on fields marked with #[index] +/// +/// # Example +/// +/// ```rust +/// #[derive(Object)] +/// pub struct Note { +/// pub base_data: BaseData, +/// +/// #[index] +/// pub title: Option<String>, +/// +/// pub content: Option<String>, +/// +/// #[index] +/// pub tags: BTreeMap<String, String>, +/// } +/// ``` +#[proc_macro_derive(Object, attributes(index))] +pub fn derive_object(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + let name = &input.ident; + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + // Extract fields with #[index] attribute + let indexed_fields = match &input.data { + Data::Struct(data) => match &data.fields { + Fields::Named(fields) => { + fields.named.iter().filter_map(|field| { + let has_index = field.attrs.iter().any(|attr| { + attr.path().is_ident("index") + }); + + if has_index { + let field_name = field.ident.as_ref()?; + let field_type = &field.ty; + Some((field_name.clone(), field_type.clone())) + } else { + None + } + }).collect::<Vec<_>>() + } + _ => vec![], + }, + _ => vec![], + }; + + // Generate index_keys() implementation + let index_keys_impl = generate_index_keys(&indexed_fields); + + // Generate indexed_fields() implementation + let field_names: Vec<_> = indexed_fields.iter() + .map(|(name, _)| name.to_string()) + .collect(); + + // Always use ::osiris for external usage + // When used inside the osiris crate's src/, the compiler will resolve it correctly + let crate_path = quote! { ::osiris }; + + let expanded = quote!
{ + impl #impl_generics #crate_path::Object for #name #ty_generics #where_clause { + fn object_type() -> &'static str { + stringify!(#name) + } + + fn base_data(&self) -> &#crate_path::BaseData { + &self.base_data + } + + fn base_data_mut(&mut self) -> &mut #crate_path::BaseData { + &mut self.base_data + } + + fn index_keys(&self) -> Vec<#crate_path::IndexKey> { + let mut keys = Vec::new(); + + // Index from base_data + if let Some(mime) = &self.base_data.mime { + keys.push(#crate_path::IndexKey::new("mime", mime)); + } + + #index_keys_impl + + keys + } + + fn indexed_fields() -> Vec<&'static str> { + vec![#(#field_names),*] + } + } + }; + + TokenStream::from(expanded) +} + +fn generate_index_keys(fields: &[(syn::Ident, Type)]) -> proc_macro2::TokenStream { + let mut implementations = Vec::new(); + + // Always use ::osiris + let crate_path = quote! { ::osiris }; + + for (field_name, field_type) in fields { + let field_name_str = field_name.to_string(); + + // Check if it's an Option type + if is_option_type(field_type) { + implementations.push(quote! { + if let Some(value) = &self.#field_name { + keys.push(#crate_path::IndexKey::new(#field_name_str, value)); + } + }); + } + // Check if it's a BTreeMap (for tags) + else if is_btreemap_type(field_type) { + implementations.push(quote! { + for (key, value) in &self.#field_name { + keys.push(#crate_path::IndexKey { + name: concat!(#field_name_str, ":tag"), + value: format!("{}={}", key, value), + }); + } + }); + } + // Check if it's a Vec + else if is_vec_type(field_type) { + implementations.push(quote! { + for (idx, value) in self.#field_name.iter().enumerate() { + keys.push(#crate_path::IndexKey { + name: concat!(#field_name_str, ":item"), + value: format!("{}:{}", idx, value), + }); + } + }); + } + // For OffsetDateTime, index as date string + else if is_offsetdatetime_type(field_type) { + implementations.push(quote! { + { + let date_str = self.#field_name.date().to_string(); + keys.push(#crate_path::IndexKey::new(#field_name_str, date_str)); + } + }); + } + // For enums or other types, convert to string + else { + implementations.push(quote! { + { + let value_str = format!("{:?}", &self.#field_name); + keys.push(#crate_path::IndexKey::new(#field_name_str, value_str)); + } + }); + } + } + + quote!
{ + #(#implementations)* + } +} + +fn is_option_type(ty: &Type) -> bool { + if let Type::Path(type_path) = ty { + if let Some(segment) = type_path.path.segments.last() { + return segment.ident == "Option"; + } + } + false +} + +fn is_btreemap_type(ty: &Type) -> bool { + if let Type::Path(type_path) = ty { + if let Some(segment) = type_path.path.segments.last() { + return segment.ident == "BTreeMap"; + } + } + false +} + +fn is_vec_type(ty: &Type) -> bool { + if let Type::Path(type_path) = ty { + if let Some(segment) = type_path.path.segments.last() { + return segment.ident == "Vec"; + } + } + false +} + +fn is_offsetdatetime_type(ty: &Type) -> bool { + if let Type::Path(type_path) = ty { + if let Some(segment) = type_path.path.segments.last() { + return segment.ident == "OffsetDateTime"; + } + } + false +} diff --git a/lib/runner/Cargo.toml b/lib/runner/Cargo.toml new file mode 100644 index 0000000..fbd692f --- /dev/null +++ b/lib/runner/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "hero-runner" +version.workspace = true +edition.workspace = true +description = "Hero Runner library for executing jobs" +license = "MIT OR Apache-2.0" + +[lib] +name = "hero_runner" +path = "lib.rs" + +[dependencies] +# Core dependencies +anyhow.workspace = true +redis.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +log.workspace = true +env_logger.workspace = true +uuid.workspace = true +chrono.workspace = true +toml.workspace = true +thiserror.workspace = true +async-trait.workspace = true + +# Crypto dependencies +secp256k1.workspace = true +sha2.workspace = true +hex.workspace = true + +# Rhai scripting +rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals", "serde"] } + +# Hero dependencies +hero-job = { path = "../models/job" } +hero-job-client = { path = "../clients/job" } +hero_logger = { git = "https://git.ourworld.tf/herocode/baobab.git", branch = "logger" } + +# Tracing +tracing = "0.1.41" +rand = "0.8" diff --git a/lib/runner/async_runner.rs b/lib/runner/async_runner.rs new file mode 100644 index 0000000..d76335f --- /dev/null +++ b/lib/runner/async_runner.rs @@ -0,0 +1,270 @@ +use crate::Job; +use log::{debug, error, info}; +use rhai::{Engine, packages::Package}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{mpsc, Mutex}; +use tokio::task::JoinHandle; + +use crate::runner_trait::Runner; + +/// Represents a running job with its handle and metadata +struct RunningJob { + job_id: String, + handle: JoinHandle<Result<String, Box<dyn std::error::Error + Send + Sync>>>, + started_at: std::time::Instant, +} + +/// Builder for AsyncRunner +#[derive(Default)] +pub struct AsyncRunnerBuilder { + runner_id: Option<String>, + db_path: Option<String>, + redis_url: Option<String>, + default_timeout: Option<Duration>, + engine: Option<Arc<dyn Fn() -> Engine + Send + Sync>>, +} + +impl AsyncRunnerBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn runner_id<S: Into<String>>(mut self, runner_id: S) -> Self { + self.runner_id = Some(runner_id.into()); + self + } + + pub fn db_path<S: Into<String>>(mut self, db_path: S) -> Self { + self.db_path = Some(db_path.into()); + self + } + + pub fn redis_url<S: Into<String>>(mut self, redis_url: S) -> Self { + self.redis_url = Some(redis_url.into()); + self + } + + pub fn default_timeout(mut self, timeout: Duration) -> Self { + self.default_timeout = Some(timeout); + self + } + + pub fn engine_factory<F>(mut self, factory: F) -> Self + where + F: Fn() -> Engine + Send + Sync + 'static, + { + self.engine = Some(Arc::new(factory)); + self + } + + pub fn build(self) -> Result<AsyncRunner, String> { + Ok(AsyncRunner
{ + runner_id: self.runner_id.ok_or("runner_id is required")?, + db_path: self.db_path.ok_or("db_path is required")?, + redis_url: self.redis_url.ok_or("redis_url is required")?, + default_timeout: self.default_timeout.unwrap_or(Duration::from_secs(300)), + engine_factory: self.engine.ok_or("engine factory is required")?, + running_jobs: Arc::new(Mutex::new(HashMap::new())), + }) + } +} + +/// Asynchronous runner that processes jobs concurrently +pub struct AsyncRunner { + pub runner_id: String, + pub db_path: String, + pub redis_url: String, + pub default_timeout: Duration, + pub engine_factory: Arc<dyn Fn() -> Engine + Send + Sync>, + running_jobs: Arc<Mutex<HashMap<String, RunningJob>>>, +} + +impl AsyncRunner { + /// Create a new AsyncRunnerBuilder + pub fn builder() -> AsyncRunnerBuilder { + AsyncRunnerBuilder::new() + } + + /// Add a running job to the tracking map + async fn add_running_job(&self, job_id: String, handle: JoinHandle<Result<String, Box<dyn std::error::Error + Send + Sync>>>) { + let running_job = RunningJob { + job_id: job_id.clone(), + handle, + started_at: std::time::Instant::now(), + }; + + let mut jobs = self.running_jobs.lock().await; + jobs.insert(job_id.clone(), running_job); + debug!("Async Runner: Added running job '{}'. Total running: {}", + job_id, jobs.len()); + } + + /// Remove a completed job from the tracking map + async fn remove_running_job(&self, job_id: &str) { + let mut jobs = self.running_jobs.lock().await; + if let Some(job) = jobs.remove(job_id) { + let duration = job.started_at.elapsed(); + debug!("Async Runner: Removed completed job '{}' after {:?}. Remaining: {}", + job_id, duration, jobs.len()); + } + } + + /// Get the count of currently running jobs + pub async fn running_job_count(&self) -> usize { + let jobs = self.running_jobs.lock().await; + jobs.len() + } + + /// Cleanup any finished jobs from the running jobs map + async fn cleanup_finished_jobs(&self) { + let mut jobs = self.running_jobs.lock().await; + let mut to_remove = Vec::new(); + + for (job_id, running_job) in jobs.iter() { + if running_job.handle.is_finished() { + to_remove.push(job_id.clone()); + } + } + + for job_id in to_remove { + if let Some(job) = jobs.remove(&job_id) { + let duration = job.started_at.elapsed(); + debug!("Async Runner: Cleaned up finished job '{}' after {:?}", + job_id, duration); + } + } + } + +} + + +impl Runner for AsyncRunner { + fn process_job(&self, job: Job) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { + let job_id = job.id.clone(); + let runner_id = &self.runner_id; + + // Determine timeout (use job-specific timeout if available, otherwise default) + let job_timeout = if job.timeout > 0 { + Duration::from_secs(job.timeout) + } else { + self.default_timeout + }; + + info!("Async Runner '{}', Job {}: Spawning job execution task with timeout {:?}", + runner_id, job_id, job_timeout); + + // Clone necessary data for the spawned task + let job_id_clone = job_id.clone(); + let runner_id_clone = runner_id.clone(); + let runner_id_debug = runner_id.clone(); + let job_id_debug = job_id.clone(); + let _redis_url_clone = self.redis_url.clone(); + let running_jobs_clone = Arc::clone(&self.running_jobs); + let engine_factory = Arc::clone(&self.engine_factory); + let db_path_clone = self.db_path.clone(); + + // Spawn the job execution task + let job_handle = tokio::spawn(async move { + // Create a new engine instance (cheap with factory pattern) + let mut engine = engine_factory(); + let mut db_config = rhai::Map::new(); + db_config.insert("DB_PATH".into(), db_path_clone.into()); + db_config.insert("CALLER_ID".into(), job.caller_id.clone().into()); + db_config.insert("CONTEXT_ID".into(),
job.context_id.clone().into()); + engine.set_default_tag(rhai::Dynamic::from(db_config)); + + // Execute the Rhai script + let result = match engine.eval::<rhai::Dynamic>(&job.payload) { + Ok(result) => { + let result_str = if result.is::<String>() { + result.into_string().unwrap() + } else { + result.to_string() + }; + info!("Async Runner '{}', Job {}: Script executed successfully. Result: {}", + runner_id_clone, job_id_clone, result_str); + Ok(result_str) + } + Err(e) => { + let error_msg = format!("Script execution error: {}", e); + error!("Async Runner '{}', Job {}: {}", runner_id_clone, job_id_clone, error_msg); + Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>) + } + }; + + // Remove this job from the running jobs map when it completes + let mut jobs = running_jobs_clone.lock().await; + if let Some(running_job) = jobs.remove(&job_id_clone) { + let duration = running_job.started_at.elapsed(); + debug!("Async Runner '{}': Removed completed job '{}' after {:?}", + runner_id_debug, job_id_debug, duration); + } + + result + }); + + // Add the job to the running jobs map + let running_job = RunningJob { + job_id: job_id.clone(), + handle: job_handle, + started_at: std::time::Instant::now(), + }; + + let running_jobs_clone = Arc::clone(&self.running_jobs); + let job_id_for_map = job_id.clone(); + tokio::spawn(async move { + let mut jobs = running_jobs_clone.lock().await; + jobs.insert(job_id_for_map, running_job); + debug!("Async Runner: Added running job '{}'. Total running: {}", + job_id, jobs.len()); + }); + + // For async runners, we return immediately with a placeholder + // The actual result will be handled by the spawned task + Ok("Job spawned for async processing".to_string()) + } + + fn runner_type(&self) -> &'static str { + "Async" + } + + fn runner_id(&self) -> &str { + &self.runner_id + } + + fn redis_url(&self) -> &str { + &self.redis_url + } +} + +/// Convenience function to spawn an asynchronous runner using the trait interface +/// +/// This function provides a clean interface for the new async runner implementation +/// with timeout support.
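// A minimal usage sketch for the convenience function below, assuming a Tokio
// runtime and a reachable Redis instance; the runner id, db path, and timeout
// values are illustrative, not part of the original diff:
//
//     let (shutdown_tx, shutdown_rx) = tokio::sync::mpsc::channel::<()>(1);
//     let handle = spawn_async_runner(
//         "example-async-runner".to_string(),   // hypothetical runner id
//         "/tmp/osiris-db".to_string(),         // hypothetical db path
//         "redis://127.0.0.1:6379".to_string(),
//         shutdown_rx,
//         std::time::Duration::from_secs(300),  // default per-job timeout
//         rhai::Engine::new,                    // factory: fresh Engine per job
//     );
//     // ... later: shutdown_tx.send(()).await, then handle.await to join the loop.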
+pub fn spawn_async_runner<F>( + runner_id: String, + db_path: String, + redis_url: String, + shutdown_rx: mpsc::Receiver<()>, + default_timeout: std::time::Duration, + engine_factory: F, +) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> +where + F: Fn() -> Engine + Send + Sync + 'static, +{ + use std::sync::Arc; + + let runner = Arc::new( + AsyncRunner::builder() + .runner_id(runner_id) + .db_path(db_path) + .redis_url(redis_url) + .default_timeout(default_timeout) + .engine_factory(engine_factory) + .build() + .expect("Failed to build AsyncRunner") + ); + crate::runner_trait::spawn_runner(runner, shutdown_rx) +} diff --git a/lib/runner/lib.rs b/lib/runner/lib.rs new file mode 100644 index 0000000..69d9b74 --- /dev/null +++ b/lib/runner/lib.rs @@ -0,0 +1,80 @@ +// Core modules +pub mod async_runner; +pub mod sync_runner; +pub mod runner_trait; +pub mod script_mode; + +// Public exports for convenience +pub use runner_trait::{Runner, RunnerConfig, spawn_runner}; +pub use async_runner::{AsyncRunner, spawn_async_runner}; +pub use sync_runner::{SyncRunner, SyncRunnerConfig, spawn_sync_runner}; + +// Re-export job types from hero-job crate +pub use hero_job::{Job, JobStatus, JobError, JobBuilder, JobSignature}; +// Re-export job client +pub use hero_job_client::{Client, ClientBuilder}; +pub use redis::AsyncCommands; +use log::{error, info}; + +const BLPOP_TIMEOUT_SECONDS: usize = 5; + +/// Initialize Redis connection for the runner +pub async fn initialize_redis_connection( + runner_id: &str, + redis_url: &str, +) -> Result<redis::aio::MultiplexedConnection, Box<dyn std::error::Error + Send + Sync>> { + let redis_client = redis::Client::open(redis_url) + .map_err(|e| { + error!("Runner for Runner ID '{}': Failed to open Redis client: {}", runner_id, e); + e + })?; + + let redis_conn = redis_client.get_multiplexed_async_connection().await + .map_err(|e| { + error!("Runner for Runner ID '{}': Failed to get Redis connection: {}", runner_id, e); + e + })?; + + info!("Runner for Runner ID '{}' successfully connected to Redis.", runner_id); + Ok(redis_conn) +} + +// /// Load job from Redis using the supervisor's Job API +// pub async fn load_job_from_redis( +// redis_conn: &mut redis::aio::MultiplexedConnection, +// job_id: &str, +// runner_id: &str, +// ) -> Result<Job, JobError> { +// debug!("Runner '{}', Job {}: Loading job from Redis", runner_id, job_id); + +// // Load job data from Redis hash +// let job_data: std::collections::HashMap<String, String> = redis_conn.hgetall(&client.job_key(job_id)).await +// .map_err(JobError::Redis)?; + +// if job_data.is_empty() { +// return Err(JobError::NotFound(job_id.to_string())); +// } + +// // Parse job from hash data using the supervisor's Job struct +// let job = Job { +// id: job_id.to_string(), +// caller_id: job_data.get("caller_id").unwrap_or(&"".to_string()).clone(), +// context_id: job_data.get("context_id").unwrap_or(&"".to_string()).clone(), +// payload: job_data.get("payload").unwrap_or(&"".to_string()).clone(), +// runner: job_data.get("runner").unwrap_or(&"default".to_string()).clone(), +// executor: job_data.get("executor").unwrap_or(&"rhai".to_string()).clone(), +// timeout: job_data.get("timeout").and_then(|s| s.parse().ok()).unwrap_or(300), +// env_vars: serde_json::from_str(job_data.get("env_vars").unwrap_or(&"{}".to_string())) +// .map_err(JobError::Serialization)?, +// created_at: job_data.get("created_at") +// .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) +// .map(|dt| dt.with_timezone(&chrono::Utc)) +// .unwrap_or_else(chrono::Utc::now), +// updated_at: job_data.get("updated_at") +// .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) +// .map(|dt|
dt.with_timezone(&chrono::Utc)) +// .unwrap_or_else(chrono::Utc::now), +// }; + +// Ok(job) +// } \ No newline at end of file diff --git a/lib/runner/runner_trait.rs b/lib/runner/runner_trait.rs new file mode 100644 index 0000000..89476aa --- /dev/null +++ b/lib/runner/runner_trait.rs @@ -0,0 +1,272 @@ +//! # Runner Trait Abstraction +//! +//! This module provides a trait-based abstraction for Rhai runners that eliminates +//! code duplication between synchronous and asynchronous runner implementations. +//! +//! The `Runner` trait defines the common interface and behavior, while specific +//! implementations handle job processing differently (sync vs async). +//! +//! ## Architecture +//! +//! ```text +//! ┌─────────────────┐ ┌─────────────────┐ +//! │ SyncRunner │ │ AsyncRunner │ +//! │ │ │ │ +//! │ process_job() │ │ process_job() │ +//! │ (sequential) │ │ (concurrent) │ +//! └─────────────────┘ └─────────────────┘ +//! │ │ +//! └───────┬───────────────┘ +//! │ +//! ┌───────▼───────┐ +//! │ Runner Trait │ +//! │ │ +//! │ spawn() │ +//! │ config │ +//! │ common loop │ +//! └───────────────┘ +//! ``` + +use crate::{Job, JobStatus, Client}; +use log::{debug, error, info}; +use redis::AsyncCommands; + +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; + +use crate::{initialize_redis_connection, BLPOP_TIMEOUT_SECONDS}; + +/// Configuration for runner instances +#[derive(Debug, Clone)] +pub struct RunnerConfig { + pub runner_id: String, + pub db_path: String, + pub redis_url: String, + pub default_timeout: Option<Duration>, // Only used by async runners +} + +impl RunnerConfig { + /// Create a new runner configuration + pub fn new( + runner_id: String, + db_path: String, + redis_url: String, + ) -> Self { + Self { + runner_id, + db_path, + redis_url, + default_timeout: None, + } + } + + /// Set default timeout for async runners + pub fn with_default_timeout(mut self, timeout: Duration) -> Self { + self.default_timeout = Some(timeout); + self + } +} + +/// Trait defining the common interface for Rhai runners +/// +/// This trait abstracts the common functionality between synchronous and +/// asynchronous runners, allowing them to share the same spawn logic and +/// Redis polling loop while implementing different job processing strategies. +pub trait Runner: Send + Sync + 'static { + /// Process a single job + /// + /// This is the core method that differentiates runner implementations: + /// - Sync runners process jobs sequentially, one at a time + /// - Async runners spawn concurrent tasks for each job + /// + /// # Arguments + /// + /// * `job` - The job to process + /// + /// Note: The engine is now owned by the runner implementation as a field + /// For sync runners, this should be a blocking operation + /// For async runners, this can spawn tasks and return immediately + fn process_job(&self, job: Job) -> Result<String, Box<dyn std::error::Error + Send + Sync>>; + + /// Get the runner type name for logging + fn runner_type(&self) -> &'static str; + + /// Get runner ID for this runner instance + fn runner_id(&self) -> &str; + + /// Get Redis URL for this runner instance + fn redis_url(&self) -> &str; + + /// Spawn the runner + /// + /// This method provides the common runner loop implementation that both + /// sync and async runners can use.
It handles: + /// - Redis connection setup + /// - Job polling from Redis queue + /// - Shutdown signal handling + /// - Delegating job processing to the implementation + /// + /// Note: The engine is now owned by the runner implementation as a field + fn spawn( + self: Arc<Self>, + mut shutdown_rx: mpsc::Receiver<()>, + ) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> { + tokio::spawn(async move { + let runner_id = self.runner_id(); + let redis_url = self.redis_url(); + + // Create client to get the proper queue key + let client = Client::builder() + .redis_url(redis_url) + .build() + .await + .map_err(|e| format!("Failed to create client: {}", e))?; + + let queue_key = client.runner_key(runner_id); + info!( + "{} Runner '{}' starting. Connecting to Redis at {}. Listening on queue: {}", + self.runner_type(), + runner_id, + redis_url, + queue_key + ); + + let mut redis_conn = initialize_redis_connection(runner_id, redis_url).await?; + + loop { + let blpop_keys = vec![queue_key.clone()]; + tokio::select! { + // Listen for shutdown signal + _ = shutdown_rx.recv() => { + info!("{} Runner '{}': Shutdown signal received. Terminating loop.", + self.runner_type(), runner_id); + break; + } + // Listen for tasks from Redis + blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => { + debug!("{} Runner '{}': Attempting BLPOP on queue: {}", + self.runner_type(), runner_id, queue_key); + + let response: Option<(String, String)> = match blpop_result { + Ok(resp) => resp, + Err(e) => { + error!("{} Runner '{}': Redis BLPOP error on queue {}: {}. Runner for this circle might stop.", + self.runner_type(), runner_id, queue_key, e); + return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>); + } + }; + + if let Some((_queue_name_recv, job_id)) = response { + info!("{} Runner '{}' received job_id: {} from queue: {}", + self.runner_type(), runner_id, job_id, _queue_name_recv); + + // Load the job from Redis + match client.load_job_from_redis(&job_id).await { + Ok(job) => { + // Check for ping job and handle it directly + if job.payload.trim() == "ping" { + info!("{} Runner '{}': Received ping job '{}', responding with pong", + self.runner_type(), runner_id, job_id); + + // Update job status to started + if let Err(e) = client.set_job_status(&job_id, JobStatus::Started).await { + error!("{} Runner '{}': Failed to update ping job '{}' status to Started: {}", + self.runner_type(), runner_id, job_id, e); + } + + // Set result to "pong" and mark as finished + if let Err(e) = client.set_result(&job_id, "pong").await { + error!("{} Runner '{}': Failed to set ping job '{}' result: {}", + self.runner_type(), runner_id, job_id, e); + } + + if let Err(e) = client.set_job_status(&job_id, JobStatus::Finished).await { + error!("{} Runner '{}': Failed to update ping job '{}' status to Finished: {}", + self.runner_type(), runner_id, job_id, e); + } + + info!("{} Runner '{}': Successfully responded to ping job '{}' with pong", + self.runner_type(), runner_id, job_id); + } else { + // Update job status to started + if let Err(e) = client.set_job_status(&job_id, JobStatus::Started).await { + error!("{} Runner '{}': Failed to update job '{}' status to Started: {}", + self.runner_type(), runner_id, job_id, e); + } + + // Delegate job processing to the implementation + match self.process_job(job) { + Ok(result) => { + // Set result and mark as finished + if let Err(e) = client.set_result(&job_id, &result).await { + error!("{} Runner '{}': Failed to set job '{}' result: {}", + self.runner_type(), runner_id, job_id, e); + } + + if let Err(e) =
client.set_job_status(&job_id, JobStatus::Finished).await { + error!("{} Runner '{}': Failed to update job '{}' status to Finished: {}", + self.runner_type(), runner_id, job_id, e); + } + } + Err(e) => { + let error_str = format!("{:?}", e); + error!("{} Runner '{}': Job '{}' processing failed: {}", + self.runner_type(), runner_id, job_id, error_str); + + // Set error and mark as error + if let Err(e) = client.set_error(&job_id, &error_str).await { + error!("{} Runner '{}': Failed to set job '{}' error: {}", + self.runner_type(), runner_id, job_id, e); + } + + if let Err(e) = client.set_job_status(&job_id, JobStatus::Error).await { + error!("{} Runner '{}': Failed to update job '{}' status to Error: {}", + self.runner_type(), runner_id, job_id, e); + } + } + } + } + } + Err(e) => { + error!("{} Runner '{}': Failed to load job '{}': {}", + self.runner_type(), runner_id, job_id, e); + } + } + } else { + debug!("{} Runner '{}': BLPOP timed out on queue {}. No new tasks.", + self.runner_type(), runner_id, queue_key); + } + } + } + } + + info!("{} Runner '{}' has shut down.", self.runner_type(), runner_id); + Ok(()) + }) + } +} + +/// Convenience function to spawn a runner with the trait-based interface +/// +/// This function provides a unified interface for spawning any runner implementation +/// that implements the Runner trait. +/// +/// # Arguments +/// +/// * `runner` - The runner implementation to spawn +/// * `shutdown_rx` - Channel receiver for shutdown signals +/// +/// # Returns +/// +/// Returns a `JoinHandle` that can be awaited to wait for runner shutdown. +pub fn spawn_runner<R: Runner>( + runner: Arc<R>, + shutdown_rx: mpsc::Receiver<()>, +) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> { + runner.spawn(shutdown_rx) +} + + + diff --git a/lib/runner/script_mode.rs b/lib/runner/script_mode.rs new file mode 100644 index 0000000..fb10903 --- /dev/null +++ b/lib/runner/script_mode.rs @@ -0,0 +1,168 @@ +use std::time::Duration; +use tokio::time::timeout; +use crate::{JobBuilder, JobStatus, Client}; +use log::{info, error}; +use tokio::sync::mpsc; +use std::sync::Arc; +use crate::async_runner::AsyncRunner; +use crate::runner_trait::{Runner, RunnerConfig}; + +/// Execute a script in single-job mode +/// Creates a job, submits it, waits for completion, and returns the result +pub async fn execute_script_mode<F>( + script_content: &str, + runner_id: &str, + redis_url: String, + job_timeout: Duration, + engine_factory: F, +) -> Result<String, Box<dyn std::error::Error + Send + Sync>> +where + F: Fn() -> rhai::Engine + Send + Sync + 'static, +{ + info!("Executing script in single-job mode"); + + // Create job client + let job_client = Client::builder() + .redis_url(&redis_url) + .build() + .await?; + + // Create the job using JobBuilder + let job = JobBuilder::new() + .caller_id("script_mode") + .payload(script_content) + .runner(runner_id) + .executor("rhai") + .timeout(job_timeout.as_secs()) + .build()?; + + let job_id = job.id.clone(); + info!("Created job with ID: {}", job_id); + + // Submit the job + job_client.store_job_in_redis(&job).await?; + info!("Job stored in Redis"); + + // Dispatch the job to the runner's queue + job_client.job_run(&job_id, runner_id).await?; + info!("Job dispatched to runner queue: {}", runner_id); + + // Create and spawn a temporary runner to process the job + let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1); + + let config = RunnerConfig { + runner_id: runner_id.to_string(), + db_path: "/tmp".to_string(), // Temporary path for script mode + redis_url: redis_url.clone(), + default_timeout: Some(job_timeout), + }; + + let runner = Arc::new(
AsyncRunner::builder() + .runner_id(&config.runner_id) + .db_path(&config.db_path) + .redis_url(&config.redis_url) + .default_timeout(config.default_timeout.unwrap_or(job_timeout)) + .engine_factory(engine_factory) + .build() + .map_err(|e| format!("Failed to build runner: {}", e))? + ); + let runner_handle = runner.spawn(shutdown_rx); + + info!("Temporary runner spawned for job processing"); + + // Wait for job completion with timeout + let result = timeout(job_timeout, wait_for_job_completion(&job_client, &job_id)).await; + + // Shutdown the temporary runner + let _ = shutdown_tx.send(()).await; + let _ = runner_handle.await; + + match result { + Ok(job_result) => { + match job_result { + Ok(job_status) => { + match job_status { + JobStatus::Finished => { + info!("Job completed successfully"); + // Get the job result from Redis + match job_client.get_result(&job_id).await { + Ok(Some(result)) => Ok(result), + Ok(None) => Ok("Job completed with no result".to_string()), + Err(e) => { + error!("Failed to get job result: {}", e); + Ok("Job completed but result unavailable".to_string()) + } + } + } + JobStatus::Error => { + // Get the job error from Redis - for now just return a generic error + error!("Job failed with status: Error"); + return Err("Job execution failed".into()); + /*match job_client.get_job_error(&job_id).await { + Ok(Some(error_msg)) => { + error!("Job failed: {}", error_msg); + Err(format!("Job failed: {}", error_msg).into()) + } + Ok(None) => { + error!("Job failed with no error message"); + Err("Job failed with no error message".into()) + } + Err(e) => { + error!("Failed to get job error: {}", e); + Err("Job failed but error details unavailable".into()) + } + }*/ + } + _ => { + error!("Job ended in unexpected status: {:?}", job_status); + Err(format!("Job ended in unexpected status: {:?}", job_status).into()) + } + } + } + Err(e) => { + error!("Error waiting for job completion: {}", e); + Err(e) + } + } + } + Err(_) => { + error!("Job execution timed out after {:?}", job_timeout); + // Try to cancel the job + let _ = job_client.set_job_status(&job_id, JobStatus::Error).await; + Err("Job execution timed out".into()) + } + } +} + +/// Wait for job completion by polling Redis +async fn wait_for_job_completion( + job_client: &Client, + job_id: &str, +) -> Result<JobStatus, Box<dyn std::error::Error + Send + Sync>> { + let poll_interval = Duration::from_millis(500); + + loop { + match job_client.get_status(job_id).await { + Ok(status) => { + match status { + JobStatus::Finished | JobStatus::Error => { + return Ok(status); + } + JobStatus::Created | JobStatus::Dispatched | JobStatus::WaitingForPrerequisites | JobStatus::Started => { + // Continue polling + tokio::time::sleep(poll_interval).await; + } + JobStatus::Stopping => { + // Job is being stopped, wait a bit more + tokio::time::sleep(poll_interval).await; + } + } + } + Err(e) => { + error!("Error polling job status: {}", e); + tokio::time::sleep(poll_interval).await; + } + } + } +} diff --git a/lib/runner/sync_runner.rs b/lib/runner/sync_runner.rs new file mode 100644 index 0000000..0ef7056 --- /dev/null +++ b/lib/runner/sync_runner.rs @@ -0,0 +1,175 @@ +use crate::Job; +use crate::runner_trait::Runner; +use log::{debug, error, info}; +use rhai::{Engine, Dynamic}; +use std::sync::Arc; +use tracing::subscriber::with_default; + +/// Configuration for sync runner instances +#[derive(Debug, Clone)] +pub struct SyncRunnerConfig { + pub runner_id: String, + pub redis_url: String, +} + +/// Synchronous runner that processes jobs sequentially +pub struct SyncRunner { + pub config:
SyncRunnerConfig, + pub engine_factory: Arc<dyn Fn() -> Engine + Send + Sync>, +} + +impl SyncRunner { + /// Create a new SyncRunner with the provided engine factory + pub fn new<F>(config: SyncRunnerConfig, engine_factory: F) -> Self + where + F: Fn() -> Engine + Send + Sync + 'static, + { + Self { + config, + engine_factory: Arc::new(engine_factory), + } + } + + /// Execute a job with the given engine, setting proper job context + /// + /// This function sets up the engine with job context (CALLER_ID, CONTEXT_ID, SIGNATORIES) + /// and evaluates the script. It returns the result or error. + fn execute_job_with_engine( + engine: &mut Engine, + job: &Job, + ) -> Result<Dynamic, Box<rhai::EvalAltResult>> { + // Set up job context in the engine + let mut db_config = rhai::Map::new(); + db_config.insert("CALLER_ID".into(), job.caller_id.clone().into()); + db_config.insert("CONTEXT_ID".into(), job.context_id.clone().into()); + + // Extract signatories from the job's signatures, or default to an empty list + let signatories: Vec<Dynamic> = if !job.signatures.is_empty() { + // Use signatures from the job + job.signatures.iter() + .map(|sig| Dynamic::from(sig.public_key.clone())) + .collect() + } else { + Vec::new() + }; + db_config.insert("SIGNATORIES".into(), Dynamic::from(signatories)); + + engine.set_default_tag(Dynamic::from(db_config)); + + debug!("Sync Runner for Context ID '{}': Evaluating script with Rhai engine (job context set).", job.context_id); + + // Execute the script with the configured engine + engine.eval::<Dynamic>(&job.payload) + } + + +} + +impl Runner for SyncRunner { + fn process_job(&self, job: Job) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { + let job_id = &job.id; + let runner_id = &self.config.runner_id; + + debug!("Sync Runner '{}', Job {}: Processing started.", runner_id, job_id); + info!("Sync Runner '{}' processing job_id: {}. Script: {:.50}...", job.context_id, job_id, job.payload); + + // Determine logs directory (default to ~/hero/logs) + let logs_root = if let Some(home) = std::env::var_os("HOME") { + std::path::PathBuf::from(home).join("hero").join("logs") + } else { + std::path::PathBuf::from("logs") + }; + + // Create job-specific logger + let job_logger_result = hero_logger::create_job_logger_with_guard( + &logs_root, + runner_id, // Use runner_id as the actor_type + job_id, + ); + + // Verify signatures before executing (if any) + if let Err(e) = job.verify_signatures() { + error!("Job {} signature verification failed: {}", job_id, e); + return Err(Box::new(e)); + } + + // Execute job within logging context + let result = match job_logger_result { + Ok((job_logger, _guard)) => { + // Execute ALL job processing within logging context + with_default(job_logger, || { + tracing::info!("Job {} started", job_id); + + // Create a new engine instance and configure Rhai logging + let mut engine = (self.engine_factory)(); + + // Reconfigure Rhai logging for this specific job context + // This ensures print() and debug() calls go to the job logger + hero_logger::rhai_integration::configure_rhai_logging(&mut engine, runner_id); + + // Execute the script + let script_result = Self::execute_job_with_engine(&mut engine, &job); + + tracing::info!("Job {} completed", job_id); + + script_result + }) + } + Err(e) => { + error!("Failed to create job logger for job {}: {}", job_id, e); + // Fallback: execute without job-specific logging + let mut engine = (self.engine_factory)(); + Self::execute_job_with_engine(&mut engine, &job) + } + }; + + // Process result + match result { + Ok(result) => { + let output_str = if result.is::<String>() { + result.into_string().unwrap() + } else {
result.to_string() + }; + info!("Sync Runner for Context ID '{}' job {} completed. Output: {}", job.context_id, job.id, output_str); + Ok(output_str) + } + Err(e) => { + let error_str = format!("{:?}", *e); + error!("Sync Runner for Context ID '{}' job {} script evaluation failed. Error: {}", job.context_id, job.id, error_str); + Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>) + } + } + } + + fn runner_type(&self) -> &'static str { + "Sync" + } + + fn runner_id(&self) -> &str { + &self.config.runner_id + } + + fn redis_url(&self) -> &str { + &self.config.redis_url + } +} + +/// Convenience function to spawn a synchronous runner using the trait interface +pub fn spawn_sync_runner<F>( + runner_id: String, + redis_url: String, + shutdown_rx: tokio::sync::mpsc::Receiver<()>, + engine_factory: F, +) -> tokio::task::JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> +where + F: Fn() -> Engine + Send + Sync + 'static, +{ + let config = SyncRunnerConfig { + runner_id, + redis_url, + }; + + let runner = Arc::new(SyncRunner::new(config, engine_factory)); + crate::runner_trait::spawn_runner(runner, shutdown_rx) +}
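A minimal end-to-end sketch of driving the sync runner above, assuming a Tokio runtime and a reachable Redis instance; the runner id is illustrative, and jobs would be queued separately via the job client:

use hero_runner::spawn_sync_runner;

#[tokio::main]
async fn main() {
    let (shutdown_tx, shutdown_rx) = tokio::sync::mpsc::channel::<()>(1);

    // Spawn the runner loop; each job gets a fresh Engine from the factory.
    let handle = spawn_sync_runner(
        "example-sync-runner".to_string(),    // hypothetical runner id
        "redis://127.0.0.1:6379".to_string(),
        shutdown_rx,
        rhai::Engine::new,
    );

    // ... submit jobs to the runner's queue, then stop the loop:
    let _ = shutdown_tx.send(()).await;
    let _ = handle.await;
}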