move repos into monorepo

42 bin/coordinator/Cargo.toml Normal file
@@ -0,0 +1,42 @@
[package]
name = "hero-coordinator"
version.workspace = true
edition.workspace = true
description = "Hero Coordinator - Manages job execution across runners"
license = "MIT OR Apache-2.0"

[lib]
name = "hero_coordinator"
path = "src/lib.rs"

[[bin]]
name = "coordinator"
path = "src/main.rs"

[dependencies]
# Core dependencies
tokio.workspace = true
async-trait.workspace = true
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
clap.workspace = true

# Redis
redis.workspace = true

# JSON-RPC
jsonrpsee.workspace = true

# HTTP client
reqwest = { version = "0.12.7", features = ["json", "rustls-tls"] }

# Base64 encoding
base64 = "0.22.1"

# Tracing
tracing.workspace = true
tracing-subscriber.workspace = true

# Hero dependencies
hero-job = { path = "../../lib/models/job" }
28 bin/coordinator/README.md Normal file
@@ -0,0 +1,28 @@
# herocoordinator

## Demo setup

A Python script is provided in the [scripts directory](./scripts/supervisor_flow_demo.py). This script
generates some demo jobs to be run by [a supervisor](https://git.ourworld.tf/herocode/supervisor).
Communication happens over [mycelium](https://github.com/threefoldtech/mycelium). To run the demo, a
supervisor must be running, which uses a mycelium instance to read and write messages. A __different__
mycelium instance needs to run for the coordinator (the supervisor can run on a different host than
the coordinator, as long as the two mycelium instances can reach each other).

An example of a local setup:

```bash
# Run a redis docker
docker run -it -d -p 6379:6379 --name redis redis
# Spawn mycelium node 1 with default settings. This also creates a TUN interface,
# though that is not necessary for the messages
mycelium
# Spawn mycelium node 2, connect to the first node
mycelium --key-file key.bin --peers tcp://127.0.0.1:9651 --disable-quic --disable-peer-discovery --api-addr 127.0.0.1:9989 --jsonrpc-addr 127.0.0.1:9990 --no-tun -t 8651
# Start the supervisor
supervisor --admin-secret admin123 --user-secret user123 --register-secret register123 --mycelium-url http://127.0.0.1:9990 --topic supervisor.rpc
# Start the coordinator
cargo run # (alternatively, run a compiled binary if one is present)
# Finally, invoke the demo script
python3 scripts/supervisor_flow_demo.py
```
142 bin/coordinator/main.rs Normal file
@@ -0,0 +1,142 @@
use clap::Parser;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;

use tracing::{error, info};
use tracing_subscriber::EnvFilter;

#[derive(Debug, Clone, Parser)]
#[command(
    name = "herocoordinator",
    version,
    about = "Hero Coordinator CLI",
    long_about = None
)]
struct Cli {
    #[arg(
        long = "mycelium-ip",
        short = 'i',
        env = "MYCELIUM_IP",
        default_value = "127.0.0.1",
        help = "IP address where Mycelium JSON-RPC is listening (default: 127.0.0.1)"
    )]
    mycelium_ip: IpAddr,

    #[arg(
        long = "mycelium-port",
        short = 'p',
        env = "MYCELIUM_PORT",
        default_value_t = 8990u16,
        help = "Port for Mycelium JSON-RPC (default: 8990)"
    )]
    mycelium_port: u16,

    #[arg(
        long = "redis-addr",
        short = 'r',
        env = "REDIS_ADDR",
        default_value = "127.0.0.1:6379",
        help = "Socket address of Redis instance (default: 127.0.0.1:6379)"
    )]
    redis_addr: SocketAddr,

    #[arg(
        long = "api-http-ip",
        env = "API_HTTP_IP",
        default_value = "127.0.0.1",
        help = "Bind IP for HTTP JSON-RPC server (default: 127.0.0.1)"
    )]
    api_http_ip: IpAddr,

    #[arg(
        long = "api-http-port",
        env = "API_HTTP_PORT",
        default_value_t = 9652u16,
        help = "Bind port for HTTP JSON-RPC server (default: 9652)"
    )]
    api_http_port: u16,

    #[arg(
        long = "api-ws-ip",
        env = "API_WS_IP",
        default_value = "127.0.0.1",
        help = "Bind IP for WebSocket JSON-RPC server (default: 127.0.0.1)"
    )]
    api_ws_ip: IpAddr,

    #[arg(
        long = "api-ws-port",
        env = "API_WS_PORT",
        default_value_t = 9653u16,
        help = "Bind port for WebSocket JSON-RPC server (default: 9653)"
    )]
    api_ws_port: u16,
}

#[tokio::main]
async fn main() {
    let cli = Cli::parse();

    // Initialize tracing subscriber (pretty formatter; controlled by RUST_LOG)
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .pretty()
        .with_target(true)
        .with_level(true)
        .init();

    let http_addr = SocketAddr::new(cli.api_http_ip, cli.api_http_port);
    let ws_addr = SocketAddr::new(cli.api_ws_ip, cli.api_ws_port);

    // Initialize Redis driver
    let redis = herocoordinator::storage::RedisDriver::new(cli.redis_addr.to_string())
        .await
        .expect("Failed to connect to Redis");

    // Initialize Service
    let service = herocoordinator::service::AppService::new(redis);
    let service_for_router = service.clone();

    // Shared application state
    let state = Arc::new(herocoordinator::rpc::AppState::new(service));

    // Start router workers (auto-discovered contexts) using a single global
    // SupervisorHub (no separate inbound listener)
    {
        let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
        let hub = herocoordinator::clients::SupervisorHub::new(
            base_url.clone(),
            "supervisor.rpc".to_string(),
        )
        .expect("Failed to initialize SupervisorHub");
        let cfg = herocoordinator::router::RouterConfig {
            context_ids: Vec::new(), // ignored by start_router_auto
            concurrency: 32,
            base_url,
            topic: "supervisor.rpc".to_string(),
            sup_hub: hub.clone(),
            transport_poll_interval_secs: 2,
            transport_poll_timeout_secs: 300,
        };
        // Per-context outbound delivery loops (replies handled by SupervisorHub)
        let _auto_handle = herocoordinator::router::start_router_auto(service_for_router, cfg);
    }

    // Build RPC modules for both servers
    let http_module = herocoordinator::rpc::build_module(state.clone());
    let ws_module = herocoordinator::rpc::build_module(state.clone());

    info!(%http_addr, %ws_addr, redis_addr=%cli.redis_addr, "Starting JSON-RPC servers");

    // Start servers
    let _http_handle = herocoordinator::rpc::start_http(http_addr, http_module)
        .await
        .expect("Failed to start HTTP server");
    let _ws_handle = herocoordinator::rpc::start_ws(ws_addr, ws_module)
        .await
        .expect("Failed to start WS server");

    // Wait for Ctrl+C to terminate
    if let Err(e) = tokio::signal::ctrl_c().await {
        error!(error=%e, "Failed to listen for shutdown signal");
    }
    info!("Shutdown signal received, exiting.");
}
77 bin/coordinator/specs/architecture.md Normal file
@@ -0,0 +1,77 @@
## per user

Runs in a container or VM, one per user.

- zinit
- herocoordinator
  - think of it as a DAG workflow manager
  - manages jobs that are sent around to different nodes
- mycelium address range (part of mycelium on host)
- herodb
  - state manager
  - redis protocol / primitives
  - fs backend (mem, and always-append in the future)
  - encryption & decryption primitives
  - key mgmt for encryption (creation, deletion)
  - openrpc admin features: user management, role-based access control
- postgresql + postgrest
- AI Agent TBD

```mermaid
%%{init: {"theme":"dark"}}%%
graph TD
    subgraph Per Node System
        N[Node] --> OS(Run on top of ZOS4 or Ubuntu or in a VM)

        subgraph On Node
            OS --> SV(Supervisors)
            OS --> ZN(Zinit)
            OS --> R(Runners)
            OS --> PGN(Some Nodes: PostgreSQL + Postgrest)
            OS --> HDN(Each Node: Herodb)

            subgraph Supervisors Responsibilities
                SV --> SV_MR(Manage runners & scheduling for the node)
                SV --> SV_MJ(Monitor & schedule jobs)
                SV --> SV_RU(Check resource usage)
                SV --> SV_TO(Checks on timeout)
            end

            subgraph Runners Characteristics
                R --> R_LV(V/Python & Rust)
                R --> R_FORK(Uses fork per runner for scalability)
                R --> R_COUNT(Some runners can only run 1, others more)
                R --> R_CONTEXT(Some runners are per context)
            end
        end

        SV -- "Manage" --> R
        SV -- "Schedule jobs via" --> ZN
        ZN -- "Starts" --> R
        R -- "Interacts with" --> PGN
        R -- "Interacts with" --> HDN
    end
```

## per node

- run on top of ZOS4 or Ubuntu or in a VM
- supervisors
  - manage the runners and their scheduling for the node
  - monitor & schedule jobs, check resource usage, check on timeouts
- zinit
- runners (are scheduled in zinit by the supervisor)
  - V/Python & Rust
  - use a fork per runner (process) for scalability
  - some runners can only run 1 job at a time, others more
  - some runners are per context
- some nodes will have postgresql + postgrest
- each node has herodb

REMARK

- each rhaj or heroscript running on a node can use herodb if needed (careful: this data can and will be lost), but cannot communicate with anything outside of the node
16 bin/coordinator/specs/hercoordinator.md Normal file
@@ -0,0 +1,16 @@
Will have an openrpc interface:

- start, stop, delete, list a DAG
- query the DAG and its status

## remarks for supervisor

- no retry
- no dependencies

## inspiration

- DAGU
18 bin/coordinator/specs/model/actor.v Normal file
@@ -0,0 +1,18 @@
module model

// an actor is a participant in the new internet, the one who can ask for work
// a user can have more than one actor operating for them; an actor always operates in a context which is hosted by the user's hero
// stored in the context db at actor:<id> (actor is hset)
@[heap]
pub struct Actor {
pub mut:
    id         u32
    pubkey     string
    address    []Address // addresses to reach the actor back; normally mycelium but doesn't have to be
    created_at u32 // epoch
    updated_at u32 // epoch
}

pub fn (self Actor) redis_key() string {
    return 'actor:${self.id}'
}
20 bin/coordinator/specs/model/context.v Normal file
@@ -0,0 +1,20 @@
module model

// each job is run in a context; this corresponds to a DB in redis and grants specific rights to actors
// a context is a redis db and also a location on a filesystem which can be used for e.g. logs, temporary files, etc.
// actors create contexts for others to work in
// stored in the context db at context:<id> (context is hset)
@[heap]
pub struct Context {
pub mut:
    id         u32   // corresponds with the redis db (in our ourdb or other redis)
    admins     []u32 // actors which have admin rights on this context (means they can do everything)
    readers    []u32 // actors which can read the context info
    executors  []u32 // actors which can execute jobs in this context
    created_at u32 // epoch
    updated_at u32 // epoch
}

pub fn (self Context) redis_key() string {
    return 'context:${self.id}'
}
41 bin/coordinator/specs/model/flow.v Normal file
@@ -0,0 +1,41 @@
module model

// what gets executed by an actor and needs to be tracked as a whole; can be represented as a DAG graph
// this is the high-level representation of a workflow to execute work; it is fully decentralized and distributed
// only the actor who created the flow can modify it, and holds it in DB
// stored in the context db at flow:<id> (flow is hset)
@[heap]
pub struct Flow {
pub mut:
    id         u32 // this flow id is given by the actor who called for it
    caller_id  u32 // the actor which called for this flow
    context_id u32 // each flow is executed in a context
    jobs       []u32 // links to all jobs which make up this flow; this can be dynamically modified
    env_vars   map[string]string // these are copied to every job
    result     map[string]string // the result of the flow
    created_at u32 // epoch
    updated_at u32 // epoch
    status     FlowStatus
}

pub fn (self Flow) redis_key() string {
    return 'flow:${self.id}'
}

// FlowStatus represents the status of a flow
pub enum FlowStatus {
    dispatched
    started
    error
    finished
}

// str returns the string representation of FlowStatus
pub fn (self FlowStatus) str() string {
    return match self {
        .dispatched { 'dispatched' }
        .started { 'started' }
        .error { 'error' }
        .finished { 'finished' }
    }
}
68 bin/coordinator/specs/model/message.v Normal file
@@ -0,0 +1,68 @@
module model

// Messages are what goes over mycelium (which is our messaging system); they can have a job inside
// stored in the context db at msg:<callerid>:<id> (msg is hset)
// there are 2 generic queues in the context db, msg_in and msg_out: all messages received from mycelium land in msg_in, and the ones which need to be sent are in msg_out
@[heap]
pub struct Message {
pub mut:
    id                  u32 // unique id for the message, given by the caller
    caller_id           u32 // the actor who sent this message
    context_id          u32 // each message is for a specific context
    message             string
    message_type        ScriptType
    message_format_type MessageFormatType
    timeout             u32 // in sec, to arrive at destination
    timeout_ack         u32 // in sec, to acknowledge receipt
    timeout_result      u32 // in sec, to process the result and have it back
    job                 []Job
    logs                []Log // e.g. for streaming logs back to the originator
    created_at          u32 // epoch
    updated_at          u32 // epoch
    status              MessageStatus
}

// MessageType represents the type of message
pub enum MessageType {
    job
    chat
    mail
}

// MessageFormatType represents the format of a message
pub enum MessageFormatType {
    html
    text
    md
}

pub fn (self Message) redis_key() string {
    return 'message:${self.caller_id}:${self.id}'
}

// queue_suffix returns the queue suffix for the message type
pub fn (mt MessageType) queue_suffix() string {
    return match mt {
        .job { 'job' }
        .chat { 'chat' }
        .mail { 'mail' }
    }
}

// MessageStatus represents the status of a message
pub enum MessageStatus {
    dispatched
    acknowledged
    error
    processed // e.g. can be something which comes back
}

// str returns the string representation of MessageStatus
pub fn (ms MessageStatus) str() string {
    return match ms {
        .dispatched { 'dispatched' }
        .acknowledged { 'acknowledged' }
        .error { 'error' }
        .processed { 'processed' }
    }
}
27 bin/coordinator/specs/model/runner.v Normal file
@@ -0,0 +1,27 @@
module model

// a runner executes a job; this can be in a VM, in a container or just some process running somewhere
// the messages always come in over a topic
// stored in the context db at runner:<id> (runner is hset)
@[heap]
pub struct Runner {
pub mut:
    id         u32
    pubkey     string // from mycelium
    address    string // mycelium address
    topic      string // needs to be set by the runner, but often runner<runnerid>, e.g. runner20
    local      bool   // if local, jobs are consumed over redis using the id
    created_at u32 // epoch
    updated_at u32 // epoch
}

pub enum RunnerType {
    v
    python
    osis
    rust
}

pub fn (self Runner) redis_key() string {
    return 'runner:${self.id}'
}
64 bin/coordinator/specs/model/runnerjob.v Normal file
@@ -0,0 +1,64 @@
module model

// Job represents a job; a job is only usable in the context of a runner (which is part of a hero)
// stored in the context db at job:<callerid>:<id> (job is hset)
@[heap]
pub struct RunnerJob {
pub mut:
    id            u32 // this job id is given by the actor who called for it
    caller_id     u32 // is the actor which called for this job
    context_id    u32 // each job is executed in a context
    script        string
    script_type   ScriptType
    timeout       u32 // in sec
    retries       u8
    env_vars      map[string]string
    result        map[string]string
    prerequisites []string
    dependends    []u32
    created_at    u32 // epoch
    updated_at    u32 // epoch
    status        JobStatus
}

// ScriptType represents the type of script
pub enum ScriptType {
    osis
    sal
    v
    python
}

pub fn (self RunnerJob) redis_key() string {
    return 'job:${self.caller_id}:${self.id}'
}

// queue_suffix returns the queue suffix for the script type
pub fn (st ScriptType) queue_suffix() string {
    return match st {
        .osis { 'osis' }
        .sal { 'sal' }
        .v { 'v' }
        .python { 'python' }
    }
}

// JobStatus represents the status of a job
pub enum JobStatus {
    dispatched
    waiting_for_prerequisites
    started
    error
    finished
}

// str returns the string representation of JobStatus
pub fn (js JobStatus) str() string {
    return match js {
        .dispatched { 'dispatched' }
        .waiting_for_prerequisites { 'waiting_for_prerequisites' }
        .started { 'started' }
        .error { 'error' }
        .finished { 'finished' }
    }
}
314 bin/coordinator/specs/models.md Normal file
@@ -0,0 +1,314 @@
# Models Specification
*Freeflow Universe – mycojobs*

This document gathers **all data-models** that exist in the `lib/mycojobs/model/` package, together with a concise purpose description, field semantics, Redis storage layout and the role each model plays in the overall *decentralised workflow* architecture.

## Table of Contents
1. [Actor](#actor)
2. [Context](#context)
3. [Flow](#flow)
4. [Message](#message)
5. [Runner](#runner)
6. [RunnerJob](#runnerjob)
7. [Enums & Shared Types](#enums-shared-types)
8. [Key-generation helpers](#key-generation-helpers)

---

## <a name="actor"></a>1️⃣ `Actor` – Identity & entry-point

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Sequential identifier **unique per tenant**. Used as part of the Redis key `actor:<id>`. |
| `pubkey` | `string` | Public key (Mycelium-compatible) that authenticates the actor when it sends/receives messages. |
| `address` | `[]Address` | One or more reachable addresses (normally Mycelium topics) that other participants can use to contact the actor. |
| `created_at` | `u32` | Unix-epoch time when the record was created. |
| `updated_at` | `u32` | Unix-epoch time of the last mutation. |

### Purpose
* An **Actor** is the *human-or-service* that **requests work**, receives results and can be an administrator of a **Context**.
* It is the *security principal* – every operation in a context is authorised against the actor's ID and its public key signature.

### Redis representation

| Key | Example | Storage type | Fields |
|-----|---------|--------------|--------|
| `actor:${id}` | `actor:12` | **hash** (`HSET`) | `id`, `pubkey`, `address` (list), `created_at`, `updated_at` |
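
As a concrete illustration, here is a minimal Rust sketch of this layout using the `redis` crate (which the coordinator already depends on); the `save_actor` helper and its exact field handling are hypothetical, not taken from the code base:

```rust
use redis::AsyncCommands;

/// Hypothetical helper: persist an Actor as a Redis hash at `actor:<id>`.
async fn save_actor(
    conn: &mut redis::aio::MultiplexedConnection,
    id: u32,
    pubkey: &str,
    now_epoch: u32,
) -> redis::RedisResult<()> {
    let key = format!("actor:{id}"); // canonical key, see redis_key()
    conn.hset_multiple(
        &key,
        &[
            ("id", id.to_string()),
            ("pubkey", pubkey.to_string()),
            ("created_at", now_epoch.to_string()),
            ("updated_at", now_epoch.to_string()),
        ],
    )
    .await
}
```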

---

## <a name="context"></a>2️⃣ `Context` – Tenant & permission container

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Identifier that also selects the underlying **Redis DB** for this tenant. |
| `admins` | `[]u32` | Actor IDs that have **full control** (create/delete any object, manage permissions). |
| `readers` | `[]u32` | Actor IDs that may **read** any object in the context but cannot modify. |
| `executors` | `[]u32` | Actor IDs allowed to **run** `RunnerJob`s and update their status. |
| `created_at` | `u32` | Unix-epoch of creation. |
| `updated_at` | `u32` | Unix-epoch of last modification. |

### Purpose
* A **Context** isolates a *tenant* – each tenant gets its own Redis database and a dedicated filesystem area (for logs, temporary files, …).
* It stores **permission lists** that the system consults before any operation (e.g., creating a `Flow`, enqueuing a `RunnerJob`).

### Redis representation

| Key | Example | Storage type | Fields |
|-----|---------|--------------|--------|
| `context:${id}` | `context:7` | **hash** | `id`, `admins`, `readers`, `executors`, `created_at`, `updated_at` |

---

## <a name="flow"></a>3️⃣ `Flow` – High-level workflow (DAG)

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Flow identifier – *unique inside the creator's actor space*. |
| `caller_id` | `u32` | Actor that **created** the flow (owner). |
| `context_id` | `u32` | Context in which the flow lives. |
| `jobs` | `[]u32` | List of **RunnerJob** IDs that belong to this flow (the DAG edges are stored in each job's `dependends`). |
| `env_vars` | `map[string]string` | Global environment variables injected into **every** job of the flow. |
| `result` | `map[string]string` | Aggregated output produced by the flow (filled by the orchestrator when the flow finishes). |
| `created_at` | `u32` | Creation timestamp. |
| `updated_at` | `u32` | Last update timestamp. |
| `status` | `FlowStatus` | Current lifecycle stage (`dispatched`, `started`, `error`, `finished`). |

### Purpose
* A **Flow** is the *public-facing* representation of a **workflow**.
* It groups many `RunnerJob`s, supplies common env-vars, tracks overall status and collects the final result.
* Only the *creator* (the `caller_id`) may mutate the flow definition.

### Redis representation

| Key | Example | Storage type | Fields |
|-----|---------|--------------|--------|
| `flow:${id}` | `flow:33` | **hash** | `id`, `caller_id`, `context_id`, `jobs`, `env_vars`, `result`, `created_at`, `updated_at`, `status` |

### `FlowStatus` enum

| Value | Meaning |
|-------|---------|
| `dispatched` | Flow has been stored but not yet started. |
| `started` | At least one job is running. |
| `error` | One or more jobs failed; flow aborted. |
| `finished` | All jobs succeeded, `result` is final. |

---

## <a name="message"></a>4️⃣ `Message` – Transport unit (Mycelium)

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Unique message id, assigned by the caller. |
| `caller_id` | `u32` | Actor that sent the message. |
| `context_id` | `u32` | Context the message belongs to. |
| `message` | `string` | The payload body. |
| `message_type` | `ScriptType` | *Kind* of the message – currently re-used for job payloads (`osis`, `sal`, `v`, `python`). |
| `message_format_type` | `MessageFormatType` | Formatting of `message` (`html`, `text`, `md`). |
| `timeout` | `u32` | Seconds before the message is considered *lost* if not delivered. |
| `timeout_ack` | `u32` | Seconds allowed for the receiver to acknowledge. |
| `timeout_result` | `u32` | Seconds allowed for the receiver to send back a result. |
| `job` | `[]Job` | Embedded **RunnerJob** objects (normally a single job). |
| `logs` | `[]Log` | Optional streaming logs attached to the message. |
| `created_at` | `u32` | Timestamp of creation. |
| `updated_at` | `u32` | Timestamp of latest update. |
| `status` | `MessageStatus` | Current lifecycle (`dispatched`, `acknowledged`, `error`, `processed`). |

### Purpose
* `Message` is the **payload carrier** that travels over **Mycelium** (the pub/sub system).
* It can be a **job request**, a **chat line**, an **email**, or any generic data that needs to be routed between actors, runners, or services.
* Every message is persisted as a Redis hash; the system also maintains two *generic* queues:

  * `msg_out` – outbound messages waiting to be handed to Mycelium.
  * `msg_in` – inbound messages that have already arrived and are awaiting local processing.

### Redis representation

| Key | Example | Storage type | Fields |
|-----|---------|--------------|--------|
| `message:${caller_id}:${id}` | `message:12:101` | **hash** | All fields above (`id`, `caller_id`, `context_id`, …, `status`). |

### `MessageType` enum (legacy – not used in current code but documented)

| Value | Meaning |
|-------|---------|
| `job` | Payload carries a `RunnerJob`. |
| `chat` | Human-to-human communication. |
| `mail` | Email-like message. |

### `MessageFormatType` enum

| Value | Meaning |
|-------|---------|
| `html` | HTML formatted body. |
| `text` | Plain-text. |
| `md` | Markdown. |

### `MessageStatus` enum

| Value | Meaning |
|-------|---------|
| `dispatched` | Stored, not yet processed. |
| `acknowledged` | Receiver has confirmed receipt. |
| `error` | Delivery or processing failed. |
| `processed` | Message handled (e.g., job result returned). |

---

## <a name="runner"></a>5️⃣ `Runner` – Worker that executes jobs

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Unique runner identifier. |
| `pubkey` | `string` | Public key of the runner (used by Mycelium for auth). |
| `address` | `string` | Mycelium address (e.g., `mycelium://…`). |
| `topic` | `string` | Pub/Sub topic the runner subscribes to; defaults to `runner${id}`. |
| `local` | `bool` | If `true`, the runner also consumes jobs directly from **Redis queues** (e.g., `queue:v`). |
| `created_at` | `u32` | Creation timestamp. |
| `updated_at` | `u32` | Last modification timestamp. |

### Purpose
* A **Runner** is the *execution engine* – it could be a VM, a container, or a process that knows how to run a specific script type (`v`, `python`, `osis`, `rust`).
* It **subscribes** to a Mycelium topic to receive job-related messages, and, when `local==true`, it also **polls** a Redis list named after the script-type (`queue:<suffix>`).

### Redis representation

| Key | Example | Storage type |
|-----|---------|--------------|
| `runner:${id}` | `runner:20` | **hash** *(all fields above)* |

### `RunnerType` enum

| Value | Intended runtime |
|-------|------------------|
| `v` | V language VM |
| `python` | CPython / PyPy |
| `osis` | OSIS-specific runtime |
| `rust` | Native Rust binary |

---

## <a name="runnerjob"></a>6️⃣ `RunnerJob` – Executable unit

| Field | Type | Description |
|------|------|-------------|
| `id` | `u32` | Job identifier **provided by the caller**. |
| `caller_id` | `u32` | Actor that created the job. |
| `context_id` | `u32` | Context in which the job will run. |
| `script` | `string` | Source code / command to be executed. |
| `script_type` | `ScriptType` | Language or runtime of the script (`osis`, `sal`, `v`, `python`). |
| `timeout` | `u32` | Maximum execution time (seconds). |
| `retries` | `u8` | Number of automatic retries on failure. |
| `env_vars` | `map[string]string` | Job-specific environment variables (merged with `Flow.env_vars`). |
| `result` | `map[string]string` | Key-value map that the job writes back upon completion. |
| `prerequisites` | `[]string` | Human-readable IDs of **external** prerequisites (e.g., files, other services). |
| `dependends` | `[]u32` | IDs of **other RunnerJob** objects that must finish before this job can start. |
| `created_at` | `u32` | Creation timestamp. |
| `updated_at` | `u32` | Last update timestamp. |
| `status` | `JobStatus` | Lifecycle status (`dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished`). |

### Purpose
* A **RunnerJob** is the *atomic piece of work* that a `Runner` executes.
* It lives inside a **Context**, is queued according to its `script_type`, and moves through a well-defined **state machine**.
* The `dependends` field enables the *DAG* behaviour that the `Flow` model represents at a higher level (see the sketch below).
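
A minimal Rust sketch of that readiness rule (the `is_ready` helper is hypothetical, shown only to make the DAG semantics concrete):

```rust
use std::collections::HashSet;

/// Hypothetical readiness check: a job may start once every id listed in
/// its `dependends` field refers to an already-finished job.
fn is_ready(dependends: &[u32], finished: &HashSet<u32>) -> bool {
    dependends.iter().all(|dep| finished.contains(dep))
}

// Example: job 3 depends on jobs 1 and 2.
// is_ready(&[1, 2], &HashSet::from([1]))    == false
// is_ready(&[1, 2], &HashSet::from([1, 2])) == true
```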

### Redis representation

| Key | Example | Storage type |
|-----|---------|--------------|
| `job:${caller_id}:${id}` | `job:12:2001` | **hash** *(all fields above)* |

### `ScriptType` enum

| Value | Runtime |
|-------|---------|
| `osis` | OSIS interpreter |
| `sal` | SAL DSL (custom) |
| `v` | V language |
| `python` | CPython / PyPy |

*The enum provides a **`queue_suffix()`** helper that maps a script type to the name of the Redis list used for local job dispatch (`queue:python`, `queue:v`, …).*
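
For example, a local runner's consume loop could look roughly like this in Rust (a sketch only; `runner_loop` is hypothetical and error handling is elided):

```rust
use redis::AsyncCommands;

/// Hypothetical local-runner loop: block-pop job keys from `queue:<suffix>`
/// and mark each job as started before executing it.
async fn runner_loop(
    conn: &mut redis::aio::MultiplexedConnection,
    suffix: &str, // e.g. "python", as returned by ScriptType.queue_suffix()
) -> redis::RedisResult<()> {
    let queue = format!("queue:{suffix}");
    loop {
        // BRPOP blocks until a job key is pushed; returns (queue, job_key).
        let (_q, job_key): (String, String) = redis::cmd("BRPOP")
            .arg(&queue)
            .arg(0) // 0 = block forever
            .query_async(&mut *conn)
            .await?;
        // job:<caller_id>:<id> is a hash; flip its status before running.
        let _: () = conn.hset(&job_key, "status", "started").await?;
        // ... execute the script, then write `result` and the final status ...
    }
}
```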

### `JobStatus` enum

| Value | Meaning |
|-------|---------|
| `dispatched` | Stored, waiting to be examined for prerequisites. |
| `waiting_for_prerequisites` | Has `dependends` that are not yet finished. |
| `started` | Currently executing on a runner. |
| `error` | Execution failed (or exceeded retries). |
| `finished` | Successfully completed, `result` populated. |

---

## <a name="enums-shared-types"></a>7️⃣ Other Enums & Shared Types

| Enum | Location | Values | Note |
|------|----------|--------|------|
| `MessageType` | `message.v` | `job`, `chat`, `mail` | Determines how a `Message` is interpreted. |
| `MessageFormatType` | `message.v` | `html`, `text`, `md` | UI-layer rendering hint. |
| `MessageStatus` | `message.v` | `dispatched`, `acknowledged`, `error`, `processed` | Life-cycle of a `Message`. |
| `FlowStatus` | `flow.v` | `dispatched`, `started`, `error`, `finished` | High-level flow progress. |
| `RunnerType` | `runner.v` | `v`, `python`, `osis`, `rust` | Not currently stored; used by the orchestration layer to pick a runner implementation. |
| `ScriptType` | `runnerjob.v` | `osis`, `sal`, `v`, `python` | Determines queue suffix & runtime. |
| `JobStatus` | `runnerjob.v` | `dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished` | Per-job state machine. |

---

## <a name="key-generation-helpers"></a>8️⃣ Key-generation helpers (methods)

| Model | Method | Returns | Example |
|-------|--------|---------|---------|
| `Actor` | `redis_key()` | `"actor:${self.id}"` | `actor:12` |
| `Context` | `redis_key()` | `"context:${self.id}"` | `context:7` |
| `Flow` | `redis_key()` | `"flow:${self.id}"` | `flow:33` |
| `Message` | `redis_key()` | `"message:${self.caller_id}:${self.id}"` | `message:12:101` |
| `Runner` | `redis_key()` | `"runner:${self.id}"` | `runner:20` |
| `RunnerJob` | `redis_key()` | `"job:${self.caller_id}:${self.id}"` | `job:12:2001` |
| `MessageType` | `queue_suffix()` | `"job"` / `"chat"` / `"mail"` | `MessageType.job.queue_suffix() → "job"` |
| `ScriptType` | `queue_suffix()` | `"osis"` / `"sal"` / `"v"` / `"python"` | `ScriptType.python.queue_suffix() → "python"` |

These helpers guarantee **canonical key naming** throughout the code base and simplify Redis interactions.

---

## 📌 Summary Diagram (quick reference)

```mermaid
%%{init: {"theme":"dark"}}%%
graph TD
    %% Actors and what they can create
    A[Actor] -->|creates| Ctx[Context]
    A -->|creates| Fl[Flow]
    A -->|creates| Msg[Message]
    A -->|creates| Rnr[Runner]
    A -->|creates| Job[RunnerJob]

    %% All objects live inside one Redis DB that belongs to a Context
    subgraph "Redis DB (per Context)"
        Ctx
        A
        Fl
        Msg
        Rnr
        Job
    end

    %% Messaging queues (global, outside the Context DB)
    Msg -->|pushes key onto| OutQ[msg_out]
    OutQ -->|transport via Mycelium| InQ[msg_in]
    InQ -->|pulled by| Rnr

    %% Local runner queues (only when runner.local == true)
    Rnr -->|BRPOP from| QueueV["queue:v"]
    Rnr -->|BRPOP from| QueuePy["queue:python"]
    Rnr -->|BRPOP from| QueueOSIS["queue:osis"]
```

## context based

* Inside a Context, an **Actor** can create a **Flow** that references many **RunnerJob** IDs (the DAG).
* To *initiate* execution, the Actor packages a **RunnerJob** (or a full Flow) inside a **Message**, pushes it onto `msg_out`, and the system routes it via **Mycelium** to the target Context.
* The remote **Runner** receives the Message, materialises the **RunnerJob**, queues it on a script-type list, executes it, writes back `result` and status, and optionally sends a *result Message* back to the originator.

All state is persisted as **Redis hashes**, guaranteeing durability and enabling *idempotent* retries. The uniform naming conventions (`actor:<id>`, `job:<caller_id>:<id>`, …) make it trivial to locate any object given its identifiers.
1399 bin/coordinator/specs/openrpc.json Normal file
File diff suppressed because it is too large
263 bin/coordinator/specs/specs.md Normal file
@@ -0,0 +1,263 @@
## Objects Used

| Component | What it **stores** | Where it lives (Redis key) | Main responsibilities |
|-----------|--------------------|----------------------------|-----------------------|
| **Actor** | Public key, reachable addresses, timestamps | `actor:<id>` (hash) | An identity that can request work, receive results and act as an administrator of a *Context*. |
| **Context** | Permission lists (`admins`, `readers`, `executors`), timestamps | `context:<id>` (hash) | An isolated "tenant" – a separate Redis DB and filesystem area. All objects (flows, messages, jobs, runners) belonging to a given workflow are stored under this context. The permission lists control who may read, execute or administer the context. |
| **Flow** | DAG of job IDs, env-vars, result map, status, timestamps | `flow:<id>` (hash) | A high-level workflow created by a single **Actor**. It groups many **RunnerJob** objects, records their execution order, supplies common environment variables and aggregates the final result. |
| **Message** | Payload, type (`job\|chat\|mail`), format (`html\|text\|md`), time-outs, embedded **Job** objects, log stream, status, timestamps | `message:<caller_id>:<id>` (hash) | The transport unit that travels over **Mycelium** (the pub/sub message bus). A message can contain a **RunnerJob** (or a list of jobs) and is queued in two generic Redis lists: `msg_out` (to be sent) and `msg_in` (already received). |
| **Runner** | Public key, Mycelium address, topic name, type (`v\|python\|osis\|rust`), local flag, timestamps | `runner:<id>` (hash) | The *worker* that actually executes **RunnerJob** scripts. It subscribes to a Mycelium topic (normally `runner<id>`). If `local == true` the runner also consumes jobs directly from a Redis queue that is named after the script-type suffix (`v`, `python`, …). |
| **RunnerJob** | Script source, type (`osis\|sal\|v\|python`), env-vars, prerequisites, dependencies, status, timestamps, result map | `job:<caller_id>:<id>` (hash) | A single executable unit. It lives inside a **Context**, belongs to a **Runner**, and is queued according to its `script_type` (e.g. `queue:python`). Its status moves through the lifecycle `dispatched → waiting_for_prerequisites → started → finished\|error`. |

> **Key idea:** All objects are persisted as *hashes*. Context-scoped objects (**Context**, **Flow**, **Message**, **Runner**, **RunnerJob**) live in a **Redis** database dedicated to that context. **Actors are global** and are stored in Redis DB 0 under `actor:<id>`. The system is completely **decentralised** – each actor owns its own context and can spin up as many runners as needed. Communication between actors, runners and the rest of the system happens over **Mycelium**, a message bus that uses Redis lists as queues.

---

## Interaction diagram (who talks to whom)

### 2.1 Sequence diagram – "Submit a flow and run it"

```mermaid
%%{init: {"theme":"dark"}}%%
sequenceDiagram
    participant A as Actor
    participant L as Local-Context (Redis)
    participant M as Mycelium (msg_out / msg_in)
    participant R as Remote-Context (Redis)
    participant W as Runner (worker)

    %% 1. Actor creates everything locally
    A->>L: create Flow + RunnerJob (J)
    A->>L: LPUSH msg_out Message{type=job, payload=J, target=Remote}

    %% 2. Mycelium transports the message
    M->>R: LPUSH msg_in (Message key)

    %% 3. Remote context materialises the job
    R->>R: HSET Message hash
    R->>R: HSET RunnerJob (J') // copy of payload
    R->>R: LPUSH queue:v (job key)

    %% 4. Runner consumes and executes
    W->>R: BRPOP queue:v (job key)
    W->>R: HSET job status = started
    W->>W: execute script
    W->>R: HSET job result + status = finished

    %% 5. Result is sent back
    W->>M: LPUSH msg_out Message{type=result, payload=result, target=Local}
    M->>L: LPUSH msg_in (result Message key)

    %% 6. Actor receives the result
    A->>L: RPOP msg_in → read result
```

### 2.2 Component diagram – "Static view of objects & links"

```mermaid
%%{init: {"theme":"dark"}}%%
graph LR
    subgraph Redis["Redis (per Context)"]
        A[Actor] -->|stores| Ctx[Context]
        Ctx -->|stores| Fl[Flow]
        Ctx -->|stores| Msg[Message]
        Ctx -->|stores| Rnr[Runner]
        Ctx -->|stores| Job[RunnerJob]
    end

    subgraph Mycelium["Mycelium (Pub/Sub)"]
        MsgOut["queue:msg_out"] -->|outgoing| Mcel[Mycelium Bus]
        Mcel -->|incoming| MsgIn["queue:msg_in"]
        RnrTopic["topic:runnerX"] -->|subscribed by| Rnr
        queueV["queue:v"] -->|local jobs| Rnr
        queuePython["queue:python"] -->|local jobs| Rnr
    end

    A -->|creates / reads| Fl
    A -->|creates / reads| Msg
    A -->|creates / reads| Rnr
    A -->|creates / reads| Job
    Fl -->|references| Job
    Msg -->|may embed| Job
    Rnr -->|executes| Job
    Job -->|updates| Fl
    Msg -->|carries result back to| A
```

### 2.3 Job-status life-cycle (state diagram)

```mermaid
%%{init: {"theme":"dark"}}%%
stateDiagram-v2
    [*] --> dispatched
    dispatched --> waiting_for_prerequisites : has prereqs
    waiting_for_prerequisites --> started : prereqs met
    dispatched --> started : no prereqs
    started --> finished : success
    started --> error : failure
    waiting_for_prerequisites --> error : timeout / impossible
    error --> [*]
    finished --> [*]
```
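
The legal transitions above can be captured in a few lines of Rust; this is a hypothetical sketch mirroring the diagram, not code from the repository:

```rust
/// Job statuses as defined in runnerjob.v.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum JobStatus {
    Dispatched,
    WaitingForPrerequisites,
    Started,
    Error,
    Finished,
}

/// Returns true when the state diagram above allows `from -> to`.
fn can_transition(from: JobStatus, to: JobStatus) -> bool {
    use JobStatus::*;
    matches!(
        (from, to),
        (Dispatched, WaitingForPrerequisites)
            | (Dispatched, Started)
            | (WaitingForPrerequisites, Started)
            | (WaitingForPrerequisites, Error)
            | (Started, Finished)
            | (Started, Error)
    )
}
```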

---

## 3️⃣ Redis objects – concrete key & data layout

All objects are stored as **hashes** (`HSET`). Below is a concise catalog that can be copied into a design doc.

| Key pattern | Example | Fields (type) | Comments |
|-------------|---------|---------------|----------|
| `actor:${id}` | `actor:12` | `id` u32, `pubkey` str, `address` list\<Address\>, `created_at` u32, `updated_at` u32 | One hash per actor. |
| `context:${id}` | `context:7` | `id` u32, `admins` list\<u32\>, `readers` list\<u32\>, `executors` list\<u32\>, `created_at` u32, `updated_at` u32 | Holds permission lists for a tenant. |
| `flow:${id}` | `flow:33` | `id` u32, `caller_id` u32, `context_id` u32, `jobs` list\<u32\>, `env_vars` map\<str,str\>, `result` map\<str,str\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|started\|error\|finished`) | |
| `message:${caller_id}:${id}` | `message:12:101` | `id` u32, `caller_id` u32, `context_id` u32, `message` str, `message_type` str (`job\|chat\|mail`), `message_format_type` str (`html\|text\|md`), `timeout` u32, `timeout_ack` u32, `timeout_result` u32, `job` list\<RunnerJob\> (serialized), `logs` list\<Log\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|acknowledged\|error\|processed`) | |
| `runner:${id}` | `runner:20` | `id` u32, `pubkey` str, `address` str, `topic` str, `local` bool, `created_at` u32, `updated_at` u32 | |
| `job:${caller_id}:${id}` | `job:12:2001` | `id` u32, `caller_id` u32, `context_id` u32, `script` str, `script_type` str (`osis\|sal\|v\|python`), `timeout` u32, `retries` u8, `env_vars` map\<str,str\>, `result` map\<str,str\>, `prerequisites` list\<str\>, `dependends` list\<u32\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|waiting_for_prerequisites\|started\|error\|finished`) | |

#### Queue objects (lists)

| Queue name | Purpose |
|------------|---------|
| `msg_out` | **Outbound** generic queue – every `Message` that an actor wants to send is pushed here. |
| `msg_in` | **Inbound** generic queue – every message received from Mycelium is placed here for the local consumer to process. |
| `queue:${suffix}` (e.g. `queue:v`, `queue:python`) | Local job queues used by a **Runner** when `local == true`. The suffix comes from `ScriptType.queue_suffix()`. |
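
The publish side of these queues is simple; here is a hypothetical Rust sketch using the `redis` crate (the `publish_message` helper is illustrative, not part of the code base):

```rust
use redis::AsyncCommands;

/// Hypothetical publish step: store the message hash, then enqueue
/// its key on the generic outbound queue `msg_out`.
async fn publish_message(
    conn: &mut redis::aio::MultiplexedConnection,
    caller_id: u32,
    msg_id: u32,
    payload: &str,
) -> redis::RedisResult<()> {
    let key = format!("message:{caller_id}:{msg_id}");
    let _: () = conn.hset(&key, "message", payload).await?;
    let _: () = conn.lpush("msg_out", &key).await?;
    Ok(())
}
```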

---

## 4️⃣ System specification (as a concise "specs" section)

### 4.1 Naming conventions
* All Redis **hashes** are prefixed with the object name (`actor:`, `context:`, …).
* All **queues** are simple Redis lists (`LPUSH` / `RPOP`).
* **Message** keys embed both the *caller* and a locally unique *message id* – this guarantees global uniqueness across contexts.

### 4.2 Permissions & security
* Only IDs present in `Context.admins` may **create** or **delete** any object inside that context.
* `Context.readers` can **GET** any hash but not modify it.
* `Context.executors` are allowed to **update** `RunnerJob.status`, `result` and to **pop** from local job queues (see the sketch below).
* Every `Actor` must present a `pubkey` that can be verified by the receiving side (Mycelium uses asymmetric crypto).
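
A hypothetical Rust sketch of that membership check (types and names are illustrative, not from the repository):

```rust
/// Permission lists as stored on a Context (section 4.2).
struct ContextPerms {
    admins: Vec<u32>,
    readers: Vec<u32>,
    executors: Vec<u32>,
}

enum Action {
    Read,
    Execute,
    Admin,
}

/// Admins may do everything; readers and executors only get their own scope.
fn is_allowed(ctx: &ContextPerms, actor_id: u32, action: Action) -> bool {
    if ctx.admins.contains(&actor_id) {
        return true;
    }
    match action {
        Action::Read => ctx.readers.contains(&actor_id),
        Action::Execute => ctx.executors.contains(&actor_id),
        Action::Admin => false,
    }
}
```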

### 4.3 Message flow (publish / consume)

The flow below is described in terms of the real runtime components:

* **Supervisor daemon** – runs on the node that owns the **Flow** (the *actor's* side).
  It is the only process that ever **RPOP**s from the global `msg_out` queue, adds the proper routing information and hands the message to **Mycelium**.

* **Mycelium** – the pure pub/sub message bus. It never touches Redis directly; it only receives a *payload key* from the coordinator and delivers that key to the remote tenant's `msg_in` list.

* **Remote-side runner / service** – consumes from its own `msg_in`, materialises the job and executes it.

The table uses the exact component names and adds a short note about the permission check that the coordinator performs before it releases a message.

| # | Action (what the system does) | Component that performs it | Redis interaction (exact commands) |
|---|-------------------------------|----------------------------|------------------------------------|
| **1️⃣ Publish** | Actor creates a `Message` hash and **LPUSH**es its key onto the *outbound* queue. | **Actor** (client code) | `HSET message:12:101 …` <br/> `LPUSH msg_out message:12:101` |
| **2️⃣ Coordinate & route** | The **Supervisor daemon** (running at the source) **RPOP**s the key, checks the actor's permissions, adds the *target-context* and *topic* fields, then forwards the key to Mycelium. | **Supervisor daemon** (per-actor) | `RPOP msg_out` → (in-process) → `LPUSH msg_out_coordinator <key>` (internal buffer) |
| **3️⃣ Transport** | Mycelium receives the key, looks at `Message.message_type` (or the explicit `topic`) and pushes the key onto the *inbound* queue of the **remote** tenant. | **Mycelium bus** (network layer) | `LPUSH msg_in:<remote-ctx> <key>` |
| **4️⃣ Consume** | The **remote side** (runner or service) **RPOP**s from its `msg_in`, loads the full hash, verifies the actor's signature and decides what to do based on `message_type`. | **Remote consumer** (runner / service) | `RPOP msg_in:<remote-ctx>` → `HGETALL message:<key>` |
| **5️⃣ Job materialisation** | If `message_type == "job"` the consumer creates a **RunnerJob** entry inside the **remote** context and adds the job **key** to the proper *script-type* queue (`queue:v`, `queue:python`, …). | **Remote consumer** | `HSET job:<caller_id>:<job_id> …` <br/> `LPUSH queue:<script_type> job:<caller_id>:<job_id>` |
| **6️⃣ Runner execution loop** | A **Runner** attached to that remote context **BRPOP**s from its script-type queue, sets `status = started`, runs the script, writes `result` and the final `status`. | **Runner** | `BRPOP queue:<script_type>` → `HSET job:<…> status started` → … → `HSET job:<…> result … status finished` |
| **7️⃣ Result notification** | The runner builds a new `Message` (type `chat`, `result`, …) and pushes it onto **msg_out** again. The **Supervisor daemon** on the remote side picks it up and routes it back to the original actor. | **Runner** → **Supervisor (remote side)** → **Mycelium** → **Supervisor (origin side)** → **Actor** | `HSET message:<res_key> …` <br/> `LPUSH msg_out message:<res_key>` (steps 2–3 repeat in the reverse direction) |

---

## Tiny end-to-end sequence (still simple enough to render)

```mermaid
%%{init: {"theme":"dark"}}%%
sequenceDiagram
    participant A as Actor
    participant L as Local-Redis (Flow ctx)
    participant C as Supervisor daemon (local)
    participant M as Mycelium bus
    participant R as Remote-Redis (target ctx)
    participant W as Runner (remote)

    %% 1️⃣ publish
    A->>L: HSET message:12:101 …
    A->>L: LPUSH msg_out message:12:101

    %% 2️⃣ coordinate
    C->>L: RPOP msg_out
    C->>C: check permissions / add routing info
    C->>M: push key to Mycelium (msg_out_coordinator)

    %% 3️⃣ transport
    M->>R: LPUSH msg_in message:12:101

    %% 4️⃣ consume
    R->>W: RPOP msg_in
    R->>R: HGETALL message:12:101
    R->>R: verify signature
    alt message_type == job
        R->>R: HSET job:12:2001 …
        R->>R: LPUSH queue:v job:12:2001
    end

    %% 5️⃣ runner loop
    W->>R: BRPOP queue:v (job:12:2001)
    W->>R: HSET job:12:2001 status started
    W->>W: execute script
    W->>R: HSET job:12:2001 result … status finished

    %% 6️⃣ result back
    W->>R: HSET message:12:900 result …
    W->>R: LPUSH msg_out message:12:900
    C->>M: (coordinator on remote side) routes back
    M->>L: LPUSH msg_in message:12:900
    A->>L: RPOP msg_in → read result
```

## 5️⃣ What the **system** is trying to achieve

| Goal | How it is realized |
|------|--------------------|
| **Decentralised execution** | Every *actor* owns a **Context**; any number of **Runners** can be attached to that context, possibly on different machines, and they all talk over the same Mycelium/Redis backend. |
| **Fine-grained permissions** | `Context.admins/readers/executors` enforce who can create, view or run jobs. |
| **Loose coupling via messages** | All actions (job submission, result propagation, chat, mail, …) use the generic `Message` object; the same transport pipeline handles all of them. |
| **Workflow orchestration** | The **Flow** object models a DAG of jobs, tracks collective status and aggregates results, without needing a central scheduler. |
| **Pluggable runtimes** | `ScriptType` and `RunnerType` let a runner choose the proper execution environment (V, Python, OSIS, Rust, …) – adding a new language only means adding a new `ScriptType` and a corresponding worker. |
| **Observability** | `Log` arrays attached to a `Message` and the timestamps on every hash give a complete audit trail. |
| **Resilience** | Jobs are idempotent hash entries; queues are persisted in Redis, and status changes are atomic (`HSET`). Retries and time-outs guarantee eventual consistency. |

---

## 6️⃣ Diagram summary (quick visual cheat-sheet)

```mermaid
%%{init: {"theme":"dark"}}%%
graph TD
    A[Actor] -->|creates| Ctx[Context]
    A -->|creates| Flow
    A -->|creates| Msg
    A -->|creates| Rnr[Runner]
    A -->|creates| Job[RunnerJob]

    subgraph Redis["Redis (per Context)"]
        Ctx --> A
        Ctx --> Flow
        Ctx --> Msg
        Ctx --> Rnr
        Ctx --> Job
    end

    Msg -->|push to| OutQ[msg_out]
    OutQ --> Myc[Mycelium Bus]
    Myc -->|deliver| InQ[msg_in]
    InQ --> Rnr
    Rnr -->|pop from| Qv["queue:v"]
    Rnr -->|pop from| Qpy["queue:python"]

    Rnr -->|updates| Job
    Job -->|updates| Flow
    Flow -->|result Message| Msg
```
9 bin/coordinator/src/clients/mod.rs Normal file
@@ -0,0 +1,9 @@
pub mod mycelium_client;
pub mod supervisor_client;
pub mod supervisor_hub;
pub mod types;

pub use mycelium_client::{MyceliumClient, MyceliumClientError};
pub use supervisor_client::{SupervisorClient, SupervisorClientError};
pub use supervisor_hub::SupervisorHub;
pub use types::Destination;
319 bin/coordinator/src/clients/mycelium_client.rs Normal file
@@ -0,0 +1,319 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
use reqwest::Client as HttpClient;
|
||||
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
|
||||
use serde_json::{Value, json};
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::clients::Destination;
|
||||
use crate::models::TransportStatus;
|
||||
|
||||
/// Lightweight client for Mycelium JSON-RPC (send + query status)
|
||||
#[derive(Clone)]
|
||||
pub struct MyceliumClient {
|
||||
base_url: String, // e.g. http://127.0.0.1:8990
|
||||
http: HttpClient,
|
||||
id_counter: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum MyceliumClientError {
|
||||
#[error("HTTP error: {0}")]
|
||||
Http(#[from] reqwest::Error),
|
||||
#[error("JSON error: {0}")]
|
||||
Json(#[from] serde_json::Error),
|
||||
#[error("Transport timed out waiting for a reply (408)")]
|
||||
TransportTimeout,
|
||||
#[error("JSON-RPC error: {0}")]
|
||||
RpcError(String),
|
||||
#[error("Invalid response: {0}")]
|
||||
InvalidResponse(String),
|
||||
}
|
||||
|
||||
impl MyceliumClient {
|
||||
pub fn new(base_url: impl Into<String>) -> Result<Self, MyceliumClientError> {
|
||||
let url = base_url.into();
|
||||
let http = HttpClient::builder().build()?;
|
||||
Ok(Self {
|
||||
base_url: url,
|
||||
http,
|
||||
id_counter: Arc::new(AtomicU64::new(1)),
|
||||
})
|
||||
}
|
||||
|
||||
fn next_id(&self) -> u64 {
|
||||
self.id_counter.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
async fn jsonrpc(&self, method: &str, params: Value) -> Result<Value, MyceliumClientError> {
|
||||
let req = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"id": self.next_id(),
|
||||
"method": method,
|
||||
"params": [ params ]
|
||||
});

        tracing::info!(%req, "jsonrpc");
        let resp = self.http.post(&self.base_url).json(&req).send().await?;
        let status = resp.status();
        let body: Value = resp.json().await?;
        if let Some(err) = body.get("error") {
            let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0);
            let msg = err
                .get("message")
                .and_then(|v| v.as_str())
                .unwrap_or("unknown error");
            if code == 408 {
                return Err(MyceliumClientError::TransportTimeout);
            }
            return Err(MyceliumClientError::RpcError(format!(
                "code={code} msg={msg}"
            )));
        }
        if !status.is_success() {
            return Err(MyceliumClientError::RpcError(format!(
                "HTTP {status}, body {body}"
            )));
        }
        Ok(body)
    }

    /// Call messageStatus with an outbound message id (hex string)
    pub async fn message_status(
        &self,
        id_hex: &str,
    ) -> Result<TransportStatus, MyceliumClientError> {
        let params = json!(id_hex);
        let body = self.jsonrpc("getMessageInfo", params).await?;
        let result = body.get("result").ok_or_else(|| {
            MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
        })?;
        // Accept both { state: "..." } and bare "..."
        let status_str = if let Some(s) = result.get("state").and_then(|v| v.as_str()) {
            s.to_string()
        } else if let Some(s) = result.as_str() {
            s.to_string()
        } else {
            return Err(MyceliumClientError::InvalidResponse(format!(
                "unexpected result shape: {result}"
            )));
        };
        // Propagate unknown statuses as errors instead of unwrapping (the unwrap
        // in the log line would otherwise panic on an unmapped status).
        let status = Self::map_status(&status_str).ok_or_else(|| {
            MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}"))
        })?;
        tracing::info!(%id_hex, status = %status, "queried message status");
        Ok(status)
    }

    fn map_status(s: &str) -> Option<TransportStatus> {
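        // Map Mycelium's reported message state onto our TransportStatus.
        // Unknown states yield None; note that no state maps to TransportStatus::Sent.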
        match s {
            "pending" => Some(TransportStatus::Queued),
            "received" => Some(TransportStatus::Delivered),
            "read" => Some(TransportStatus::Read),
            "aborted" => Some(TransportStatus::Failed),
            _ => None,
        }
    }

    /// Build params object for pushMessage without performing any network call.
    /// Exposed for serializer-only tests and reuse.
    pub(crate) fn build_push_params(
        dst: &Destination,
        topic: &str,
        payload_b64: &str,
        reply_timeout: Option<u64>,
    ) -> Value {
        let dst_v = match dst {
            Destination::Ip(ip) => json!({ "ip": ip.to_string() }),
            Destination::Pk(pk) => json!({ "pk": pk }),
        };
        // Nest the message fields under "message" with reply_timeout as a sibling;
        // this is the shape asserted by the serializer tests below (the bare message
        // object with reply_timeout inside it would fail those assertions).
        let mut params = json!({
            "message": {
                "dst": dst_v,
                "topic": topic,
                "payload": payload_b64,
            }
        });
        if let Some(rt) = reply_timeout {
            params["reply_timeout"] = json!(rt);
        }
        params
    }

    /// pushMessage: send a message with dst/topic/payload. Optional reply_timeout for sync replies.
    pub async fn push_message(
        &self,
        dst: &Destination,
        topic: &str,
        payload_b64: &str,
        reply_timeout: Option<u64>,
    ) -> Result<Value, MyceliumClientError> {
        let params = Self::build_push_params(dst, topic, payload_b64, reply_timeout);
        let body = self.jsonrpc("pushMessage", params).await?;
        let result = body.get("result").ok_or_else(|| {
            MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
        })?;
        Ok(result.clone())
    }

    /// Helper to extract outbound message id from pushMessage result (InboundMessage or PushMessageResponseId)
    pub fn extract_message_id_from_result(result: &Value) -> Option<String> {
        result
            .get("id")
            .and_then(|v| v.as_str())
            .map(|s| s.to_string())
    }

    /// popMessage: retrieve an inbound message if available (optionally filtered by topic).
    /// - peek: if true, do not remove the message from the queue
    /// - timeout_secs: seconds to wait for a message (0 returns immediately)
    /// - topic_plain: optional plain-text topic which will be base64-encoded per the Mycelium spec
    ///
    /// Returns:
    /// - Ok(Some(result_json)) on success, where result_json matches the InboundMessage schema
    /// - Ok(None) when there is no message ready (Mycelium returns error code 204)
    pub async fn pop_message(
        &self,
        peek: Option<bool>,
        timeout_secs: Option<u64>,
        topic_plain: Option<&str>,
    ) -> Result<Option<Value>, MyceliumClientError> {
        // Build params array
        let mut params_array = vec![];
        if let Some(p) = peek {
            params_array.push(serde_json::Value::Bool(p));
        } else {
            params_array.push(serde_json::Value::Null);
        }
        if let Some(t) = timeout_secs {
            params_array.push(serde_json::Value::Number(t.into()));
        } else {
            params_array.push(serde_json::Value::Null);
        }
        if let Some(tp) = topic_plain {
            let topic_b64 = BASE64_STANDARD.encode(tp.as_bytes());
            params_array.push(serde_json::Value::String(topic_b64));
        } else {
            params_array.push(serde_json::Value::Null);
        }
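        // Params are positional: [peek, timeout_secs, topic_b64], with null standing
        // in for any omitted value.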

        let req = json!({
            "jsonrpc": "2.0",
            "id": self.next_id(),
            "method": "popMessage",
            "params": serde_json::Value::Array(params_array),
        });

        tracing::info!(%req, "calling popMessage");

        let resp = self.http.post(&self.base_url).json(&req).send().await?;
        let status = resp.status();
        let body: Value = resp.json().await?;

        // Handle JSON-RPC error envelope specially for code 204 (no message ready)
        if let Some(err) = body.get("error") {
            let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0);
            let msg = err
                .get("message")
                .and_then(|v| v.as_str())
                .unwrap_or("unknown error");

            if code == 204 {
                // No message ready
                return Ok(None);
            }
            if code == 408 {
                // Align with other transport timeout mapping
                return Err(MyceliumClientError::TransportTimeout);
            }
            return Err(MyceliumClientError::RpcError(format!(
                "code={code} msg={msg}"
            )));
        }

        if !status.is_success() {
            return Err(MyceliumClientError::RpcError(format!(
                "HTTP {status}, body {body}"
            )));
        }

        let result = body.get("result").ok_or_else(|| {
            MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
        })?;
        Ok(Some(result.clone()))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::clients::Destination;

    #[test]
    fn build_push_params_shapes_ip_pk_and_timeout() {
        // IP destination
        let p1 = MyceliumClient::build_push_params(
            &Destination::Ip("2001:db8::1".parse().unwrap()),
            "supervisor.rpc",
            "Zm9vYmFy", // "foobar"
            Some(10),
        );
        let msg1 = p1.get("message").unwrap();
        assert_eq!(
            msg1.get("topic").unwrap().as_str().unwrap(),
            "supervisor.rpc"
        );
        assert_eq!(msg1.get("payload").unwrap().as_str().unwrap(), "Zm9vYmFy");
        assert_eq!(
            msg1.get("dst")
                .unwrap()
                .get("ip")
                .unwrap()
                .as_str()
                .unwrap(),
            "2001:db8::1"
        );
        assert_eq!(p1.get("reply_timeout").unwrap().as_u64().unwrap(), 10);

        // PK destination without timeout
        let p2 = MyceliumClient::build_push_params(
            &Destination::Pk(
                "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".into(),
            ),
            "supervisor.rpc",
            "YmF6", // "baz"
            None,
        );
        let msg2 = p2.get("message").unwrap();
        assert_eq!(
            msg2.get("dst")
                .unwrap()
                .get("pk")
                .unwrap()
                .as_str()
                .unwrap(),
            "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32"
        );
        assert!(p2.get("reply_timeout").is_none());
    }

    #[test]
    fn extract_message_id_variants() {
        // PushMessageResponseId
        let r1 = json!({"id":"0123456789abcdef"});
        assert_eq!(
            MyceliumClient::extract_message_id_from_result(&r1).unwrap(),
            "0123456789abcdef"
        );

        // InboundMessage-like
        let r2 = json!({
            "id":"fedcba9876543210",
            "srcIp":"449:abcd:0123:defa::1",
            "payload":"hpV+"
        });
        assert_eq!(
            MyceliumClient::extract_message_id_from_result(&r2).unwrap(),
            "fedcba9876543210"
        );
    }
}
588
bin/coordinator/src/clients/supervisor_client.rs
Normal file
@@ -0,0 +1,588 @@
use std::sync::Arc;
use std::time::Duration;

use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json};
use thiserror::Error;
use tokio::time::timeout;

use crate::clients::{Destination, MyceliumClient, MyceliumClientError, SupervisorHub};

#[derive(Clone)]
pub struct SupervisorClient {
    hub: Arc<SupervisorHub>,  // Global hub with background pop loop and shared id generator
    destination: Destination, // ip or pk
    secret: Option<String>,   // optional, required by several supervisor methods
}

#[derive(Debug, Error)]
pub enum SupervisorClientError {
    #[error("HTTP error: {0}")]
    Http(#[from] reqwest::Error),
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),
    #[error("Transport timed out waiting for a reply (408)")]
    TransportTimeout,
    #[error("JSON-RPC error: {0}")]
    RpcError(String),
    #[error("Invalid response: {0}")]
    InvalidResponse(String),
    #[error("Missing secret for method requiring authentication")]
    MissingSecret,
}

impl From<MyceliumClientError> for SupervisorClientError {
    fn from(e: MyceliumClientError) -> Self {
        match e {
            MyceliumClientError::TransportTimeout => SupervisorClientError::TransportTimeout,
            MyceliumClientError::RpcError(m) => SupervisorClientError::RpcError(m),
            MyceliumClientError::InvalidResponse(m) => SupervisorClientError::InvalidResponse(m),
            MyceliumClientError::Http(err) => SupervisorClientError::Http(err),
            MyceliumClientError::Json(err) => SupervisorClientError::Json(err),
        }
    }
}

impl SupervisorClient {
    /// Preferred constructor using a shared SupervisorHub (single global listener).
    pub fn new_with_hub(
        hub: Arc<SupervisorHub>,
        destination: Destination,
        secret: Option<String>,
    ) -> Self {
        Self {
            hub,
            destination,
            secret,
        }
    }

    /// Backward-compatible constructor that builds a new Hub from base_url/topic.
    /// NOTE: This spawns a background popMessage listener for the given topic.
    /// Prefer `new_with_hub` so the process has a single global hub.
    pub fn new(
        base_url: impl Into<String>,
        destination: Destination,
        topic: impl Into<String>,
        secret: Option<String>,
    ) -> Result<Self, SupervisorClientError> {
        let mut url = base_url.into();
        if url.is_empty() {
            url = "http://127.0.0.1:8990".to_string();
        }
        let mycelium = Arc::new(MyceliumClient::new(url)?);
        Ok(Self::new_with_client(mycelium, destination, topic, secret))
    }

    /// Backward-compatible constructor that reuses an existing Mycelium client.
    /// NOTE: This creates a new hub and its own background listener. Prefer `new_with_hub`.
    pub fn new_with_client(
        mycelium: Arc<MyceliumClient>,
        destination: Destination,
        topic: impl Into<String>,
        secret: Option<String>,
    ) -> Self {
        let hub = SupervisorHub::new_with_client(mycelium, topic);
        Self::new_with_hub(hub, destination, secret)
    }

    /// Internal helper used by tests to inspect dst JSON shape.
    fn build_dst(&self) -> Value {
        match &self.destination {
            Destination::Ip(ip) => json!({ "ip": ip.to_string() }),
            Destination::Pk(pk) => json!({ "pk": pk }),
        }
    }

    fn build_supervisor_payload(&self, method: &str, params: Value) -> Value {
        json!({
            "jsonrpc": "2.0",
            "id": self.hub.next_id(),
            "method": method,
            "params": params,
        })
    }

    /// Build a supervisor JSON-RPC payload but force a specific id (used for correlation).
    fn build_supervisor_payload_with_id(&self, method: &str, params: Value, id: u64) -> Value {
        json!({
            "jsonrpc": "2.0",
            "id": id,
            "method": method,
            "params": params,
        })
    }

    fn encode_payload(payload: &Value) -> Result<String, SupervisorClientError> {
        let s = serde_json::to_string(payload)?;
        Ok(BASE64_STANDARD.encode(s.as_bytes()))
    }

    fn encode_topic(topic: &[u8]) -> String {
        BASE64_STANDARD.encode(topic)
    }

    fn extract_message_id_from_result(result: &Value) -> Option<String> {
        // Two possibilities per the Mycelium spec oneOf:
        // - PushMessageResponseId: { "id": "0123456789abcdef" }
        // - InboundMessage: object containing "id" plus srcIp, ...; we still return id.
        result
            .get("id")
            .and_then(|v| v.as_str())
            .map(|s| s.to_string())
    }

    fn need_secret(&self) -> Result<&str, SupervisorClientError> {
        self.secret
            .as_deref()
            .ok_or(SupervisorClientError::MissingSecret)
    }

    // -----------------------------
    // Core: request-reply call via Hub with a default 60s timeout
    // -----------------------------

    /// Send a supervisor JSON-RPC request and await its reply via the Hub.
    /// Returns (outbound_message_id, reply_envelope_json).
    pub async fn call_with_reply_timeout(
        &self,
        method: &str,
        params: Value,
        timeout_secs: u64,
    ) -> Result<(String, Value), SupervisorClientError> {
        let inner_id = self.hub.next_id();
        // Register waiter before sending to avoid a race with the reply
        let rx = self.hub.register_waiter(inner_id).await;

        let inner = self.build_supervisor_payload_with_id(method, params, inner_id);
        let payload_b64 = Self::encode_payload(&inner)?;

        let result = self
            .hub
            .mycelium()
            .push_message(
                &self.destination,
                &Self::encode_topic(self.hub.topic().as_bytes()),
                &payload_b64,
                None,
            )
            .await?;

        let out_id = if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
            id
        } else if let Some(arr) = result.as_array()
            && arr.len() == 1
            && let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
        {
            id
        } else {
            // Clean pending entry to avoid leak
            let _ = self.hub.remove_waiter(inner_id).await;
            return Err(SupervisorClientError::InvalidResponse(format!(
                "result did not contain message id: {result}"
            )));
        };
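        // The message is on its way; now await the supervisor's reply, which the
        // hub's background pop loop delivers on the oneshot channel registered above.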

        let d = Duration::from_secs(timeout_secs);
        match timeout(d, rx).await {
            Ok(Ok(reply)) => Ok((out_id, reply)),
            Ok(Err(_canceled)) => Err(SupervisorClientError::InvalidResponse(
                "oneshot canceled before receiving reply".into(),
            )),
            Err(_elapsed) => {
                // Cleanup on timeout
                let _ = self.hub.remove_waiter(inner_id).await;
                Err(SupervisorClientError::TransportTimeout)
            }
        }
    }

    /// Send and await with the default 60s timeout.
    pub async fn call_with_reply(
        &self,
        method: &str,
        params: Value,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply_timeout(method, params, 60).await
    }

    /// Back-compat: Send and await a reply but return only the outbound id (discard reply).
    /// This keeps existing call sites working while the system migrates to reply-aware paths.
    pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
        let (out_id, _reply) = self.call_with_reply(method, params).await?;
        Ok(out_id)
    }

    // -----------------------------
    // Typed wrappers for Supervisor API (await replies)
    // -----------------------------

    // Runners
    pub async fn list_runners_wait(&self) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("list_runners", json!([])).await
    }

    pub async fn register_runner_wait(
        &self,
        name: impl Into<String>,
        queue: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "name": name.into(),
            "queue": queue.into()
        }]);
        self.call_with_reply("register_runner", params).await
    }

    pub async fn remove_runner_wait(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("remove_runner", json!([actor_id.into()]))
            .await
    }

    pub async fn start_runner_wait(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("start_runner", json!([actor_id.into()]))
            .await
    }

    pub async fn stop_runner_wait(
        &self,
        actor_id: impl Into<String>,
        force: bool,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("stop_runner", json!([actor_id.into(), force]))
            .await
    }

    pub async fn get_runner_status_wait(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("get_runner_status", json!([actor_id.into()]))
            .await
    }

    pub async fn get_all_runner_status_wait(
        &self,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("get_all_runner_status", json!([]))
            .await
    }

    pub async fn start_all_wait(&self) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("start_all", json!([])).await
    }

    pub async fn stop_all_wait(
        &self,
        force: bool,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("stop_all", json!([force])).await
    }

    pub async fn get_all_status_wait(&self) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("get_all_status", json!([])).await
    }

    // Jobs (await)
    pub async fn jobs_create_wait(
        &self,
        job: Value,
    ) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "job": job
        }]);
        self.call_with_reply("jobs.create", params).await
    }

    pub async fn jobs_list_wait(&self) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("jobs.list", json!([])).await
    }

    pub async fn job_run_wait(&self, job: Value) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "job": job
        }]);
        self.call_with_reply("job.run", params).await
    }

    pub async fn job_start_wait(
        &self,
        job_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "job_id": job_id.into()
        }]);
        self.call_with_reply("job.start", params).await
    }

    pub async fn job_status_wait(
        &self,
        job_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("job.status", json!([job_id.into()]))
            .await
    }

    pub async fn job_result_wait(
        &self,
        job_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("job.result", json!([job_id.into()]))
            .await
    }

    pub async fn job_stop_wait(
        &self,
        job_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "job_id": job_id.into()
        }]);
        self.call_with_reply("job.stop", params).await
    }

    pub async fn job_delete_wait(
        &self,
        job_id: impl Into<String>,
    ) -> Result<(String, Value), SupervisorClientError> {
        let secret = self.need_secret()?;
        let params = json!([{
            "secret": secret,
            "job_id": job_id.into()
        }]);
        self.call_with_reply("job.delete", params).await
    }

    pub async fn rpc_discover_wait(&self) -> Result<(String, Value), SupervisorClientError> {
        self.call_with_reply("rpc.discover", json!([])).await
    }

    // -----------------------------
    // Backward-compatible variants returning only outbound id (discarding reply)
    // -----------------------------

    pub async fn list_runners(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.list_runners_wait().await?;
        Ok(id)
    }

    pub async fn register_runner(
        &self,
        name: impl Into<String>,
        queue: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.register_runner_wait(name, queue).await?;
        Ok(id)
    }

    pub async fn remove_runner(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.remove_runner_wait(actor_id).await?;
        Ok(id)
    }

    pub async fn start_runner(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.start_runner_wait(actor_id).await?;
        Ok(id)
    }

    pub async fn stop_runner(
        &self,
        actor_id: impl Into<String>,
        force: bool,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.stop_runner_wait(actor_id, force).await?;
        Ok(id)
    }

    pub async fn get_runner_status(
        &self,
        actor_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.get_runner_status_wait(actor_id).await?;
        Ok(id)
    }

    pub async fn get_all_runner_status(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.get_all_runner_status_wait().await?;
        Ok(id)
    }

    pub async fn start_all(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.start_all_wait().await?;
        Ok(id)
    }

    pub async fn stop_all(&self, force: bool) -> Result<String, SupervisorClientError> {
        let (id, _) = self.stop_all_wait(force).await?;
        Ok(id)
    }

    pub async fn get_all_status(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.get_all_status_wait().await?;
        Ok(id)
    }

    pub async fn jobs_create(&self, job: Value) -> Result<String, SupervisorClientError> {
        let (id, _) = self.jobs_create_wait(job).await?;
        Ok(id)
    }

    pub async fn jobs_list(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.jobs_list_wait().await?;
        Ok(id)
    }

    pub async fn job_run(&self, job: Value) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_run_wait(job).await?;
        Ok(id)
    }

    pub async fn job_start(
        &self,
        job_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_start_wait(job_id).await?;
        Ok(id)
    }

    pub async fn job_status(
        &self,
        job_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_status_wait(job_id).await?;
        Ok(id)
    }

    pub async fn job_result(
        &self,
        job_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_result_wait(job_id).await?;
        Ok(id)
    }

    pub async fn job_stop(
        &self,
        job_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_stop_wait(job_id).await?;
        Ok(id)
    }

    pub async fn job_delete(
        &self,
        job_id: impl Into<String>,
    ) -> Result<String, SupervisorClientError> {
        let (id, _) = self.job_delete_wait(job_id).await?;
        Ok(id)
    }

    pub async fn rpc_discover(&self) -> Result<String, SupervisorClientError> {
        let (id, _) = self.rpc_discover_wait().await?;
        Ok(id)
    }
}

// -----------------------------
// Tests (serialization-only)
// -----------------------------
#[cfg(test)]
mod tests {
    use super::*;

    fn mk_client() -> SupervisorClient {
        // Build a hub; it won't issue real network calls in these serializer-only
        // tests, but constructing it spawns the background pop loop, so tests that
        // use it run under a Tokio runtime via #[tokio::test].
        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
        let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");
        SupervisorClient::new_with_hub(
            hub,
            Destination::Pk(
                "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(),
            ),
            Some("secret".to_string()),
        )
    }

    #[tokio::test]
    async fn builds_dst_ip_and_pk() {
        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
        let hub_ip = SupervisorHub::new_with_client(mycelium.clone(), "supervisor.rpc");
        let c_ip = SupervisorClient::new_with_hub(
            hub_ip,
            Destination::Ip("2001:db8::1".parse().unwrap()),
            None,
        );
        let v_ip = c_ip.build_dst();
        assert_eq!(v_ip.get("ip").unwrap().as_str().unwrap(), "2001:db8::1");

        let c_pk = mk_client();
        let v_pk = c_pk.build_dst();
        assert_eq!(
            v_pk.get("pk").unwrap().as_str().unwrap(),
            "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32"
        );
    }

    #[tokio::test]
    async fn encodes_supervisor_payload_b64() {
        let c = mk_client();
        let payload = c.build_supervisor_payload("list_runners", json!([]));
        let b64 = SupervisorClient::encode_payload(&payload).unwrap();

        // decode and compare round-trip JSON
        let raw = base64::engine::general_purpose::STANDARD
            .decode(b64.as_bytes())
            .unwrap();
        let decoded: Value = serde_json::from_slice(&raw).unwrap();
        assert_eq!(
            decoded.get("method").unwrap().as_str().unwrap(),
            "list_runners"
        );
        assert_eq!(decoded.get("jsonrpc").unwrap().as_str().unwrap(), "2.0");
    }

    #[test]
    fn extract_message_id_works_for_both_variants() {
        // PushMessageResponseId
        let r1 = json!({"id":"0123456789abcdef"});
        assert_eq!(
            SupervisorClient::extract_message_id_from_result(&r1).unwrap(),
            "0123456789abcdef"
        );
        // InboundMessage-like
        let r2 = json!({
            "id":"fedcba9876543210",
            "srcIp":"449:abcd:0123:defa::1",
            "payload":"hpV+"
        });
        assert_eq!(
            SupervisorClient::extract_message_id_from_result(&r2).unwrap(),
            "fedcba9876543210"
        );
    }
}
143
bin/coordinator/src/clients/supervisor_hub.rs
Normal file
@@ -0,0 +1,143 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::Value;
use tokio::sync::{Mutex, oneshot};

use crate::clients::mycelium_client::MyceliumClient;

/// Global hub that:
/// - Owns a single MyceliumClient
/// - Spawns a background popMessage loop filtered by topic
/// - Correlates supervisor JSON-RPC replies by inner id to waiting callers via oneshot channels
#[derive(Clone)]
pub struct SupervisorHub {
    mycelium: Arc<MyceliumClient>,
    topic: String,
    pending: Arc<Mutex<HashMap<u64, oneshot::Sender<Value>>>>,
    id_counter: Arc<AtomicU64>,
}

impl SupervisorHub {
    /// Create a new hub and start the background popMessage task.
    /// - base_url: Mycelium JSON-RPC endpoint, e.g. "http://127.0.0.1:8990"
    /// - topic: plain-text topic (e.g., "supervisor.rpc")
    pub fn new(
        base_url: impl Into<String>,
        topic: impl Into<String>,
    ) -> Result<Arc<Self>, crate::clients::MyceliumClientError> {
        let myc = Arc::new(MyceliumClient::new(base_url)?);
        Ok(Self::new_with_client(myc, topic))
    }

    /// Variant that reuses an existing Mycelium client.
    pub fn new_with_client(mycelium: Arc<MyceliumClient>, topic: impl Into<String>) -> Arc<Self> {
        let hub = Arc::new(Self {
            mycelium,
            topic: topic.into(),
            pending: Arc::new(Mutex::new(HashMap::new())),
            id_counter: Arc::new(AtomicU64::new(1)),
        });
        Self::spawn_pop_loop(hub.clone());
        hub
    }

    fn spawn_pop_loop(hub: Arc<Self>) {
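        // Background task: repeatedly pop inbound messages from Mycelium, decode the
        // supervisor reply payload, and wake the waiter registered under its inner
        // JSON-RPC id. Errors back off briefly instead of killing the loop.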
        tokio::spawn(async move {
            loop {
                // Long-poll (20s server-side wait) for inbound messages on the hub's
                // topic, per the struct docs ("popMessage loop filtered by topic").
                match hub
                    .mycelium
                    .pop_message(Some(false), Some(20), Some(hub.topic.as_str()))
                    .await
                {
                    Ok(Some(inb)) => {
                        // Extract and decode payload
                        let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
                            // Not a payload-bearing message; ignore
                            continue;
                        };
                        let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
                            tracing::warn!(target: "supervisor_hub", "Failed to decode inbound payload base64");
                            continue;
                        };
                        let Ok(rpc) = serde_json::from_slice::<Value>(&raw) else {
                            tracing::warn!(target: "supervisor_hub", "Failed to parse inbound payload JSON");
                            continue;
                        };

                        // Extract inner JSON-RPC id
                        let inner_id_u64 = match rpc.get("id") {
                            Some(Value::Number(n)) => n.as_u64(),
                            Some(Value::String(s)) => s.parse::<u64>().ok(),
                            _ => None,
                        };

                        if let Some(inner_id) = inner_id_u64 {
                            // Try to deliver to a pending waiter
                            let sender_opt = {
                                let mut guard = hub.pending.lock().await;
                                guard.remove(&inner_id)
                            };
                            if let Some(tx) = sender_opt {
                                let _ = tx.send(rpc);
                            } else {
                                tracing::warn!(
                                    target: "supervisor_hub",
                                    inner_id,
                                    payload = %String::from_utf8_lossy(&raw),
                                    "Unmatched supervisor reply; no waiter registered"
                                );
                            }
                        } else {
                            tracing::warn!(target: "supervisor_hub", "Inbound supervisor reply missing id; dropping");
                        }
                    }
                    Ok(None) => {
                        // No message; continue polling
                        continue;
                    }
                    Err(e) => {
                        tracing::warn!(target: "supervisor_hub", error = %e, "popMessage error; backing off");
                        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
                    }
                }
            }
        });
    }

    /// Allocate a new inner supervisor JSON-RPC id.
    pub fn next_id(&self) -> u64 {
        self.id_counter.fetch_add(1, Ordering::Relaxed)
    }

    /// Register a oneshot sender for the given inner id and return the receiver side.
    pub async fn register_waiter(&self, inner_id: u64) -> oneshot::Receiver<Value> {
        let (tx, rx) = oneshot::channel();
        let mut guard = self.pending.lock().await;
        guard.insert(inner_id, tx);
        rx
    }

    /// Remove a pending waiter for a given id (used to clean up on timeout).
    pub async fn remove_waiter(&self, inner_id: u64) -> Option<oneshot::Sender<Value>> {
        let mut guard = self.pending.lock().await;
        guard.remove(&inner_id)
    }

    /// Access to the underlying Mycelium client (for pushMessage).
    pub fn mycelium(&self) -> Arc<MyceliumClient> {
        self.mycelium.clone()
    }

    /// Access the configured topic.
    pub fn topic(&self) -> &str {
        &self.topic
    }
}

impl std::fmt::Debug for SupervisorHub {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SupervisorHub")
            .field("topic", &self.topic)
            .finish()
    }
}
9
bin/coordinator/src/clients/types.rs
Normal file
@@ -0,0 +1,9 @@
use std::net::IpAddr;

/// Destination for Mycelium messages (shared by clients)
#[derive(Clone, Debug)]
pub enum Destination {
    Ip(IpAddr),
    /// 64-hex public key of the receiver node
    Pk(String),
}
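
// Example construction (illustrative values):
//   let by_ip = Destination::Ip("2001:db8::1".parse().unwrap());
//   let by_pk = Destination::Pk("bb39b4a3...".to_string());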
381
bin/coordinator/src/dag.rs
Normal file
@@ -0,0 +1,381 @@
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt;

use crate::{
    models::{Flow, Job, JobStatus, ScriptType},
    storage::RedisDriver,
};

pub type DagResult<T> = Result<T, DagError>;

#[derive(Debug)]
pub enum DagError {
    Storage(Box<dyn std::error::Error + Send + Sync>),
    MissingDependency { job: u32, depends_on: u32 },
    CycleDetected { remaining: Vec<u32> },
    UnknownJob { job: u32 },
    DependenciesIncomplete { job: u32, missing: Vec<u32> },
    FlowFailed { failed_job: u32 },
    JobNotStarted { job: u32 },
}

impl fmt::Display for DagError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DagError::Storage(e) => write!(f, "Storage error: {}", e),
            DagError::MissingDependency { job, depends_on } => write!(
                f,
                "Job {} depends on {}, which is not part of the flow.jobs list",
                job, depends_on
            ),
            DagError::CycleDetected { remaining } => {
                write!(f, "Cycle detected; unresolved nodes: {:?}", remaining)
            }
            DagError::UnknownJob { job } => write!(f, "Unknown job id: {}", job),
            DagError::DependenciesIncomplete { job, missing } => write!(
                f,
                "Job {} cannot start; missing completed deps: {:?}",
                job, missing
            ),
            DagError::FlowFailed { failed_job } => {
                write!(f, "Flow failed due to job {}", failed_job)
            }
            DagError::JobNotStarted { job } => write!(
                f,
                "Job {} cannot be completed because it is not marked as started",
                job
            ),
        }
    }
}

impl std::error::Error for DagError {}

impl From<Box<dyn std::error::Error + Send + Sync>> for DagError {
    fn from(e: Box<dyn std::error::Error + Send + Sync>) -> Self {
        DagError::Storage(e)
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JobSummary {
    pub id: u32,
    pub depends: Vec<u32>,
    pub prerequisites: Vec<String>,
    pub script_type: ScriptType,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowDag {
    pub flow_id: u32,
    pub caller_id: u32,
    pub context_id: u32,
    pub nodes: HashMap<u32, JobSummary>,
    pub edges: Vec<(u32, u32)>,         // (from prerequisite, to job)
    pub reverse_edges: Vec<(u32, u32)>, // (from job, to prerequisite)
    pub roots: Vec<u32>,                // in_degree == 0
    pub leaves: Vec<u32>,               // out_degree == 0
    pub levels: Vec<Vec<u32>>,          // topological layers for parallel execution
    // Runtime execution state
    pub started: HashSet<u32>,
    pub completed: HashSet<u32>,
    pub failed_job: Option<u32>,
}

pub async fn build_flow_dag(
    redis: &RedisDriver,
    context_id: u32,
    flow_id: u32,
) -> DagResult<FlowDag> {
    // Load flow
    let flow: Flow = redis
        .load_flow(context_id, flow_id)
        .await
        .map_err(DagError::from)?;
    let caller_id = flow.caller_id();
    let flow_job_ids = flow.jobs();

    // Build a set for faster membership tests
    let job_id_set: HashSet<u32> = flow_job_ids.iter().copied().collect();

    // Load all jobs
    let mut jobs: HashMap<u32, Job> = HashMap::with_capacity(flow_job_ids.len());
    for jid in flow_job_ids {
        let job = redis
            .load_job(context_id, caller_id, *jid)
            .await
            .map_err(DagError::from)?;
        jobs.insert(*jid, job);
    }

    // Validate dependencies and construct adjacency
    let mut edges: Vec<(u32, u32)> = Vec::new();
    let mut reverse_edges: Vec<(u32, u32)> = Vec::new();
    let mut adj: HashMap<u32, Vec<u32>> = HashMap::with_capacity(jobs.len());
    let mut rev_adj: HashMap<u32, Vec<u32>> = HashMap::with_capacity(jobs.len());
    let mut in_degree: HashMap<u32, usize> = HashMap::with_capacity(jobs.len());

    for &jid in flow_job_ids {
        adj.entry(jid).or_default();
        rev_adj.entry(jid).or_default();
        in_degree.entry(jid).or_insert(0);
    }

    for (&jid, job) in &jobs {
        for &dep in job.depends() {
            if !job_id_set.contains(&dep) {
                return Err(DagError::MissingDependency {
                    job: jid,
                    depends_on: dep,
                });
            }
            // edge: dep -> jid
            edges.push((dep, jid));
            reverse_edges.push((jid, dep));
            adj.get_mut(&dep).unwrap().push(jid);
            rev_adj.get_mut(&jid).unwrap().push(dep);
            *in_degree.get_mut(&jid).unwrap() += 1;
        }
    }

    // Kahn's algorithm for topological sorting, with level construction
    let mut zero_in: VecDeque<u32> = in_degree
        .iter()
        .filter_map(|(k, v)| if *v == 0 { Some(*k) } else { None })
        .collect();

    let mut processed_count = 0usize;
    let mut levels: Vec<Vec<u32>> = Vec::new();

    // To make the result deterministic, sort the initial zero_in frontier
    {
        let mut tmp: Vec<u32> = zero_in.iter().copied().collect();
        tmp.sort_unstable();
        zero_in = tmp.into_iter().collect();
    }

    while !zero_in.is_empty() {
        let mut level: Vec<u32> = Vec::new();
        // drain current frontier
        let mut next_zero: Vec<u32> = Vec::new();
        let mut current_frontier: Vec<u32> = zero_in.drain(..).collect();
        current_frontier.sort_unstable();
        for u in current_frontier {
            level.push(u);
            processed_count += 1;
            if let Some(children) = adj.get(&u) {
                let mut sorted_children = children.clone();
                sorted_children.sort_unstable();
                for &v in &sorted_children {
                    let d = in_degree.get_mut(&v).unwrap();
                    *d -= 1;
                    if *d == 0 {
                        next_zero.push(v);
                    }
                }
            }
        }
        next_zero.sort_unstable();
        zero_in = next_zero.into_iter().collect();
        levels.push(level);
    }
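    // At this point `levels` is a topological layering: for example (illustrative
    // ids), jobs {1, 2, 3} where 3 depends on both 1 and 2 yield levels [[1, 2], [3]].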

    if processed_count != jobs.len() {
        let remaining: Vec<u32> = in_degree
            .into_iter()
            .filter_map(|(k, v)| if v > 0 { Some(k) } else { None })
            .collect();
        return Err(DagError::CycleDetected { remaining });
    }

    // Roots and leaves
    let roots: Vec<u32> = levels.first().cloned().unwrap_or_default();
    let leaves: Vec<u32> = adj
        .iter()
        .filter_map(|(k, v)| if v.is_empty() { Some(*k) } else { None })
        .collect();

    // Nodes map (JobSummary)
    let mut nodes: HashMap<u32, JobSummary> = HashMap::with_capacity(jobs.len());
    for (&jid, job) in &jobs {
        let summary = JobSummary {
            id: jid,
            depends: job.depends().to_vec(),
            prerequisites: job.prerequisites().to_vec(),
            script_type: job.script_type(),
        };
        nodes.insert(jid, summary);
    }

    // Sort edges deterministically
    edges.sort_unstable();
    reverse_edges.sort_unstable();

    // Populate runtime execution state from the persisted Job.status()
    let mut started_set: HashSet<u32> = HashSet::new();
    let mut completed_set: HashSet<u32> = HashSet::new();
    let mut error_ids: Vec<u32> = Vec::new();

    for (&jid, job) in &jobs {
        match job.status() {
            JobStatus::Finished => {
                completed_set.insert(jid);
            }
            JobStatus::Started => {
                started_set.insert(jid);
            }
            JobStatus::Dispatched => {
                // Consider Dispatched as "in-flight" for the DAG runtime started set,
                // so queued/running work is visible in periodic snapshots.
                started_set.insert(jid);
            }
            JobStatus::Error => {
                error_ids.push(jid);
            }
            JobStatus::WaitingForPrerequisites => {
                // Neither started nor completed
            }
        }
    }

    // Choose a deterministic failed job if any errors exist (smallest job id)
    let failed_job = if error_ids.is_empty() {
        None
    } else {
        error_ids.sort_unstable();
        Some(error_ids[0])
    };

    let dag = FlowDag {
        flow_id,
        caller_id,
        context_id,
        nodes,
        edges,
        reverse_edges,
        roots,
        leaves,
        levels,
        started: started_set,
        completed: completed_set,
        failed_job,
    };

    Ok(dag)
}

impl FlowDag {
    /// Return all jobs that are ready to be processed.
    /// A job is ready if:
    /// - it exists in the DAG
    /// - it is not already started or completed
    /// - it has no dependencies, or all dependencies are completed
    ///
    /// If any job has failed, the entire flow is considered failed and an error is returned.
    pub fn ready_jobs(&self) -> DagResult<Vec<u32>> {
        if let Some(failed_job) = self.failed_job {
            return Err(DagError::FlowFailed { failed_job });
        }

        let mut ready: Vec<u32> = Vec::new();
        for (&jid, summary) in &self.nodes {
            if self.completed.contains(&jid) || self.started.contains(&jid) {
                continue;
            }
            let mut deps_ok = true;
            for dep in &summary.depends {
                if !self.completed.contains(dep) {
                    deps_ok = false;
                    break;
                }
            }
            if deps_ok {
                ready.push(jid);
            }
        }
        ready.sort_unstable();
        Ok(ready)
    }

    /// Mark a job as started.
    /// Strict validation rules:
    /// - Unknown jobs are rejected with UnknownJob
    /// - If the flow has already failed, return FlowFailed
    /// - If the job is already started or completed, this is a no-op (idempotent)
    /// - If any dependency is not completed, return DependenciesIncomplete with the missing deps
    pub fn mark_job_started(&mut self, job: u32) -> DagResult<()> {
        if !self.nodes.contains_key(&job) {
            return Err(DagError::UnknownJob { job });
        }
        if self.completed.contains(&job) || self.started.contains(&job) {
            return Ok(());
        }
        if let Some(failed_job) = self.failed_job {
            return Err(DagError::FlowFailed { failed_job });
        }

        let summary = self.nodes.get(&job).expect("checked contains_key");
        let missing: Vec<u32> = summary
            .depends
            .iter()
            .copied()
            .filter(|d| !self.completed.contains(d))
            .collect();

        if !missing.is_empty() {
            return Err(DagError::DependenciesIncomplete { job, missing });
        }

        self.started.insert(job);
        Ok(())
    }

    /// Mark a job as completed.
    /// Strict validation rules:
    /// - Unknown jobs are rejected with UnknownJob
    /// - If the job is already completed, this is a no-op (idempotent)
    /// - If the flow has already failed, return FlowFailed
    /// - If the job was not previously started, return JobNotStarted
    pub fn mark_job_completed(&mut self, job: u32) -> DagResult<()> {
        if !self.nodes.contains_key(&job) {
            return Err(DagError::UnknownJob { job });
        }
        if self.completed.contains(&job) {
            return Ok(());
        }
        if let Some(failed_job) = self.failed_job {
            return Err(DagError::FlowFailed { failed_job });
        }
        if !self.started.contains(&job) {
            return Err(DagError::JobNotStarted { job });
        }

        self.started.remove(&job);
        self.completed.insert(job);
        Ok(())
    }

    /// Mark a job as failed.
    /// Behavior:
    /// - Unknown jobs are rejected with UnknownJob
    /// - If a failure is already recorded:
    ///   - If it is the same job, no-op (idempotent)
    ///   - If it is a different job, return FlowFailed with the already-failed job
    /// - Otherwise record this job as the failed job
    pub fn mark_job_failed(&mut self, job: u32) -> DagResult<()> {
        if !self.nodes.contains_key(&job) {
            return Err(DagError::UnknownJob { job });
        }
        match self.failed_job {
            Some(existing) if existing == job => Ok(()),
            Some(existing) => Err(DagError::FlowFailed {
                failed_job: existing,
            }),
            None => {
                self.failed_job = Some(job);
                Ok(())
            }
        }
    }
}
8
bin/coordinator/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
pub mod clients;
pub mod dag;
pub mod models;
pub mod router;
pub mod rpc;
pub mod service;
pub mod storage;
mod time;
142
bin/coordinator/src/main.rs
Normal file
@@ -0,0 +1,142 @@
use clap::Parser;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;

use tracing::{error, info};
use tracing_subscriber::EnvFilter;

#[derive(Debug, Clone, Parser)]
#[command(
    name = "herocoordinator",
    version,
    about = "Hero Coordinator CLI",
    long_about = None
)]
struct Cli {
    #[arg(
        long = "mycelium-ip",
        short = 'i',
        env = "MYCELIUM_IP",
        default_value = "127.0.0.1",
        help = "IP address where Mycelium JSON-RPC is listening (default: 127.0.0.1)"
    )]
    mycelium_ip: IpAddr,

    #[arg(
        long = "mycelium-port",
        short = 'p',
        env = "MYCELIUM_PORT",
        default_value_t = 8990u16,
        help = "Port for Mycelium JSON-RPC (default: 8990)"
    )]
    mycelium_port: u16,

    #[arg(
        long = "redis-addr",
        short = 'r',
        env = "REDIS_ADDR",
        default_value = "127.0.0.1:6379",
        help = "Socket address of Redis instance (default: 127.0.0.1:6379)"
    )]
    redis_addr: SocketAddr,

    #[arg(
        long = "api-http-ip",
        env = "API_HTTP_IP",
        default_value = "127.0.0.1",
        help = "Bind IP for HTTP JSON-RPC server (default: 127.0.0.1)"
    )]
    api_http_ip: IpAddr,

    #[arg(
        long = "api-http-port",
        env = "API_HTTP_PORT",
        default_value_t = 9652u16,
        help = "Bind port for HTTP JSON-RPC server (default: 9652)"
    )]
    api_http_port: u16,

    #[arg(
        long = "api-ws-ip",
        env = "API_WS_IP",
        default_value = "127.0.0.1",
        help = "Bind IP for WebSocket JSON-RPC server (default: 127.0.0.1)"
    )]
    api_ws_ip: IpAddr,

    #[arg(
        long = "api-ws-port",
        env = "API_WS_PORT",
        default_value_t = 9653u16,
        help = "Bind port for WebSocket JSON-RPC server (default: 9653)"
    )]
    api_ws_port: u16,
}

#[tokio::main]
async fn main() {
    let cli = Cli::parse();
    // Initialize tracing subscriber (pretty formatter; controlled by RUST_LOG)
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .pretty()
        .with_target(true)
        .with_level(true)
        .init();

    let http_addr = SocketAddr::new(cli.api_http_ip, cli.api_http_port);
    let ws_addr = SocketAddr::new(cli.api_ws_ip, cli.api_ws_port);

    // Initialize Redis driver
    let redis = hero_coordinator::storage::RedisDriver::new(cli.redis_addr.to_string())
        .await
        .expect("Failed to connect to Redis");

    // Initialize Service
    let service = hero_coordinator::service::AppService::new(redis);
    let service_for_router = service.clone();

    // Shared application state
    let state = Arc::new(hero_coordinator::rpc::AppState::new(service));

    // Start router workers (auto-discovered contexts) using a single global
    // SupervisorHub (no separate inbound listener)
    {
        let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
        let hub = hero_coordinator::clients::SupervisorHub::new(
            base_url.clone(),
            "supervisor.rpc".to_string(),
        )
        .expect("Failed to initialize SupervisorHub");
        let cfg = hero_coordinator::router::RouterConfig {
            context_ids: Vec::new(), // ignored by start_router_auto
            concurrency: 32,
            base_url,
            topic: "supervisor.rpc".to_string(),
            sup_hub: hub.clone(),
            transport_poll_interval_secs: 2,
            transport_poll_timeout_secs: 300,
        };
        // Per-context outbound delivery loops (replies handled by SupervisorHub)
        let _auto_handle = hero_coordinator::router::start_router_auto(service_for_router, cfg);
    }
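
    // From here on the coordinator is fully wired: Redis for persistence, the
    // global SupervisorHub for Mycelium request/reply traffic, and per-context
    // router workers delivering outbound jobs.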
    // Build RPC modules for both servers
    let http_module = hero_coordinator::rpc::build_module(state.clone());
    let ws_module = hero_coordinator::rpc::build_module(state.clone());

    info!(%http_addr, %ws_addr, redis_addr=%cli.redis_addr, "Starting JSON-RPC servers");

    // Start servers
    let _http_handle = hero_coordinator::rpc::start_http(http_addr, http_module)
        .await
        .expect("Failed to start HTTP server");
    let _ws_handle = hero_coordinator::rpc::start_ws(ws_addr, ws_module)
        .await
        .expect("Failed to start WS server");

    // Wait for Ctrl+C to terminate
    if let Err(e) = tokio::signal::ctrl_c().await {
        error!(error=%e, "Failed to listen for shutdown signal");
    }
    info!("Shutdown signal received, exiting.");
}
15
bin/coordinator/src/models.rs
Normal file
@@ -0,0 +1,15 @@
mod actor;
mod context;
mod flow;
mod job;
mod message;
mod runner;
mod script_type;

pub use actor::Actor;
pub use context::Context;
pub use flow::{Flow, FlowStatus};
pub use job::{Job, JobStatus};
pub use message::{Message, MessageFormatType, MessageStatus, MessageType, TransportStatus};
pub use runner::Runner;
pub use script_type::ScriptType;
15
bin/coordinator/src/models/actor.rs
Normal file
@@ -0,0 +1,15 @@
use std::net::IpAddr;

use serde::{Deserialize, Serialize};

use crate::time::Timestamp;

#[derive(Serialize, Deserialize, Clone)]
pub struct Actor {
    id: u32,
    pubkey: String,
    /// IPs where the actor is reachable; these can be Mycelium addresses, but that is not mandatory
    address: Vec<IpAddr>,
    created_at: Timestamp,
    updated_at: Timestamp,
}
17
bin/coordinator/src/models/context.rs
Normal file
@@ -0,0 +1,17 @@
use serde::{Deserialize, Serialize};

use crate::time::Timestamp;

#[derive(Serialize, Deserialize, Clone)]
pub struct Context {
    /// Redis DB to use
    pub id: u32,
    /// Actor ids which have admin rights on this context
    pub admins: Vec<u32>,
    /// Actor ids which can read the context info
    pub readers: Vec<u32>,
    /// Actor ids which can execute jobs in this context
    pub executors: Vec<u32>,
    pub created_at: Timestamp,
    pub updated_at: Timestamp,
}
49
bin/coordinator/src/models/flow.rs
Normal file
@@ -0,0 +1,49 @@
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

use crate::time::Timestamp;

#[derive(Serialize, Deserialize, Clone)]
pub struct Flow {
    /// Flow Id, set by the actor which created it
    pub id: u32,
    /// Actor Id who created this flow
    pub caller_id: u32,
    /// The context in which this flow is executed
    pub context_id: u32,
    /// List of jobs which make up the flow
    pub jobs: Vec<u32>,
    /// Environment variables, passed to every job when executed
    pub env_vars: HashMap<String, String>,
    /// The result of the flow
    pub result: HashMap<String, String>,
    pub created_at: Timestamp,
    pub updated_at: Timestamp,
    pub status: FlowStatus,
}

/// The status of a flow
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)]
pub enum FlowStatus {
    Created,
    Dispatched,
    Started,
    Error,
    Finished,
}

impl Flow {
    pub fn id(&self) -> u32 {
        self.id
    }
    pub fn caller_id(&self) -> u32 {
        self.caller_id
    }
    pub fn context_id(&self) -> u32 {
        self.context_id
    }
    pub fn jobs(&self) -> &[u32] {
        &self.jobs
    }
}
62
bin/coordinator/src/models/job.rs
Normal file
@@ -0,0 +1,62 @@
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

use crate::{models::ScriptType, time::Timestamp};

#[derive(Clone, Serialize, Deserialize)]
pub struct Job {
    /// Job Id, this is given by the actor who created the job
    pub id: u32,
    /// Actor ID which created this job
    pub caller_id: u32,
    /// Context in which the job is executed
    pub context_id: u32,
    pub script: String,
    pub script_type: ScriptType,
    /// Timeout in seconds for this job
    pub timeout: u32,
    /// Max amount of times to retry this job
    pub retries: u8,
    pub env_vars: HashMap<String, String>,
    pub result: HashMap<String, String>,
    pub prerequisites: Vec<String>,
    /// Ids of jobs this job depends on, i.e. this job can't start until those have finished
    pub depends: Vec<u32>,
    pub created_at: Timestamp,
    pub updated_at: Timestamp,
    pub status: JobStatus,
}

#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug)]
pub enum JobStatus {
    Dispatched,
    WaitingForPrerequisites,
    Started,
    Error,
    Finished,
}

impl Job {
    pub fn id(&self) -> u32 {
        self.id
    }
    pub fn caller_id(&self) -> u32 {
        self.caller_id
    }
    pub fn context_id(&self) -> u32 {
        self.context_id
    }
    pub fn depends(&self) -> &[u32] {
        &self.depends
    }
    pub fn prerequisites(&self) -> &[String] {
        &self.prerequisites
    }
    pub fn script_type(&self) -> ScriptType {
        self.script_type.clone()
    }
    pub fn status(&self) -> JobStatus {
        self.status.clone()
    }
}
81
bin/coordinator/src/models/message.rs
Normal file
@@ -0,0 +1,81 @@
use serde::{Deserialize, Serialize};

use crate::{
    models::{Job, ScriptType},
    time::Timestamp,
};

#[derive(Clone, Serialize, Deserialize)]
pub struct Message {
    /// Unique ID for the message, set by the caller
    pub id: u32,
    /// Id of the actor who sent this message
    pub caller_id: u32,
    /// Id of the context in which this message was sent
    pub context_id: u32,
    pub message: String,
    pub message_type: ScriptType,
    pub message_format_type: MessageFormatType,
    /// Seconds for the message to arrive at the destination
    pub timeout: u32,
    /// Seconds for the receiver to acknowledge receipt of the message
    pub timeout_ack: u32,
    /// Seconds for the receiver to send us a reply
    pub timeout_result: u32,

    /// Outbound transport id returned by Mycelium on push
    pub transport_id: Option<String>,
    /// Latest transport status as reported by Mycelium
    pub transport_status: Option<TransportStatus>,

    pub job: Vec<Job>,
    pub logs: Vec<Log>,
    pub created_at: Timestamp,
    pub updated_at: Timestamp,
    pub status: MessageStatus,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MessageType {
    Job,
    Chat,
    Mail,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum MessageStatus {
    Dispatched,
    Acknowledged,
    Error,
    Processed,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum TransportStatus {
    Queued,
    Sent,
    Delivered,
    Read,
    Failed,
}

impl std::fmt::Display for TransportStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            TransportStatus::Queued => f.write_str("queued"),
            TransportStatus::Sent => f.write_str("sent"),
            TransportStatus::Delivered => f.write_str("delivered"),
            TransportStatus::Read => f.write_str("read"),
            TransportStatus::Failed => f.write_str("failed"),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MessageFormatType {
    Html,
    Text,
    Md,
}
|
||||
|
||||
type Log = String;
|
||||
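The lowercase strings in the `Display` impl above line up with the transport states Mycelium reports. A small test sketch (not part of the original diff) pinning that down:

```rust
#[cfg(test)]
mod transport_status_display_tests {
    use super::*;

    // Sanity-check the Display impl defined above; values mirror the match arms.
    #[test]
    fn renders_lowercase_states() {
        assert_eq!(TransportStatus::Queued.to_string(), "queued");
        assert_eq!(TransportStatus::Delivered.to_string(), "delivered");
        assert_eq!(TransportStatus::Failed.to_string(), "failed");
    }
}
```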
25
bin/coordinator/src/models/runner.rs
Normal file
@@ -0,0 +1,25 @@
use std::net::IpAddr;

use serde::{Deserialize, Serialize};

use crate::models::ScriptType;
use crate::time::Timestamp;

#[derive(Serialize, Deserialize, Clone)]
pub struct Runner {
    pub id: u32,
    /// Mycelium public key
    pub pubkey: String,
    /// Mycelium address
    pub address: IpAddr,
    /// Needs to be set by the runner, usually `runner<runnerid>`
    pub topic: String,
    /// The script type this runner can execute; used for routing
    pub script_type: ScriptType,
    /// If this is true, the runner also listens on a local redis queue
    pub local: bool,
    /// Optional secret used for authenticated supervisor calls (if required)
    pub secret: Option<String>,
    pub created_at: Timestamp,
    pub updated_at: Timestamp,
}
9
bin/coordinator/src/models/script_type.rs
Normal file
@@ -0,0 +1,9 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub enum ScriptType {
    Osis,
    Sal,
    V,
    Python,
}
972
bin/coordinator/src/router.rs
Normal file
@@ -0,0 +1,972 @@
use std::{
    collections::{HashMap, HashSet},
    sync::Arc,
};

use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use tokio::sync::{Mutex, Semaphore};

use crate::{
    clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
    models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
    service::AppService,
};
use tracing::{error, info};

#[derive(Clone, Debug)]
pub struct RouterConfig {
    pub context_ids: Vec<u32>,
    pub concurrency: usize,
    pub base_url: String, // e.g. http://127.0.0.1:8990
    pub topic: String,    // e.g. "supervisor.rpc"
    pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
    // Transport status polling configuration
    pub transport_poll_interval_secs: u64, // e.g. 2
    pub transport_poll_timeout_secs: u64,  // e.g. 300 (5 minutes)
}
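As a construction sketch (not in the diff), the example values from the field comments would be wired up like this inside whatever setup code builds the router; the `sup_hub` handle and the concurrency value are assumptions:

```rust
// Minimal sketch, assuming `sup_hub: Arc<SupervisorHub>` was created earlier.
let cfg = RouterConfig {
    context_ids: vec![1],
    concurrency: 8, // assumed value; pick per deployment
    base_url: "http://127.0.0.1:8990".to_string(),
    topic: "supervisor.rpc".to_string(),
    sup_hub: sup_hub.clone(),
    transport_poll_interval_secs: 2,
    transport_poll_timeout_secs: 300,
};
```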
/*
SupervisorClient reuse cache (Router-local):

Rationale:
- SupervisorClient maintains an internal JSON-RPC id_counter per instance.
- Rebuilding a client for each message resets this counter, causing inner JSON-RPC ids to restart at 1.
- We reuse one SupervisorClient per (destination, topic, secret) to preserve monotonically increasing ids.

Scope:
- Cache is per Router loop (and a separate one for the inbound listener).
- If cross-loop/process reuse becomes necessary later, promote to a process-global cache.

Keying:
- Key: destination + topic + secret-presence (secret content hashed; not stored in plaintext).

Concurrency:
- tokio::Mutex protects a HashMap<String, Arc<SupervisorClient>>.
- Values are Arc so call sites clone cheaply and share the same id_counter.
*/
#[derive(Clone)]
struct SupervisorClientCache {
    map: Arc<Mutex<HashMap<String, Arc<SupervisorClient>>>>,
}

impl SupervisorClientCache {
    fn new() -> Self {
        Self {
            map: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    fn make_key(dest: &Destination, topic: &str, secret: &Option<String>) -> String {
        let dst = match dest {
            Destination::Ip(ip) => format!("ip:{ip}"),
            Destination::Pk(pk) => format!("pk:{pk}"),
        };
        // Hash the secret to avoid storing plaintext in keys while still differentiating values
        let sec_hash = match secret {
            Some(s) if !s.is_empty() => {
                let mut hasher = DefaultHasher::new();
                s.hash(&mut hasher);
                format!("s:{}", hasher.finish())
            }
            _ => "s:none".to_string(),
        };
        format!("{dst}|t:{topic}|{sec_hash}")
    }

    async fn get_or_create(
        &self,
        hub: Arc<SupervisorHub>,
        dest: Destination,
        topic: String,
        secret: Option<String>,
    ) -> Arc<SupervisorClient> {
        let key = Self::make_key(&dest, &topic, &secret);

        {
            let guard = self.map.lock().await;
            if let Some(existing) = guard.get(&key) {
                tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup");
                return existing.clone();
            }
        }

        let mut guard = self.map.lock().await;
        if let Some(existing) = guard.get(&key) {
            tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
            return existing.clone();
        }
        let client = Arc::new(SupervisorClient::new_with_hub(hub, dest, secret.clone()));
        guard.insert(key, client.clone());
        tracing::debug!(target: "router", cache="supervisor", hit=false, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache insert");
        client
    }
}
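The reuse contract described in the comment block can be sketched as follows (hypothetical `hub` handle; not part of the diff): two lookups with identical destination, topic and secret must return the same `Arc`, which is what preserves the monotonically increasing JSON-RPC ids.

```rust
// Sketch only, assuming `hub: Arc<SupervisorHub>` is in scope.
let cache = Arc::new(SupervisorClientCache::new());
let a = cache
    .get_or_create(hub.clone(), Destination::Pk("pk".into()), "supervisor.rpc".into(), None)
    .await;
let b = cache
    .get_or_create(hub.clone(), Destination::Pk("pk".into()), "supervisor.rpc".into(), None)
    .await;
assert!(Arc::ptr_eq(&a, &b)); // same client, same id_counter
```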
/// Start background router loops, one per context.
/// Each loop:
/// - BRPOP msg_out with 1s timeout
/// - Loads the Message by key, selects a Runner by script_type
/// - Sends supervisor JSON-RPC via Mycelium
/// - On success: Message.status = Acknowledged
/// - On error: Message.status = Error and append a log
pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::JoinHandle<()>> {
    let mut handles = Vec::new();
    for ctx_id in cfg.context_ids.clone() {
        let service_cloned = service.clone();
        let cfg_cloned = cfg.clone();
        let handle = tokio::spawn(async move {
            let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));

            // Use the global SupervisorHub and its Mycelium client
            let sup_hub = cfg_cloned.sup_hub.clone();
            let mycelium = sup_hub.mycelium();

            let cache = Arc::new(SupervisorClientCache::new());

            loop {
                // Pop next message key (blocking with timeout)
                match service_cloned.brpop_msg_out(ctx_id, 1).await {
                    Ok(Some(key)) => {
                        let permit = {
                            // acquire a concurrency permit (non-fair is fine)
                            let sem = sem.clone();
                            // if semaphore is exhausted, await until a slot becomes available
                            match sem.acquire_owned().await {
                                Ok(p) => p,
                                Err(_) => {
                                    // Semaphore closed; exit loop
                                    break;
                                }
                            }
                        };
                        let service_task = service_cloned.clone();
                        let cfg_task = cfg_cloned.clone();
                        tokio::spawn({
                            let mycelium = mycelium.clone();
                            let cache = cache.clone();
                            let sup_hub = sup_hub.clone();
                            async move {
                                // Ensure permit is dropped at end of task
                                let _permit = permit;
                                if let Err(e) = deliver_one(
                                    &service_task,
                                    &cfg_task,
                                    ctx_id,
                                    &key,
                                    mycelium,
                                    sup_hub,
                                    cache.clone(),
                                )
                                .await
                                {
                                    error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
                                }
                            }
                        });
                    }
                    Ok(None) => {
                        // timeout: just tick
                        continue;
                    }
                    Err(e) => {
                        error!(context_id=ctx_id, error=%e, "BRPOP error");
                        // small backoff to avoid busy-loop on persistent errors
                        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
                    }
                }
            }
        });
        handles.push(handle);
    }
    handles
}
async fn deliver_one(
    service: &AppService,
    cfg: &RouterConfig,
    context_id: u32,
    msg_key: &str,
    mycelium: Arc<MyceliumClient>,
    sup_hub: Arc<SupervisorHub>,
    cache: Arc<SupervisorClientCache>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Parse "message:{caller_id}:{id}"
    let (caller_id, id) = parse_message_key(msg_key)
        .ok_or_else(|| format!("invalid message key format: {}", msg_key))?;

    // Load message
    let msg: Message = service.load_message(context_id, caller_id, id).await?;
    // Embedded job id (if any)
    let job_id_opt: Option<u32> = msg.job.first().map(|j| j.id);

    // Determine routing script_type
    let desired: ScriptType = determine_script_type(&msg);

    // Discover runners and select a matching one
    let runners = service.scan_runners(context_id).await?;
    let Some(runner) = runners.into_iter().find(|r| r.script_type == desired) else {
        let log = format!(
            "No runner with script_type {:?} available in context {} for message {}",
            desired, context_id, msg_key
        );
        let _ = service
            .append_message_logs(context_id, caller_id, id, vec![log.clone()])
            .await;
        let _ = service
            .update_message_status(context_id, caller_id, id, MessageStatus::Error)
            .await;
        return Err(log.into());
    };

    // Build SupervisorClient
    let dest = if !runner.pubkey.trim().is_empty() {
        Destination::Pk(runner.pubkey.clone())
    } else {
        Destination::Ip(runner.address)
    };
    // Keep clones for poller usage
    let dest_for_poller = dest.clone();
    let topic_for_poller = cfg.topic.clone();
    let secret_for_poller = runner.secret.clone();
    let client = cache
        .get_or_create(
            sup_hub.clone(),
            dest.clone(),
            cfg.topic.clone(),
            runner.secret.clone(),
        )
        .await;

    // Build supervisor method and params from Message
    let method = msg.message.clone();
    let params = build_params(&msg)?;

    // Send
    // If this is a job.run and we have a secret configured on the client,
    // prefer the typed wrapper that injects the secret into inner supervisor params,
    // and await the reply to capture job_queued immediately.
    let (out_id, reply_opt) = if method == "job.run" {
        if let Some(j) = msg.job.first() {
            let jv = job_to_json(j)?;
            // Returns (outbound message id, reply envelope)
            let (out, reply) = client.job_run_wait(jv).await?;
            (out, Some(reply))
        } else {
            // Fallback: no embedded job, use the generic call (await reply, discard)
            let out = client.call(&method, params).await?;
            (out, None)
        }
    } else {
        let out = client.call(&method, params).await?;
        (out, None)
    };

    // Store transport id and initial Sent status
    let _ = service
        .update_message_transport(
            context_id,
            caller_id,
            id,
            Some(out_id.clone()),
            Some(TransportStatus::Sent),
        )
        .await;

    // Mark as acknowledged on success
    service
        .update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
        .await?;

    // If we got a job.run reply, interpret job_queued immediately
    if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
        let result_opt = reply.get("result");
        let error_opt = reply.get("error");

        // Handle job.run success (job_queued)
        let is_job_queued = result_opt
            .and_then(|res| {
                if res.get("job_queued").is_some() {
                    Some(true)
                } else if let Some(s) = res.as_str() {
                    Some(s == "job_queued")
                } else {
                    None
                }
            })
            .unwrap_or(false);

        if is_job_queued {
            let _ = service
                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
                .await;
            let _ = service
                .append_message_logs(
                    context_id,
                    caller_id,
                    id,
                    vec![format!(
                        "Supervisor reply for job {}: job_queued (processed synchronously)",
                        job_id
                    )],
                )
                .await;
        } else if let Some(err_obj) = error_opt {
            let _ = service
                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
                .await;
            let _ = service
                .append_message_logs(
                    context_id,
                    caller_id,
                    id,
                    vec![format!(
                        "Supervisor error for job {}: {} (processed synchronously)",
                        job_id, err_obj
                    )],
                )
                .await;
        }
    }

    // No correlation map needed; replies are handled synchronously via SupervisorHub

    // Spawn transport-status poller
    {
        let service_poll = service.clone();
        let poll_interval = std::time::Duration::from_secs(cfg.transport_poll_interval_secs);
        let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
        let out_id_cloned = out_id.clone();
        let mycelium = mycelium.clone();

        tokio::spawn(async move {
            let start = std::time::Instant::now();
            let client = mycelium;

            // Supervisor call context captured for sync status checks
            let sup_dest = dest_for_poller;
            let sup_topic = topic_for_poller;
            let job_id_opt = job_id_opt;

            let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
            // Ensure we only request supervisor job.status or job.result once per outbound message
            let mut requested_job_check: bool = false;

            loop {
                if start.elapsed() >= poll_timeout {
                    let _ = service_poll
                        .append_message_logs(
                            context_id,
                            caller_id,
                            id,
                            vec!["Transport-status polling timed out".to_string()],
                        )
                        .await;
                    // leave last known status; do not override
                    break;
                }

                match client.message_status(&out_id_cloned).await {
                    Ok(s) => {
                        if last_status.as_ref() != Some(&s) {
                            let _ = service_poll
                                .update_message_transport(context_id, caller_id, id, None, Some(s.clone()))
                                .await;
                            last_status = Some(s.clone());
                        }

                        // Stop on terminal states
                        if matches!(s, TransportStatus::Delivered | TransportStatus::Read) {
                            if let Some(job_id) = job_id_opt {
                                // First consult Redis for the latest job state in case we already have a terminal update
                                match service_poll.load_job(context_id, caller_id, job_id).await {
                                    Ok(job) => {
                                        // Promote to Started as soon as transport is delivered/read,
                                        // if currently Dispatched or WaitingForPrerequisites.
                                        // This makes DAG.started reflect "in-flight" work even when jobs
                                        // complete too quickly to observe an intermediate supervisor "running" status.
                                        if matches!(
                                            job.status(),
                                            JobStatus::Dispatched | JobStatus::WaitingForPrerequisites
                                        ) {
                                            let _ = service_poll
                                                .update_job_status_unchecked(
                                                    context_id,
                                                    caller_id,
                                                    job_id,
                                                    JobStatus::Started,
                                                )
                                                .await;
                                        }
                                        match job.status() {
                                            JobStatus::Finished | JobStatus::Error => {
                                                // Local job is already terminal; skip supervisor job.status
                                                let _ = service_poll
                                                    .append_message_logs(
                                                        context_id,
                                                        caller_id,
                                                        id,
                                                        vec![format!(
                                                            "Local job {} status is terminal ({:?}); skipping supervisor job.status",
                                                            job_id,
                                                            job.status()
                                                        )],
                                                    )
                                                    .await;

                                                // If result is still empty, immediately request supervisor job.result
                                                if job.result.is_empty() {
                                                    let sup = cache
                                                        .get_or_create(
                                                            sup_hub.clone(),
                                                            sup_dest.clone(),
                                                            sup_topic.clone(),
                                                            secret_for_poller.clone(),
                                                        )
                                                        .await;
                                                    match sup.job_result_wait(job_id.to_string()).await {
                                                        Ok((_out2, reply2)) => {
                                                            // Interpret reply synchronously: success/error/bare string
                                                            let res = reply2.get("result");
                                                            if let Some(obj) = res.and_then(|v| v.as_object()) {
                                                                if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
                                                                    let mut patch = std::collections::HashMap::new();
                                                                    patch.insert("success".to_string(), s.to_string());
                                                                    let _ = service_poll
                                                                        .update_job_result_merge_unchecked(
                                                                            context_id, caller_id, job_id, patch,
                                                                        )
                                                                        .await;
                                                                    let _ = service_poll
                                                                        .update_message_status(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            MessageStatus::Processed,
                                                                        )
                                                                        .await;
                                                                    // Also mark job as Finished so the flow can progress (ignore invalid transitions)
                                                                    let _ = service_poll
                                                                        .update_job_status_unchecked(
                                                                            context_id, caller_id, job_id, JobStatus::Finished,
                                                                        )
                                                                        .await;
                                                                    let _ = service_poll
                                                                        .append_message_logs(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            vec![format!(
                                                                                "Updated job {} status to Finished (sync)", job_id
                                                                            )],
                                                                        )
                                                                        .await;
                                                                    // Existing log about storing result
                                                                    let _ = service_poll
                                                                        .append_message_logs(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            vec![format!(
                                                                                "Stored supervisor job.result for job {} (success, sync)",
                                                                                job_id
                                                                            )],
                                                                        )
                                                                        .await;
                                                                } else if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
                                                                    let mut patch = std::collections::HashMap::new();
                                                                    patch.insert("error".to_string(), s.to_string());
                                                                    let _ = service_poll
                                                                        .update_job_result_merge_unchecked(
                                                                            context_id, caller_id, job_id, patch,
                                                                        )
                                                                        .await;
                                                                    let _ = service_poll
                                                                        .update_message_status(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            MessageStatus::Processed,
                                                                        )
                                                                        .await;
                                                                    // Also mark job as Error so the flow can handle failure (ignore invalid transitions)
                                                                    let _ = service_poll
                                                                        .update_job_status_unchecked(
                                                                            context_id, caller_id, job_id, JobStatus::Error,
                                                                        )
                                                                        .await;
                                                                    let _ = service_poll
                                                                        .append_message_logs(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            vec![format!(
                                                                                "Updated job {} status to Error (sync)", job_id
                                                                            )],
                                                                        )
                                                                        .await;
                                                                    // Existing log about storing result
                                                                    let _ = service_poll
                                                                        .append_message_logs(
                                                                            context_id,
                                                                            caller_id,
                                                                            id,
                                                                            vec![format!(
                                                                                "Stored supervisor job.result for job {} (error, sync)",
                                                                                job_id
                                                                            )],
                                                                        )
                                                                        .await;
                                                                }
                                                            } else if let Some(s) = res.and_then(|v| v.as_str()) {
                                                                let mut patch = std::collections::HashMap::new();
                                                                patch.insert("success".to_string(), s.to_string());
                                                                let _ = service_poll
                                                                    .update_job_result_merge_unchecked(
                                                                        context_id, caller_id, job_id, patch,
                                                                    )
                                                                    .await;
                                                                let _ = service_poll
                                                                    .update_message_status(
                                                                        context_id,
                                                                        caller_id,
                                                                        id,
                                                                        MessageStatus::Processed,
                                                                    )
                                                                    .await;
                                                                // Also mark job as Finished so the flow can progress (ignore invalid transitions)
                                                                let _ = service_poll
                                                                    .update_job_status_unchecked(
                                                                        context_id,
                                                                        caller_id,
                                                                        job_id,
                                                                        JobStatus::Finished,
                                                                    )
                                                                    .await;
                                                                let _ = service_poll
                                                                    .append_message_logs(
                                                                        context_id,
                                                                        caller_id,
                                                                        id,
                                                                        vec![format!(
                                                                            "Updated job {} status to Finished (sync)", job_id
                                                                        )],
                                                                    )
                                                                    .await;
                                                                // Existing log about storing result
                                                                let _ = service_poll
                                                                    .append_message_logs(
                                                                        context_id,
                                                                        caller_id,
                                                                        id,
                                                                        vec![format!(
                                                                            "Stored supervisor job.result for job {} (success, sync)",
                                                                            job_id
                                                                        )],
                                                                    )
                                                                    .await;
                                                            } else {
                                                                let _ = service_poll
                                                                    .append_message_logs(
                                                                        context_id,
                                                                        caller_id,
                                                                        id,
                                                                        vec!["Supervisor job.result reply missing recognizable fields".to_string()],
                                                                    )
                                                                    .await;
                                                            }
                                                        }
                                                        Err(e) => {
                                                            let _ = service_poll
                                                                .append_message_logs(
                                                                    context_id,
                                                                    caller_id,
                                                                    id,
                                                                    vec![format!(
                                                                        "job.result request error for job {}: {}",
                                                                        job_id, e
                                                                    )],
                                                                )
                                                                .await;
                                                        }
                                                    }
                                                } else {
                                                    // Result already present; nothing to fetch
                                                    let _ = service_poll
                                                        .append_message_logs(
                                                            context_id,
                                                            caller_id,
                                                            id,
                                                            vec![format!(
                                                                "Job {} already has result; no supervisor calls needed",
                                                                job_id
                                                            )],
                                                        )
                                                        .await;
                                                }

                                                // Mark processed and stop polling for this message
                                                let _ = service_poll
                                                    .update_message_status(
                                                        context_id,
                                                        caller_id,
                                                        id,
                                                        MessageStatus::Processed,
                                                    )
                                                    .await;
                                                let _ = service_poll
                                                    .append_message_logs(
                                                        context_id,
                                                        caller_id,
                                                        id,
                                                        vec![format!(
                                                            "Terminal job {} detected; stopping transport polling",
                                                            job_id
                                                        )],
                                                    )
                                                    .await;
                                                break;
                                            }
                                            // Not terminal yet -> request supervisor job.status as before
                                            _ => {
                                                let sup = cache
                                                    .get_or_create(
                                                        sup_hub.clone(),
                                                        sup_dest.clone(),
                                                        sup_topic.clone(),
                                                        secret_for_poller.clone(),
                                                    )
                                                    .await;
                                                match sup.job_status_wait(job_id.to_string()).await {
                                                    Ok((_out_id, reply_status)) => {
                                                        // Interpret status reply synchronously
                                                        let result_opt = reply_status.get("result");
                                                        let error_opt = reply_status.get("error");
                                                        if let Some(err_obj) = error_opt {
                                                            let _ = service_poll
                                                                .update_job_status_unchecked(
                                                                    context_id,
                                                                    caller_id,
                                                                    job_id,
                                                                    JobStatus::Error,
                                                                )
                                                                .await;
                                                            let _ = service_poll
                                                                .append_message_logs(
                                                                    context_id, caller_id, id,
                                                                    vec![format!(
                                                                        "Supervisor error for job {}: {} (sync)",
                                                                        job_id, err_obj
                                                                    )],
                                                                )
                                                                .await;
                                                        } else if let Some(res) = result_opt {
                                                            let status_candidate = res
                                                                .get("status")
                                                                .and_then(|v| v.as_str())
                                                                .or_else(|| res.as_str());
                                                            if let Some(remote_status) = status_candidate {
                                                                if let Some((mapped, terminal)) =
                                                                    map_supervisor_job_status(remote_status)
                                                                {
                                                                    let _ = service_poll
                                                                        .update_job_status_unchecked(
                                                                            context_id, caller_id, job_id, mapped.clone(),
                                                                        )
                                                                        .await;
                                                                    let _ = service_poll
                                                                        .append_message_logs(
                                                                            context_id, caller_id, id,
                                                                            vec![format!(
                                                                                "Supervisor job.status for job {} -> {} (mapped to {:?}, sync)",
                                                                                job_id, remote_status, mapped
                                                                            )],
                                                                        )
                                                                        .await;

                                                                    // If terminal, request job.result now (handled above for local terminal case)
                                                                    if terminal {
                                                                        // trigger job.result only if result empty to avoid spam
                                                                        if let Ok(j_after) = service_poll
                                                                            .load_job(context_id, caller_id, job_id)
                                                                            .await
                                                                        {
                                                                            if j_after.result.is_empty() {
                                                                                let sup2 = cache
                                                                                    .get_or_create(
                                                                                        sup_hub.clone(),
                                                                                        sup_dest.clone(),
                                                                                        sup_topic.clone(),
                                                                                        secret_for_poller.clone(),
                                                                                    )
                                                                                    .await;
                                                                                let _ = sup2.job_result_wait(job_id.to_string()).await
                                                                                    .and_then(|(_oid, reply2)| {
                                                                                        // Minimal parse and store
                                                                                        let res2 = reply2.get("result");
                                                                                        if let Some(obj) = res2.and_then(|v| v.as_object()) {
                                                                                            if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
                                                                                                let mut patch = std::collections::HashMap::new();
                                                                                                patch.insert("success".to_string(), s.to_string());
                                                                                                tokio::spawn({
                                                                                                    let service_poll = service_poll.clone();
                                                                                                    async move {
                                                                                                        let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await;
                                                                                                    }
                                                                                                });
                                                                                            }
                                                                                        }
                                                                                        Ok((String::new(), Value::Null))
                                                                                    });
                                                                            }
                                                                        }

                                                                        // Mark processed and stop polling for this message
                                                                        let _ = service_poll
                                                                            .update_message_status(
                                                                                context_id,
                                                                                caller_id,
                                                                                id,
                                                                                MessageStatus::Processed,
                                                                            )
                                                                            .await;
                                                                        let _ = service_poll
                                                                            .append_message_logs(
                                                                                context_id,
                                                                                caller_id,
                                                                                id,
                                                                                vec![format!(
                                                                                    "Terminal job {} detected from supervisor status; stopping transport polling",
                                                                                    job_id
                                                                                )],
                                                                            )
                                                                            .await;
                                                                        break;
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                    Err(e) => {
                                                        let _ = service_poll
                                                            .append_message_logs(
                                                                context_id,
                                                                caller_id,
                                                                id,
                                                                vec![format!(
                                                                    "job.status request error: {}",
                                                                    e
                                                                )],
                                                            )
                                                            .await;
                                                    }
                                                }
                                            }
                                        }
                                    }
                                    // If we cannot load the job, fall back to requesting job.status
                                    Err(_) => {
                                        let sup = cache
                                            .get_or_create(
                                                sup_hub.clone(),
                                                sup_dest.clone(),
                                                sup_topic.clone(),
                                                secret_for_poller.clone(),
                                            )
                                            .await;
                                        match sup.job_status_wait(job_id.to_string()).await {
                                            Ok((_out_id, _reply_status)) => {
                                                let _ = service_poll
                                                    .append_message_logs(
                                                        context_id,
                                                        caller_id,
                                                        id,
                                                        vec![format!(
                                                            "Requested supervisor job.status for job {} (fallback; load_job failed, sync)",
                                                            job_id
                                                        )],
                                                    )
                                                    .await;
                                            }
                                            Err(e) => {
                                                let _ = service_poll
                                                    .append_message_logs(
                                                        context_id,
                                                        caller_id,
                                                        id,
                                                        vec![format!(
                                                            "job.status request error: {}",
                                                            e
                                                        )],
                                                    )
                                                    .await;
                                            }
                                        }
                                    }
                                }
                                // Ensure we only do this once
                                requested_job_check = true;
                            }
                            // break;
                        }
                        if matches!(s, TransportStatus::Failed) {
                            let _ = service_poll
                                .append_message_logs(
                                    context_id,
                                    caller_id,
                                    id,
                                    vec![format!(
                                        "Transport failed for outbound id {out_id_cloned}"
                                    )],
                                )
                                .await;
                            break;
                        }
                    }
                    Err(e) => {
                        // Log and continue polling
                        let _ = service_poll
                            .append_message_logs(
                                context_id,
                                caller_id,
                                id,
                                vec![format!("messageStatus query error: {e}")],
                            )
                            .await;
                    }
                }

                tokio::time::sleep(poll_interval).await;
            }
        });
    }

    Ok(())
}
fn determine_script_type(msg: &Message) -> ScriptType {
    // Prefer embedded job's script_type if available, else fallback to message.message_type
    match msg.job.first() {
        Some(j) => j.script_type.clone(),
        None => msg.message_type.clone(),
    }
}

fn build_params(msg: &Message) -> Result<Value, Box<dyn std::error::Error + Send + Sync>> {
    // Minimal mapping:
    // - "job.run" with exactly one embedded job: [{ "job": <job> }]
    // - otherwise: []
    if msg.message == "job.run"
        && let Some(j) = msg.job.first()
    {
        let jv = job_to_json(j)?;
        return Ok(json!([ { "job": jv } ]));
    }

    Ok(json!([]))
}
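Illustratively (field set abbreviated, values hypothetical), the params built for a `job.run` message with one embedded job have this shape:

```rust
// Shape sketch for build_params on "job.run" (remaining Job fields elided):
let expected_shape = serde_json::json!([
    { "job": { "id": 1, "caller_id": 11, "script_type": "Python" } }
]);
// For any other method, or "job.run" without an embedded job: json!([])
```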
fn job_to_json(job: &Job) -> Result<Value, Box<dyn std::error::Error + Send + Sync>> {
    Ok(serde_json::to_value(job)?)
}

fn parse_message_key(s: &str) -> Option<(u32, u32)> {
    // Expect "message:{caller_id}:{id}"
    let mut it = s.split(':');
    match (it.next(), it.next(), it.next(), it.next()) {
        (Some("message"), Some(caller), Some(id), None) => {
            let caller_id = caller.parse::<u32>().ok()?;
            let msg_id = id.parse::<u32>().ok()?;
            Some((caller_id, msg_id))
        }
        _ => None,
    }
}
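A test sketch (not in the diff) covering the accepted and rejected key shapes:

```rust
#[cfg(test)]
mod message_key_tests {
    use super::*;

    #[test]
    fn parses_canonical_message_keys() {
        assert_eq!(parse_message_key("message:3:7"), Some((3, 7)));
        assert_eq!(parse_message_key("message:3"), None); // missing id
        assert_eq!(parse_message_key("message:3:7:extra"), None); // too many parts
        assert_eq!(parse_message_key("job:3:7"), None); // wrong prefix
    }
}
```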
/// Map supervisor job.status -> (local JobStatus, terminal)
fn map_supervisor_job_status(s: &str) -> Option<(JobStatus, bool)> {
    match s {
        "created" | "queued" => Some((JobStatus::Dispatched, false)),
        "running" => Some((JobStatus::Started, false)),
        "completed" => Some((JobStatus::Finished, true)),
        "failed" | "timeout" => Some((JobStatus::Error, true)),
        _ => None,
    }
}
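The mapping and its terminal flags, pinned down as a test sketch (not in the diff):

```rust
#[cfg(test)]
mod supervisor_status_map_tests {
    use super::*;

    #[test]
    fn maps_remote_statuses() {
        assert_eq!(map_supervisor_job_status("queued"), Some((JobStatus::Dispatched, false)));
        assert_eq!(map_supervisor_job_status("running"), Some((JobStatus::Started, false)));
        assert_eq!(map_supervisor_job_status("completed"), Some((JobStatus::Finished, true)));
        assert_eq!(map_supervisor_job_status("timeout"), Some((JobStatus::Error, true)));
        assert_eq!(map_supervisor_job_status("paused"), None); // unknown stays unmapped
    }
}
```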
/// Auto-discover contexts periodically and ensure a router loop exists for each.
/// Returns a JoinHandle of the discovery task (router loops are detached).
pub fn start_router_auto(service: AppService, cfg: RouterConfig) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async move {
        let mut active: HashSet<u32> = HashSet::new();
        loop {
            match service.list_context_ids().await {
                Ok(ids) => {
                    for ctx_id in ids {
                        if !active.contains(&ctx_id) {
                            // Spawn a loop for this new context
                            let cfg_ctx = RouterConfig {
                                context_ids: vec![ctx_id],
                                ..cfg.clone()
                            };
                            let _ = start_router(service.clone(), cfg_ctx);
                            active.insert(ctx_id);
                            info!(context_id = ctx_id, "Started loop for context");
                        }
                    }
                }
                Err(e) => {
                    error!(error=%e, "list_context_ids error");
                }
            }
            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
        }
    })
}
676
bin/coordinator/src/rpc.rs
Normal file
@@ -0,0 +1,676 @@
use std::{
    collections::HashMap,
    net::{IpAddr, SocketAddr},
    sync::Arc,
};

use jsonrpsee::{
    RpcModule,
    server::{ServerBuilder, ServerHandle},
    types::error::ErrorObjectOwned,
};
use serde::Deserialize;
use serde_json::{Value, json};

use crate::{
    dag::{DagError, FlowDag},
    models::{
        Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageFormatType,
        MessageStatus, Runner, ScriptType,
    },
    service::AppService,
    time::current_timestamp,
};

/// The OpenRPC specification for the HeroCoordinator JSON-RPC API
const OPENRPC_SPEC: &str = include_str!("../specs/openrpc.json");

pub struct AppState {
    pub service: AppService,
}

impl AppState {
    pub fn new(service: AppService) -> Self {
        Self { service }
    }
}

// -----------------------------
// Error helpers
// -----------------------------

fn invalid_params_err<E: std::fmt::Display>(e: E) -> ErrorObjectOwned {
    ErrorObjectOwned::owned(-32602, "Invalid params", Some(Value::String(e.to_string())))
}

fn storage_err(e: Box<dyn std::error::Error + Send + Sync>) -> ErrorObjectOwned {
    let msg = e.to_string();
    if msg.contains("Key not found") {
        ErrorObjectOwned::owned(-32001, "Not Found", Some(Value::String(msg)))
    } else {
        ErrorObjectOwned::owned(-32010, "Storage Error", Some(Value::String(msg)))
    }
}

fn dag_err(e: DagError) -> ErrorObjectOwned {
    match e {
        DagError::Storage(inner) => storage_err(inner),
        DagError::MissingDependency { .. } => ErrorObjectOwned::owned(
            -32020,
            "DAG Missing Dependency",
            Some(Value::String(e.to_string())),
        ),
        DagError::CycleDetected { .. } => ErrorObjectOwned::owned(
            -32021,
            "DAG Cycle Detected",
            Some(Value::String(e.to_string())),
        ),
        DagError::UnknownJob { .. } => ErrorObjectOwned::owned(
            -32022,
            "DAG Unknown Job",
            Some(Value::String(e.to_string())),
        ),
        DagError::DependenciesIncomplete { .. } => ErrorObjectOwned::owned(
            -32023,
            "DAG Dependencies Incomplete",
            Some(Value::String(e.to_string())),
        ),
        DagError::FlowFailed { .. } => ErrorObjectOwned::owned(
            -32024,
            "DAG Flow Failed",
            Some(Value::String(e.to_string())),
        ),
        DagError::JobNotStarted { .. } => ErrorObjectOwned::owned(
            -32025,
            "DAG Job Not Started",
            Some(Value::String(e.to_string())),
        ),
    }
}

// -----------------------------
// Create DTOs and Param wrappers
// -----------------------------

#[derive(Debug, Deserialize)]
pub struct ActorCreate {
    pub id: u32,
    pub pubkey: String,
    pub address: Vec<IpAddr>,
}
impl ActorCreate {
    pub fn into_domain(self) -> Result<Actor, String> {
        let ts = current_timestamp();
        let v = json!({
            "id": self.id,
            "pubkey": self.pubkey,
            "address": self.address,
            "created_at": ts,
            "updated_at": ts,
        });
        serde_json::from_value(v).map_err(|e| e.to_string())
    }
}

#[derive(Debug, Deserialize)]
pub struct ContextCreate {
    pub id: u32,
    pub admins: Vec<u32>,
    pub readers: Vec<u32>,
    pub executors: Vec<u32>,
}
impl ContextCreate {
    pub fn into_domain(self) -> Context {
        let ts = current_timestamp();

        let ContextCreate {
            id,
            admins,
            readers,
            executors,
        } = self;

        Context {
            id,
            admins,
            readers,
            executors,
            created_at: ts,
            updated_at: ts,
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct RunnerCreate {
    pub id: u32,
    pub pubkey: String,
    pub address: IpAddr,
    pub topic: String,
    /// The script type this runner executes (used for routing)
    pub script_type: ScriptType,
    pub local: bool,
    /// Optional secret used for authenticated supervisor calls (if required)
    pub secret: Option<String>,
}
impl RunnerCreate {
    pub fn into_domain(self) -> Runner {
        let ts = current_timestamp();

        let RunnerCreate {
            id,
            pubkey,
            address,
            topic,
            script_type,
            local,
            secret,
        } = self;

        Runner {
            id,
            pubkey,
            address,
            topic,
            script_type,
            local,
            secret,
            created_at: ts,
            updated_at: ts,
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct FlowCreate {
    pub id: u32,
    pub caller_id: u32,
    pub context_id: u32,
    pub jobs: Vec<u32>,
    pub env_vars: HashMap<String, String>,
}

impl FlowCreate {
    pub fn into_domain(self) -> Flow {
        let ts = current_timestamp();

        let FlowCreate {
            id,
            caller_id,
            context_id,
            jobs,
            env_vars,
        } = self;

        Flow {
            id,
            caller_id,
            context_id,
            jobs,
            env_vars,
            result: HashMap::new(),
            created_at: ts,
            updated_at: ts,
            status: FlowStatus::Created,
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct JobCreate {
    pub id: u32,
    pub caller_id: u32,
    pub context_id: u32,
    pub script: String,
    pub script_type: ScriptType,
    pub timeout: u32,
    pub retries: u8,
    pub env_vars: HashMap<String, String>,
    pub prerequisites: Vec<String>,
    pub depends: Vec<u32>,
}

impl JobCreate {
    pub fn into_domain(self) -> Job {
        let ts = current_timestamp();

        let JobCreate {
            id,
            caller_id,
            context_id,
            script,
            script_type,
            timeout,
            retries,
            env_vars,
            prerequisites,
            depends,
        } = self;

        Job {
            id,
            caller_id,
            context_id,
            script,
            script_type,
            timeout,
            retries,
            env_vars,
            result: HashMap::new(),
            prerequisites,
            depends,
            created_at: ts,
            updated_at: ts,
            status: JobStatus::WaitingForPrerequisites,
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct MessageCreate {
    pub id: u32,
    pub caller_id: u32,
    pub context_id: u32,
    pub message: String,
    pub message_type: ScriptType,
    pub message_format_type: MessageFormatType,
    pub timeout: u32,
    pub timeout_ack: u32,
    pub timeout_result: u32,
    pub job: Vec<JobCreate>,
}
impl MessageCreate {
    pub fn into_domain(self) -> Message {
        let ts = current_timestamp();

        let MessageCreate {
            id,
            caller_id,
            context_id,
            message,
            message_type,
            message_format_type,
            timeout,
            timeout_ack,
            timeout_result,
            job,
        } = self;

        Message {
            id,
            caller_id,
            context_id,
            message,
            message_type,
            message_format_type,
            timeout,
            timeout_ack,
            timeout_result,
            transport_id: None,
            transport_status: None,
            job: job.into_iter().map(JobCreate::into_domain).collect(),
            logs: Vec::new(),
            created_at: ts,
            updated_at: ts,
            status: MessageStatus::Dispatched,
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct ActorCreateParams {
    pub actor: ActorCreate,
}
#[derive(Debug, Deserialize)]
pub struct ActorLoadParams {
    pub id: u32,
}

#[derive(Debug, Deserialize)]
pub struct ContextCreateParams {
    pub context: ContextCreate,
}
#[derive(Debug, Deserialize)]
pub struct ContextLoadParams {
    pub id: u32,
}

#[derive(Debug, Deserialize)]
pub struct RunnerCreateParams {
    pub context_id: u32,
    pub runner: RunnerCreate,
}
#[derive(Debug, Deserialize)]
pub struct RunnerLoadParams {
    pub context_id: u32,
    pub id: u32,
}

#[derive(Debug, Deserialize)]
pub struct FlowCreateParams {
    pub context_id: u32,
    pub flow: FlowCreate,
}
#[derive(Debug, Deserialize)]
pub struct FlowLoadParams {
    pub context_id: u32,
    pub id: u32,
}

#[derive(Debug, Deserialize)]
pub struct JobCreateParams {
    pub context_id: u32,
    pub job: JobCreate,
}
#[derive(Debug, Deserialize)]
pub struct JobLoadParams {
    pub context_id: u32,
    pub caller_id: u32,
    pub id: u32,
}

#[derive(Debug, Deserialize)]
pub struct MessageCreateParams {
    pub context_id: u32,
    pub message: MessageCreate,
}
#[derive(Debug, Deserialize)]
pub struct MessageLoadParams {
    pub context_id: u32,
    pub caller_id: u32,
    pub id: u32,
}

// -----------------------------
// Rpc module builder (manual registration)
// -----------------------------

pub fn build_module(state: Arc<AppState>) -> RpcModule<()> {
    let mut module: RpcModule<()> = RpcModule::new(());

    // Actor
    {
        let state = state.clone();
        module
            .register_async_method("actor.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: ActorCreateParams = params.parse().map_err(invalid_params_err)?;
                    let actor = p.actor.into_domain().map_err(invalid_params_err)?;
                    let actor = state
                        .service
                        .create_actor(actor)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(actor)
                }
            })
            .expect("register actor.create");
    }
    {
        let state = state.clone();
        module
            .register_async_method("actor.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: ActorLoadParams = params.parse().map_err(invalid_params_err)?;
                    let actor = state.service.load_actor(p.id).await.map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(actor)
                }
            })
            .expect("register actor.load");
    }

    // Context
    {
        let state = state.clone();
        module
            .register_async_method("context.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: ContextCreateParams = params.parse().map_err(invalid_params_err)?;
                    let ctx = p.context.into_domain();
                    let ctx = state
                        .service
                        .create_context(ctx)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(ctx)
                }
            })
            .expect("register context.create");
    }
    {
        let state = state.clone();
        module
            .register_async_method("context.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: ContextLoadParams = params.parse().map_err(invalid_params_err)?;
                    let ctx = state
                        .service
                        .load_context(p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(ctx)
                }
            })
            .expect("register context.load");
    }

    // Runner
    {
        let state = state.clone();
        module
            .register_async_method("runner.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: RunnerCreateParams = params.parse().map_err(invalid_params_err)?;
                    let runner = p.runner.into_domain();
                    let runner = state
                        .service
                        .create_runner(p.context_id, runner)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(runner)
                }
            })
            .expect("register runner.create");
    }
    {
        let state = state.clone();
        module
            .register_async_method("runner.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: RunnerLoadParams = params.parse().map_err(invalid_params_err)?;
                    let runner = state
                        .service
                        .load_runner(p.context_id, p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(runner)
                }
            })
            .expect("register runner.load");
    }

    // Flow
    {
        let state = state.clone();
        module
            .register_async_method("flow.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: FlowCreateParams = params.parse().map_err(invalid_params_err)?;
                    let flow = p.flow.into_domain();
                    let flow = state
                        .service
                        .create_flow(p.context_id, flow)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(flow)
                }
            })
            .expect("register flow.create");
    }
    {
        let state = state.clone();
        module
            .register_async_method("flow.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?;
                    let flow = state
                        .service
                        .load_flow(p.context_id, p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(flow)
                }
            })
            .expect("register flow.load");
    }
    {
        let state = state.clone();
        module
            .register_async_method("flow.dag", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?;
                    let dag: FlowDag = state
                        .service
                        .flow_dag(p.context_id, p.id)
                        .await
                        .map_err(dag_err)?;
                    Ok::<_, ErrorObjectOwned>(dag)
                }
            })
            .expect("register flow.dag");
    }
    {
        let state = state.clone();
        module
            .register_async_method("flow.start", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: FlowLoadParams = params.parse().map_err(invalid_params_err)?;
                    let started: bool = state
                        .service
                        .flow_start(p.context_id, p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(started)
                }
            })
            .expect("register flow.start");
    }

    // Job
    {
        let state = state.clone();
        module
            .register_async_method("job.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: JobCreateParams = params.parse().map_err(invalid_params_err)?;
                    let job = p.job.into_domain();
                    let job = state
                        .service
                        .create_job(p.context_id, job)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(job)
                }
            })
            .expect("register job.create");
    }
    {
        let state = state.clone();
        module
            .register_async_method("job.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: JobLoadParams = params.parse().map_err(invalid_params_err)?;
                    let job = state
                        .service
                        .load_job(p.context_id, p.caller_id, p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(job)
                }
            })
            .expect("register job.load");
    }

    // Message
    {
        let state = state.clone();
        module
            .register_async_method("message.create", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: MessageCreateParams = params.parse().map_err(invalid_params_err)?;
                    let message = p.message.into_domain();
                    let message = state
                        .service
                        .create_message(p.context_id, message)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(message)
                }
            })
            .expect("register message.create");
    }
    {
        let state = state;
        module
            .register_async_method("message.load", move |params, _caller, _ctx| {
                let state = state.clone();
                async move {
                    let p: MessageLoadParams = params.parse().map_err(invalid_params_err)?;
                    let msg = state
                        .service
                        .load_message(p.context_id, p.caller_id, p.id)
                        .await
                        .map_err(storage_err)?;
                    Ok::<_, ErrorObjectOwned>(msg)
                }
            })
            .expect("register message.load");
    }
    {
        module
            .register_async_method("rpc.discover", move |_params, _caller, _ctx| async move {
                let spec = serde_json::from_str::<serde_json::Value>(OPENRPC_SPEC)
                    .expect("Failed to parse OpenRPC spec");
                Ok::<_, ErrorObjectOwned>(spec)
            })
            .expect("register rpc.discover");
    }

    module
}
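For reference, a hedged client-side sketch of calling one of the methods registered above with jsonrpsee's HTTP client; the address is illustrative and the `http-client` feature is assumed:

```rust
use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::params::ObjectParams;
use jsonrpsee::http_client::HttpClientBuilder;

// Sketch only: fetch an actor by id over HTTP JSON-RPC.
async fn load_actor_example() -> Result<serde_json::Value, Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9652")?; // illustrative addr
    let mut params = ObjectParams::new();
    params.insert("id", 1u32)?; // matches ActorLoadParams { id }
    let actor: serde_json::Value = client.request("actor.load", params).await?;
    Ok(actor)
}
```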
// -----------------------------
// Server runners (HTTP/WS on separate listeners)
// -----------------------------

pub async fn start_http<C>(
    addr: SocketAddr,
    module: RpcModule<C>,
) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
    let server = ServerBuilder::default().build(addr).await?;
    let handle = server.start(module);
    Ok(handle)
}

pub async fn start_ws<C>(
    addr: SocketAddr,
    module: RpcModule<C>,
) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
    // jsonrpsee server supports both HTTP and WS; using a second listener gives us a dedicated WS port.
    let server = ServerBuilder::default().build(addr).await?;
    let handle = server.start(module);
    Ok(handle)
}
1211
bin/coordinator/src/service.rs
Normal file
File diff suppressed because it is too large
3
bin/coordinator/src/storage.rs
Normal file
@@ -0,0 +1,3 @@
pub mod redis;

pub use redis::RedisDriver;
827
bin/coordinator/src/storage/redis.rs
Normal file
@@ -0,0 +1,827 @@
use std::collections::HashMap as StdHashMap;

use redis::{AsyncCommands, aio::ConnectionManager};
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::{Map as JsonMap, Value};
use tokio::sync::Mutex;

use crate::models::{
    Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageStatus, Runner,
    TransportStatus,
};
use tracing::{error, warn};

type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

/// Async Redis driver that saves/loads every model as a Redis hash (HSET),
/// using canonical keys as specified in the specs.
/// - Complex fields (arrays, maps, nested structs) are JSON-encoded per field
/// - Scalars are written as plain strings (numbers/bools as their string representation)
/// - On load, each field value is first attempted to parse as JSON; if that fails it is treated as a plain string
pub struct RedisDriver {
    /// Base address, e.g. "127.0.0.1:6379" or "redis://127.0.0.1:6379"
    base_addr: String,
    /// Cache of connection managers per DB index
    managers: Mutex<StdHashMap<u32, ConnectionManager>>,
}

impl RedisDriver {
    /// Create a new driver for the given Redis address.
    /// Accepts either "host:port" or "redis://host:port"
    pub async fn new(addr: impl Into<String>) -> Result<Self> {
        let raw = addr.into();
        let base_addr = if raw.starts_with("redis://") {
            raw
        } else {
            format!("redis://{}", raw)
        };
        Ok(Self {
            base_addr,
            managers: Mutex::new(StdHashMap::new()),
        })
    }

    /// Get or create a ConnectionManager for the given DB index.
    async fn manager_for_db(&self, db: u32) -> Result<ConnectionManager> {
        {
            // Fast path: check existing
            let guard = self.managers.lock().await;
            if let Some(cm) = guard.get(&db) {
                return Ok(cm.clone());
            }
        }

        // Slow path: create a new manager and cache it
        let url = format!("{}/{}", self.base_addr.trim_end_matches('/'), db);
        let client = redis::Client::open(url.as_str()).map_err(|e| {
            error!(%url, db=%db, error=%e, "Redis client open failed");
            e
        })?;
        let cm = client.get_connection_manager().await.map_err(|e| {
            error!(%url, db=%db, error=%e, "Redis connection manager init failed");
            e
        })?;

        let mut guard = self.managers.lock().await;
        let entry = guard.entry(db).or_insert(cm);
        Ok(entry.clone())
    }

    // -----------------------------
    // Generic helpers (serde <-> HSET)
    // -----------------------------

    fn struct_to_hset_pairs<T: Serialize>(value: &T) -> Result<Vec<(String, String)>> {
        let json = serde_json::to_value(value)?;
        let obj = json
            .as_object()
            .ok_or("Model must serialize to a JSON object")?;
        let mut pairs = Vec::with_capacity(obj.len());
        for (k, v) in obj {
            let s = match v {
                Value::Array(_) | Value::Object(_) => serde_json::to_string(v)?, // complex - store JSON
                Value::String(s) => s.clone(),     // string - plain
                Value::Number(n) => n.to_string(), // number - plain
                Value::Bool(b) => b.to_string(),   // bool - plain
                Value::Null => "null".to_string(), // null sentinel
            };
            pairs.push((k.clone(), s));
        }
        Ok(pairs)
    }

    fn hmap_to_struct<T: DeserializeOwned>(map: StdHashMap<String, String>) -> Result<T> {
        let mut obj = JsonMap::with_capacity(map.len());
        for (k, s) in map {
            // Try parse as JSON first (works for arrays, objects, numbers, booleans, null)
            // If that fails, fallback to string.
            match serde_json::from_str::<Value>(&s) {
                Ok(v) => {
                    obj.insert(k, v);
                }
                Err(_) => {
                    obj.insert(k, Value::String(s));
                }
            }
        }
        let json = Value::Object(obj);
        let model = serde_json::from_value(json)?;
        Ok(model)
    }
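    // Worked example of the encoding contract above (hypothetical values, not in
    // the original diff): a Job with id = 42 and depends = [1, 2] becomes HSET
    // pairs ("id", "42") and ("depends", "[1,2]"). On load, hmap_to_struct
    // parses "[1,2]" back into a JSON array and "42" into a number, so
    // struct_to_hset_pairs / hmap_to_struct round-trip without a separate schema.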
|
||||
async fn hset_model<T: Serialize>(&self, db: u32, key: &str, model: &T) -> Result<()> {
|
||||
let mut cm = self.manager_for_db(db).await?;
|
||||
let pairs = Self::struct_to_hset_pairs(model).map_err(|e| {
|
||||
error!(db=%db, key=%key, error=%e, "Serialize model to HSET pairs failed");
|
||||
e
|
||||
})?;
|
||||
// Ensure no stale fields
|
||||
let del_res: redis::RedisResult<u64> = cm.del(key).await;
|
||||
if let Err(e) = del_res {
|
||||
warn!(db=%db, key=%key, error=%e, "DEL before HSET failed");
|
||||
}
|
||||
// Write all fields
|
||||
let _: () = cm.hset_multiple(key, &pairs).await.map_err(|e| {
|
||||
error!(db=%db, key=%key, error=%e, "HSET multiple failed");
|
||||
e
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn hget_model<T: DeserializeOwned>(&self, db: u32, key: &str) -> Result<T> {
|
||||
let mut cm = self.manager_for_db(db).await?;
|
||||
let map: StdHashMap<String, String> = cm.hgetall(key).await.map_err(|e| {
|
||||
error!(db=%db, key=%key, error=%e, "HGETALL failed");
|
||||
e
|
||||
})?;
|
||||
if map.is_empty() {
|
||||
// NotFound is expected in some flows; don't log as error
|
||||
return Err(format!("Key not found: {}", key).into());
|
||||
}
|
||||
Self::hmap_to_struct(map).map_err(|e| {
|
||||
error!(db=%db, key=%key, error=%e, "Deserialize model from HGETALL failed");
|
||||
e
|
||||
})
|
||||
}
|
||||
|
||||
    // -----------------------------
    // Key helpers (canonical keys)
    // -----------------------------

    fn actor_key(id: u32) -> String {
        format!("actor:{}", id)
    }

    fn context_key(id: u32) -> String {
        format!("context:{}", id)
    }

    fn flow_key(id: u32) -> String {
        format!("flow:{}", id)
    }

    fn runner_key(id: u32) -> String {
        format!("runner:{}", id)
    }

    fn job_key(caller_id: u32, id: u32) -> String {
        format!("job:{}:{}", caller_id, id)
    }

    fn message_key(caller_id: u32, id: u32) -> String {
        format!("message:{}:{}", caller_id, id)
    }

    // -----------------------------
    // Context (DB = context.id)
    // -----------------------------

    /// Save a Context in its own DB (db index = context.id)
    pub async fn save_context(&self, ctx: &Context) -> Result<()> {
        // We don't have field access; compute db and key via JSON to avoid changing model definitions.
        // Extract "id" from the serialized JSON object.
        let json = serde_json::to_value(ctx)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Context.id missing or not a number")? as u32;
        let key = Self::context_key(id);
        // Write the context hash in its own DB
        self.hset_model(id, &key, ctx).await?;
        // Register this context id in the global registry (DB 0)
        let _ = self.register_context_id(id).await;
        Ok(())
    }

    /// Load a Context from its own DB (db index = id)
    pub async fn load_context(&self, id: u32) -> Result<Context> {
        let key = Self::context_key(id);
        self.hget_model(id, &key).await
    }

    // -----------------------------
    // Actor
    // -----------------------------

    /// Save an Actor to the given DB (tenant/context DB)
    pub async fn save_actor(&self, db: u32, actor: &Actor) -> Result<()> {
        let json = serde_json::to_value(actor)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Actor.id missing or not a number")? as u32;
        let key = Self::actor_key(id);
        self.hset_model(db, &key, actor).await
    }

    /// Load an Actor by id from the given DB
    pub async fn load_actor(&self, db: u32, id: u32) -> Result<Actor> {
        let key = Self::actor_key(id);
        self.hget_model(db, &key).await
    }

    /// Save an Actor globally in DB 0 (Actor is context-independent)
    pub async fn save_actor_global(&self, actor: &Actor) -> Result<()> {
        let json = serde_json::to_value(actor)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Actor.id missing or not a number")? as u32;
        let key = Self::actor_key(id);
        self.hset_model(0, &key, actor).await
    }

    /// Load an Actor globally from DB 0 by id
    pub async fn load_actor_global(&self, id: u32) -> Result<Actor> {
        let key = Self::actor_key(id);
        self.hget_model(0, &key).await
    }

    // -----------------------------
    // Runner
    // -----------------------------

    pub async fn save_runner(&self, db: u32, runner: &Runner) -> Result<()> {
        let json = serde_json::to_value(runner)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Runner.id missing or not a number")? as u32;
        let key = Self::runner_key(id);
        self.hset_model(db, &key, runner).await
    }

    pub async fn load_runner(&self, db: u32, id: u32) -> Result<Runner> {
        let key = Self::runner_key(id);
        self.hget_model(db, &key).await
    }

    // -----------------------------
    // Flow
    // -----------------------------

    pub async fn save_flow(&self, db: u32, flow: &Flow) -> Result<()> {
        let json = serde_json::to_value(flow)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Flow.id missing or not a number")? as u32;
        let key = Self::flow_key(id);
        self.hset_model(db, &key, flow).await
    }

    pub async fn load_flow(&self, db: u32, id: u32) -> Result<Flow> {
        let key = Self::flow_key(id);
        self.hget_model(db, &key).await
    }

    // -----------------------------
    // Job
    // -----------------------------

    pub async fn save_job(&self, db: u32, job: &Job) -> Result<()> {
        let json = serde_json::to_value(job)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Job.id missing or not a number")? as u32;
        let caller_id = json
            .get("caller_id")
            .and_then(|v| v.as_u64())
            .ok_or("Job.caller_id missing or not a number")? as u32;
        let key = Self::job_key(caller_id, id);
        self.hset_model(db, &key, job).await
    }

    pub async fn load_job(&self, db: u32, caller_id: u32, id: u32) -> Result<Job> {
        let key = Self::job_key(caller_id, id);
        self.hget_model(db, &key).await
    }

    /// Atomically update a job's status and `updated_at` fields.
    /// - No transition validation is performed.
    /// - Writes only the two fields via HSET to avoid rewriting the whole model.
    pub async fn update_job_status(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        status: JobStatus,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::job_key(caller_id, id);

        // Serialize enum into the same plain string representation stored by create paths
        let status_str = match serde_json::to_value(&status)? {
            Value::String(s) => s,
            v => v.to_string(),
        };

        let ts = crate::time::current_timestamp();

        let pairs = vec![
            ("status".to_string(), status_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_job_status failed");
            e
        })?;
        Ok(())
    }
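
    // Illustrative sketch of the status-serialization fallback used above,
    // isolated from Redis. A unit enum variant serializes to a JSON string
    // and is stored plain; any non-string representation falls back to its
    // JSON text. `DemoStatus` is a hypothetical enum for this example only
    // (assumption: serde's derive feature is enabled).
    #[cfg(test)]
    fn _demo_status_to_plain_string() {
        #[derive(serde::Serialize)]
        enum DemoStatus {
            Started,
        }
        let status_str = match serde_json::to_value(DemoStatus::Started).unwrap() {
            Value::String(s) => s,
            v => v.to_string(),
        };
        assert_eq!(status_str, "Started");
    }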

    // -----------------------------
    // Message
    // -----------------------------

    pub async fn save_message(&self, db: u32, message: &Message) -> Result<()> {
        let json = serde_json::to_value(message)?;
        let id = json
            .get("id")
            .and_then(|v| v.as_u64())
            .ok_or("Message.id missing or not a number")? as u32;
        let caller_id = json
            .get("caller_id")
            .and_then(|v| v.as_u64())
            .ok_or("Message.caller_id missing or not a number")? as u32;
        let key = Self::message_key(caller_id, id);
        self.hset_model(db, &key, message).await
    }

    pub async fn load_message(&self, db: u32, caller_id: u32, id: u32) -> Result<Message> {
        let key = Self::message_key(caller_id, id);
        self.hget_model(db, &key).await
    }

    // -----------------------------
    // Partial update helpers
    // -----------------------------

    /// Flow: update only status and updated_at
    pub async fn update_flow_status(&self, db: u32, id: u32, status: FlowStatus) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::flow_key(id);

        let status_str = match serde_json::to_value(&status)? {
            Value::String(s) => s,
            v => v.to_string(),
        };
        let ts = crate::time::current_timestamp();

        let pairs = vec![
            ("status".to_string(), status_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_flow_status failed");
            e
        })?;
        Ok(())
    }

    /// Message: update only status and updated_at
    pub async fn update_message_status(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        status: MessageStatus,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::message_key(caller_id, id);

        let status_str = match serde_json::to_value(&status)? {
            Value::String(s) => s,
            v => v.to_string(),
        };
        let ts = crate::time::current_timestamp();

        let pairs = vec![
            ("status".to_string(), status_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_message_status failed");
            e
        })?;
        Ok(())
    }

    /// Message: update transport_id / transport_status (optionally) and bump updated_at
    pub async fn update_message_transport(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        transport_id: Option<String>,
        transport_status: Option<TransportStatus>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::message_key(caller_id, id);

        let mut pairs: Vec<(String, String)> = Vec::new();

        if let Some(tid) = transport_id {
            pairs.push(("transport_id".to_string(), tid));
        }

        if let Some(ts_status) = transport_status {
            let status_str = match serde_json::to_value(&ts_status)? {
                Value::String(s) => s,
                v => v.to_string(),
            };
            pairs.push(("transport_status".to_string(), status_str));
        }

        // Always bump updated_at
        let ts = crate::time::current_timestamp();
        pairs.push(("updated_at".to_string(), ts.to_string()));

        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_message_transport failed");
            e
        })?;
        Ok(())
    }

    /// Flow: merge env_vars map and bump updated_at
    pub async fn update_flow_env_vars_merge(
        &self,
        db: u32,
        id: u32,
        patch: StdHashMap<String, String>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::flow_key(id);

        let current: Option<String> = cm.hget(&key, "env_vars").await.ok();
        let mut obj = match current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_object().cloned())
        {
            Some(m) => m,
            None => JsonMap::new(),
        };

        for (k, v) in patch {
            obj.insert(k, Value::String(v));
        }

        let env_vars_str = Value::Object(obj).to_string();
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("env_vars".to_string(), env_vars_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_flow_env_vars_merge failed");
            e
        })?;
        Ok(())
    }
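
    // Illustrative sketch of the merge step above, isolated from Redis: the
    // stored JSON object (if any) is patched key-by-key and re-serialized.
    // Note the helper itself is a read-modify-write over two round trips, so
    // concurrent merges of the same key can race (last write wins).
    #[cfg(test)]
    fn _demo_env_vars_merge() {
        let current = Some(r#"{"A":"1"}"#.to_string());
        let mut obj = current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_object().cloned())
            .unwrap_or_else(JsonMap::new);
        obj.insert("B".to_string(), Value::String("2".to_string()));
        assert_eq!(Value::Object(obj).to_string(), r#"{"A":"1","B":"2"}"#);
    }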

    /// Flow: merge result map and bump updated_at
    pub async fn update_flow_result_merge(
        &self,
        db: u32,
        id: u32,
        patch: StdHashMap<String, String>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::flow_key(id);

        let current: Option<String> = cm.hget(&key, "result").await.ok();
        let mut obj = match current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_object().cloned())
        {
            Some(m) => m,
            None => JsonMap::new(),
        };

        for (k, v) in patch {
            obj.insert(k, Value::String(v));
        }

        let result_str = Value::Object(obj).to_string();
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("result".to_string(), result_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_flow_result_merge failed");
            e
        })?;
        Ok(())
    }

    /// Job: merge env_vars map and bump updated_at
    pub async fn update_job_env_vars_merge(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        patch: StdHashMap<String, String>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::job_key(caller_id, id);

        let current: Option<String> = cm.hget(&key, "env_vars").await.ok();
        let mut obj = match current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_object().cloned())
        {
            Some(m) => m,
            None => JsonMap::new(),
        };

        for (k, v) in patch {
            obj.insert(k, Value::String(v));
        }

        let env_vars_str = Value::Object(obj).to_string();
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("env_vars".to_string(), env_vars_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_job_env_vars_merge failed");
            e
        })?;
        Ok(())
    }

    /// Job: merge result map and bump updated_at
    pub async fn update_job_result_merge(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        patch: StdHashMap<String, String>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::job_key(caller_id, id);

        let current: Option<String> = cm.hget(&key, "result").await.ok();
        let mut obj = match current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_object().cloned())
        {
            Some(m) => m,
            None => JsonMap::new(),
        };

        for (k, v) in patch {
            obj.insert(k, Value::String(v));
        }

        let result_str = Value::Object(obj).to_string();
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("result".to_string(), result_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_job_result_merge failed");
            e
        })?;
        Ok(())
    }

    /// Flow: set jobs list and bump updated_at
    pub async fn update_flow_jobs_set(&self, db: u32, id: u32, new_jobs: Vec<u32>) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::flow_key(id);

        let jobs_str = serde_json::to_string(&new_jobs)?;
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("jobs".to_string(), jobs_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET update_flow_jobs_set failed");
            e
        })?;
        Ok(())
    }

    /// Message: append logs (no dedup) and bump updated_at
    pub async fn append_message_logs(
        &self,
        db: u32,
        caller_id: u32,
        id: u32,
        new_logs: Vec<String>,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let key = Self::message_key(caller_id, id);

        let current: Option<String> = cm.hget(&key, "logs").await.ok();
        let mut arr: Vec<Value> = current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_array().cloned())
            .unwrap_or_default();

        for l in new_logs {
            arr.push(Value::String(l));
        }

        let logs_str = Value::Array(arr).to_string();
        let ts = crate::time::current_timestamp();
        let pairs = vec![
            ("logs".to_string(), logs_str),
            ("updated_at".to_string(), ts.to_string()),
        ];
        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
            error!(db=%db, key=%key, error=%e, "HSET append_message_logs failed");
            e
        })?;
        Ok(())
    }
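
    // Illustrative sketch of the append step above, isolated from Redis: the
    // stored JSON array (if any) is extended and re-serialized. Entries are
    // not deduplicated, matching append_message_logs.
    #[cfg(test)]
    fn _demo_logs_append() {
        let current = Some(r#"["boot"]"#.to_string());
        let mut arr: Vec<Value> = current
            .and_then(|s| serde_json::from_str::<Value>(&s).ok())
            .and_then(|v| v.as_array().cloned())
            .unwrap_or_default();
        arr.push(Value::String("started".to_string()));
        assert_eq!(Value::Array(arr).to_string(), r#"["boot","started"]"#);
    }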

    // -----------------------------
    // Queues (lists)
    // -----------------------------

    /// Push a value onto a Redis list using LPUSH in the given DB.
    pub async fn lpush_list(&self, db: u32, list: &str, value: &str) -> Result<()> {
        let mut cm = self.manager_for_db(db).await?;
        let _: i64 = cm.lpush(list, value).await.map_err(|e| {
            error!(db=%db, list=%list, value=%value, error=%e, "LPUSH failed");
            e
        })?;
        Ok(())
    }

    /// Enqueue a message key onto the outbound queue (msg_out).
    /// The value is the canonical message key "message:{caller_id}:{id}".
    pub async fn enqueue_msg_out(&self, db: u32, caller_id: u32, id: u32) -> Result<()> {
        let key = Self::message_key(caller_id, id);
        self.lpush_list(db, "msg_out", &key).await
    }

    /// Block-pop from msg_out with a timeout (in seconds). Returns the message key if present.
    /// Uses BRPOP so the queue behaves as a FIFO with the LPUSH producer.
    pub async fn brpop_msg_out(&self, db: u32, timeout_secs: usize) -> Result<Option<String>> {
        let mut cm = self.manager_for_db(db).await?;
        // BRPOP returns (list, element) on success
        let res: Option<(String, String)> = redis::cmd("BRPOP")
            .arg("msg_out")
            .arg(timeout_secs)
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=%db, timeout_secs=%timeout_secs, error=%e, "BRPOP failed");
                e
            })?;
        Ok(res.map(|(_, v)| v))
    }
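
    // Illustrative sketch (assumption: a reachable Redis instance): with
    // enqueue_msg_out (LPUSH) as the producer and brpop_msg_out (BRPOP) as
    // the consumer, msg_out drains oldest-first. The ids are example values.
    #[cfg(test)]
    async fn _demo_msg_out_fifo(&self) -> Result<()> {
        self.enqueue_msg_out(1, 10, 100).await?; // queued first
        self.enqueue_msg_out(1, 10, 101).await?; // queued second
        // BRPOP pops from the end opposite LPUSH, so the first key out is
        // the first one enqueued.
        assert_eq!(
            self.brpop_msg_out(1, 1).await?.as_deref(),
            Some("message:10:100")
        );
        assert_eq!(
            self.brpop_msg_out(1, 1).await?.as_deref(),
            Some("message:10:101")
        );
        Ok(())
    }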

    /// Scan all runner:* keys in this DB and return the deserialized Runner entries.
    pub async fn scan_runners(&self, db: u32) -> Result<Vec<Runner>> {
        let mut cm = self.manager_for_db(db).await?;
        let mut out: Vec<Runner> = Vec::new();
        let mut cursor: u64 = 0;
        loop {
            let (next, keys): (u64, Vec<String>) = redis::cmd("SCAN")
                .arg(cursor)
                .arg("MATCH")
                .arg("runner:*")
                .arg("COUNT")
                .arg(100)
                .query_async(&mut cm)
                .await
                .map_err(|e| {
                    error!(db=%db, cursor=%cursor, error=%e, "SCAN failed");
                    e
                })?;
            for k in keys {
                if let Ok(r) = self.hget_model::<Runner>(db, &k).await {
                    out.push(r);
                }
            }
            if next == 0 {
                break;
            }
            cursor = next;
        }
        Ok(out)
    }

    // -----------------------------
    // Global registry (DB 0) for Context IDs
    // -----------------------------

    /// Register a context id in the global set "contexts" stored in DB 0.
    pub async fn register_context_id(&self, id: u32) -> Result<()> {
        let mut cm = self.manager_for_db(0).await?;
        let _: i64 = redis::cmd("SADD")
            .arg("contexts")
            .arg(id)
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=0, context_id=%id, error=%e, "SADD contexts failed");
                e
            })?;
        Ok(())
    }

    /// List all registered context ids from the global set in DB 0.
    pub async fn list_context_ids(&self) -> Result<Vec<u32>> {
        let mut cm = self.manager_for_db(0).await?;
        // Using SMEMBERS and parsing into u32
        let vals: Vec<String> = redis::cmd("SMEMBERS")
            .arg("contexts")
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=0, error=%e, "SMEMBERS contexts failed");
                e
            })?;
        let mut out = Vec::with_capacity(vals.len());
        for v in vals {
            if let Ok(n) = v.parse::<u32>() {
                out.push(n);
            }
        }
        out.sort_unstable();
        Ok(out)
    }

    // -----------------------------
    // Supervisor correlation mapping (DB 0)
    // Key: "supcorr:{inner_id_decimal}"
    // Value: JSON {"context_id":u32,"caller_id":u32,"job_id":u32,"message_id":u32}
    // TTL: 1 hour to avoid leaks in case of crashes
    // -----------------------------
    pub async fn supcorr_set(
        &self,
        inner_id: u64,
        context_id: u32,
        caller_id: u32,
        job_id: u32,
        message_id: u32,
    ) -> Result<()> {
        let mut cm = self.manager_for_db(0).await?;
        let key = format!("supcorr:{}", inner_id);
        let val = serde_json::json!({
            "context_id": context_id,
            "caller_id": caller_id,
            "job_id": job_id,
            "message_id": message_id,
        })
        .to_string();
        // SET key val EX 3600
        let _: () = redis::cmd("SET")
            .arg(&key)
            .arg(&val)
            .arg("EX")
            .arg(3600)
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=0, key=%key, error=%e, "SET supcorr_set failed");
                e
            })?;
        Ok(())
    }

    pub async fn supcorr_get(&self, inner_id: u64) -> Result<Option<(u32, u32, u32, u32)>> {
        let mut cm = self.manager_for_db(0).await?;
        let key = format!("supcorr:{}", inner_id);
        let res: Option<String> = redis::cmd("GET")
            .arg(&key)
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=0, key=%key, error=%e, "GET supcorr_get failed");
                e
            })?;
        if let Some(s) = res {
            let v: Value = serde_json::from_str(&s)?;
            let ctx = v.get("context_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
            let caller = v.get("caller_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
            let job = v.get("job_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
            let msg = v.get("message_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
            return Ok(Some((ctx, caller, job, msg)));
        }
        Ok(None)
    }

    pub async fn supcorr_del(&self, inner_id: u64) -> Result<()> {
        let mut cm = self.manager_for_db(0).await?;
        let key = format!("supcorr:{}", inner_id);
        let _: i64 = redis::cmd("DEL")
            .arg(&key)
            .query_async(&mut cm)
            .await
            .map_err(|e| {
                error!(db=0, key=%key, error=%e, "DEL supcorr_del failed");
                e
            })?;
        Ok(())
    }
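
    // Illustrative sketch (assumption: a reachable Redis instance) of the
    // correlation lifecycle: a supervisor reply carrying `inner_id` is
    // resolved back to its (context, caller, job, message) tuple, after
    // which the mapping is deleted. All ids are example values.
    #[cfg(test)]
    async fn _demo_supcorr_lifecycle(&self) -> Result<()> {
        self.supcorr_set(42, 1, 10, 100, 1000).await?;
        if let Some(tuple) = self.supcorr_get(42).await? {
            assert_eq!(tuple, (1, 10, 100, 1000));
        }
        // Delete eagerly; the 1-hour TTL is only a crash-safety net.
        self.supcorr_del(42).await?;
        Ok(())
    }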
}
14
bin/coordinator/src/time.rs
Normal file
@@ -0,0 +1,14 @@
use std::time::{SystemTime, UNIX_EPOCH};

/// A timestamp since the unix epoch
pub type Timestamp = i64;

/// Get the current system timestamp
pub fn current_timestamp() -> Timestamp {
    let now = SystemTime::now();
    // A duration is always positive so this returns an unsigned integer, while a timestamp can
    // predate the unix epoch so we must cast to a signed integer.
    now.duration_since(UNIX_EPOCH)
        .expect("Time moves forward")
        .as_secs() as i64
}