fix: rename overview.md files to avoid conflicts and add collection name

Timur Gordon
2025-11-14 11:01:43 +01:00
parent f67296cd25
commit 2c24b120de
20 changed files with 85 additions and 91 deletions

View File

@@ -13,8 +13,6 @@ pub struct Runner {
     pub address: IpAddr,
     /// Needs to be set by the runner, usually `runner<runnerid`
     pub topic: String,
-    /// The executor this runner can handle (e.g., "python", "rhai"); used for routing
-    pub executor: String,
     /// If this is true, the runner also listens on a local redis queue
     pub local: bool,
     /// Optional secret used for authenticated supervisor calls (if required)

View File

@@ -135,8 +135,6 @@ pub struct RunnerCreate {
     pub pubkey: String,
     pub address: IpAddr,
     pub topic: String,
-    /// The executor this runner can handle (e.g., "python", "rhai")
-    pub executor: String,
     pub local: bool,
     /// Optional secret used for authenticated supervisor calls (if required)
     pub secret: Option<String>,
@@ -150,7 +148,6 @@ impl RunnerCreate {
            pubkey,
            address,
            topic,
-           executor,
            local,
            secret,
        } = self;
@@ -160,7 +157,6 @@ impl RunnerCreate {
            pubkey,
            address,
            topic,
-           executor,
            local,
            secret,
            created_at: ts,
@@ -211,7 +207,6 @@ pub struct JobCreate {
     pub context_id: u32,
     pub script: String,
     pub runner: Option<String>,
-    pub executor: Option<String>,
     pub timeout: u32,
     pub retries: u8,
     pub env_vars: HashMap<String, String>,
@@ -231,7 +226,6 @@ impl JobCreate {
            context_id: self.context_id.to_string(),
            payload: self.script,
            runner: self.runner.unwrap_or_else(|| "default-runner".to_string()),
-           executor: self.executor.unwrap_or_else(|| "python".to_string()),
            timeout: self.timeout as u64,
            env_vars: self.env_vars,
            created_at: Utc::now(),

View File

@@ -425,7 +425,7 @@ impl AppService {
            context_id,
            flow_id,
            message: "job.run".to_string(),
-           message_type: job.executor.clone(),
+           message_type: job.runner.clone(),
            message_format_type: MessageFormatType::Text,
            timeout: job.timeout as u32,
            timeout_ack: 10,
@@ -503,7 +503,7 @@ impl AppService {
            context_id,
            flow_id, // Add flow_id for DAG tracking
            message: "job.run".to_string(),
-           message_type: job.executor.clone(),
+           message_type: job.runner.clone(),
            message_format_type: MessageFormatType::Text,
            timeout: job.timeout as u32,
            timeout_ack: 10,

View File

@@ -4,16 +4,16 @@ A specialized runner for the Hero ecosystem that executes heroscripts using the
 ## Overview
-The Hero runner executes heroscripts by calling `hero run -h <payload>` for each job. This makes it ideal for:
+The Hero runner executes heroscripts by piping the payload to `hero run -s` via stdin for each job. This makes it ideal for:
 - Running heroscripts from job payloads
-- Executing Hero automation tasks
+- Executing Hero automation tasks (e.g., `!!git.list`, `!!docker.start`)
 - Integrating with the Hero CLI ecosystem
 - Running scripted workflows
 ## Features
-- **Heroscript Execution**: Executes `hero run -h <payload>` for each job
+- **Heroscript Execution**: Pipes payload to `hero run -s` via stdin (no temp files)
 - **Environment Variables**: Passes job environment variables to the hero command
 - **Timeout Support**: Respects job timeout settings
 - **Signature Verification**: Verifies job signatures before execution
@@ -38,15 +38,28 @@ herorunner my-hero-runner --redis-url redis://localhost:6379
 ## Job Payload Format
-The job payload should contain the heroscript content that will be passed to `hero run -h`.
+The job payload should contain the heroscript content. The runner will pipe it directly to `hero run -s` via stdin.
-### Example Payload
+### Example Payloads
+**Simple print:**
 ```
 print("Hello from heroscript!")
 ```
-The runner will execute: `hero run -h 'print("Hello from heroscript!")'`
+**Hero actions:**
+```
+!!git.list
+```
+**Multi-line script:**
+```
+!!git.list
+print("Repositories listed")
+!!docker.ps
+```
+The runner executes: `echo "<payload>" | hero run -s`
 ## Examples

View File

@@ -25,21 +25,22 @@ impl HeroExecutor {
     /// Execute a command from the job payload
     fn execute_command(&self, job: &Job) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
-        info!("Runner '{}': Executing hero run -h for job {}", self.runner_id, job.id);
+        info!("Runner '{}': Executing hero run for job {}", self.runner_id, job.id);
-        // Always execute: hero run -h <payload>
+        // Execute: hero run -s (reads from stdin)
         let mut cmd = Command::new("hero");
-        cmd.args(&["run", "-h", &job.payload]);
+        cmd.args(&["run", "-s"]);
-        debug!("Runner '{}': Executing: hero run -h {}", self.runner_id, job.payload);
+        debug!("Runner '{}': Executing: hero run -s with stdin", self.runner_id);
         // Set environment variables from job
         for (key, value) in &job.env_vars {
             cmd.env(key, value);
         }
-        // Configure stdio
+        // Configure stdio - pipe stdin to send heroscript content
-        cmd.stdout(Stdio::piped())
+        cmd.stdin(Stdio::piped())
+            .stdout(Stdio::piped())
             .stderr(Stdio::piped());
         // Execute command with timeout
@@ -49,7 +50,16 @@ impl HeroExecutor {
         info!("Runner '{}': Starting command execution for job {}", self.runner_id, job.id);
         let mut child = cmd.spawn()
-            .map_err(|e| format!("Failed to spawn 'hero run -h': {}", e))?;
+            .map_err(|e| format!("Failed to spawn 'hero run -s': {}", e))?;
+        // Write heroscript payload to stdin
+        if let Some(mut stdin) = child.stdin.take() {
+            use std::io::Write;
+            stdin.write_all(job.payload.as_bytes())
+                .map_err(|e| format!("Failed to write to stdin: {}", e))?;
+            // Close stdin to signal EOF
+            drop(stdin);
+        }
         // Wait for command with timeout
         let output = loop {
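Note: as a standalone reference for the stdin-piping pattern introduced above, here is a minimal hedged sketch using only `std::process` (synchronous, no timeout loop; it assumes a `hero` binary is on PATH and is not taken verbatim from the repository):

```rust
use std::io::Write;
use std::process::{Command, Stdio};

fn run_heroscript(payload: &str) -> std::io::Result<String> {
    // Spawn `hero run -s`, which reads the heroscript from stdin.
    let mut child = Command::new("hero")
        .args(["run", "-s"])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;

    // Write the payload, then let the handle drop so the child sees EOF.
    if let Some(mut stdin) = child.stdin.take() {
        stdin.write_all(payload.as_bytes())?;
    }

    let output = child.wait_with_output()?;
    Ok(String::from_utf8_lossy(&output.stdout).into_owned())
}
```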

View File

@@ -152,11 +152,10 @@ mod tests {
     }
     fn create_test_job(id: &str, runner: &str) -> Job {
-        let mut job = JobBuilder::new()
+        let job = JobBuilder::new()
            .caller_id("test_caller")
            .context_id("test_context")
            .runner(runner)
-           .executor("test")
            .payload("test payload")
            .build()
            .unwrap();

View File

@@ -81,7 +81,6 @@ impl Supervisor {
            .context_id("ping_context")
            .payload("ping")
            .runner(runner_id)
-           .executor("ping")
            .timeout(10)
            .build()
            .map_err(|e| SupervisorError::QueueError {

View File

@@ -0,0 +1 @@
+horus

View File

@@ -15,19 +15,19 @@ Horus is a distributed job execution system with three layers: Coordinator, Supe
 ### Coordinator
 Workflow orchestration engine for DAG-based execution.
-- [Overview](./coordinator/overview.md)
+- [Overview](./coordinator/coordinator.md)
 ### Supervisor
 Job dispatcher with authentication and routing.
-- [Overview](./supervisor/overview.md)
+- [Overview](./supervisor/supervisor.md)
 - [Authentication](./supervisor/auth.md)
 - [OpenRPC API](./supervisor/openrpc.json)
 ### Runners
 Job executors for different workload types.
-- [Runner Overview](./runner/overview.md)
+- [Runner Overview](./runner/runners.md)
 - [Hero Runner](./runner/hero.md) - Heroscript execution
 - [SAL Runner](./runner/sal.md) - System operations
 - [Osiris Runner](./runner/osiris.md) - Database operations

View File

@@ -56,7 +56,7 @@ Horus is a hierarchical orchestration runtime with three layers: Coordinator, Su
 - Parallel execution is required
 - Complex data pipelines
-[→ Coordinator Documentation](./coordinator/overview.md)
+[→ Coordinator Documentation](./coordinator/coordinator.md)
 ### 2. Supervisor (Required)
 **Purpose:** Job admission, authentication, and routing
@@ -74,7 +74,7 @@ Horus is a hierarchical orchestration runtime with three layers: Coordinator, Su
 - Signature-based authentication
 - Runner health monitoring
-[→ Supervisor Documentation](./supervisor/overview.md)
+[→ Supervisor Documentation](./supervisor/supervisor.md)
 ### 3. Runners (Required)
 **Purpose:** Execute actual job workloads
@@ -90,7 +90,7 @@ Horus is a hierarchical orchestration runtime with three layers: Coordinator, Su
 - Timeout support
 - Environment variable handling
-[→ Runner Documentation](./runner/overview.md)
+[→ Runner Documentation](./runner/runners.md)
 ## Communication Protocols

View File

@@ -229,7 +229,6 @@ impl OsirisClient {
            .context_id("command-execution")
            .runner(&self.runner_name)
            .payload(script)
-           .executor("rhai")
            .timeout(self.timeout)
            .build()
            .map_err(|e| OsirisClientError::CommandFailed(format!("Failed to build job: {}", e)))?;

View File

@@ -254,14 +254,12 @@ impl WasmSupervisorClient {
     pub async fn create_job_with_secret(&self, secret: String, job: hero_job::Job) -> Result<String, JsValue> {
         // Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner
         let params = serde_json::json!([{
-            "secret": secret,
             "job": {
                 "id": job.id,
                 "caller_id": job.caller_id,
                 "context_id": job.context_id,
                 "payload": job.payload,
                 "runner": job.runner,
-                "executor": job.executor,
                 "timeout": job.timeout,
                 "env_vars": serde_json::from_str::<serde_json::Value>(&serde_json::to_string(&job.env_vars).unwrap_or_else(|_| "{}".to_string())).unwrap_or(serde_json::json!({})),
                 "created_at": job.created_at,
@@ -286,14 +284,12 @@ impl WasmSupervisorClient {
     pub async fn run_job(&self, secret: String, job: hero_job::Job) -> Result<String, JsValue> {
         // Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner
         let params = serde_json::json!([{
-            "secret": secret,
             "job": {
                 "id": job.id,
                 "caller_id": job.caller_id,
                 "context_id": job.context_id,
                 "payload": job.payload,
                 "runner": job.runner,
-                "executor": job.executor,
                 "timeout": job.timeout,
                 "env_vars": serde_json::from_str::<serde_json::Value>(&serde_json::to_string(&job.env_vars).unwrap_or_else(|_| "{}".to_string())).unwrap_or(serde_json::json!({})),
                 "created_at": job.created_at,
@@ -369,7 +365,6 @@ impl WasmSupervisorClient {
         caller_id: String,
         context_id: String,
         payload: String,
-        executor: String,
     ) -> Result<String, JsValue> {
         // Generate a unique job ID
         let job_id = format!("job-{}", uuid::Uuid::new_v4());
@@ -380,7 +375,6 @@ impl WasmSupervisorClient {
             "caller_id": caller_id,
             "context_id": context_id,
             "payload": payload,
-            "executor": executor,
             "timeout": 30,
             "env": {}
         });
@@ -416,7 +410,8 @@ impl WasmSupervisorClient {
     /// Get a job by job ID
     pub async fn get_job(&self, job_id: &str) -> Result<hero_job::Job, JsValue> {
         let params = serde_json::json!([job_id]);
-        match self.call_method("get_job", params).await {
+        match self.call_method("job.run", params).await {
             Ok(result) => {
                 // Convert the Job result to hero_job::Job
                 if let Ok(job_value) = serde_json::from_value::<serde_json::Value>(result) {
@@ -426,7 +421,6 @@ impl WasmSupervisorClient {
                     let context_id = job_value.get("context_id").and_then(|v| v.as_str()).unwrap_or("").to_string();
                     let payload = job_value.get("payload").and_then(|v| v.as_str()).unwrap_or("").to_string();
                     let runner = job_value.get("runner").and_then(|v| v.as_str()).unwrap_or("").to_string();
-                    let executor = job_value.get("executor").and_then(|v| v.as_str()).unwrap_or("").to_string();
                     let timeout_secs = job_value.get("timeout").and_then(|v| v.get("secs")).and_then(|v| v.as_u64()).unwrap_or(30);
                     let env_vars = job_value.get("env_vars").map(|v| v.to_string()).unwrap_or_else(|| "{}".to_string());
                     let created_at = job_value.get("created_at").and_then(|v| v.as_str()).unwrap_or("").to_string();
@@ -438,7 +432,6 @@ impl WasmSupervisorClient {
                         context_id,
                         payload,
                         runner,
-                        executor,
                         timeout: timeout_secs,
                         env_vars: serde_json::from_str(&env_vars).unwrap_or_default(),
                         created_at: chrono::DateTime::parse_from_rfc3339(&created_at)
@@ -830,7 +823,6 @@ pub fn create_job_canonical_repr(
     context_id: String,
     payload: String,
     runner: String,
-    executor: String,
     timeout: u64,
     env_vars_json: String,
 ) -> Result<String, JsValue> {
@@ -844,13 +836,12 @@ pub fn create_job_canonical_repr(
     // Create canonical representation (matches Job::canonical_representation in runner_rust)
     let canonical = format!(
-        "{}:{}:{}:{}:{}:{}:{}:{:?}",
+        "{}:{}:{}:{}:{}:{}:{:?}",
         id,
         caller_id,
         context_id,
         payload,
         runner,
-        executor,
         timeout,
         env_vars_sorted
     );
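To make the new 7-field canonical form concrete, here is a small hedged example with purely illustrative values (the trailing `[("FOO", "bar")]` is simply the `Debug` rendering of the sorted env-var pairs):

```rust
fn main() {
    // Hypothetical job values, for illustration only.
    let (id, caller_id, context_id, payload, runner, timeout) =
        ("job-123", "cli", "ctx", "!!git.list", "herorunner", 300u64);
    let env_vars_sorted = vec![("FOO", "bar")];

    // Same format string as the updated canonical representation above.
    let canonical = format!(
        "{}:{}:{}:{}:{}:{}:{:?}",
        id, caller_id, context_id, payload, runner, timeout, env_vars_sorted
    );
    assert_eq!(canonical, r#"job-123:cli:ctx:!!git.list:herorunner:300:[("FOO", "bar")]"#);
}
```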

View File

@@ -73,7 +73,6 @@ pub struct Job {
     pub context_id: String,
     pub payload: String,
     pub runner: String, // name of the runner to execute this job
-    pub executor: String, // name of the executor the runner will use to execute this job
     pub timeout: u64, // timeout in seconds
     #[cfg_attr(target_arch = "wasm32", wasm_bindgen(skip))]
     pub env_vars: HashMap<String, String>, // environment variables for script execution
@@ -109,7 +108,6 @@ impl Job {
         context_id: String,
         payload: String,
         runner: String,
-        executor: String,
     ) -> Self {
         let now = Utc::now();
         Self {
@@ -118,7 +116,6 @@ impl Job {
             context_id,
             payload,
             runner,
-            executor,
             timeout: 300, // 5 minutes default
             env_vars: HashMap::new(),
             created_at: now,
@@ -137,13 +134,12 @@ impl Job {
         env_vars_sorted.sort_by_key(|&(k, _)| k);
         format!(
-            "{}:{}:{}:{}:{}:{}:{}:{:?}",
+            "{}:{}:{}:{}:{}:{}:{:?}",
             self.id,
             self.caller_id,
             self.context_id,
             self.payload,
             self.runner,
-            self.executor,
             self.timeout,
             env_vars_sorted
         )
@@ -202,7 +198,6 @@ pub struct JobBuilder {
     context_id: String,
     payload: String,
     runner: String,
-    executor: String,
     timeout: u64, // timeout in seconds
     env_vars: HashMap<String, String>,
     signatures: Vec<JobSignature>,
@@ -215,7 +210,6 @@ impl JobBuilder {
             context_id: "".to_string(),
             payload: "".to_string(),
             runner: "".to_string(),
-            executor: "".to_string(),
             timeout: 300, // 5 minutes default
             env_vars: HashMap::new(),
             signatures: Vec::new(),
@@ -246,11 +240,6 @@ impl JobBuilder {
         self
     }
-    /// Set the executor for this job
-    pub fn executor(mut self, executor: &str) -> Self {
-        self.executor = executor.to_string();
-        self
-    }
     /// Set the timeout for job execution (in seconds)
     pub fn timeout(mut self, timeout: u64) -> Self {
@@ -311,16 +300,12 @@ impl JobBuilder {
         if self.runner.is_empty() {
             return Err(JobError::InvalidData("runner is required".to_string()));
         }
-        if self.executor.is_empty() {
-            return Err(JobError::InvalidData("executor is required".to_string()));
-        }
         let mut job = Job::new(
             self.caller_id,
             self.context_id,
             self.payload,
             self.runner,
-            self.executor,
         );
         job.timeout = self.timeout;
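For callers migrating past this change, a hedged usage sketch of the builder without the removed `executor` step (crate path, values, and runner name are assumptions, not taken from the repository):

```rust
// Assumes the builder is exported as hero_job::JobBuilder.
use hero_job::JobBuilder;

fn main() {
    let job = JobBuilder::new()
        .caller_id("example-caller")   // illustrative value
        .context_id("example-context") // illustrative value
        .runner("hero-runner")         // illustrative runner name; routing is by runner alone now
        .payload("!!git.list")
        .timeout(60)
        // .executor(...) no longer exists and would fail to compile
        .build()
        .expect("caller_id, context_id and runner are set, so build succeeds");
    println!("built job {} for runner {}", job.id, job.runner);
}
```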

View File

@@ -1,7 +1,7 @@
 //! Runner trait abstraction for job processing
 use crate::{Job, JobStatus, Client};
-use log::{debug, error, info};
+use log::{error, info};
 use redis::AsyncCommands;
 use std::sync::Arc;

View File

@@ -32,7 +32,6 @@ where
         .caller_id("script_mode")
         .payload(script_content)
         .runner(runner_id)
-        .executor("rhai")
         .timeout(job_timeout.as_secs())
         .build()?;

View File

@@ -139,22 +139,14 @@ async fn test_02_simple_heroscript() {
     let job_id = job.id.clone();
     // Save and queue job
-    client.store_job_in_redis(&job).await.expect("Failed to save job");
-    client.job_run(&job_id, RUNNER_ID).await.expect("Failed to queue job");
-    // Wait for job to complete
-    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
-    // Check job status
-    let status = client.get_status(&job_id).await.expect("Failed to get job status");
-    println!("Job status: {:?}", status);
-    // Get result or error
-    if let Some(result) = client.get_result(&job_id).await.expect("Failed to get result") {
-        println!("Job result: {}", result);
-    }
-    if let Some(error) = client.get_error(&job_id).await.expect("Failed to get error") {
-        println!("Job error: {}", error);
+    match client.job_run_wait(&job, RUNNER_ID, 5).await {
+        Ok(result) => {
+            println!("✅ Job succeeded with result:\n{}", result);
+        }
+        Err(e) => {
+            println!("❌ Job failed with error: {:?}", e);
+            panic!("Job execution failed");
+        }
     }
     println!("✅ Heroscript job completed");
@@ -180,11 +172,19 @@ async fn test_03_job_with_env_vars() {
     // Check job status
     let status = client.get_status(&job_id).await.expect("Failed to get job status");
-    println!("Job status: {:?}", status);
+    println!("📊 Job status: {:?}", status);
-    // Get result
+    // Get result or error
-    if let Some(result) = client.get_result(&job_id).await.expect("Failed to get result") {
-        println!("Job result: {}", result);
+    match (client.get_result(&job_id).await, client.get_error(&job_id).await) {
+        (Ok(Some(result)), _) => {
+            println!("✅ Job succeeded with result:\n{}", result);
+        }
+        (_, Ok(Some(error))) => {
+            println!("❌ Job failed with error:\n{}", error);
+        }
+        _ => {
+            println!("⚠️ No result or error available");
+        }
     }
     println!("✅ Job with env vars completed");
@@ -196,8 +196,13 @@ async fn test_04_job_timeout() {
     let client = create_client().await;
-    // Create job with short timeout
+    // Create job with short timeout - use a heroscript that loops forever
-    let mut job = create_test_job("sleep 10");
+    let mut job = create_test_job(r#"
+        for i in 1..1000 {
+            print("Loop iteration: ${i}")
+            sleep(100)
+        }
+    "#);
     job.timeout = 2; // 2 second timeout
     let job_id = job.id.clone();
@@ -210,15 +215,17 @@ async fn test_04_job_timeout() {
     // Check job status - should be error due to timeout
     let status = client.get_status(&job_id).await.expect("Failed to get job status");
-    println!("Job status: {:?}", status);
+    println!("📊 Job status: {:?}", status);
     // Should have error
     if let Some(error) = client.get_error(&job_id).await.expect("Failed to get error") {
-        println!("Job error (expected timeout): {}", error);
+        println!("Job error (expected timeout):\n{}", error);
         assert!(error.contains("timeout") || error.contains("timed out"), "Error should mention timeout");
+        println!("✅ Job timeout handled correctly");
+    } else {
+        println!("⚠️ Expected timeout error but got none");
+        panic!("Job should have timed out");
     }
-    println!("✅ Job timeout handled correctly");
 }
 /// Final test that ensures cleanup happens

View File

@@ -111,11 +111,10 @@ async fn create_client() -> SupervisorClient {
 /// Helper to create a test job (always uses TEST_RUNNER_NAME)
 fn create_test_job(payload: &str) -> Job {
     JobBuilder::new()
-        .caller_id("e2e-test")
+        .caller_id("test-caller")
         .context_id("test-context")
         .runner(TEST_RUNNER_NAME)
         .payload(payload)
-        .executor("rhai")
         .timeout(30)
         .build()
         .expect("Failed to build test job")