move repos into monorepo

Timur Gordon
2025-11-13 20:44:00 +01:00
commit 4b23e5eb7f
204 changed files with 33737 additions and 0 deletions


@@ -0,0 +1,87 @@
# SAL Runner
The SAL (System Abstraction Layer) Runner is an asynchronous job processing engine that executes Rhai scripts with access to system-level operations and infrastructure management capabilities.
## Features
- **Asynchronous Processing**: Handles multiple jobs concurrently with configurable timeouts
- **Redis Integration**: Uses Redis for job queue management and coordination
- **System Operations**: Full access to SAL modules including OS, networking, containers, and cloud services
- **Graceful Shutdown**: Responds to SIGINT (Ctrl+C) for clean termination
- **Comprehensive Logging**: Detailed logging for monitoring and debugging
## Usage
```bash
cargo run --bin runner_sal -- <RUNNER_ID> [OPTIONS]
```
### Arguments
- `<RUNNER_ID>`: Unique identifier for this runner instance (required, positional)
### Options
- `-d, --db-path <PATH>`: Database file path (default: `/tmp/sal.db`)
- `-r, --redis-url <URL>`: Redis connection URL (default: `redis://localhost:6379`)
- `-t, --timeout <SECONDS>`: Default job timeout in seconds (default: `300`)
- `-s, --script <SCRIPT>`: Rhai script to execute once in script mode instead of processing the job queue (optional)
### Examples
```bash
# Basic usage with default settings
cargo run --bin runner_sal -- myrunner

# Custom Redis URL and database path
cargo run --bin runner_sal -- production-runner -r redis://prod-redis:6379 -d /var/lib/sal.db

# Custom timeout for long-running jobs
cargo run --bin runner_sal -- batch-runner -t 3600
```
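The runner also supports a single-shot script mode via the optional `-s, --script` flag (see Options above): the script runs once and the process exits instead of polling the job queue. A hedged sketch of the underlying call, with the argument order taken from `main.rs` later in this commit (exact parameter types are assumptions):
```rust
use std::time::Duration;
use hero_runner::script_mode::execute_script_mode;

async fn run_once() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let script = String::from(r#"print("hello from SAL")"#); // the -s argument
    let runner_id = String::from("myrunner");

    // Executes one Rhai script on a freshly built SAL engine and returns its output.
    // create_sal_engine is defined in this runner's engine module (shown below).
    let output = execute_script_mode(
        &script,
        &runner_id,
        "redis://localhost:6379".to_string(),
        Duration::from_secs(300),
        crate::engine::create_sal_engine,
    )
    .await?;

    println!("{output}");
    Ok(())
}
```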
## Available SAL Modules
The SAL runner provides access to the following system modules through Rhai scripts:
- **OS Operations**: File system, process management, system information
- **Redis Client**: Redis database operations and caching
- **PostgreSQL Client**: Database connectivity and queries
- **Process Management**: System process control and monitoring
- **Virtualization**: Container and VM management
- **Git Operations**: Version control system integration
- **Zinit Client**: Service management and initialization
- **Mycelium**: Networking and mesh connectivity
- **Text Processing**: String manipulation and text utilities
- **Network Operations**: HTTP requests, network utilities
- **Kubernetes**: Container orchestration and cluster management
- **Hetzner Cloud**: Cloud infrastructure management
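These modules become available to a script because they are registered on the Rhai engine that executes it (see the engine module further down in this commit). A minimal sketch of driving that engine directly; the placeholder expression stands in for a real job script:
```rust
use rhai::Engine;

fn demo() -> Result<(), Box<rhai::EvalAltResult>> {
    // Builds a fresh Rhai engine with every SAL module above registered
    // (create_sal_engine is defined in this runner's engine module, shown below).
    let engine: Engine = crate::engine::create_sal_engine();

    // A trivial placeholder expression; real jobs call the registered SAL functions.
    let answer: i64 = engine.eval("40 + 2")?;
    println!("script returned {answer}");
    Ok(())
}
```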
## Architecture
The SAL runner uses an asynchronous architecture that:
1. Connects to Redis for job queue management
2. Creates a Rhai engine with all SAL modules registered
3. Processes jobs concurrently with configurable timeouts
4. Handles graceful shutdown on SIGINT
5. Provides comprehensive error handling and logging
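A condensed sketch of how these steps are wired together, mirroring `main.rs` later in this commit (`spawn_async_runner` comes from the `hero_runner` crate, `create_sal_engine` from this runner's engine module, and the literal values stand in for CLI arguments):
```rust
use std::time::Duration;
use hero_runner::spawn_async_runner;
use tokio::sync::mpsc;

async fn run_runner() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Shutdown channel; a Ctrl+C handler forwards the signal to the runner.
    let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);
    tokio::spawn(async move {
        tokio::signal::ctrl_c().await.expect("failed to listen for ctrl+c");
        let _ = shutdown_tx.send(()).await;
    });

    // Spawn the runner: jobs are pulled from Redis and executed on SAL engines
    // with the configured timeout.
    let handle = spawn_async_runner(
        "myrunner".to_string(),               // runner ID
        "/tmp/sal.db".to_string(),            // database path
        "redis://localhost:6379".to_string(), // Redis URL
        shutdown_rx,
        Duration::from_secs(300),
        crate::engine::create_sal_engine,
    );

    // Wait for the runner task and surface any error.
    handle.await??;
    Ok(())
}
```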
## Error Handling
The runner provides detailed error messages for common issues:
- Redis connection failures
- Database access problems
- Script execution errors
- Timeout handling
- Resource cleanup on shutdown
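For example, `main.rs` (later in this commit) reports a failure of the runner task itself separately from an error the runner returns. A sketch of that pattern, assuming the handle is a Tokio `JoinHandle` carrying a boxed error, as the code there suggests:
```rust
use tokio::task::JoinHandle;

type RunnerResult = Result<(), Box<dyn std::error::Error + Send + Sync>>;

async fn wait_for_runner(runner_handle: JoinHandle<RunnerResult>) {
    match runner_handle.await {
        // Runner finished and reported success.
        Ok(Ok(())) => log::info!("runner shut down cleanly"),
        // Runner finished but returned an error (e.g. Redis or script failure).
        Ok(Err(e)) => log::error!("runner exited with an error: {}", e),
        // The task itself failed to join (panic or cancellation).
        Err(e) => log::error!("runner task panicked or was cancelled: {}", e),
    }
}
```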
## Logging
Set the `RUST_LOG` environment variable to control logging levels:
```bash
RUST_LOG=debug cargo run --bin runner_sal -- myrunner
```
Available log levels: `error`, `warn`, `info`, `debug`, `trace`


@@ -0,0 +1,73 @@
use std::sync::{Arc, OnceLock};
// Re-export common Rhai types for convenience
pub use rhai::Engine;
// Re-export specific functions from sal-os package
// Re-export Redis client module registration function
// Re-export PostgreSQL client module registration function
// Re-export virt functions from sal-virt package
/// Engine factory for creating and sharing Rhai engines with SAL modules.
pub struct EngineFactory {
    engine: Arc<Engine>,
}

impl EngineFactory {
    /// Create a new engine factory with a configured Rhai engine.
    pub fn new() -> Self {
        let mut engine = Engine::new();
        register_sal_modules(&mut engine);
        // Logger
        hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "sal_runner");
        Self {
            engine: Arc::new(engine),
        }
    }

    /// Get a shared reference to the engine.
    pub fn get_engine(&self) -> Arc<Engine> {
        Arc::clone(&self.engine)
    }

    /// Get the global singleton engine factory.
    pub fn global() -> &'static EngineFactory {
        static FACTORY: OnceLock<EngineFactory> = OnceLock::new();
        FACTORY.get_or_init(EngineFactory::new)
    }
}

pub fn register_sal_modules(engine: &mut Engine) {
    let _ = sal_os::rhai::register_os_module(engine);
    let _ = sal_redisclient::rhai::register_redisclient_module(engine);
    let _ = sal_postgresclient::rhai::register_postgresclient_module(engine);
    let _ = sal_process::rhai::register_process_module(engine);
    let _ = sal_virt::rhai::register_virt_module(engine);
    let _ = sal_git::rhai::register_git_module(engine);
    let _ = sal_zinit_client::rhai::register_zinit_module(engine);
    let _ = sal_mycelium::rhai::register_mycelium_module(engine);
    let _ = sal_text::rhai::register_text_module(engine);
    let _ = sal_net::rhai::register_net_module(engine);
    let _ = sal_kubernetes::rhai::register_kubernetes_module(engine);
    let _ = sal_hetzner::rhai::register_hetzner_module(engine);
    println!("SAL modules registered successfully.");
}

/// Create a new SAL engine instance.
pub fn create_sal_engine() -> Engine {
    let mut engine = Engine::new();
    register_sal_modules(&mut engine);
    hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "sal_runner");
    engine
}

/// Create a shared system engine using the factory.
pub fn create_shared_sal_engine() -> Arc<Engine> {
    EngineFactory::global().get_engine()
}
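A minimal usage sketch for the factory above (the `crate::engine` path assumes the module layout used by `main.rs` below; the script body is a placeholder):
```rust
use std::sync::Arc;
use rhai::Engine;

fn shared_engine_demo() -> Result<(), Box<rhai::EvalAltResult>> {
    // Every caller shares one lazily initialized engine with the SAL modules registered.
    let engine: Arc<Engine> = crate::engine::create_shared_sal_engine();

    // `run` evaluates a script and discards its return value.
    engine.run(r#"print("shared SAL engine ready")"#)?;
    Ok(())
}
```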

bin/runners/sal/src/main.rs Normal file

@@ -0,0 +1,108 @@
use hero_runner::{spawn_async_runner, script_mode::execute_script_mode};
use clap::Parser;
use log::{error, info};
use std::time::Duration;
use tokio::sync::mpsc;
mod engine;
use engine::create_sal_engine;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Runner ID
    runner_id: String,

    /// Database path
    #[arg(short, long, default_value = "/tmp/sal.db")]
    db_path: String,

    /// Redis URL
    #[arg(short = 'r', long, default_value = "redis://localhost:6379")]
    redis_url: String,

    /// Default timeout for jobs in seconds
    #[arg(short, long, default_value_t = 300)]
    timeout: u64,

    /// Script to execute in single-job mode (optional)
    #[arg(short, long)]
    script: Option<String>,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Initialize logging
    env_logger::init();

    let args = Args::parse();

    // Check if we're in script mode
    if let Some(script_content) = args.script {
        info!("Running in script mode with runner ID: {}", args.runner_id);

        let result = execute_script_mode(
            &script_content,
            &args.runner_id,
            args.redis_url,
            Duration::from_secs(args.timeout),
            create_sal_engine,
        )
        .await;

        match result {
            Ok(output) => {
                println!("Script execution result:\n{}", output);
                return Ok(());
            }
            Err(e) => {
                error!("Script execution failed: {}", e);
                return Err(e);
            }
        }
    }

    info!("Starting SAL Async Runner with ID: {}", args.runner_id);
    info!("Database path: {}", args.db_path);
    info!("Redis URL: {}", args.redis_url);
    info!("Default timeout: {} seconds", args.timeout);

    // Create shutdown channel
    let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);

    // Setup signal handling for graceful shutdown
    let shutdown_tx_clone = shutdown_tx.clone();
    tokio::spawn(async move {
        tokio::signal::ctrl_c().await.expect("Failed to listen for ctrl+c");
        info!("Received Ctrl+C, initiating shutdown...");
        let _ = shutdown_tx_clone.send(()).await;
    });

    // Spawn the async runner with engine factory
    let runner_handle = spawn_async_runner(
        args.runner_id.clone(),
        args.db_path,
        args.redis_url,
        shutdown_rx,
        Duration::from_secs(args.timeout),
        create_sal_engine,
    );

    info!("SAL Async Runner '{}' started successfully", args.runner_id);

    // Wait for the runner to complete
    match runner_handle.await {
        Ok(Ok(())) => {
            info!("SAL Async Runner '{}' shut down successfully", args.runner_id);
        }
        Ok(Err(e)) => {
            error!("SAL Async Runner '{}' encountered an error: {}", args.runner_id, e);
            return Err(e);
        }
        Err(e) => {
            error!("Failed to join SAL Async Runner '{}' task: {}", args.runner_id, e);
            return Err(Box::new(e));
        }
    }

    Ok(())
}