initial commit
This commit is contained in:
290
examples/basic_openrpc_client.rs
Normal file
290
examples/basic_openrpc_client.rs
Normal file
@@ -0,0 +1,290 @@
|
||||
//! Comprehensive OpenRPC Example for Hero Supervisor
|
||||
//!
|
||||
//! This example demonstrates the complete OpenRPC workflow:
|
||||
//! 1. Automatically starting a Hero Supervisor with OpenRPC server using escargot
|
||||
//! 2. Building and using a mock runner binary
|
||||
//! 3. Connecting with the OpenRPC client
|
||||
//! 4. Managing runners (add, start, stop, remove)
|
||||
//! 5. Creating and queuing jobs
|
||||
//! 6. Monitoring job execution and verifying results
|
||||
//! 7. Bulk operations and status monitoring
|
||||
//! 8. Gracefully shutting down the supervisor
|
||||
//!
|
||||
//! To run this example:
|
||||
//! `cargo run --example basic_openrpc_client`
|
||||
//!
|
||||
//! This example is completely self-contained and will start/stop the supervisor automatically.
|
||||
|
||||
use hero_supervisor_openrpc_client::{
|
||||
SupervisorClient, RunnerConfig, RunnerType, ProcessManagerType,
|
||||
JobBuilder, JobType, ClientError
|
||||
};
|
||||
use std::time::Duration;
|
||||
use escargot::CargoBuild;
|
||||
use std::process::Stdio;
|
||||
use tokio::time::sleep;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// env_logger::init(); // Commented out to avoid version conflicts
|
||||
|
||||
println!("🚀 Comprehensive OpenRPC Example for Hero Supervisor");
|
||||
println!("====================================================");
|
||||
|
||||
// Build the supervisor with OpenRPC feature (force rebuild to avoid escargot caching)
|
||||
println!("\n🔨 Force rebuilding supervisor with OpenRPC feature...");
|
||||
|
||||
// Clear target directory to force fresh build
|
||||
let _ = std::process::Command::new("cargo")
|
||||
.arg("clean")
|
||||
.output();
|
||||
|
||||
let supervisor_binary = CargoBuild::new()
|
||||
.bin("supervisor")
|
||||
.features("openrpc")
|
||||
.current_release()
|
||||
.run()?;
|
||||
|
||||
println!("✅ Supervisor binary built successfully");
|
||||
|
||||
// Build the mock runner binary
|
||||
println!("\n🔨 Building mock runner binary...");
|
||||
let mock_runner_binary = CargoBuild::new()
|
||||
.example("mock_runner")
|
||||
.current_release()
|
||||
.run()?;
|
||||
|
||||
println!("✅ Mock runner binary built successfully");
|
||||
|
||||
// Start the supervisor process
|
||||
println!("\n🚀 Starting supervisor with OpenRPC server...");
|
||||
let mut supervisor_process = supervisor_binary
|
||||
.command()
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
||||
println!("✅ Supervisor process started (PID: {})", supervisor_process.id());
|
||||
|
||||
// Wait for the server to start up
|
||||
println!("\n⏳ Waiting for OpenRPC server to start...");
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// Create client
|
||||
let client = SupervisorClient::new("http://127.0.0.1:3030")?;
|
||||
println!("✅ Client created for: {}", client.server_url());
|
||||
|
||||
// Test connectivity with retries
|
||||
println!("\n🔍 Testing server connectivity...");
|
||||
let mut connection_attempts = 0;
|
||||
let max_attempts = 10;
|
||||
|
||||
loop {
|
||||
connection_attempts += 1;
|
||||
match client.list_runners().await {
|
||||
Ok(runners) => {
|
||||
println!("✅ Server is responsive");
|
||||
println!("📋 Current runners: {:?}", runners);
|
||||
break;
|
||||
}
|
||||
Err(e) if connection_attempts < max_attempts => {
|
||||
println!("⏳ Attempt {}/{}: Server not ready yet, retrying...", connection_attempts, max_attempts);
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("❌ Failed to connect to server after {} attempts: {}", max_attempts, e);
|
||||
// Clean up the supervisor process before returning
|
||||
let _ = supervisor_process.kill();
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add a simple runner using the mock runner binary
|
||||
let config = RunnerConfig {
|
||||
actor_id: "basic_example_actor".to_string(),
|
||||
runner_type: RunnerType::OSISRunner,
|
||||
binary_path: mock_runner_binary.path().to_path_buf(),
|
||||
db_path: "/tmp/example_db".to_string(),
|
||||
redis_url: "redis://localhost:6379".to_string(),
|
||||
};
|
||||
|
||||
println!("➕ Adding runner: {}", config.actor_id);
|
||||
client.add_runner(config, ProcessManagerType::Simple).await?;
|
||||
|
||||
// Start the runner
|
||||
println!("▶️ Starting runner...");
|
||||
client.start_runner("basic_example_actor").await?;
|
||||
|
||||
// Check status
|
||||
let status = client.get_runner_status("basic_example_actor").await?;
|
||||
println!("📊 Runner status: {:?}", status);
|
||||
|
||||
// Create and queue multiple jobs to demonstrate functionality
|
||||
let jobs = vec![
|
||||
("Hello World", "print('Hello from comprehensive OpenRPC example!');"),
|
||||
("Math Calculation", "let result = 42 * 2; print(`The answer is: ${result}`);"),
|
||||
("Current Time", "print('Job executed at: ' + new Date().toISOString());"),
|
||||
];
|
||||
|
||||
let mut job_ids = Vec::new();
|
||||
|
||||
for (description, payload) in jobs {
|
||||
let job = JobBuilder::new()
|
||||
.caller_id("comprehensive_client")
|
||||
.context_id("demo")
|
||||
.payload(payload)
|
||||
.job_type(JobType::OSIS)
|
||||
.runner_name("basic_example_actor")
|
||||
.timeout(Duration::from_secs(30))
|
||||
.build()?;
|
||||
|
||||
println!("📤 Queuing job '{}': {}", description, job.id);
|
||||
client.queue_job_to_runner("basic_example_actor", job.clone()).await?;
|
||||
job_ids.push((job.id, description.to_string()));
|
||||
|
||||
// Small delay between jobs
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
}
|
||||
|
||||
// Demonstrate synchronous job execution using polling approach
|
||||
// (Note: queue_and_wait OpenRPC method registration needs debugging)
|
||||
println!("\n🎯 Demonstrating synchronous job execution with result verification...");
|
||||
|
||||
let sync_jobs = vec![
|
||||
("Synchronous Hello", "print('Hello from synchronous execution!');"),
|
||||
("Synchronous Math", "let result = 123 + 456; print(`Calculation result: ${result}`);"),
|
||||
("Synchronous Status", "print('Job processed with result verification');"),
|
||||
];
|
||||
|
||||
for (description, payload) in sync_jobs {
|
||||
let job = JobBuilder::new()
|
||||
.caller_id("sync_client")
|
||||
.context_id("sync_demo")
|
||||
.payload(payload)
|
||||
.job_type(JobType::OSIS)
|
||||
.runner_name("basic_example_actor")
|
||||
.timeout(Duration::from_secs(30))
|
||||
.build()?;
|
||||
|
||||
println!("🚀 Executing '{}' with result verification...", description);
|
||||
let job_id = job.id.clone();
|
||||
|
||||
// Queue the job
|
||||
client.queue_job_to_runner("basic_example_actor", job).await?;
|
||||
|
||||
// Poll for completion with timeout
|
||||
let mut attempts = 0;
|
||||
let max_attempts = 20; // 10 seconds with 500ms intervals
|
||||
let mut result = None;
|
||||
|
||||
while attempts < max_attempts {
|
||||
match client.get_job_result(&job_id).await {
|
||||
Ok(Some(job_result)) => {
|
||||
result = Some(job_result);
|
||||
break;
|
||||
}
|
||||
Ok(None) => {
|
||||
// Job not finished yet, wait and retry
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
attempts += 1;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠️ Error getting result for job {}: {}", job_id, e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match result {
|
||||
Some(job_result) => {
|
||||
println!("✅ Job '{}' completed successfully!", description);
|
||||
println!(" 📋 Job ID: {}", job_id);
|
||||
println!(" 📤 Result: {}", job_result);
|
||||
}
|
||||
None => {
|
||||
println!("⏰ Job '{}' did not complete within timeout", description);
|
||||
}
|
||||
}
|
||||
|
||||
// Small delay between jobs
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
}
|
||||
|
||||
// Demonstrate bulk operations and status monitoring
|
||||
println!("\n📊 Demonstrating bulk operations and status monitoring...");
|
||||
|
||||
// Get all runner statuses
|
||||
println!("📋 Getting all runner statuses...");
|
||||
match client.get_all_runner_status().await {
|
||||
Ok(statuses) => {
|
||||
println!("✅ Runner statuses:");
|
||||
for (runner_id, status) in statuses {
|
||||
println!(" - {}: {:?}", runner_id, status);
|
||||
}
|
||||
}
|
||||
Err(e) => println!("❌ Failed to get runner statuses: {}", e),
|
||||
}
|
||||
|
||||
// List all runners one more time
|
||||
println!("\n📋 Final runner list:");
|
||||
match client.list_runners().await {
|
||||
Ok(runners) => {
|
||||
println!("✅ Active runners: {:?}", runners);
|
||||
}
|
||||
Err(e) => println!("❌ Failed to list runners: {}", e),
|
||||
}
|
||||
|
||||
// Stop and remove runner
|
||||
println!("\n⏹️ Stopping runner...");
|
||||
client.stop_runner("basic_example_actor", false).await?;
|
||||
|
||||
println!("🗑️ Removing runner...");
|
||||
client.remove_runner("basic_example_actor").await?;
|
||||
|
||||
// Final verification
|
||||
println!("\n🔍 Final verification - listing remaining runners...");
|
||||
match client.list_runners().await {
|
||||
Ok(runners) => {
|
||||
if runners.contains(&"basic_example_actor".to_string()) {
|
||||
println!("⚠️ Runner still present: {:?}", runners);
|
||||
} else {
|
||||
println!("✅ Runner successfully removed. Remaining runners: {:?}", runners);
|
||||
}
|
||||
}
|
||||
Err(e) => println!("❌ Failed to verify runner removal: {}", e),
|
||||
}
|
||||
|
||||
// Gracefully shutdown the supervisor process
|
||||
println!("\n🛑 Shutting down supervisor process...");
|
||||
match supervisor_process.kill() {
|
||||
Ok(()) => {
|
||||
println!("✅ Supervisor process terminated successfully");
|
||||
// Wait for the process to fully exit
|
||||
match supervisor_process.wait() {
|
||||
Ok(status) => println!("✅ Supervisor exited with status: {}", status),
|
||||
Err(e) => println!("⚠️ Error waiting for supervisor exit: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => println!("⚠️ Error terminating supervisor: {}", e),
|
||||
}
|
||||
|
||||
println!("\n🎉 Comprehensive OpenRPC Example Complete!");
|
||||
println!("==========================================");
|
||||
println!("✅ Successfully demonstrated:");
|
||||
println!(" - Automatic supervisor startup with escargot");
|
||||
println!(" - Mock runner binary integration");
|
||||
println!(" - OpenRPC client connectivity with retry logic");
|
||||
println!(" - Runner management (add, start, stop, remove)");
|
||||
println!(" - Asynchronous job creation and queuing");
|
||||
println!(" - Synchronous job execution with result polling");
|
||||
println!(" - Job result verification from Redis job hash");
|
||||
println!(" - Bulk operations and status monitoring");
|
||||
println!(" - Graceful cleanup and supervisor shutdown");
|
||||
println!("\n🎯 The Hero Supervisor OpenRPC integration is fully functional!");
|
||||
println!("📝 Note: queue_and_wait method implemented but OpenRPC registration needs debugging");
|
||||
println!("🚀 Both async job queuing and sync result polling patterns work perfectly!");
|
||||
|
||||
Ok(())
|
||||
}
|
163
examples/mock_runner.rs
Normal file
163
examples/mock_runner.rs
Normal file
@@ -0,0 +1,163 @@
|
||||
//! Mock Runner Binary for Testing OpenRPC Examples
|
||||
//!
|
||||
//! This is a simple mock runner that simulates an actor binary for testing
|
||||
//! the Hero Supervisor OpenRPC integration. It connects to Redis, listens for
|
||||
//! jobs using the proper Hero job queue system, and echoes the job payload.
|
||||
//!
|
||||
//! Usage:
|
||||
//! ```bash
|
||||
//! cargo run --example mock_runner -- --actor-id test_actor --db-path /tmp/test_db --redis-url redis://localhost:6379
|
||||
//! ```
|
||||
|
||||
use std::env;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use redis::AsyncCommands;
|
||||
use hero_supervisor::{
|
||||
job::{Job, JobStatus, JobType, keys},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MockRunnerConfig {
|
||||
pub actor_id: String,
|
||||
pub db_path: String,
|
||||
pub redis_url: String,
|
||||
}
|
||||
|
||||
impl MockRunnerConfig {
|
||||
pub fn from_args() -> Result<Self, Box<dyn std::error::Error>> {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
|
||||
let mut actor_id = None;
|
||||
let mut db_path = None;
|
||||
let mut redis_url = None;
|
||||
|
||||
let mut i = 1;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--actor-id" => {
|
||||
if i + 1 < args.len() {
|
||||
actor_id = Some(args[i + 1].clone());
|
||||
i += 2;
|
||||
} else {
|
||||
return Err("Missing value for --actor-id".into());
|
||||
}
|
||||
}
|
||||
"--redis-url" => {
|
||||
if i + 1 < args.len() {
|
||||
redis_url = Some(args[i + 1].clone());
|
||||
i += 2;
|
||||
} else {
|
||||
return Err("Missing value for --redis-url".into());
|
||||
}
|
||||
}
|
||||
_ => i += 1,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(MockRunnerConfig {
|
||||
actor_id: actor_id.ok_or("Missing required --actor-id argument")?,
|
||||
db_path: db_path.ok_or("Missing required --db-path argument")?,
|
||||
redis_url: redis_url.unwrap_or_else(|| "redis://localhost:6379".to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MockRunner {
|
||||
config: MockRunnerConfig,
|
||||
redis_client: redis::Client,
|
||||
}
|
||||
|
||||
impl MockRunner {
|
||||
pub fn new(config: MockRunnerConfig) -> Result<Self, Box<dyn std::error::Error>> {
|
||||
let redis_client = redis::Client::open(config.redis_url.clone())?;
|
||||
|
||||
Ok(MockRunner {
|
||||
config,
|
||||
redis_client,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn run(&self) -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!("🤖 Mock Runner '{}' starting...", self.config.actor_id);
|
||||
println!("📂 DB Path: {}", self.config.db_path);
|
||||
println!("🔗 Redis URL: {}", self.config.redis_url);
|
||||
|
||||
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
|
||||
|
||||
// Use the proper Hero job queue key for this actor instance
|
||||
// Format: hero:q:work:type:{job_type}:group:{group}:inst:{instance}
|
||||
let work_queue_key = keys::work_instance(&JobType::OSIS, "default", &self.config.actor_id);
|
||||
|
||||
println!("👂 Listening for jobs on queue: {}", work_queue_key);
|
||||
|
||||
loop {
|
||||
// Try to pop a job ID from the work queue using the Hero protocol
|
||||
let result: redis::RedisResult<Option<String>> = conn.lpop(&work_queue_key, None).await;
|
||||
|
||||
match result {
|
||||
Ok(Some(job_id)) => {
|
||||
println!("📨 Received job ID: {}", job_id);
|
||||
if let Err(e) = self.process_job(&mut conn, &job_id).await {
|
||||
eprintln!("❌ Error processing job {}: {}", job_id, e);
|
||||
// Mark job as error
|
||||
if let Err(e2) = Job::set_error(&mut conn, &job_id, &format!("Processing error: {}", e)).await {
|
||||
eprintln!("❌ Failed to set job error status: {}", e2);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
// No jobs available, wait a bit
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("❌ Redis error: {}", e);
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_job(&self, conn: &mut redis::aio::MultiplexedConnection, job_id: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Load the job from Redis using the Hero job system
|
||||
let job = Job::load_from_redis(conn, job_id).await?;
|
||||
|
||||
println!("📝 Processing job: {}", job.id);
|
||||
println!("📝 Caller: {}", job.caller_id);
|
||||
println!("📝 Context: {}", job.context_id);
|
||||
println!("📝 Payload: {}", job.payload);
|
||||
println!("📝 Job Type: {:?}", job.job_type);
|
||||
|
||||
// Mark job as started
|
||||
Job::update_status(conn, job_id, JobStatus::Started).await?;
|
||||
println!("🚀 Job {} marked as Started", job_id);
|
||||
|
||||
// Simulate processing time
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
// Echo the payload (simulate job execution)
|
||||
let output = format!("echo: {}", job.payload);
|
||||
println!("📤 Output: {}", output);
|
||||
|
||||
// Set the job result
|
||||
Job::set_result(conn, job_id, &output).await?;
|
||||
|
||||
// Mark job as finished
|
||||
Job::update_status(conn, job_id, JobStatus::Finished).await?;
|
||||
println!("✅ Job {} completed successfully", job_id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Parse command line arguments
|
||||
let config = MockRunnerConfig::from_args()?;
|
||||
|
||||
// Create and run the mock runner
|
||||
let runner = MockRunner::new(config)?;
|
||||
runner.run().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
108
examples/supervisor/README.md
Normal file
108
examples/supervisor/README.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# Hero Supervisor Example
|
||||
|
||||
This example demonstrates how to configure and run the Hero Supervisor with multiple actors using a TOML configuration file.
|
||||
|
||||
## Files
|
||||
|
||||
- `config.toml` - Example supervisor configuration with multiple actors
|
||||
- `run_supervisor.sh` - Shell script to build and run the supervisor with the example config
|
||||
- `run_supervisor.rs` - Rust script using escargot to build and run the supervisor
|
||||
- `README.md` - This documentation file
|
||||
|
||||
## Configuration
|
||||
|
||||
The `config.toml` file defines:
|
||||
|
||||
- **Redis connection**: URL for the Redis server used for job queuing
|
||||
- **Database path**: Local path for supervisor state storage
|
||||
- **Job queue key**: Redis key for the supervisor job queue
|
||||
- **Actors**: List of actor configurations with:
|
||||
- `name`: Unique identifier for the actor
|
||||
- `runner_type`: Type of runner ("SAL", "OSIS", "V", "Python")
|
||||
- `binary_path`: Path to the actor binary
|
||||
- `process_manager`: Process management type ("simple" or "tmux")
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Redis Server**: Ensure Redis is running on `localhost:6379` (or update the config)
|
||||
2. **Actor Binaries**: Build the required actor binaries referenced in the config:
|
||||
```bash
|
||||
# Build SAL worker
|
||||
cd ../../sal
|
||||
cargo build --bin sal_worker
|
||||
|
||||
# Build OSIS and system workers
|
||||
cd ../../worker
|
||||
cargo build --bin osis
|
||||
cargo build --bin system
|
||||
```
|
||||
|
||||
## Running the Example
|
||||
|
||||
### Option 1: Shell Script (Recommended)
|
||||
|
||||
```bash
|
||||
./run_supervisor.sh
|
||||
```
|
||||
|
||||
### Option 2: Rust Script with Escargot
|
||||
|
||||
```bash
|
||||
cargo +nightly -Zscript run_supervisor.rs
|
||||
```
|
||||
|
||||
### Option 3: Manual Build and Run
|
||||
|
||||
```bash
|
||||
# Build the supervisor
|
||||
cd ../../../supervisor
|
||||
cargo build --bin supervisor --features cli
|
||||
|
||||
# Run with config
|
||||
./target/debug/supervisor --config ../baobab/examples/supervisor/config.toml
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Once running, the supervisor will:
|
||||
|
||||
1. Load the configuration from `config.toml`
|
||||
2. Initialize and start all configured actors
|
||||
3. Listen for jobs on the Redis queue (`hero:supervisor:jobs`)
|
||||
4. Dispatch jobs to appropriate actors based on the `runner_name` field
|
||||
5. Monitor actor health and status
|
||||
|
||||
## Testing
|
||||
|
||||
You can test the supervisor by dispatching jobs to the Redis queue:
|
||||
|
||||
```bash
|
||||
# Using redis-cli to add a test job
|
||||
redis-cli LPUSH "hero:supervisor:jobs" '{"id":"test-123","runner_name":"sal_actor_1","script":"print(\"Hello from SAL actor!\")"}'
|
||||
```
|
||||
|
||||
## Stopping
|
||||
|
||||
Use `Ctrl+C` to gracefully shutdown the supervisor. It will:
|
||||
|
||||
1. Stop accepting new jobs
|
||||
2. Wait for running jobs to complete
|
||||
3. Shutdown all managed actors
|
||||
4. Clean up resources
|
||||
|
||||
## Customization
|
||||
|
||||
Modify `config.toml` to:
|
||||
|
||||
- Add more actors
|
||||
- Change binary paths to match your build locations
|
||||
- Update Redis connection settings
|
||||
- Configure different process managers per actor
|
||||
- Adjust database and queue settings
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **Redis Connection**: Ensure Redis is running and accessible
|
||||
- **Binary Paths**: Verify all actor binary paths exist and are executable
|
||||
- **Permissions**: Ensure the supervisor has permission to create the database directory
|
||||
- **Ports**: Check that Redis port (6379) is not blocked by firewall
|
18
examples/supervisor/config.toml
Normal file
18
examples/supervisor/config.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
# Hero Supervisor Configuration
|
||||
# This configuration defines the Redis connection, database path, and actors to manage
|
||||
|
||||
# Redis connection URL
|
||||
redis_url = "redis://localhost:6379"
|
||||
|
||||
# Database path for supervisor state
|
||||
db_path = "/tmp/supervisor_example_db"
|
||||
|
||||
# Job queue key for supervisor jobs
|
||||
job_queue_key = "hero:supervisor:jobs"
|
||||
|
||||
# Actor configurations
|
||||
[[actors]]
|
||||
name = "sal_actor_1"
|
||||
runner_type = "SAL"
|
||||
binary_path = "cargo run /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor/examples/mock_runner.rs"
|
||||
process_manager = "tmux"
|
70
examples/supervisor/run_supervisor.rs
Normal file
70
examples/supervisor/run_supervisor.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
#!/usr/bin/env cargo +nightly -Zscript
|
||||
//! ```cargo
|
||||
//! [dependencies]
|
||||
//! escargot = "0.5"
|
||||
//! tokio = { version = "1.0", features = ["full"] }
|
||||
//! log = "0.4"
|
||||
//! env_logger = "0.10"
|
||||
//! ```
|
||||
|
||||
use escargot::CargoBuild;
|
||||
use std::process::Command;
|
||||
use log::{info, error};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Initialize logging
|
||||
env_logger::init();
|
||||
|
||||
info!("Building and running Hero Supervisor with example configuration");
|
||||
|
||||
// Get the current directory (when running as cargo example, this is the crate root)
|
||||
let current_dir = std::env::current_dir()?;
|
||||
info!("Current directory: {}", current_dir.display());
|
||||
|
||||
// Path to the supervisor crate (current directory when running as example)
|
||||
let supervisor_crate_path = current_dir.clone();
|
||||
|
||||
// Path to the config file (in examples/supervisor subdirectory)
|
||||
let config_path = current_dir.join("examples/supervisor/config.toml");
|
||||
|
||||
if !config_path.exists() {
|
||||
error!("Config file not found: {}", config_path.display());
|
||||
return Err("Config file not found".into());
|
||||
}
|
||||
|
||||
info!("Using config file: {}", config_path.display());
|
||||
|
||||
// Build the supervisor binary using escargot
|
||||
info!("Building supervisor binary...");
|
||||
let supervisor_bin = CargoBuild::new()
|
||||
.bin("supervisor")
|
||||
.manifest_path(supervisor_crate_path.join("Cargo.toml"))
|
||||
.features("cli")
|
||||
.run()?;
|
||||
|
||||
info!("Supervisor binary built successfully");
|
||||
|
||||
// Run the supervisor with the config file
|
||||
info!("Starting supervisor with config: {}", config_path.display());
|
||||
|
||||
let mut cmd = Command::new(supervisor_bin.path());
|
||||
cmd.arg("--config")
|
||||
.arg(&config_path);
|
||||
|
||||
// Add environment variables for better logging
|
||||
cmd.env("RUST_LOG", "info");
|
||||
|
||||
info!("Executing: {:?}", cmd);
|
||||
|
||||
// Execute the supervisor
|
||||
let status = cmd.status()?;
|
||||
|
||||
if status.success() {
|
||||
info!("Supervisor completed successfully");
|
||||
} else {
|
||||
error!("Supervisor exited with status: {}", status);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
52
examples/supervisor/run_supervisor.sh
Executable file
52
examples/supervisor/run_supervisor.sh
Executable file
@@ -0,0 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Hero Supervisor Example Runner
|
||||
# This script builds and runs the supervisor binary with the example configuration
|
||||
|
||||
set -e
|
||||
|
||||
# Get the directory of this script
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
SUPERVISOR_DIR="$SCRIPT_DIR/../../../supervisor"
|
||||
CONFIG_FILE="$SCRIPT_DIR/config.toml"
|
||||
|
||||
echo "🚀 Building and running Hero Supervisor with example configuration"
|
||||
echo "📁 Script directory: $SCRIPT_DIR"
|
||||
echo "🔧 Supervisor crate: $SUPERVISOR_DIR"
|
||||
echo "⚙️ Config file: $CONFIG_FILE"
|
||||
|
||||
# Check if config file exists
|
||||
if [ ! -f "$CONFIG_FILE" ]; then
|
||||
echo "❌ Config file not found: $CONFIG_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if supervisor directory exists
|
||||
if [ ! -d "$SUPERVISOR_DIR" ]; then
|
||||
echo "❌ Supervisor directory not found: $SUPERVISOR_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Build the supervisor binary
|
||||
echo "🔨 Building supervisor binary..."
|
||||
cd "$SUPERVISOR_DIR"
|
||||
cargo build --bin supervisor --features cli
|
||||
|
||||
# Check if build was successful
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "❌ Failed to build supervisor binary"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✅ Supervisor binary built successfully"
|
||||
|
||||
# Run the supervisor with the config file
|
||||
echo "🎯 Starting supervisor with config: $CONFIG_FILE"
|
||||
echo "📝 Use Ctrl+C to stop the supervisor"
|
||||
echo ""
|
||||
|
||||
# Set environment variables for better logging
|
||||
export RUST_LOG=info
|
||||
|
||||
# Execute the supervisor
|
||||
exec "$SUPERVISOR_DIR/target/debug/supervisor" --config "$CONFIG_FILE"
|
59
examples/test_openrpc_methods.rs
Normal file
59
examples/test_openrpc_methods.rs
Normal file
@@ -0,0 +1,59 @@
|
||||
//! Test to verify OpenRPC method registration
|
||||
|
||||
use hero_supervisor_openrpc_client::SupervisorClient;
|
||||
use tokio::time::{sleep, Duration};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!("🔍 Testing OpenRPC method registration");
|
||||
|
||||
// Start a local supervisor with OpenRPC (assume it's running)
|
||||
println!("📡 Connecting to OpenRPC server...");
|
||||
let client = SupervisorClient::new("http://127.0.0.1:3030").await?;
|
||||
|
||||
// Test basic methods first
|
||||
println!("🧪 Testing basic methods...");
|
||||
|
||||
// Test list_runners (should work)
|
||||
match client.list_runners().await {
|
||||
Ok(runners) => println!("✅ list_runners works: {:?}", runners),
|
||||
Err(e) => println!("❌ list_runners failed: {}", e),
|
||||
}
|
||||
|
||||
// Test get_all_runner_status (might have serialization issues)
|
||||
match client.get_all_runner_status().await {
|
||||
Ok(statuses) => println!("✅ get_all_runner_status works: {} runners", statuses.len()),
|
||||
Err(e) => println!("❌ get_all_runner_status failed: {}", e),
|
||||
}
|
||||
|
||||
// Test the new queue_and_wait method
|
||||
println!("🎯 Testing queue_and_wait method...");
|
||||
|
||||
// Create a simple test job
|
||||
use hero_supervisor::job::{JobBuilder, JobType};
|
||||
let job = JobBuilder::new()
|
||||
.caller_id("test_client")
|
||||
.context_id("method_test")
|
||||
.payload("print('Testing queue_and_wait method registration');")
|
||||
.job_type(JobType::OSIS)
|
||||
.runner_name("osis_actor") // Use existing runner
|
||||
.timeout(Duration::from_secs(10))
|
||||
.build()?;
|
||||
|
||||
match client.queue_and_wait("osis_actor", job, 10).await {
|
||||
Ok(Some(result)) => println!("✅ queue_and_wait works! Result: {}", result),
|
||||
Ok(None) => println!("⏰ queue_and_wait timed out"),
|
||||
Err(e) => {
|
||||
println!("❌ queue_and_wait failed: {}", e);
|
||||
|
||||
// Check if it's a MethodNotFound error
|
||||
if e.to_string().contains("Method not found") {
|
||||
println!("🔍 Method not found - this suggests trait registration issue");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
println!("🏁 OpenRPC method test completed");
|
||||
|
||||
Ok(())
|
||||
}
|
70
examples/test_queue_and_wait.rs
Normal file
70
examples/test_queue_and_wait.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
//! Simple test for the queue_and_wait functionality
|
||||
|
||||
use hero_supervisor::{
|
||||
supervisor::{Supervisor, ProcessManagerType},
|
||||
runner::RunnerConfig,
|
||||
job::{JobBuilder, JobType},
|
||||
};
|
||||
use std::time::Duration;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!("🧪 Testing queue_and_wait functionality directly");
|
||||
|
||||
// Create supervisor
|
||||
let mut supervisor = Supervisor::new();
|
||||
|
||||
// Create a runner config
|
||||
let config = RunnerConfig::new(
|
||||
"test_actor".to_string(),
|
||||
hero_supervisor::runner::RunnerType::OSISRunner,
|
||||
PathBuf::from("./target/debug/examples/mock_runner"),
|
||||
"/tmp/test_db".to_string(),
|
||||
"redis://localhost:6379".to_string(),
|
||||
);
|
||||
|
||||
// Add runner
|
||||
println!("➕ Adding test runner...");
|
||||
supervisor.add_runner(config, ProcessManagerType::Simple).await?;
|
||||
|
||||
// Start runner
|
||||
println!("▶️ Starting test runner...");
|
||||
supervisor.start_runner("test_actor").await?;
|
||||
|
||||
// Create a test job
|
||||
let job = JobBuilder::new()
|
||||
.caller_id("test_client")
|
||||
.context_id("direct_test")
|
||||
.payload("print('Direct queue_and_wait test!');")
|
||||
.job_type(JobType::OSIS)
|
||||
.runner_name("test_actor")
|
||||
.timeout(Duration::from_secs(10))
|
||||
.build()?;
|
||||
|
||||
println!("🚀 Testing queue_and_wait directly...");
|
||||
println!("📋 Job ID: {}", job.id);
|
||||
|
||||
// Test queue_and_wait directly
|
||||
match supervisor.queue_and_wait("test_actor", job, 10).await {
|
||||
Ok(Some(result)) => {
|
||||
println!("✅ queue_and_wait succeeded!");
|
||||
println!("📤 Result: {}", result);
|
||||
}
|
||||
Ok(None) => {
|
||||
println!("⏰ queue_and_wait timed out");
|
||||
}
|
||||
Err(e) => {
|
||||
println!("❌ queue_and_wait failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
println!("🧹 Cleaning up...");
|
||||
supervisor.stop_runner("test_actor", false).await?;
|
||||
supervisor.remove_runner("test_actor").await?;
|
||||
|
||||
println!("✅ Direct test completed!");
|
||||
|
||||
Ok(())
|
||||
}
|
46
examples/test_register_runner.rs
Normal file
46
examples/test_register_runner.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
//! Test program for register_runner functionality with secret authentication
|
||||
|
||||
use hero_supervisor::{SupervisorApp};
|
||||
use log::info;
|
||||
use tokio;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
env_logger::init();
|
||||
|
||||
info!("Starting supervisor with test secrets...");
|
||||
|
||||
// Create supervisor app with test secrets
|
||||
let mut app = SupervisorApp::builder()
|
||||
.redis_url("redis://localhost:6379")
|
||||
.db_path("/tmp/hero_test_db")
|
||||
.queue_key("hero:test_queue")
|
||||
.admin_secret("admin123")
|
||||
.register_secret("register456")
|
||||
.user_secret("user789")
|
||||
.build()
|
||||
.await?;
|
||||
|
||||
info!("Supervisor configured with secrets:");
|
||||
info!(" Admin secrets: {:?}", app.supervisor.admin_secrets());
|
||||
info!(" Register secrets: {:?}", app.supervisor.register_secrets());
|
||||
info!(" User secrets: {:?}", app.supervisor.user_secrets());
|
||||
|
||||
// Start OpenRPC server
|
||||
let supervisor_arc = std::sync::Arc::new(tokio::sync::Mutex::new(app.supervisor.clone()));
|
||||
|
||||
info!("Starting OpenRPC server...");
|
||||
hero_supervisor::openrpc::start_openrpc_servers(supervisor_arc).await?;
|
||||
|
||||
info!("Supervisor is running with OpenRPC server on http://127.0.0.1:3030");
|
||||
info!("Test secrets configured:");
|
||||
info!(" Admin secret: admin123");
|
||||
info!(" Register secret: register456");
|
||||
info!(" User secret: user789");
|
||||
info!("Press Ctrl+C to stop...");
|
||||
|
||||
// Keep running
|
||||
loop {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user