From 89e953ca1d833e039b64d5040227ae75b6977ea1 Mon Sep 17 00:00:00 2001 From: Timur Gordon <31495328+timurgordon@users.noreply.github.com> Date: Tue, 5 Aug 2025 15:44:33 +0200 Subject: [PATCH] rename worker to actor --- Cargo.lock | 274 ++++++------- Cargo.toml | 2 +- README.md | 23 +- _archive/benches/simple_rhai_bench/README.md | 16 +- .../benches/simple_rhai_bench/batch_task.lua | 4 +- _archive/benches/simple_rhai_bench/main.rs | 34 +- _archive/core/{worker => actor}/cmd/README.md | 60 +-- _archive/core/{worker => actor}/cmd/osis.rs | 82 ++-- _archive/core/{worker => actor}/cmd/system.rs | 108 ++--- _archive/core/{worker => actor}/cmd/worker.rs | 32 +- .../core/{worker => actor}/examples/README.md | 68 ++-- .../examples/osis/config.toml | 4 +- .../examples/osis/example.sh | 50 +-- _archive/core/actor/examples/osis_config.toml | 14 + .../examples/osis_worker_demo.rs | 14 +- .../examples/system/config.toml | 4 +- .../examples/system/example.sh | 48 +-- .../core/actor/examples/system_config.toml | 15 + .../examples/system_worker_demo.rs | 14 +- .../examples/trait_based_worker_demo.rs | 140 +++---- .../core/actor}/src/async_worker_impl.rs | 190 ++++----- .../core/actor}/src/config.rs | 84 ++-- .../core/actor}/src/engine.rs | 25 +- .../core/actor}/src/sync_worker.rs | 136 +++---- _archive/core/examples/Cargo.toml | 4 +- .../core/examples/supervisor_worker_demo.rs | 28 +- _archive/core/supervisor/cmd/README.md | 20 +- .../cmd/hive_supervisor_tui_safe.rs | 48 +-- _archive/core/supervisor/cmd/supervisor.rs | 4 +- .../core/worker/examples/osis_config.toml | 14 - .../core/worker/examples/system_config.toml | 15 - cmd/config.toml | 5 +- cmd/main.rs | 10 +- core/{worker => actor}/.DS_Store | Bin core/actor/.gitignore | 2 + core/{worker => actor}/Cargo.lock | 2 +- core/{worker => actor}/Cargo.toml | 5 +- core/{worker => actor}/README.md | 46 +-- core/{worker => actor}/docs/ARCHITECTURE.md | 12 +- .../src/actor_trait.rs} | 202 +++++----- core/{worker => actor}/src/lib.rs | 151 ++----- core/docs/architecture.md | 4 +- core/job/README.md | 4 +- core/job/src/lib.rs | 16 +- core/supervisor/LIFECYCLE.md | 162 ++++---- core/supervisor/README.md | 46 +-- core/supervisor/docs/protocol.md | 78 ++-- core/supervisor/examples/cli/README.md | 64 +-- core/supervisor/examples/cli/config.toml | 14 +- core/supervisor/examples/cli/run_examples.sh | 30 +- .../cli/sample_scripts/data_python.py | 4 +- .../cli/sample_scripts/hello_osis.rhai | 6 +- .../examples/cli/sample_scripts/math_v.v | 4 +- .../cli/sample_scripts/system_sal.rhai | 4 +- core/supervisor/examples/lifecycle_demo.rs | 166 ++++---- .../examples/simple_lifecycle_demo.rs | 50 +-- .../examples/supervisor_config.toml | 16 +- core/supervisor/examples/timeout_example.rs | 6 +- core/supervisor/src/error.rs | 36 +- core/supervisor/src/lib.rs | 371 +++++++++--------- core/supervisor/src/lifecycle.rs | 248 ++++++------ core/worker/.gitignore | 2 - interfaces/wasm/README.md | 3 + interfaces/websocket/server/README.md | 2 +- interfaces/websocket/server/cmd/main.rs | 1 - interfaces/websocket/server/docs/webhooks.md | 4 +- .../server/tests/basic_integration_test.rs | 16 +- 67 files changed, 1629 insertions(+), 1737 deletions(-) rename _archive/core/{worker => actor}/cmd/README.md (61%) rename _archive/core/{worker => actor}/cmd/osis.rs (70%) rename _archive/core/{worker => actor}/cmd/system.rs (68%) rename _archive/core/{worker => actor}/cmd/worker.rs (75%) rename _archive/core/{worker => actor}/examples/README.md (70%) rename _archive/core/{worker => 
actor}/examples/osis/config.toml (76%) rename _archive/core/{worker => actor}/examples/osis/example.sh (73%) create mode 100644 _archive/core/actor/examples/osis_config.toml rename _archive/core/{worker => actor}/examples/osis_worker_demo.rs (77%) rename _archive/core/{worker => actor}/examples/system/config.toml (78%) rename _archive/core/{worker => actor}/examples/system/example.sh (79%) create mode 100644 _archive/core/actor/examples/system_config.toml rename _archive/core/{worker => actor}/examples/system_worker_demo.rs (76%) rename _archive/core/{worker => actor}/examples/trait_based_worker_demo.rs (67%) rename {core/worker => _archive/core/actor}/src/async_worker_impl.rs (63%) rename {core/worker => _archive/core/actor}/src/config.rs (73%) rename {core/worker => _archive/core/actor}/src/engine.rs (92%) rename {core/worker => _archive/core/actor}/src/sync_worker.rs (56%) delete mode 100644 _archive/core/worker/examples/osis_config.toml delete mode 100644 _archive/core/worker/examples/system_config.toml rename core/{worker => actor}/.DS_Store (100%) create mode 100644 core/actor/.gitignore rename core/{worker => actor}/Cargo.lock (99%) rename core/{worker => actor}/Cargo.toml (88%) rename core/{worker => actor}/README.md (53%) rename core/{worker => actor}/docs/ARCHITECTURE.md (66%) rename core/{worker/src/worker_trait.rs => actor/src/actor_trait.rs} (54%) rename core/{worker => actor}/src/lib.rs (50%) delete mode 100644 core/worker/.gitignore create mode 100644 interfaces/wasm/README.md diff --git a/Cargo.lock b/Cargo.lock index dc946ad..da40534 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,7 +91,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -246,7 +246,7 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -257,7 +257,7 @@ checksum = "b6ac1e58cded18cb28ddc17143c4dea5345b3ad575e14f32f66e4054a56eb271" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -410,7 +410,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -463,6 +463,30 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "baobab_actor" +version = "0.1.0" +dependencies = [ + "async-trait", + "chrono", + "clap", + "env_logger", + "hero_job", + "hero_supervisor", + "heromodels 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "heromodels-derive 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "heromodels_core 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "log", + "redis 0.25.4", + "rhai", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "toml", + "uuid", +] + [[package]] name = "base16ct" version = "0.2.0" @@ -526,7 +550,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.104", + "syn", "which", ] @@ -736,7 +760,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -1023,7 +1047,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.104", + "syn", ] [[package]] @@ -1034,7 +1058,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -1062,14 +1086,6 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "derive" -version = "0.1.0" 
-dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "derive_more" version = "2.0.1" @@ -1087,7 +1103,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", "unicode-xid", ] @@ -1111,7 +1127,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -1351,7 +1367,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -1682,22 +1698,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "hero_examples" -version = "0.1.0" -dependencies = [ - "chrono", - "colored", - "env_logger", - "hero_job", - "hero_supervisor", - "log", - "redis 0.25.4", - "serde_json", - "tokio", - "uuid", -] - [[package]] name = "hero_job" version = "0.1.0" @@ -1784,7 +1784,7 @@ dependencies = [ "futures-util", "hero_job", "hero_supervisor", - "heromodels", + "heromodels 0.1.0", "hex", "hmac", "log", @@ -1813,11 +1813,10 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "derive", - "heromodels-derive", - "heromodels_core", + "heromodels-derive 0.1.0", + "heromodels_core 0.1.0", "jsonb", - "ourdb", + "ourdb 0.1.0", "postgres", "r2d2", "r2d2_postgres", @@ -1826,7 +1825,30 @@ dependencies = [ "serde_json", "strum", "strum_macros", - "tst", + "tst 0.1.0", + "uuid", +] + +[[package]] +name = "heromodels" +version = "0.1.0" +source = "git+https://git.ourworld.tf/herocode/db.git#453e86edd24d6009f0b154ac777cc66dc5f3bf76" +dependencies = [ + "bincode", + "chrono", + "heromodels-derive 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "heromodels_core 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "jsonb", + "ourdb 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", + "postgres", + "r2d2", + "r2d2_postgres", + "rhai", + "serde", + "serde_json", + "strum", + "strum_macros", + "tst 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", "uuid", ] @@ -1836,7 +1858,17 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", +] + +[[package]] +name = "heromodels-derive" +version = "0.1.0" +source = "git+https://git.ourworld.tf/herocode/db.git#453e86edd24d6009f0b154ac777cc66dc5f3bf76" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1847,6 +1879,15 @@ dependencies = [ "serde", ] +[[package]] +name = "heromodels_core" +version = "0.1.0" +source = "git+https://git.ourworld.tf/herocode/db.git#453e86edd24d6009f0b154ac777cc66dc5f3bf76" +dependencies = [ + "chrono", + "serde", +] + [[package]] name = "hex" version = "0.4.3" @@ -2210,7 +2251,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -2327,7 +2368,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -2511,7 +2552,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -2696,16 +2737,6 @@ dependencies = [ "hashbrown", ] -[[package]] -name = "macros" -version = "0.1.0" -dependencies = [ - "heromodels", - "heromodels_core", - "rhai", - "serde", -] - [[package]] name = "md-5" version = "0.10.6" @@ -2868,7 +2899,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ 
-2908,6 +2939,17 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "ourdb" +version = "0.1.0" +source = "git+https://git.ourworld.tf/herocode/db.git#453e86edd24d6009f0b154ac777cc66dc5f3bf76" +dependencies = [ + "crc32fast", + "log", + "rand 0.8.5", + "thiserror 1.0.69", +] + [[package]] name = "parking_lot" version = "0.12.4" @@ -2978,7 +3020,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -3128,7 +3170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" dependencies = [ "proc-macro2", - "syn 2.0.104", + "syn", ] [[package]] @@ -3474,68 +3516,7 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", -] - -[[package]] -name = "rhai_dispatcher" -version = "0.1.0" -dependencies = [ - "chrono", - "clap", - "colored", - "env_logger", - "log", - "redis 0.25.4", - "serde", - "serde_json", - "tokio", - "uuid", -] - -[[package]] -name = "rhailib_dsl" -version = "0.1.0" -dependencies = [ - "chrono", - "derive", - "dotenv", - "heromodels", - "heromodels-derive", - "heromodels_core", - "macros", - "reqwest", - "rhai", - "rhai_dispatcher", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", -] - -[[package]] -name = "rhailib_worker" -version = "0.1.0" -dependencies = [ - "async-trait", - "chrono", - "clap", - "env_logger", - "hero_job", - "hero_supervisor", - "heromodels", - "heromodels-derive", - "heromodels_core", - "log", - "redis 0.25.4", - "rhai", - "rhailib_dsl", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "toml", - "uuid", + "syn", ] [[package]] @@ -3843,7 +3824,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4093,7 +4074,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.104", + "syn", ] [[package]] @@ -4102,17 +4083,6 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.104" @@ -4144,7 +4114,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4222,7 +4192,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4233,7 +4203,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4339,7 +4309,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4543,7 +4513,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4565,7 +4535,16 @@ checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" name = "tst" version = "0.1.0" dependencies = [ - "ourdb", + "ourdb 0.1.0", + "thiserror 1.0.69", +] + +[[package]] +name = "tst" +version = "0.1.0" +source = "git+https://git.ourworld.tf/herocode/db.git#453e86edd24d6009f0b154ac777cc66dc5f3bf76" +dependencies = [ + "ourdb 0.1.0 (git+https://git.ourworld.tf/herocode/db.git)", "thiserror 1.0.69", ] @@ -4823,7 +4802,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.104", + "syn", "wasm-bindgen-shared", ] @@ -4858,7 +4837,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4981,7 +4960,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -4992,7 +4971,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -5360,7 +5339,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", "synstructure", ] @@ -5381,7 +5360,7 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] @@ -5401,7 +5380,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", "synstructure", ] @@ -5441,12 +5420,13 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn", ] [[package]] name = "zinit-client" version = "0.1.0" +source = "git+https://github.com/threefoldtech/zinit?branch=master#1b76c062fe31d552d1b7b23484ce163995a81482" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index d30ee97..15783b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ members = [ "interfaces/websocket/client", "interfaces/websocket/server", "core/supervisor", - "core/worker", + "core/actor", "core/job", "interfaces/websocket/examples", "proxies/http", ] diff --git a/README.md b/README.md index a678477..b20efb8 100644 --- a/README.md +++ b/README.md @@ -12,29 +12,26 @@ Hero is a program that runs scripts in contexts on behalf of a peer. Hero aims t ## Core -In its core, a [supervisor](#supervisor) dispatches jobs to execute scripts to [workers](#worker) over redis. Workers spawn appropriate engine instances to execute scripts within the defined [confines]() of the job. +In its core, a [supervisor](#supervisor) dispatches jobs to execute scripts to [actors](#actor) over redis. Actors spawn appropriate engine instances to execute scripts within the defined [confines]() of the job. -### Components +### [Supervisor](./core/supervisor) -#### [Supervisor](./core/supervisor) +Component responsible for distributing jobs to actors over Redis. -Component responsible for distributing jobs to workers over Redis. - -#### [Engine](./core/engine) - -A process that runs a script in a confined environment. - -#### [Job](./core/job) +### [Job](./core/job) A unit of work that executes a Rhai or Hero script. -#### [Worker](./core/worker) +### [Actor](./core/actor) An entity that processes jobs dispatched by the supervisor. 
- ## Interfaces +The backend supports an OpenRPC interface over Websocket and Unix sockets, and a wasm app interface for simple debugging logging etc. + ### Websocket -### Unix \ No newline at end of file +### Unix + +### WASM \ No newline at end of file diff --git a/_archive/benches/simple_rhai_bench/README.md b/_archive/benches/simple_rhai_bench/README.md index 1e52197..c5d2dec 100644 --- a/_archive/benches/simple_rhai_bench/README.md +++ b/_archive/benches/simple_rhai_bench/README.md @@ -1,20 +1,20 @@ -# Minimal Rhailib Benchmark +# Minimal baobab Benchmark -A simplified, minimal benchmarking tool for rhailib performance testing. +A simplified, minimal benchmarking tool for baobab performance testing. ## Overview This benchmark focuses on simplicity and direct timing measurements: - Creates a single task (n=1) using Lua script - Measures latency using Redis timestamps -- Uses existing worker binary +- Uses existing actor binary - ~85 lines of code total ## Usage ### Prerequisites - Redis running on `127.0.0.1:6379` -- Worker binary built: `cd src/worker && cargo build --release` +- Actor binary built: `cd src/actor && cargo build --release` ### Run Benchmark ```bash @@ -25,7 +25,7 @@ cargo bench ### Expected Output ``` ๐Ÿงน Cleaning up Redis... -๐Ÿš€ Starting worker... +๐Ÿš€ Starting actor... ๐Ÿ“ Creating single task... โฑ๏ธ Waiting for completion... โœ… Task completed in 23.45ms @@ -42,10 +42,10 @@ cargo bench ## How It Works 1. **Cleanup**: Clear Redis queues and task details -2. **Start Worker**: Spawn single worker process +2. **Start Actor**: Spawn single actor process 3. **Create Task**: Use Lua script to create one task with timestamp 4. **Wait & Measure**: Poll task until complete, calculate latency -5. **Cleanup**: Kill worker and clear Redis +5. **Cleanup**: Kill actor and clear Redis ## Latency Calculation @@ -55,7 +55,7 @@ latency_ms = updated_at - created_at Where: - `created_at`: Timestamp when task was created (Lua script) -- `updated_at`: Timestamp when worker completed task +- `updated_at`: Timestamp when actor completed task ## Future Iterations diff --git a/_archive/benches/simple_rhai_bench/batch_task.lua b/_archive/benches/simple_rhai_bench/batch_task.lua index f639aeb..d9db5aa 100644 --- a/_archive/benches/simple_rhai_bench/batch_task.lua +++ b/_archive/benches/simple_rhai_bench/batch_task.lua @@ -15,7 +15,7 @@ if task_count <= 0 or task_count > 10000 then return redis.error_reply("task_count must be a positive integer between 1 and 10000") end --- Get current timestamp in Unix seconds (to match worker expectations) +-- Get current timestamp in Unix seconds (to match actor expectations) local rhai_task_queue = 'rhai_tasks:' .. 
circle_name local task_keys = {} local current_time = redis.call('TIME')[1] @@ -35,7 +35,7 @@ for i = 1, task_count do 'task_sequence', tostring(i) ) - -- Queue the task for workers + -- Queue the task for actors redis.call('LPUSH', rhai_task_queue, task_id) -- Add key to return array diff --git a/_archive/benches/simple_rhai_bench/main.rs b/_archive/benches/simple_rhai_bench/main.rs index 05f2a53..177a0e6 100644 --- a/_archive/benches/simple_rhai_bench/main.rs +++ b/_archive/benches/simple_rhai_bench/main.rs @@ -23,23 +23,23 @@ fn cleanup_redis() -> Result<(), redis::RedisError> { Ok(()) } -fn start_worker() -> Result { +fn start_actor() -> Result { Command::new("cargo") .args(&[ "run", "--release", "--bin", - "worker", + "actor", "--", "--circle", CIRCLE_NAME, "--redis-url", REDIS_URL, - "--worker-id", - "bench_worker", + "--actor-id", + "bench_actor", "--preserve-tasks", ]) - .current_dir("src/worker") + .current_dir("src/actor") .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn() @@ -126,26 +126,26 @@ fn wait_for_batch_completion(task_keys: &[String]) -> Result Result<(), std::io::Error> { - worker.kill()?; - worker.wait()?; +fn cleanup_actor(mut actor: Child) -> Result<(), std::io::Error> { + actor.kill()?; + actor.wait()?; Ok(()) } fn bench_single_rhai_task(c: &mut Criterion) { - // Setup: ensure worker is built + // Setup: ensure actor is built let _ = Command::new("cargo") - .args(&["build", "--release", "--bin", "worker"]) - .current_dir("src/worker") + .args(&["build", "--release", "--bin", "actor"]) + .current_dir("src/actor") .output() - .expect("Failed to build worker"); + .expect("Failed to build actor"); // Clean up before starting cleanup_redis().expect("Failed to cleanup Redis"); - // Start worker once and reuse it - let worker = start_worker().expect("Failed to start worker"); - thread::sleep(Duration::from_millis(1000)); // Give worker time to start + // Start actor once and reuse it + let actor = start_actor().expect("Failed to start actor"); + thread::sleep(Duration::from_millis(1000)); // Give actor time to start let mut group = c.benchmark_group("rhai_task_execution"); group.sample_size(10); // Reduce sample size @@ -174,8 +174,8 @@ fn bench_single_rhai_task(c: &mut Criterion) { group.finish(); - // Cleanup worker - cleanup_worker(worker).expect("Failed to cleanup worker"); + // Cleanup actor + cleanup_actor(actor).expect("Failed to cleanup actor"); cleanup_redis().expect("Failed to cleanup Redis"); } diff --git a/_archive/core/worker/cmd/README.md b/_archive/core/actor/cmd/README.md similarity index 61% rename from _archive/core/worker/cmd/README.md rename to _archive/core/actor/cmd/README.md index eb33441..85eee46 100644 --- a/_archive/core/worker/cmd/README.md +++ b/_archive/core/actor/cmd/README.md @@ -1,38 +1,38 @@ -# Rhai Worker Binary +# Rhai Actor Binary -A command-line worker for executing Rhai scripts from Redis task queues. +A command-line actor for executing Rhai scripts from Redis task queues. 
-## Binary: `worker` +## Binary: `actor` ### Installation Build the binary: ```bash -cargo build --bin worker --release +cargo build --bin actor --release ``` ### Usage ```bash # Basic usage - requires circle public key -worker --circle-public-key +actor --circle-public-key # Custom Redis URL -worker -c --redis-url redis://localhost:6379/1 +actor -c --redis-url redis://localhost:6379/1 -# Custom worker ID and database path -worker -c --worker-id my_worker --db-path /tmp/worker_db +# Custom actor ID and database path +actor -c --actor-id my_actor --db-path /tmp/actor_db # Preserve tasks for debugging/benchmarking -worker -c --preserve-tasks +actor -c --preserve-tasks # Remove timestamps from logs -worker -c --no-timestamp +actor -c --no-timestamp # Increase verbosity -worker -c -v # Debug logging -worker -c -vv # Full debug -worker -c -vvv # Trace logging +actor -c -v # Debug logging +actor -c -vv # Full debug +actor -c -vvv # Trace logging ``` ### Command-Line Options @@ -41,9 +41,9 @@ worker -c -vvv # Trace logging |--------|-------|---------|-------------| | `--circle-public-key` | `-c` | **Required** | Circle public key to listen for tasks | | `--redis-url` | `-r` | `redis://localhost:6379` | Redis connection URL | -| `--worker-id` | `-w` | `worker_1` | Unique worker identifier | +| `--actor-id` | `-w` | `actor_1` | Unique actor identifier | | `--preserve-tasks` | | `false` | Preserve task details after completion | -| `--db-path` | | `worker_rhai_temp_db` | Database path for Rhai engine | +| `--db-path` | | `actor_rhai_temp_db` | Database path for Rhai engine | | `--no-timestamp` | | `false` | Remove timestamps from log output | | `--verbose` | `-v` | | Increase verbosity (stackable) | @@ -58,7 +58,7 @@ worker -c -vvv # Trace logging ### How It Works -1. **Queue Listening**: Worker listens on Redis queue `rhailib:{circle_public_key}` +1. **Queue Listening**: Actor listens on Redis queue `baobab:{circle_public_key}` 2. **Task Processing**: Receives task IDs, fetches task details from Redis 3. **Script Execution**: Executes Rhai scripts with configured engine 4. 
**Result Handling**: Updates task status and sends results to reply queues @@ -66,30 +66,30 @@ worker -c -vvv # Trace logging ### Configuration Examples -#### Development Worker +#### Development Actor ```bash -# Simple development worker -worker -c dev_circle_123 +# Simple development actor +actor -c dev_circle_123 # Development with verbose logging (no timestamps) -worker -c dev_circle_123 -v --no-timestamp +actor -c dev_circle_123 -v --no-timestamp ``` -#### Production Worker +#### Production Actor ```bash -# Production worker with custom configuration -worker \ +# Production actor with custom configuration +actor \ --circle-public-key prod_circle_456 \ --redis-url redis://redis-server:6379/0 \ - --worker-id prod_worker_1 \ - --db-path /var/lib/worker/db \ + --actor-id prod_actor_1 \ + --db-path /var/lib/actor/db \ --preserve-tasks ``` -#### Benchmarking Worker +#### Benchmarking Actor ```bash -# Worker optimized for benchmarking -worker \ +# Actor optimized for benchmarking +actor \ --circle-public-key bench_circle_789 \ --preserve-tasks \ --no-timestamp \ @@ -98,7 +98,7 @@ worker \ ### Error Handling -The worker provides clear error messages for: +The actor provides clear error messages for: - Missing or invalid circle public key - Redis connection failures - Script execution errors @@ -106,7 +106,7 @@ The worker provides clear error messages for: ### Dependencies -- `rhailib_engine`: Rhai engine with heromodels integration +- `baobab_engine`: Rhai engine with heromodels integration - `redis`: Redis client for task queue management - `rhai`: Script execution engine - `clap`: Command-line argument parsing diff --git a/_archive/core/worker/cmd/osis.rs b/_archive/core/actor/cmd/osis.rs similarity index 70% rename from _archive/core/worker/cmd/osis.rs rename to _archive/core/actor/cmd/osis.rs index 87b4e99..1d2d73c 100644 --- a/_archive/core/worker/cmd/osis.rs +++ b/_archive/core/actor/cmd/osis.rs @@ -1,11 +1,11 @@ -//! OSIS Worker Binary - Synchronous worker for system-level operations +//! OSIS Actor Binary - Synchronous actor for system-level operations use clap::Parser; use log::{error, info}; -use rhailib_worker::config::{ConfigError, WorkerConfig}; -use rhailib_worker::engine::create_heromodels_engine; -use rhailib_worker::sync_worker::SyncWorker; -use rhailib_worker::worker_trait::{spawn_worker, WorkerConfig as TraitWorkerConfig}; +use baobab_actor::config::{ConfigError, ActorConfig}; +use baobab_actor::engine::create_heromodels_engine; +use baobab_actor::sync_actor::SyncActor; +use baobab_actor::actor_trait::{spawn_actor, ActorConfig as TraitActorConfig}; use std::path::PathBuf; use std::sync::Arc; use tokio::signal; @@ -15,8 +15,8 @@ use tokio::sync::mpsc; #[command( name = "osis", version = "0.1.0", - about = "OSIS (Operating System Integration Service) - Synchronous Worker", - long_about = "A synchronous worker for Hero framework that processes jobs sequentially. \ + about = "OSIS (Operating System Integration Service) - Synchronous Actor", + long_about = "A synchronous actor for Hero framework that processes jobs sequentially. \ Ideal for system-level operations that require careful resource management." 
)] struct Args { @@ -24,9 +24,9 @@ struct Args { #[arg(short, long, help = "Path to TOML configuration file")] config: PathBuf, - /// Override worker ID from config - #[arg(long, help = "Override worker ID from configuration file")] - worker_id: Option, + /// Override actor ID from config + #[arg(long, help = "Override actor ID from configuration file")] + actor_id: Option, /// Override Redis URL from config #[arg(long, help = "Override Redis URL from configuration file")] @@ -50,7 +50,7 @@ async fn main() -> Result<(), Box> { let args = Args::parse(); // Load configuration from TOML file - let mut config = match WorkerConfig::from_file(&args.config) { + let mut config = match ActorConfig::from_file(&args.config) { Ok(config) => config, Err(e) => { eprintln!("Failed to load configuration from {:?}: {}", args.config, e); @@ -58,17 +58,17 @@ async fn main() -> Result<(), Box> { } }; - // Validate that this is a sync worker configuration + // Validate that this is a sync actor configuration if !config.is_sync() { - eprintln!("Error: OSIS worker requires a sync worker configuration"); - eprintln!("Expected: [worker_type] type = \"sync\""); - eprintln!("Found: {:?}", config.worker_type); + eprintln!("Error: OSIS actor requires a sync actor configuration"); + eprintln!("Expected: [actor_type] type = \"sync\""); + eprintln!("Found: {:?}", config.actor_type); std::process::exit(1); } // Apply command line overrides - if let Some(worker_id) = args.worker_id { - config.worker_id = worker_id; + if let Some(actor_id) = args.actor_id { + config.actor_id = actor_id; } if let Some(redis_url) = args.redis_url { config.redis_url = redis_url; @@ -80,8 +80,8 @@ async fn main() -> Result<(), Box> { // Configure logging setup_logging(&config, args.verbose, args.no_timestamp)?; - info!("๐Ÿš€ OSIS Worker starting..."); - info!("Worker ID: {}", config.worker_id); + info!("๐Ÿš€ OSIS Actor starting..."); + info!("Actor ID: {}", config.actor_id); info!("Redis URL: {}", config.redis_url); info!("Database Path: {}", config.db_path); info!("Preserve Tasks: {}", config.preserve_tasks); @@ -90,17 +90,17 @@ async fn main() -> Result<(), Box> { let engine = create_heromodels_engine(); info!("โœ… Rhai engine initialized"); - // Create worker configuration for the trait-based interface - let worker_config = TraitWorkerConfig::new( - config.worker_id.clone(), + // Create actor configuration for the trait-based interface + let actor_config = TraitActorConfig::new( + config.actor_id.clone(), config.db_path.clone(), config.redis_url.clone(), config.preserve_tasks, ); - // Create sync worker instance - let worker = Arc::new(SyncWorker::default()); - info!("โœ… Sync worker instance created"); + // Create sync actor instance + let actor = Arc::new(SyncActor::default()); + info!("โœ… Sync actor instance created"); // Setup shutdown signal handling let (shutdown_tx, shutdown_rx) = mpsc::channel(1); @@ -118,21 +118,21 @@ async fn main() -> Result<(), Box> { } }); - // Spawn the worker - info!("๐Ÿ”„ Starting worker loop..."); - let worker_handle = spawn_worker(worker, engine, shutdown_rx); + // Spawn the actor + info!("๐Ÿ”„ Starting actor loop..."); + let actor_handle = spawn_actor(actor, engine, shutdown_rx); - // Wait for the worker to complete - match worker_handle.await { + // Wait for the actor to complete + match actor_handle.await { Ok(Ok(())) => { - info!("โœ… OSIS Worker shut down gracefully"); + info!("โœ… OSIS Actor shut down gracefully"); } Ok(Err(e)) => { - error!("โŒ OSIS Worker encountered an error: {}", e); + 
error!("โŒ OSIS Actor encountered an error: {}", e); std::process::exit(1); } Err(e) => { - error!("โŒ Failed to join worker task: {}", e); + error!("โŒ Failed to join actor task: {}", e); std::process::exit(1); } } @@ -142,7 +142,7 @@ async fn main() -> Result<(), Box> { /// Setup logging based on configuration and command line arguments fn setup_logging( - config: &WorkerConfig, + config: &ActorConfig, verbose: bool, no_timestamp: bool, ) -> Result<(), Box> { @@ -187,11 +187,11 @@ mod tests { #[test] fn test_config_validation() { let config_toml = r#" -worker_id = "test_osis" +actor_id = "test_osis" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "sync" [logging] @@ -201,20 +201,20 @@ level = "info" let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let config = WorkerConfig::from_file(temp_file.path()).unwrap(); + let config = ActorConfig::from_file(temp_file.path()).unwrap(); assert!(config.is_sync()); assert!(!config.is_async()); - assert_eq!(config.worker_id, "test_osis"); + assert_eq!(config.actor_id, "test_osis"); } #[test] fn test_async_config_rejection() { let config_toml = r#" -worker_id = "test_osis" +actor_id = "test_osis" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 300 @@ -225,7 +225,7 @@ level = "info" let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let config = WorkerConfig::from_file(temp_file.path()).unwrap(); + let config = ActorConfig::from_file(temp_file.path()).unwrap(); assert!(!config.is_sync()); assert!(config.is_async()); // This would be rejected in main() function diff --git a/_archive/core/worker/cmd/system.rs b/_archive/core/actor/cmd/system.rs similarity index 68% rename from _archive/core/worker/cmd/system.rs rename to _archive/core/actor/cmd/system.rs index ae65e99..7671718 100644 --- a/_archive/core/worker/cmd/system.rs +++ b/_archive/core/actor/cmd/system.rs @@ -1,11 +1,11 @@ -//! System Worker Binary - Asynchronous worker for high-throughput concurrent processing +//! System Actor Binary - Asynchronous actor for high-throughput concurrent processing use clap::Parser; use log::{error, info, warn}; -use rhailib_worker::async_worker_impl::AsyncWorker; -use rhailib_worker::config::{ConfigError, WorkerConfig}; -use rhailib_worker::engine::create_heromodels_engine; -use rhailib_worker::worker_trait::{spawn_worker, WorkerConfig as TraitWorkerConfig}; +use baobab_actor::async_actor_impl::AsyncActor; +use baobab_actor::config::{ConfigError, ActorConfig}; +use baobab_actor::engine::create_heromodels_engine; +use baobab_actor::actor_trait::{spawn_actor, ActorConfig as TraitActorConfig}; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -16,8 +16,8 @@ use tokio::sync::mpsc; #[command( name = "system", version = "0.1.0", - about = "System Worker - Asynchronous Worker with Concurrent Job Processing", - long_about = "An asynchronous worker for Hero framework that processes multiple jobs \ + about = "System Actor - Asynchronous Actor with Concurrent Job Processing", + long_about = "An asynchronous actor for Hero framework that processes multiple jobs \ concurrently with timeout support. Ideal for high-throughput scenarios \ where jobs can be executed in parallel." 
)] @@ -26,9 +26,9 @@ struct Args { #[arg(short, long, help = "Path to TOML configuration file")] config: PathBuf, - /// Override worker ID from config - #[arg(long, help = "Override worker ID from configuration file")] - worker_id: Option, + /// Override actor ID from config + #[arg(long, help = "Override actor ID from configuration file")] + actor_id: Option, /// Override Redis URL from config #[arg(long, help = "Override Redis URL from configuration file")] @@ -50,8 +50,8 @@ struct Args { #[arg(long, help = "Remove timestamps from log output")] no_timestamp: bool, - /// Show worker statistics periodically - #[arg(long, help = "Show periodic worker statistics")] + /// Show actor statistics periodically + #[arg(long, help = "Show periodic actor statistics")] show_stats: bool, } @@ -60,7 +60,7 @@ async fn main() -> Result<(), Box> { let args = Args::parse(); // Load configuration from TOML file - let mut config = match WorkerConfig::from_file(&args.config) { + let mut config = match ActorConfig::from_file(&args.config) { Ok(config) => config, Err(e) => { eprintln!("Failed to load configuration from {:?}: {}", args.config, e); @@ -68,17 +68,17 @@ async fn main() -> Result<(), Box> { } }; - // Validate that this is an async worker configuration + // Validate that this is an async actor configuration if !config.is_async() { - eprintln!("Error: System worker requires an async worker configuration"); - eprintln!("Expected: [worker_type] type = \"async\""); - eprintln!("Found: {:?}", config.worker_type); + eprintln!("Error: System actor requires an async actor configuration"); + eprintln!("Expected: [actor_type] type = \"async\""); + eprintln!("Found: {:?}", config.actor_type); std::process::exit(1); } // Apply command line overrides - if let Some(worker_id) = args.worker_id { - config.worker_id = worker_id; + if let Some(actor_id) = args.actor_id { + config.actor_id = actor_id; } if let Some(redis_url) = args.redis_url { config.redis_url = redis_url; @@ -89,7 +89,7 @@ async fn main() -> Result<(), Box> { // Override timeout if specified if let Some(timeout_secs) = args.timeout { - if let rhailib_worker::config::WorkerType::Async { ref mut default_timeout_seconds } = config.worker_type { + if let baobab_actor::config::ActorType::Async { ref mut default_timeout_seconds } = config.actor_type { *default_timeout_seconds = timeout_secs; } } @@ -97,8 +97,8 @@ async fn main() -> Result<(), Box> { // Configure logging setup_logging(&config, args.verbose, args.no_timestamp)?; - info!("๐Ÿš€ System Worker starting..."); - info!("Worker ID: {}", config.worker_id); + info!("๐Ÿš€ System Actor starting..."); + info!("Actor ID: {}", config.actor_id); info!("Redis URL: {}", config.redis_url); info!("Database Path: {}", config.db_path); info!("Preserve Tasks: {}", config.preserve_tasks); @@ -111,22 +111,22 @@ async fn main() -> Result<(), Box> { let engine = create_heromodels_engine(); info!("โœ… Rhai engine initialized"); - // Create worker configuration for the trait-based interface - let mut worker_config = TraitWorkerConfig::new( - config.worker_id.clone(), + // Create actor configuration for the trait-based interface + let mut actor_config = TraitActorConfig::new( + config.actor_id.clone(), config.db_path.clone(), config.redis_url.clone(), config.preserve_tasks, ); - // Add timeout configuration for async worker + // Add timeout configuration for async actor if let Some(timeout) = config.get_default_timeout() { - worker_config = worker_config.with_default_timeout(timeout); + actor_config = 
actor_config.with_default_timeout(timeout); } - // Create async worker instance - let worker = Arc::new(AsyncWorker::default()); - info!("โœ… Async worker instance created"); + // Create async actor instance + let actor = Arc::new(AsyncActor::default()); + info!("โœ… Async actor instance created"); // Setup shutdown signal handling let (shutdown_tx, shutdown_rx) = mpsc::channel(1); @@ -146,36 +146,36 @@ async fn main() -> Result<(), Box> { // Spawn statistics reporter if requested if args.show_stats { - let worker_stats = Arc::clone(&worker); + let actor_stats = Arc::clone(&actor); tokio::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(30)); loop { interval.tick().await; - let running_count = worker_stats.running_job_count().await; + let running_count = actor_stats.running_job_count().await; if running_count > 0 { - info!("๐Ÿ“Š Worker Stats: {} jobs currently running", running_count); + info!("๐Ÿ“Š Actor Stats: {} jobs currently running", running_count); } else { - info!("๐Ÿ“Š Worker Stats: No jobs currently running"); + info!("๐Ÿ“Š Actor Stats: No jobs currently running"); } } }); } - // Spawn the worker - info!("๐Ÿ”„ Starting worker loop..."); - let worker_handle = spawn_worker(worker, engine, shutdown_rx); + // Spawn the actor + info!("๐Ÿ”„ Starting actor loop..."); + let actor_handle = spawn_actor(actor, engine, shutdown_rx); - // Wait for the worker to complete - match worker_handle.await { + // Wait for the actor to complete + match actor_handle.await { Ok(Ok(())) => { - info!("โœ… System Worker shut down gracefully"); + info!("โœ… System Actor shut down gracefully"); } Ok(Err(e)) => { - error!("โŒ System Worker encountered an error: {}", e); + error!("โŒ System Actor encountered an error: {}", e); std::process::exit(1); } Err(e) => { - error!("โŒ Failed to join worker task: {}", e); + error!("โŒ Failed to join actor task: {}", e); std::process::exit(1); } } @@ -185,7 +185,7 @@ async fn main() -> Result<(), Box> { /// Setup logging based on configuration and command line arguments fn setup_logging( - config: &WorkerConfig, + config: &ActorConfig, verbose: bool, no_timestamp: bool, ) -> Result<(), Box> { @@ -230,11 +230,11 @@ mod tests { #[test] fn test_config_validation() { let config_toml = r#" -worker_id = "test_system" +actor_id = "test_system" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 600 @@ -245,21 +245,21 @@ level = "info" let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let config = WorkerConfig::from_file(temp_file.path()).unwrap(); + let config = ActorConfig::from_file(temp_file.path()).unwrap(); assert!(!config.is_sync()); assert!(config.is_async()); - assert_eq!(config.worker_id, "test_system"); + assert_eq!(config.actor_id, "test_system"); assert_eq!(config.get_default_timeout(), Some(Duration::from_secs(600))); } #[test] fn test_sync_config_rejection() { let config_toml = r#" -worker_id = "test_system" +actor_id = "test_system" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "sync" [logging] @@ -269,7 +269,7 @@ level = "info" let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let config = WorkerConfig::from_file(temp_file.path()).unwrap(); + let config = ActorConfig::from_file(temp_file.path()).unwrap(); assert!(config.is_sync()); assert!(!config.is_async()); // This would be rejected 
in main() function @@ -278,11 +278,11 @@ level = "info" #[test] fn test_timeout_override() { let config_toml = r#" -worker_id = "test_system" +actor_id = "test_system" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 300 "#; @@ -290,11 +290,11 @@ default_timeout_seconds = 300 let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let mut config = WorkerConfig::from_file(temp_file.path()).unwrap(); + let mut config = ActorConfig::from_file(temp_file.path()).unwrap(); assert_eq!(config.get_default_timeout(), Some(Duration::from_secs(300))); // Test timeout override - if let rhailib_worker::config::WorkerType::Async { ref mut default_timeout_seconds } = config.worker_type { + if let baobab_actor::config::ActorType::Async { ref mut default_timeout_seconds } = config.actor_type { *default_timeout_seconds = 600; } assert_eq!(config.get_default_timeout(), Some(Duration::from_secs(600))); diff --git a/_archive/core/worker/cmd/worker.rs b/_archive/core/actor/cmd/worker.rs similarity index 75% rename from _archive/core/worker/cmd/worker.rs rename to _archive/core/actor/cmd/worker.rs index b0c639c..fd0d671 100644 --- a/_archive/core/worker/cmd/worker.rs +++ b/_archive/core/actor/cmd/worker.rs @@ -1,14 +1,14 @@ use clap::Parser; -use rhailib_worker::engine::create_heromodels_engine; -use rhailib_worker::spawn_rhai_worker; +use baobab_actor::engine::create_heromodels_engine; +use baobab_actor::spawn_rhai_actor; use tokio::sync::mpsc; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { - /// Worker ID for identification + /// Actor ID for identification #[arg(short, long)] - worker_id: String, + actor_id: String, /// Redis URL #[arg(short, long, default_value = "redis://localhost:6379")] @@ -19,7 +19,7 @@ struct Args { preserve_tasks: bool, /// Root directory for engine database - #[arg(long, default_value = "worker_rhai_temp_db")] + #[arg(long, default_value = "actor_rhai_temp_db")] db_path: String, /// Disable timestamps in log output @@ -41,10 +41,10 @@ async fn main() -> Result<(), Box> { } - log::info!("Rhai Worker (binary) starting with performance-optimized engine."); + log::info!("Rhai Actor (binary) starting with performance-optimized engine."); log::info!( - "Worker ID: {}, Redis: {}", - args.worker_id, + "Actor ID: {}, Redis: {}", + args.actor_id, args.redis_url ); @@ -65,9 +65,9 @@ async fn main() -> Result<(), Box> { // Create shutdown channel (for graceful shutdown, though not used in benchmarks) let (_shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1); - // Spawn the worker - let worker_handle = spawn_rhai_worker( - args.worker_id, + // Spawn the actor + let actor_handle = spawn_rhai_actor( + args.actor_id, args.db_path, engine, args.redis_url, @@ -75,20 +75,20 @@ async fn main() -> Result<(), Box> { args.preserve_tasks, ); - // Wait for the worker to complete - match worker_handle.await { + // Wait for the actor to complete + match actor_handle.await { Ok(result) => match result { Ok(_) => { - log::info!("Worker completed successfully"); + log::info!("Actor completed successfully"); Ok(()) } Err(e) => { - log::error!("Worker failed: {}", e); + log::error!("Actor failed: {}", e); Err(e) } }, Err(e) => { - log::error!("Worker task panicked: {}", e); + log::error!("Actor task panicked: {}", e); Err(Box::new(e) as Box) } } diff --git a/_archive/core/worker/examples/README.md b/_archive/core/actor/examples/README.md 
similarity index 70% rename from _archive/core/worker/examples/README.md rename to _archive/core/actor/examples/README.md index 8f6026c..3b8e9f2 100644 --- a/_archive/core/worker/examples/README.md +++ b/_archive/core/actor/examples/README.md @@ -1,11 +1,11 @@ -# Worker Examples +# Actor Examples -This directory contains example configurations and test scripts for both OSIS and System worker binaries. +This directory contains example configurations and test scripts for both OSIS and System actor binaries. ## Overview -Both examples demonstrate the ping/pong functionality built into the Hero workers: -- Workers automatically detect jobs with script content "ping" +Both examples demonstrate the ping/pong functionality built into the Hero actors: +- Actors automatically detect jobs with script content "ping" - They respond immediately with "pong" without executing the Rhai engine - This provides a fast health check and connectivity test mechanism @@ -20,20 +20,20 @@ Both examples demonstrate the ping/pong functionality built into the Hero worker redis-server ``` -2. **Rust Environment**: Make sure you can build the worker binaries +2. **Rust Environment**: Make sure you can build the actor binaries ```bash - cd /path/to/herocode/hero/core/worker + cd /path/to/herocode/hero/core/actor cargo build --bin osis --bin system ``` -## OSIS Worker Example +## OSIS Actor Example **Location**: `examples/osis/` -The OSIS (Operating System Integration Service) worker processes jobs synchronously, one at a time. +The OSIS (Operating System Integration Service) actor processes jobs synchronously, one at a time. ### Files -- `config.toml` - Configuration for the OSIS worker +- `config.toml` - Configuration for the OSIS actor - `example.sh` - Test script that demonstrates ping/pong functionality ### Usage @@ -45,31 +45,31 @@ cd examples/osis ### What the script does: 1. Checks Redis connectivity 2. Cleans up any existing jobs -3. Starts the OSIS worker in the background +3. Starts the OSIS actor in the background 4. Sends 3 ping jobs sequentially 5. Verifies each job receives a "pong" response 6. Reports success/failure statistics -7. Cleans up worker and Redis data +7. Cleans up actor and Redis data ### Expected Output ``` -=== OSIS Worker Example === +=== OSIS Actor Example === โœ… Redis is running -โœ… OSIS worker started (PID: 12345) +โœ… OSIS actor started (PID: 12345) ๐Ÿ“ค Sending ping job: ping_job_1_1234567890 โœ… Job ping_job_1_1234567890 completed successfully with result: pong ... -๐ŸŽ‰ All tests passed! OSIS worker is working correctly. +๐ŸŽ‰ All tests passed! OSIS actor is working correctly. ``` -## System Worker Example +## System Actor Example **Location**: `examples/system/` -The System worker processes jobs asynchronously, handling multiple jobs concurrently. +The System actor processes jobs asynchronously, handling multiple jobs concurrently. ### Files -- `config.toml` - Configuration for the System worker (includes async settings) +- `config.toml` - Configuration for the System actor (includes async settings) - `example.sh` - Test script that demonstrates concurrent ping/pong functionality ### Usage @@ -81,22 +81,22 @@ cd examples/system ### What the script does: 1. Checks Redis connectivity 2. Cleans up any existing jobs -3. Starts the System worker with stats reporting +3. Starts the System actor with stats reporting 4. Sends 5 concurrent ping jobs 5. Sends 10 rapid-fire ping jobs to test async capabilities 6. Verifies all jobs receive "pong" responses 7. 
Reports comprehensive success/failure statistics -8. Cleans up worker and Redis data +8. Cleans up actor and Redis data ### Expected Output ``` -=== System Worker Example === +=== System Actor Example === โœ… Redis is running -โœ… System worker started (PID: 12345) +โœ… System actor started (PID: 12345) ๐Ÿ“ค Sending ping job: ping_job_1_1234567890123 โœ… Job ping_job_1_1234567890123 completed successfully with result: pong ... -๐ŸŽ‰ All tests passed! System worker is handling concurrent jobs correctly. +๐ŸŽ‰ All tests passed! System actor is handling concurrent jobs correctly. Overall success rate: 15/15 ``` @@ -104,12 +104,12 @@ Overall success rate: 15/15 ### OSIS Configuration (`examples/osis/config.toml`) ```toml -worker_id = "osis_example_worker" +actor_id = "osis_example_actor" redis_url = "redis://localhost:6379" db_path = "/tmp/osis_example_db" preserve_tasks = false -[worker_type] +[actor_type] type = "sync" [logging] @@ -119,12 +119,12 @@ level = "info" ### System Configuration (`examples/system/config.toml`) ```toml -worker_id = "system_example_worker" +actor_id = "system_example_actor" redis_url = "redis://localhost:6379" db_path = "/tmp/system_example_db" preserve_tasks = false -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 30 @@ -135,7 +135,7 @@ level = "info" ## Key Differences -| Feature | OSIS Worker | System Worker | +| Feature | OSIS Actor | System Actor | |---------|-------------|---------------| | **Processing** | Sequential (one job at a time) | Concurrent (multiple jobs simultaneously) | | **Use Case** | System-level operations requiring resource management | High-throughput job processing | @@ -154,7 +154,7 @@ redis-cli ping redis-server --loglevel verbose ``` -### Worker Compilation Issues +### Actor Compilation Issues ```bash # Clean and rebuild cargo clean @@ -164,7 +164,7 @@ cargo build --bin osis --bin system ### Job Processing Issues - Check Redis for stuck jobs: `redis-cli keys "hero:*"` - Clear all Hero jobs: `redis-cli eval "return redis.call('del', unpack(redis.call('keys', 'hero:*')))" 0` -- Check worker logs for detailed error messages +- Check actor logs for detailed error messages ## Extending the Examples @@ -183,15 +183,15 @@ To test with custom Rhai scripts instead of ping jobs: ### Testing Different Configurations - Modify `config.toml` files to test different Redis URLs, database paths, or logging levels - Test with `preserve_tasks = true` to inspect job details after completion -- Adjust timeout values in the System worker configuration +- Adjust timeout values in the System actor configuration ## Architecture Notes -Both examples demonstrate the unified Worker trait architecture: -- **Common Interface**: Both workers implement the same `Worker` trait +Both examples demonstrate the unified Actor trait architecture: +- **Common Interface**: Both actors implement the same `Actor` trait - **Ping/Pong Handling**: Built into the trait's `spawn` method before job delegation - **Redis Integration**: Uses the shared Job struct from `hero_job` crate - **Configuration**: TOML-based configuration with CLI overrides -- **Graceful Shutdown**: Both workers handle SIGTERM/SIGINT properly +- **Graceful Shutdown**: Both actors handle SIGTERM/SIGINT properly -This architecture allows for easy extension with new worker types while maintaining consistent behavior and configuration patterns. +This architecture allows for easy extension with new actor types while maintaining consistent behavior and configuration patterns. 
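For orientation, a minimal sketch of the ping/pong short-circuit the examples rely on (illustrative only; the real check lives in the `Actor` trait's `spawn` method, and the function name here is hypothetical):

```rust
// Illustrative only: how an actor can answer "ping" health checks
// without ever touching the Rhai engine.
fn handle_script(script: &str) -> String {
    if script.trim() == "ping" {
        // Health-check path: reply immediately, skip script execution entirely.
        return "pong".to_string();
    }
    // Any other payload would be handed to the Rhai engine for execution.
    format!("executed: {script}")
}
```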
diff --git a/_archive/core/worker/examples/osis/config.toml b/_archive/core/actor/examples/osis/config.toml similarity index 76% rename from _archive/core/worker/examples/osis/config.toml rename to _archive/core/actor/examples/osis/config.toml index b6d2890..0311457 100644 --- a/_archive/core/worker/examples/osis/config.toml +++ b/_archive/core/actor/examples/osis/config.toml @@ -1,9 +1,9 @@ -worker_id = "osis_example_worker" +actor_id = "osis_example_actor" redis_url = "redis://localhost:6379" db_path = "/tmp/osis_example_db" preserve_tasks = false -[worker_type] +[actor_type] type = "sync" [logging] diff --git a/_archive/core/worker/examples/osis/example.sh b/_archive/core/actor/examples/osis/example.sh similarity index 73% rename from _archive/core/worker/examples/osis/example.sh rename to _archive/core/actor/examples/osis/example.sh index cfb3498..0a9428b 100755 --- a/_archive/core/worker/examples/osis/example.sh +++ b/_archive/core/actor/examples/osis/example.sh @@ -1,8 +1,8 @@ #!/bin/bash -# OSIS Worker Example Script -# This script demonstrates the OSIS worker by: -# 1. Starting the worker with the config.toml +# OSIS Actor Example Script +# This script demonstrates the OSIS actor by: +# 1. Starting the actor with the config.toml # 2. Sending ping jobs to Redis # 3. Verifying pong responses @@ -10,13 +10,13 @@ set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" CONFIG_FILE="$SCRIPT_DIR/config.toml" -WORKER_ID="osis_example_worker" +ACTOR_ID="osis_example_actor" REDIS_URL="redis://localhost:6379" -echo "=== OSIS Worker Example ===" +echo "=== OSIS Actor Example ===" echo "Script directory: $SCRIPT_DIR" echo "Config file: $CONFIG_FILE" -echo "Worker ID: $WORKER_ID" +echo "Actor ID: $ACTOR_ID" echo "Redis URL: $REDIS_URL" echo @@ -32,21 +32,21 @@ echo # Clean up any existing jobs in the queue echo "Cleaning up existing jobs in Redis..." -redis-cli -u "$REDIS_URL" del "hero:jobs:$WORKER_ID" > /dev/null 2>&1 || true +redis-cli -u "$REDIS_URL" del "hero:jobs:$ACTOR_ID" > /dev/null 2>&1 || true redis-cli -u "$REDIS_URL" eval "return redis.call('del', unpack(redis.call('keys', 'hero:job:*')))" 0 > /dev/null 2>&1 || true echo "โœ… Redis queues cleaned" echo -# Start the OSIS worker in the background -echo "Starting OSIS worker..." +# Start the OSIS actor in the background +echo "Starting OSIS actor..." cd "$SCRIPT_DIR/../.." cargo run --bin osis -- --config "$CONFIG_FILE" & -WORKER_PID=$! -echo "โœ… OSIS worker started (PID: $WORKER_PID)" +ACTOR_PID=$! +echo "โœ… OSIS actor started (PID: $ACTOR_PID)" echo -# Wait a moment for the worker to initialize -echo "Waiting for worker to initialize..." +# Wait a moment for the actor to initialize +echo "Waiting for actor to initialize..." sleep 3 # Function to send a ping job and check for pong response @@ -62,10 +62,10 @@ send_ping_job() { script "ping" \ status "Queued" \ created_at "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - worker_id "$WORKER_ID" > /dev/null + actor_id "$ACTOR_ID" > /dev/null - # Add job to worker queue - redis-cli -u "$REDIS_URL" lpush "hero:jobs:$WORKER_ID" "$job_id" > /dev/null + # Add job to actor queue + redis-cli -u "$REDIS_URL" lpush "hero:jobs:$ACTOR_ID" "$job_id" > /dev/null # Wait for job completion and check result local timeout=10 @@ -94,7 +94,7 @@ send_ping_job() { return 1 } -# Send multiple ping jobs to test the worker +# Send multiple ping jobs to test the actor echo "Testing ping/pong functionality..." 
success_count=0 total_jobs=3 @@ -113,26 +113,26 @@ echo "=== Test Results ===" echo "Successful ping/pong tests: $success_count/$total_jobs" if [ $success_count -eq $total_jobs ]; then - echo "๐ŸŽ‰ All tests passed! OSIS worker is working correctly." + echo "๐ŸŽ‰ All tests passed! OSIS actor is working correctly." exit_code=0 else - echo "โš ๏ธ Some tests failed. Check the worker logs for details." + echo "โš ๏ธ Some tests failed. Check the actor logs for details." exit_code=1 fi # Clean up echo echo "Cleaning up..." -echo "Stopping OSIS worker (PID: $WORKER_PID)..." -kill $WORKER_PID 2>/dev/null || true -wait $WORKER_PID 2>/dev/null || true -echo "โœ… Worker stopped" +echo "Stopping OSIS actor (PID: $ACTOR_PID)..." +kill $ACTOR_PID 2>/dev/null || true +wait $ACTOR_PID 2>/dev/null || true +echo "โœ… Actor stopped" echo "Cleaning up Redis jobs..." -redis-cli -u "$REDIS_URL" del "hero:jobs:$WORKER_ID" > /dev/null 2>&1 || true +redis-cli -u "$REDIS_URL" del "hero:jobs:$ACTOR_ID" > /dev/null 2>&1 || true redis-cli -u "$REDIS_URL" eval "return redis.call('del', unpack(redis.call('keys', 'hero:job:*')))" 0 > /dev/null 2>&1 || true echo "โœ… Redis cleaned up" echo -echo "=== OSIS Worker Example Complete ===" +echo "=== OSIS Actor Example Complete ===" exit $exit_code diff --git a/_archive/core/actor/examples/osis_config.toml b/_archive/core/actor/examples/osis_config.toml new file mode 100644 index 0000000..ff032d8 --- /dev/null +++ b/_archive/core/actor/examples/osis_config.toml @@ -0,0 +1,14 @@ +# OSIS Actor Configuration +# Synchronous actor for system-level operations + +actor_id = "osis_actor_1" +redis_url = "redis://localhost:6379" +db_path = "/tmp/osis_actor_db" +preserve_tasks = false + +[actor_type] +type = "sync" + +[logging] +timestamps = true +level = "info" diff --git a/_archive/core/worker/examples/osis_worker_demo.rs b/_archive/core/actor/examples/osis_worker_demo.rs similarity index 77% rename from _archive/core/worker/examples/osis_worker_demo.rs rename to _archive/core/actor/examples/osis_worker_demo.rs index b0d7ee6..474b6f1 100644 --- a/_archive/core/worker/examples/osis_worker_demo.rs +++ b/_archive/core/actor/examples/osis_worker_demo.rs @@ -3,12 +3,12 @@ use std::path::Path; use std::env; use std::io::{self, Write}; -/// OSIS Worker Demo Runner +/// OSIS Actor Demo Runner /// -/// This Rust wrapper executes the OSIS worker bash script example. +/// This Rust wrapper executes the OSIS actor bash script example. /// It provides a way to run shell-based examples through Cargo. 
fn main() -> Result<(), Box> { - println!("๐Ÿš€ OSIS Worker Demo"); + println!("๐Ÿš€ OSIS Actor Demo"); println!("=================="); println!(); @@ -19,12 +19,12 @@ fn main() -> Result<(), Box> { // Check if the script exists if !script_path.exists() { eprintln!("โŒ Error: Script not found at {:?}", script_path); - eprintln!(" Make sure you're running this from the worker crate root directory."); + eprintln!(" Make sure you're running this from the actor crate root directory."); std::process::exit(1); } println!("๐Ÿ“ Script location: {:?}", script_path); - println!("๐Ÿ”ง Executing OSIS worker example..."); + println!("๐Ÿ”ง Executing OSIS actor example..."); println!(); // Make sure the script is executable @@ -50,9 +50,9 @@ fn main() -> Result<(), Box> { println!(); if status.success() { - println!("โœ… OSIS worker demo completed successfully!"); + println!("โœ… OSIS actor demo completed successfully!"); } else { - println!("โŒ OSIS worker demo failed with exit code: {:?}", status.code()); + println!("โŒ OSIS actor demo failed with exit code: {:?}", status.code()); std::process::exit(status.code().unwrap_or(1)); } diff --git a/_archive/core/worker/examples/system/config.toml b/_archive/core/actor/examples/system/config.toml similarity index 78% rename from _archive/core/worker/examples/system/config.toml rename to _archive/core/actor/examples/system/config.toml index 9e3dbba..22f1c00 100644 --- a/_archive/core/worker/examples/system/config.toml +++ b/_archive/core/actor/examples/system/config.toml @@ -1,9 +1,9 @@ -worker_id = "system_example_worker" +actor_id = "system_example_actor" redis_url = "redis://localhost:6379" db_path = "/tmp/system_example_db" preserve_tasks = false -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 30 diff --git a/_archive/core/worker/examples/system/example.sh b/_archive/core/actor/examples/system/example.sh similarity index 79% rename from _archive/core/worker/examples/system/example.sh rename to _archive/core/actor/examples/system/example.sh index d520980..e94b161 100755 --- a/_archive/core/worker/examples/system/example.sh +++ b/_archive/core/actor/examples/system/example.sh @@ -1,8 +1,8 @@ #!/bin/bash -# System Worker Example Script -# This script demonstrates the System worker by: -# 1. Starting the worker with the config.toml +# System Actor Example Script +# This script demonstrates the System actor by: +# 1. Starting the actor with the config.toml # 2. Sending multiple concurrent ping jobs to Redis # 3. Verifying pong responses @@ -10,13 +10,13 @@ set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" CONFIG_FILE="$SCRIPT_DIR/config.toml" -WORKER_ID="system_example_worker" +ACTOR_ID="system_example_actor" REDIS_URL="redis://localhost:6379" -echo "=== System Worker Example ===" +echo "=== System Actor Example ===" echo "Script directory: $SCRIPT_DIR" echo "Config file: $CONFIG_FILE" -echo "Worker ID: $WORKER_ID" +echo "Actor ID: $ACTOR_ID" echo "Redis URL: $REDIS_URL" echo @@ -32,21 +32,21 @@ echo # Clean up any existing jobs in the queue echo "Cleaning up existing jobs in Redis..." -redis-cli -u "$REDIS_URL" del "hero:jobs:$WORKER_ID" > /dev/null 2>&1 || true +redis-cli -u "$REDIS_URL" del "hero:jobs:$ACTOR_ID" > /dev/null 2>&1 || true redis-cli -u "$REDIS_URL" eval "return redis.call('del', unpack(redis.call('keys', 'hero:job:*')))" 0 > /dev/null 2>&1 || true echo "โœ… Redis queues cleaned" echo -# Start the System worker in the background -echo "Starting System worker..." 
+# Start the System actor in the background +echo "Starting System actor..." cd "$SCRIPT_DIR/../.." cargo run --bin system -- --config "$CONFIG_FILE" --show-stats & -WORKER_PID=$! -echo "โœ… System worker started (PID: $WORKER_PID)" +ACTOR_PID=$! +echo "โœ… System actor started (PID: $ACTOR_PID)" echo -# Wait a moment for the worker to initialize -echo "Waiting for worker to initialize..." +# Wait a moment for the actor to initialize +echo "Waiting for actor to initialize..." sleep 3 # Function to send a ping job (non-blocking) @@ -62,10 +62,10 @@ send_ping_job() { script "ping" \ status "Queued" \ created_at "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - worker_id "$WORKER_ID" > /dev/null + actor_id "$ACTOR_ID" > /dev/null - # Add job to worker queue - redis-cli -u "$REDIS_URL" lpush "hero:jobs:$WORKER_ID" "$job_id" > /dev/null + # Add job to actor queue + redis-cli -u "$REDIS_URL" lpush "hero:jobs:$ACTOR_ID" "$job_id" > /dev/null echo "$job_id" } @@ -129,10 +129,10 @@ echo "=== Test Results ===" echo "Successful concurrent ping/pong tests: $success_count/$total_jobs" if [ $success_count -eq $total_jobs ]; then - echo "๐ŸŽ‰ All tests passed! System worker is handling concurrent jobs correctly." + echo "๐ŸŽ‰ All tests passed! System actor is handling concurrent jobs correctly." exit_code=0 else - echo "โš ๏ธ Some tests failed. Check the worker logs for details." + echo "โš ๏ธ Some tests failed. Check the actor logs for details." exit_code=1 fi @@ -160,18 +160,18 @@ echo "Rapid submission test: $rapid_success/$rapid_jobs successful" # Clean up echo echo "Cleaning up..." -echo "Stopping System worker (PID: $WORKER_PID)..." -kill $WORKER_PID 2>/dev/null || true -wait $WORKER_PID 2>/dev/null || true -echo "โœ… Worker stopped" +echo "Stopping System actor (PID: $ACTOR_PID)..." +kill $ACTOR_PID 2>/dev/null || true +wait $ACTOR_PID 2>/dev/null || true +echo "โœ… Actor stopped" echo "Cleaning up Redis jobs..." 
-redis-cli -u "$REDIS_URL" del "hero:jobs:$WORKER_ID" > /dev/null 2>&1 || true +redis-cli -u "$REDIS_URL" del "hero:jobs:$ACTOR_ID" > /dev/null 2>&1 || true redis-cli -u "$REDIS_URL" eval "return redis.call('del', unpack(redis.call('keys', 'hero:job:*')))" 0 > /dev/null 2>&1 || true echo "โœ… Redis cleaned up" echo -echo "=== System Worker Example Complete ===" +echo "=== System Actor Example Complete ===" total_success=$((success_count + rapid_success)) total_tests=$((total_jobs + rapid_jobs)) echo "Overall success rate: $total_success/$total_tests" diff --git a/_archive/core/actor/examples/system_config.toml b/_archive/core/actor/examples/system_config.toml new file mode 100644 index 0000000..ee3fc15 --- /dev/null +++ b/_archive/core/actor/examples/system_config.toml @@ -0,0 +1,15 @@ +# System Actor Configuration +# Asynchronous actor for high-throughput concurrent processing + +actor_id = "system_actor_1" +redis_url = "redis://localhost:6379" +db_path = "/tmp/system_actor_db" +preserve_tasks = false + +[actor_type] +type = "async" +default_timeout_seconds = 300 # 5 minutes + +[logging] +timestamps = true +level = "info" diff --git a/_archive/core/worker/examples/system_worker_demo.rs b/_archive/core/actor/examples/system_worker_demo.rs similarity index 76% rename from _archive/core/worker/examples/system_worker_demo.rs rename to _archive/core/actor/examples/system_worker_demo.rs index e476b64..ef92be7 100644 --- a/_archive/core/worker/examples/system_worker_demo.rs +++ b/_archive/core/actor/examples/system_worker_demo.rs @@ -3,12 +3,12 @@ use std::path::Path; use std::env; use std::io::{self, Write}; -/// System Worker Demo Runner +/// System Actor Demo Runner /// -/// This Rust wrapper executes the System worker bash script example. +/// This Rust wrapper executes the System actor bash script example. /// It provides a way to run shell-based examples through Cargo. fn main() -> Result<(), Box> { - println!("๐Ÿš€ System Worker Demo"); + println!("๐Ÿš€ System Actor Demo"); println!("===================="); println!(); @@ -19,12 +19,12 @@ fn main() -> Result<(), Box> { // Check if the script exists if !script_path.exists() { eprintln!("โŒ Error: Script not found at {:?}", script_path); - eprintln!(" Make sure you're running this from the worker crate root directory."); + eprintln!(" Make sure you're running this from the actor crate root directory."); std::process::exit(1); } println!("๐Ÿ“ Script location: {:?}", script_path); - println!("๐Ÿ”ง Executing System worker example..."); + println!("๐Ÿ”ง Executing System actor example..."); println!(); // Make sure the script is executable @@ -50,9 +50,9 @@ fn main() -> Result<(), Box> { println!(); if status.success() { - println!("โœ… System worker demo completed successfully!"); + println!("โœ… System actor demo completed successfully!"); } else { - println!("โŒ System worker demo failed with exit code: {:?}", status.code()); + println!("โŒ System actor demo failed with exit code: {:?}", status.code()); std::process::exit(status.code().unwrap_or(1)); } diff --git a/_archive/core/worker/examples/trait_based_worker_demo.rs b/_archive/core/actor/examples/trait_based_worker_demo.rs similarity index 67% rename from _archive/core/worker/examples/trait_based_worker_demo.rs rename to _archive/core/actor/examples/trait_based_worker_demo.rs index 8aee64e..05bc349 100644 --- a/_archive/core/worker/examples/trait_based_worker_demo.rs +++ b/_archive/core/actor/examples/trait_based_worker_demo.rs @@ -1,13 +1,13 @@ -//! # Trait-Based Worker Demo +//! 
# Trait-Based Actor Demo //! -//! This example demonstrates the new unified worker interface using the Worker trait. -//! It shows how both synchronous and asynchronous workers can be used with the same +//! This example demonstrates the new unified actor interface using the Actor trait. +//! It shows how both synchronous and asynchronous actors can be used with the same //! API, eliminating code duplication and providing a clean, consistent interface. //! //! ## Features Demonstrated //! -//! - Unified worker interface using the Worker trait -//! - Both sync and async worker implementations +//! - Unified actor interface using the Actor trait +//! - Both sync and async actor implementations //! - Shared configuration and spawn logic //! - Clean shutdown handling //! - Job processing with different strategies @@ -16,16 +16,16 @@ //! //! Make sure Redis is running on localhost:6379, then run: //! ```bash -//! cargo run --example trait_based_worker_demo +//! cargo run --example trait_based_actor_demo //! ``` use hero_job::{Job, JobStatus, ScriptType}; use log::{info, warn, error}; -use rhailib_worker::{ - SyncWorker, AsyncWorker, - spawn_sync_worker, spawn_async_worker, +use baobab_actor::{ + SyncActor, AsyncActor, + spawn_sync_actor, spawn_async_actor, engine::create_heromodels_engine, - worker_trait::{spawn_worker, Worker} + actor_trait::{spawn_actor, Actor} }; use redis::AsyncCommands; use std::sync::Arc; @@ -40,7 +40,7 @@ async fn main() -> Result<(), Box> { // Initialize logging env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); - info!("Starting Trait-Based Worker Demo"); + info!("Starting Trait-Based Actor Demo"); // Create Redis connection for job creation let redis_client = redis::Client::open(REDIS_URL)?; @@ -49,83 +49,83 @@ async fn main() -> Result<(), Box> { // Demo 1: Using the unified trait-based interface info!("=== Demo 1: Unified Trait-Based Interface ==="); - // Create shutdown channels for both workers + // Create shutdown channels for both actors let (sync_shutdown_tx, sync_shutdown_rx) = mpsc::channel::<()>(1); let (async_shutdown_tx, async_shutdown_rx) = mpsc::channel::<()>(1); - // Workers are now configured using builder pattern directly + // Actors are now configured using builder pattern directly - // Create worker instances using builder pattern - let sync_worker = Arc::new( - SyncWorker::builder() - .worker_id("demo_sync_worker") + // Create actor instances using builder pattern + let sync_actor = Arc::new( + SyncActor::builder() + .actor_id("demo_sync_actor") .db_path("/tmp") .redis_url("redis://localhost:6379") .preserve_tasks(false) .build() - .expect("Failed to build SyncWorker") + .expect("Failed to build SyncActor") ); - let async_worker = Arc::new( - AsyncWorker::builder() - .worker_id("demo_async_worker") + let async_actor = Arc::new( + AsyncActor::builder() + .actor_id("demo_async_actor") .db_path("/tmp") .redis_url("redis://localhost:6379") .default_timeout(Duration::from_secs(300)) .build() - .expect("Failed to build AsyncWorker") + .expect("Failed to build AsyncActor") ); let sync_engine = create_heromodels_engine(); let async_engine = create_heromodels_engine(); - info!("Spawning {} worker: {}", sync_worker.worker_type(), sync_worker.worker_id()); - let sync_handle = spawn_worker(sync_worker.clone(), sync_engine, sync_shutdown_rx); + info!("Spawning {} actor: {}", sync_actor.actor_type(), sync_actor.actor_id()); + let sync_handle = spawn_actor(sync_actor.clone(), sync_engine, sync_shutdown_rx); - 
info!("Spawning {} worker: {}", async_worker.worker_type(), async_worker.worker_id()); - let async_handle = spawn_worker(async_worker.clone(), async_engine, async_shutdown_rx); + info!("Spawning {} actor: {}", async_actor.actor_type(), async_actor.actor_id()); + let async_handle = spawn_actor(async_actor.clone(), async_engine, async_shutdown_rx); - // Give workers time to start + // Give actors time to start sleep(Duration::from_secs(1)).await; - // Create and dispatch jobs to both workers - info!("Creating demo jobs for both workers..."); + // Create and dispatch jobs to both actors + info!("Creating demo jobs for both actors..."); - // Job for sync worker - simple calculation + // Job for sync actor - simple calculation let sync_job = create_demo_job( "sync_calculation", r#" - print("Sync worker: Starting calculation..."); + print("Sync actor: Starting calculation..."); let result = 0; for i in 1..=100 { result += i; } - print("Sync worker: Sum of 1-100 = " + result); + print("Sync actor: Sum of 1-100 = " + result); result "#, None, ).await?; - dispatch_job(&mut redis_conn, &sync_job, sync_worker.worker_id()).await?; - info!("Dispatched job to sync worker: {}", sync_job.id); + dispatch_job(&mut redis_conn, &sync_job, sync_actor.actor_id()).await?; + info!("Dispatched job to sync actor: {}", sync_job.id); - // Job for async worker - with timeout demonstration + // Job for async actor - with timeout demonstration let async_job = create_demo_job( "async_calculation", r#" - print("Async worker: Starting calculation..."); + print("Async actor: Starting calculation..."); let result = 1; for i in 1..=10 { result *= i; } - print("Async worker: 10! = " + result); + print("Async actor: 10! = " + result); result "#, Some(15), // 15 second timeout ).await?; - dispatch_job(&mut redis_conn, &async_job, async_worker.worker_id()).await?; - info!("Dispatched job to async worker: {}", async_job.id); + dispatch_job(&mut redis_conn, &async_job, async_actor.actor_id()).await?; + info!("Dispatched job to async actor: {}", async_job.id); // Monitor job execution info!("Monitoring job execution for 10 seconds..."); @@ -188,13 +188,13 @@ async fn main() -> Result<(), Box> { let (conv_sync_shutdown_tx, conv_sync_shutdown_rx) = mpsc::channel::<()>(1); let (conv_async_shutdown_tx, conv_async_shutdown_rx) = mpsc::channel::<()>(1); - // Spawn workers using convenience functions + // Spawn actors using convenience functions let conv_sync_engine = create_heromodels_engine(); let conv_async_engine = create_heromodels_engine(); - info!("Spawning sync worker using convenience function..."); - let conv_sync_handle = spawn_sync_worker( - "convenience_sync_worker".to_string(), + info!("Spawning sync actor using convenience function..."); + let conv_sync_handle = spawn_sync_actor( + "convenience_sync_actor".to_string(), "/tmp".to_string(), conv_sync_engine, REDIS_URL.to_string(), @@ -202,9 +202,9 @@ async fn main() -> Result<(), Box> { false, ); - info!("Spawning async worker using convenience function..."); - let conv_async_handle = spawn_async_worker( - "convenience_async_worker".to_string(), + info!("Spawning async actor using convenience function..."); + let conv_async_handle = spawn_async_actor( + "convenience_async_actor".to_string(), "/tmp".to_string(), conv_async_engine, REDIS_URL.to_string(), @@ -212,15 +212,15 @@ async fn main() -> Result<(), Box> { Duration::from_secs(20), // 20 second timeout ); - // Give convenience workers time to start + // Give convenience actors time to start 
sleep(Duration::from_secs(1)).await; - // Create jobs for convenience workers + // Create jobs for convenience actors let conv_sync_job = create_demo_job( "convenience_sync", r#" - print("Convenience sync worker: Hello World!"); - "Hello from convenience sync worker" + print("Convenience sync actor: Hello World!"); + "Hello from convenience sync actor" "#, None, ).await?; @@ -228,22 +228,22 @@ async fn main() -> Result<(), Box> { let conv_async_job = create_demo_job( "convenience_async", r#" - print("Convenience async worker: Hello World!"); - "Hello from convenience async worker" + print("Convenience async actor: Hello World!"); + "Hello from convenience async actor" "#, Some(10), ).await?; - dispatch_job(&mut redis_conn, &conv_sync_job, "convenience_sync_worker").await?; - dispatch_job(&mut redis_conn, &conv_async_job, "convenience_async_worker").await?; + dispatch_job(&mut redis_conn, &conv_sync_job, "convenience_sync_actor").await?; + dispatch_job(&mut redis_conn, &conv_async_job, "convenience_async_actor").await?; - info!("Dispatched jobs to convenience workers"); + info!("Dispatched jobs to convenience actors"); // Wait a bit for jobs to complete sleep(Duration::from_secs(5)).await; - // Shutdown all workers gracefully - info!("\n=== Shutting Down All Workers ==="); + // Shutdown all actors gracefully + info!("\n=== Shutting Down All Actors ==="); info!("Sending shutdown signals..."); let _ = sync_shutdown_tx.send(()).await; @@ -251,9 +251,9 @@ async fn main() -> Result<(), Box> { let _ = conv_sync_shutdown_tx.send(()).await; let _ = conv_async_shutdown_tx.send(()).await; - info!("Waiting for workers to shutdown..."); + info!("Waiting for actors to shutdown..."); - // Wait for all workers to shutdown + // Wait for all actors to shutdown let results = tokio::join!( sync_handle, async_handle, @@ -263,23 +263,23 @@ async fn main() -> Result<(), Box> { match results { (Ok(Ok(())), Ok(Ok(())), Ok(Ok(())), Ok(Ok(()))) => { - info!("All workers shut down successfully!"); + info!("All actors shut down successfully!"); } _ => { - error!("Some workers encountered errors during shutdown"); + error!("Some actors encountered errors during shutdown"); } } - info!("Trait-Based Worker Demo completed successfully!"); + info!("Trait-Based Actor Demo completed successfully!"); // Summary info!("\n=== Summary ==="); - info!("โœ… Demonstrated unified Worker trait interface"); - info!("โœ… Showed both sync and async worker implementations"); + info!("โœ… Demonstrated unified Actor trait interface"); + info!("โœ… Showed both sync and async actor implementations"); info!("โœ… Used shared configuration and spawn logic"); info!("โœ… Maintained backward compatibility with convenience functions"); - info!("โœ… Eliminated code duplication between worker types"); - info!("โœ… Provided clean, consistent API for all worker operations"); + info!("โœ… Eliminated code duplication between actor types"); + info!("โœ… Provided clean, consistent API for all actor operations"); Ok(()) } @@ -305,17 +305,17 @@ async fn create_demo_job( Ok(job) } -/// Dispatch a job to the worker queue +/// Dispatch a job to the actor queue async fn dispatch_job( redis_conn: &mut redis::aio::MultiplexedConnection, job: &Job, - worker_queue: &str, + actor_queue: &str, ) -> Result<(), Box> { // Store job in Redis job.store_in_redis(redis_conn).await?; - // Add job to worker queue - let queue_key = format!("hero:job:{}", worker_queue); + // Add job to actor queue + let queue_key = format!("hero:job:{}", actor_queue); let _: () = 
redis_conn.rpush(&queue_key, &job.id).await?; Ok(()) diff --git a/core/worker/src/async_worker_impl.rs b/_archive/core/actor/src/async_worker_impl.rs similarity index 63% rename from core/worker/src/async_worker_impl.rs rename to _archive/core/actor/src/async_worker_impl.rs index 297678d..f0dd39a 100644 --- a/core/worker/src/async_worker_impl.rs +++ b/_archive/core/actor/src/async_worker_impl.rs @@ -1,6 +1,6 @@ -//! # Asynchronous Worker Implementation +//! # Asynchronous Actor Implementation //! -//! This module provides an asynchronous worker implementation that can process +//! This module provides an asynchronous actor implementation that can process //! multiple jobs concurrently with timeout support. Each job is spawned as a //! separate Tokio task, allowing for parallel execution and proper timeout handling. //! @@ -9,7 +9,7 @@ //! - **Concurrent Processing**: Multiple jobs can run simultaneously //! - **Timeout Support**: Jobs that exceed their timeout are automatically cancelled //! - **Resource Cleanup**: Proper cleanup of aborted/cancelled jobs -//! - **Non-blocking**: Worker continues processing new jobs while others are running +//! - **Non-blocking**: Actor continues processing new jobs while others are running //! - **Scalable**: Can handle high job throughput with parallel execution //! //! ## Usage @@ -17,25 +17,25 @@ //! ```rust //! use std::sync::Arc; //! use std::time::Duration; -//! use rhailib_worker::async_worker_impl::AsyncWorker; -//! use rhailib_worker::worker_trait::{spawn_worker, WorkerConfig}; -//! use rhailib_worker::engine::create_heromodels_engine; +//! use baobab_actor::async_actor_impl::AsyncActor; +//! use baobab_actor::actor_trait::{spawn_actor, ActorConfig}; +//! use baobab_actor::engine::create_heromodels_engine; //! use tokio::sync::mpsc; //! -//! let config = WorkerConfig::new( -//! "async_worker_1".to_string(), +//! let config = ActorConfig::new( +//! "async_actor_1".to_string(), //! "/path/to/db".to_string(), //! "redis://localhost:6379".to_string(), //! false, // preserve_tasks //! ).with_default_timeout(Duration::from_secs(300)); //! -//! let worker = Arc::new(AsyncWorker::new()); +//! let actor = Arc::new(AsyncActor::new()); //! let engine = create_heromodels_engine(); //! let (shutdown_tx, shutdown_rx) = mpsc::channel(1); //! -//! let handle = spawn_worker(worker, config, engine, shutdown_rx); +//! let handle = spawn_actor(actor, config, engine, shutdown_rx); //! -//! // Later, shutdown the worker +//! // Later, shutdown the actor //! shutdown_tx.send(()).await.unwrap(); //! handle.await.unwrap().unwrap(); //! 
``` @@ -52,7 +52,7 @@ use tokio::task::JoinHandle; use tokio::time::timeout; use crate::engine::eval_script; -use crate::worker_trait::{Worker, WorkerConfig}; +use crate::actor_trait::{Actor, ActorConfig}; use crate::initialize_redis_connection; /// Represents a running job with its handle and metadata @@ -63,22 +63,22 @@ struct RunningJob { started_at: std::time::Instant, } -/// Builder for AsyncWorker +/// Builder for AsyncActor #[derive(Debug, Default)] -pub struct AsyncWorkerBuilder { - worker_id: Option<String>, +pub struct AsyncActorBuilder { + actor_id: Option<String>, db_path: Option<String>, redis_url: Option<String>, default_timeout: Option<Duration>, } -impl AsyncWorkerBuilder { +impl AsyncActorBuilder { pub fn new() -> Self { Self::default() } - pub fn worker_id<S: Into<String>>(mut self, worker_id: S) -> Self { - self.worker_id = Some(worker_id.into()); + pub fn actor_id<S: Into<String>>(mut self, actor_id: S) -> Self { + self.actor_id = Some(actor_id.into()); self } @@ -97,9 +97,9 @@ impl AsyncWorkerBuilder { self } - pub fn build(self) -> Result { - Ok(AsyncWorker { - worker_id: self.worker_id.ok_or("worker_id is required")?, + pub fn build(self) -> Result { + Ok(AsyncActor { + actor_id: self.actor_id.ok_or("actor_id is required")?, db_path: self.db_path.ok_or("db_path is required")?, redis_url: self.redis_url.ok_or("redis_url is required")?, default_timeout: self.default_timeout.unwrap_or(Duration::from_secs(300)), @@ -108,20 +108,20 @@ } } -/// Asynchronous worker that processes jobs concurrently +/// Asynchronous actor that processes jobs concurrently #[derive(Debug, Clone)] -pub struct AsyncWorker { - pub worker_id: String, +pub struct AsyncActor { + pub actor_id: String, pub db_path: String, pub redis_url: String, pub default_timeout: Duration, running_jobs: Arc<Mutex<HashMap<String, RunningJob>>>, } -impl AsyncWorker { - /// Create a new AsyncWorkerBuilder - pub fn builder() -> AsyncWorkerBuilder { - AsyncWorkerBuilder::new() +impl AsyncActor { + /// Create a new AsyncActorBuilder + pub fn builder() -> AsyncActorBuilder { + AsyncActorBuilder::new() } /// Add a running job to the tracking map @@ -134,7 +134,7 @@ impl AsyncWorker { let mut jobs = self.running_jobs.lock().await; jobs.insert(job_id.clone(), running_job); - debug!("Async Worker: Added running job '{}'. Total running: {}", + debug!("Async Actor: Added running job '{}'. Total running: {}", job_id, jobs.len()); } @@ -143,7 +143,7 @@ impl AsyncWorker { let mut jobs = self.running_jobs.lock().await; if let Some(job) = jobs.remove(job_id) { let duration = job.started_at.elapsed(); - debug!("Async Worker: Removed completed job '{}' after {:?}. Remaining: {}", + debug!("Async Actor: Removed completed job '{}' after {:?}.
Remaining: {}", job_id, duration, jobs.len()); } } @@ -168,7 +168,7 @@ impl AsyncWorker { for job_id in to_remove { if let Some(job) = jobs.remove(&job_id) { let duration = job.started_at.elapsed(); - debug!("Async Worker: Cleaned up finished job '{}' after {:?}", + debug!("Async Actor: Cleaned up finished job '{}' after {:?}", job_id, duration); } } @@ -178,28 +178,28 @@ impl AsyncWorker { async fn execute_job_with_timeout( job: Job, engine: Engine, - worker_id: String, + actor_id: String, redis_url: String, job_timeout: Duration, ) { let job_id = job.id.clone(); - info!("Async Worker '{}', Job {}: Starting execution with timeout {:?}", - worker_id, job_id, job_timeout); + info!("Async Actor '{}', Job {}: Starting execution with timeout {:?}", + actor_id, job_id, job_timeout); // Create a new Redis connection for this job - let mut redis_conn = match initialize_redis_connection(&worker_id, &redis_url).await { + let mut redis_conn = match initialize_redis_connection(&actor_id, &redis_url).await { Ok(conn) => conn, Err(e) => { - error!("Async Worker '{}', Job {}: Failed to initialize Redis connection: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to initialize Redis connection: {}", + actor_id, job_id, e); return; } }; // Update job status to Started if let Err(e) = Job::update_status(&mut redis_conn, &job_id, JobStatus::Started).await { - error!("Async Worker '{}', Job {}: Failed to update status to Started: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to update status to Started: {}", + actor_id, job_id, e); return; } @@ -209,35 +209,35 @@ impl AsyncWorker { match eval_script(&engine, &job.script) { Ok(result) => { let result_str = format!("{:?}", result); - info!("Async Worker '{}', Job {}: Script executed successfully. Result: {}", - worker_id, job_id, result_str); + info!("Async Actor '{}', Job {}: Script executed successfully. 
Result: {}", + actor_id, job_id, result_str); // Update job with success result if let Err(e) = Job::set_result(&mut redis_conn, &job_id, &result_str).await { - error!("Async Worker '{}', Job {}: Failed to set result: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to set result: {}", + actor_id, job_id, e); return; } if let Err(e) = Job::update_status(&mut redis_conn, &job_id, JobStatus::Finished).await { - error!("Async Worker '{}', Job {}: Failed to update status to Finished: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to update status to Finished: {}", + actor_id, job_id, e); } } Err(e) => { let error_msg = format!("Script execution error: {}", e); - error!("Async Worker '{}', Job {}: {}", worker_id, job_id, error_msg); + error!("Async Actor '{}', Job {}: {}", actor_id, job_id, error_msg); // Update job with error if let Err(e) = Job::set_error(&mut redis_conn, &job_id, &error_msg).await { - error!("Async Worker '{}', Job {}: Failed to set error: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to set error: {}", + actor_id, job_id, e); return; } if let Err(e) = Job::update_status(&mut redis_conn, &job_id, JobStatus::Error).await { - error!("Async Worker '{}', Job {}: Failed to update status to Error: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to update status to Error: {}", + actor_id, job_id, e); } } } @@ -246,35 +246,35 @@ impl AsyncWorker { // Execute the script with timeout match timeout(job_timeout, script_task).await { Ok(()) => { - info!("Async Worker '{}', Job {}: Completed within timeout", worker_id, job_id); + info!("Async Actor '{}', Job {}: Completed within timeout", actor_id, job_id); } Err(_) => { - warn!("Async Worker '{}', Job {}: Timed out after {:?}, marking as error", - worker_id, job_id, job_timeout); + warn!("Async Actor '{}', Job {}: Timed out after {:?}, marking as error", + actor_id, job_id, job_timeout); let timeout_msg = format!("Job timed out after {:?}", job_timeout); if let Err(e) = Job::set_error(&mut redis_conn, &job_id, &timeout_msg).await { - error!("Async Worker '{}', Job {}: Failed to set timeout error: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to set timeout error: {}", + actor_id, job_id, e); } if let Err(e) = Job::update_status(&mut redis_conn, &job_id, JobStatus::Error).await { - error!("Async Worker '{}', Job {}: Failed to update status to Error after timeout: {}", - worker_id, job_id, e); + error!("Async Actor '{}', Job {}: Failed to update status to Error after timeout: {}", + actor_id, job_id, e); } } } - info!("Async Worker '{}', Job {}: Job processing completed", worker_id, job_id); + info!("Async Actor '{}', Job {}: Job processing completed", actor_id, job_id); } } -impl Default for AsyncWorker { +impl Default for AsyncActor { fn default() -> Self { - // Default AsyncWorker with placeholder values + // Default AsyncActor with placeholder values // In practice, use the builder pattern instead Self { - worker_id: "default_async_worker".to_string(), + actor_id: "default_async_actor".to_string(), db_path: "/tmp".to_string(), redis_url: "redis://localhost:6379".to_string(), default_timeout: Duration::from_secs(300), @@ -284,7 +284,7 @@ impl Default for AsyncWorker { } #[async_trait] -impl Worker for AsyncWorker { +impl Actor for AsyncActor { async fn process_job( &self, job: Job, @@ -292,22 +292,22 @@ impl Worker for AsyncWorker { _redis_conn: &mut redis::aio::MultiplexedConnection, ) { let job_id = 
job.id.clone(); - let worker_id = &self.worker_id.clone(); + let actor_id = &self.actor_id.clone(); // Determine timeout (use job-specific timeout if available, otherwise default) let job_timeout = if job.timeout.as_secs() > 0 { job.timeout } else { - self.default_timeout // Use worker's default timeout + self.default_timeout // Use actor's default timeout }; - info!("Async Worker '{}', Job {}: Spawning job execution task with timeout {:?}", - worker_id, job_id, job_timeout); + info!("Async Actor '{}', Job {}: Spawning job execution task with timeout {:?}", + actor_id, job_id, job_timeout); // Clone necessary data for the spawned task let job_id_clone = job_id.clone(); - let worker_id_clone = worker_id.clone(); - let worker_id_debug = worker_id.clone(); // Additional clone for debug statement + let actor_id_clone = actor_id.clone(); + let actor_id_debug = actor_id.clone(); // Additional clone for debug statement let job_id_debug = job_id.clone(); // Additional clone for debug statement let redis_url_clone = self.redis_url.clone(); let running_jobs_clone = Arc::clone(&self.running_jobs); @@ -317,7 +317,7 @@ impl Worker for AsyncWorker { Self::execute_job_with_timeout( job, engine, - worker_id_clone, + actor_id_clone, redis_url_clone, job_timeout, ).await; @@ -326,8 +326,8 @@ impl Worker for AsyncWorker { let mut jobs = running_jobs_clone.lock().await; if let Some(running_job) = jobs.remove(&job_id_clone) { let duration = running_job.started_at.elapsed(); - debug!("Async Worker '{}': Removed completed job '{}' after {:?}", - worker_id_debug, job_id_debug, duration); + debug!("Async Actor '{}': Removed completed job '{}' after {:?}", + actor_id_debug, job_id_debug, duration); } }); @@ -338,12 +338,12 @@ impl Worker for AsyncWorker { self.cleanup_finished_jobs().await; } - fn worker_type(&self) -> &'static str { + fn actor_type(&self) -> &'static str { "Async" } - fn worker_id(&self) -> &str { - &self.worker_id + fn actor_id(&self) -> &str { + &self.actor_id } fn redis_url(&self) -> &str { @@ -358,51 +358,51 @@ mod tests { use hero_job::ScriptType; #[tokio::test] - async fn test_async_worker_creation() { - let worker = AsyncWorker::new(); - assert_eq!(worker.worker_type(), "Async"); - assert_eq!(worker.running_job_count().await, 0); + async fn test_async_actor_creation() { + let actor = AsyncActor::new(); + assert_eq!(actor.actor_type(), "Async"); + assert_eq!(actor.running_job_count().await, 0); } #[tokio::test] - async fn test_async_worker_default() { - let worker = AsyncWorker::default(); - assert_eq!(worker.worker_type(), "Async"); + async fn test_async_actor_default() { + let actor = AsyncActor::default(); + assert_eq!(actor.actor_type(), "Async"); } #[tokio::test] - async fn test_async_worker_job_tracking() { - let worker = AsyncWorker::new(); + async fn test_async_actor_job_tracking() { + let actor = AsyncActor::new(); // Simulate adding a job let handle = tokio::spawn(async { tokio::time::sleep(Duration::from_millis(100)).await; }); - worker.add_running_job("job_1".to_string(), handle).await; - assert_eq!(worker.running_job_count().await, 1); + actor.add_running_job("job_1".to_string(), handle).await; + assert_eq!(actor.running_job_count().await, 1); // Wait for job to complete tokio::time::sleep(Duration::from_millis(200)).await; - worker.cleanup_finished_jobs().await; - assert_eq!(worker.running_job_count().await, 0); + actor.cleanup_finished_jobs().await; + assert_eq!(actor.running_job_count().await, 0); } #[tokio::test] - async fn test_async_worker_process_job_interface() { - let 
worker = AsyncWorker::new(); + async fn test_async_actor_process_job_interface() { + let actor = AsyncActor::new(); let engine = create_heromodels_engine(); // Create a simple test job let job = Job::new( "test_caller".to_string(), "test_context".to_string(), - r#"print("Hello from async worker test!"); 42"#.to_string(), + r#"print("Hello from async actor test!"); 42"#.to_string(), ScriptType::OSIS, ); - let config = WorkerConfig::new( - "test_async_worker".to_string(), + let config = ActorConfig::new( + "test_async_actor".to_string(), "/tmp".to_string(), "redis://localhost:6379".to_string(), false, @@ -412,9 +412,9 @@ mod tests { // In a real test environment, you'd need a Redis instance or mock // The process_job method should be callable (interface test) - // worker.process_job(job, engine, &mut redis_conn, &config).await; + // actor.process_job(job, engine, &mut redis_conn, &config).await; - // For now, just verify the worker was created successfully - assert_eq!(worker.worker_type(), "Async"); + // For now, just verify the actor was created successfully + assert_eq!(actor.actor_type(), "Async"); } } diff --git a/core/worker/src/config.rs b/_archive/core/actor/src/config.rs similarity index 73% rename from core/worker/src/config.rs rename to _archive/core/actor/src/config.rs index a1204e7..e1174db 100644 --- a/core/worker/src/config.rs +++ b/_archive/core/actor/src/config.rs @@ -1,15 +1,15 @@ -//! Worker Configuration Module - TOML-based configuration for Hero workers +//! Actor Configuration Module - TOML-based configuration for Hero actors use serde::{Deserialize, Serialize}; use std::fs; use std::path::Path; use std::time::Duration; -/// Worker configuration loaded from TOML file +/// Actor configuration loaded from TOML file #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct WorkerConfig { - /// Worker identification - pub worker_id: String, +pub struct ActorConfig { + /// Actor identification + pub actor_id: String, /// Redis connection URL pub redis_url: String, @@ -21,23 +21,23 @@ pub struct WorkerConfig { #[serde(default = "default_preserve_tasks")] pub preserve_tasks: bool, - /// Worker type configuration - pub worker_type: WorkerType, + /// Actor type configuration + pub actor_type: ActorType, /// Logging configuration #[serde(default)] pub logging: LoggingConfig, } -/// Worker type configuration +/// Actor type configuration #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] -pub enum WorkerType { - /// Synchronous worker configuration +pub enum ActorType { + /// Synchronous actor configuration #[serde(rename = "sync")] Sync, - /// Asynchronous worker configuration + /// Asynchronous actor configuration #[serde(rename = "async")] Async { /// Default timeout for jobs in seconds @@ -67,13 +67,13 @@ impl Default for LoggingConfig { } } -impl WorkerConfig { +impl ActorConfig { /// Load configuration from TOML file pub fn from_file>(path: P) -> Result { let content = fs::read_to_string(&path) .map_err(|e| ConfigError::IoError(format!("Failed to read config file: {}", e)))?; - let config: WorkerConfig = toml::from_str(&content) + let config: ActorConfig = toml::from_str(&content) .map_err(|e| ConfigError::ParseError(format!("Failed to parse TOML: {}", e)))?; config.validate()?; @@ -82,8 +82,8 @@ impl WorkerConfig { /// Validate the configuration fn validate(&self) -> Result<(), ConfigError> { - if self.worker_id.is_empty() { - return Err(ConfigError::ValidationError("worker_id cannot be empty".to_string())); + if self.actor_id.is_empty() { + return 
Err(ConfigError::ValidationError("actor_id cannot be empty".to_string())); } if self.redis_url.is_empty() { @@ -105,24 +105,24 @@ impl WorkerConfig { Ok(()) } - /// Get the default timeout duration for async workers + /// Get the default timeout duration for async actors pub fn get_default_timeout(&self) -> Option { - match &self.worker_type { - WorkerType::Sync => None, - WorkerType::Async { default_timeout_seconds } => { + match &self.actor_type { + ActorType::Sync => None, + ActorType::Async { default_timeout_seconds } => { Some(Duration::from_secs(*default_timeout_seconds)) } } } - /// Check if this is a sync worker configuration + /// Check if this is a sync actor configuration pub fn is_sync(&self) -> bool { - matches!(self.worker_type, WorkerType::Sync) + matches!(self.actor_type, ActorType::Sync) } - /// Check if this is an async worker configuration + /// Check if this is an async actor configuration pub fn is_async(&self) -> bool { - matches!(self.worker_type, WorkerType::Async { .. }) + matches!(self.actor_type, ActorType::Async { .. }) } } @@ -163,13 +163,13 @@ mod tests { use tempfile::NamedTempFile; #[test] - fn test_sync_worker_config() { + fn test_sync_actor_config() { let config_toml = r#" -worker_id = "sync_worker_1" +actor_id = "sync_actor_1" redis_url = "redis://localhost:6379" -db_path = "/tmp/worker_db" +db_path = "/tmp/actor_db" -[worker_type] +[actor_type] type = "sync" [logging] @@ -177,8 +177,8 @@ timestamps = false level = "debug" "#; - let config: WorkerConfig = toml::from_str(config_toml).unwrap(); - assert_eq!(config.worker_id, "sync_worker_1"); + let config: ActorConfig = toml::from_str(config_toml).unwrap(); + assert_eq!(config.actor_id, "sync_actor_1"); assert!(config.is_sync()); assert!(!config.is_async()); assert_eq!(config.get_default_timeout(), None); @@ -187,13 +187,13 @@ level = "debug" } #[test] - fn test_async_worker_config() { + fn test_async_actor_config() { let config_toml = r#" -worker_id = "async_worker_1" +actor_id = "async_actor_1" redis_url = "redis://localhost:6379" -db_path = "/tmp/worker_db" +db_path = "/tmp/actor_db" -[worker_type] +[actor_type] type = "async" default_timeout_seconds = 600 @@ -202,8 +202,8 @@ timestamps = true level = "info" "#; - let config: WorkerConfig = toml::from_str(config_toml).unwrap(); - assert_eq!(config.worker_id, "async_worker_1"); + let config: ActorConfig = toml::from_str(config_toml).unwrap(); + assert_eq!(config.actor_id, "async_actor_1"); assert!(!config.is_sync()); assert!(config.is_async()); assert_eq!(config.get_default_timeout(), Some(Duration::from_secs(600))); @@ -214,34 +214,34 @@ level = "info" #[test] fn test_config_from_file() { let config_toml = r#" -worker_id = "test_worker" +actor_id = "test_actor" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "sync" "#; let mut temp_file = NamedTempFile::new().unwrap(); temp_file.write_all(config_toml.as_bytes()).unwrap(); - let config = WorkerConfig::from_file(temp_file.path()).unwrap(); - assert_eq!(config.worker_id, "test_worker"); + let config = ActorConfig::from_file(temp_file.path()).unwrap(); + assert_eq!(config.actor_id, "test_actor"); assert!(config.is_sync()); } #[test] fn test_config_validation() { let config_toml = r#" -worker_id = "" +actor_id = "" redis_url = "redis://localhost:6379" db_path = "/tmp/test_db" -[worker_type] +[actor_type] type = "sync" "#; - let result: Result = toml::from_str(config_toml); + let result: Result = toml::from_str(config_toml); assert!(result.is_ok()); let config 
= result.unwrap(); diff --git a/core/worker/src/engine.rs b/_archive/core/actor/src/engine.rs similarity index 92% rename from core/worker/src/engine.rs rename to _archive/core/actor/src/engine.rs index 8b62595..5370bed 100644 --- a/core/worker/src/engine.rs +++ b/_archive/core/actor/src/engine.rs @@ -14,7 +14,7 @@ //! ## Quick Start //! //! ```rust -//! use rhailib_worker::engine::{create_heromodels_engine, eval_script}; +//! use baobab_actor::engine::{create_heromodels_engine, eval_script}; //! //! // Create a fully configured engine //! let engine = create_heromodels_engine(); @@ -40,7 +40,6 @@ //! - `biz`: Business operations and entities use rhai::{Engine, EvalAltResult, Scope, AST}; -use rhailib_dsl; use std::fs; use std::path::Path; @@ -67,7 +66,7 @@ use std::path::Path; /// # Example /// /// ```rust -/// use rhailib_worker::engine::create_heromodels_engine; +/// use baobab_actor::engine::create_heromodels_engine; /// /// let engine = create_heromodels_engine(); /// @@ -82,14 +81,14 @@ use std::path::Path; /// The engine is optimized for production use with reasonable defaults for /// operation limits, expression depth, and memory usage. For benchmarking /// or special use cases, you may want to adjust these limits after creation. -pub fn create_heromodels_engine() -> Engine { - let mut engine = Engine::new(); +// pub fn create_heromodels_engine() -> Engine { +// let mut engine = Engine::new(); - // Register all heromodels Rhai modules - rhailib_dsl::register_dsl_modules(&mut engine); +// // Register all heromodels Rhai modules +// baobab_dsl::register_dsl_modules(&mut engine); - engine -} +// engine +// } /// Evaluates a Rhai script string and returns the result. /// @@ -110,7 +109,7 @@ pub fn create_heromodels_engine() -> Engine { /// # Example /// /// ```rust -/// use rhailib_worker::engine::{create_heromodels_engine, eval_script}; +/// use baobab_actor::engine::{create_heromodels_engine, eval_script}; /// /// let engine = create_heromodels_engine(); /// let result = eval_script(&engine, r#" @@ -146,7 +145,7 @@ pub fn eval_script( /// # Example /// /// ```rust -/// use rhailib_worker::engine::{create_heromodels_engine, eval_file}; +/// use baobab_actor::engine::{create_heromodels_engine, eval_file}; /// use std::path::Path; /// /// let engine = create_heromodels_engine(); @@ -191,7 +190,7 @@ pub fn eval_file( /// # Example /// /// ```rust -/// use rhailib_worker::engine::{create_heromodels_engine, compile_script, run_ast}; +/// use baobab_actor::engine::{create_heromodels_engine, compile_script, run_ast}; /// use rhai::Scope; /// /// let engine = create_heromodels_engine(); @@ -233,7 +232,7 @@ pub fn compile_script(engine: &Engine, script: &str) -> Result, +pub struct SyncActorBuilder { + actor_id: Option, db_path: Option, redis_url: Option, preserve_tasks: bool, } -impl SyncWorkerBuilder { +impl SyncActorBuilder { pub fn new() -> Self { Self::default() } - pub fn worker_id>(mut self, worker_id: S) -> Self { - self.worker_id = Some(worker_id.into()); + pub fn actor_id>(mut self, actor_id: S) -> Self { + self.actor_id = Some(actor_id.into()); self } @@ -81,9 +81,9 @@ impl SyncWorkerBuilder { self } - pub fn build(self) -> Result { - Ok(SyncWorker { - worker_id: self.worker_id.ok_or("worker_id is required")?, + pub fn build(self) -> Result { + Ok(SyncActor { + actor_id: self.actor_id.ok_or("actor_id is required")?, db_path: self.db_path.ok_or("db_path is required")?, redis_url: self.redis_url.ok_or("redis_url is required")?, preserve_tasks: self.preserve_tasks, @@ -91,28 +91,28 @@ 
impl SyncWorkerBuilder { } } -/// Synchronous worker that processes jobs sequentially +/// Synchronous actor that processes jobs sequentially #[derive(Debug, Clone)] -pub struct SyncWorker { - pub worker_id: String, +pub struct SyncActor { + pub actor_id: String, pub db_path: String, pub redis_url: String, pub preserve_tasks: bool, } -impl SyncWorker { - /// Create a new SyncWorkerBuilder - pub fn builder() -> SyncWorkerBuilder { - SyncWorkerBuilder::new() +impl SyncActor { + /// Create a new SyncActorBuilder + pub fn builder() -> SyncActorBuilder { + SyncActorBuilder::new() } } -impl Default for SyncWorker { +impl Default for SyncActor { fn default() -> Self { - // Default SyncWorker with placeholder values + // Default SyncActor with placeholder values // In practice, use the builder pattern instead Self { - worker_id: "default_sync_worker".to_string(), + actor_id: "default_sync_actor".to_string(), db_path: "/tmp".to_string(), redis_url: "redis://localhost:6379".to_string(), preserve_tasks: false, @@ -121,7 +121,7 @@ impl Default for SyncWorker { } #[async_trait] -impl Worker for SyncWorker { +impl Actor for SyncActor { async fn process_job( &self, job: Job, @@ -129,15 +129,15 @@ impl Worker for SyncWorker { redis_conn: &mut redis::aio::MultiplexedConnection, ) { let job_id = &job.id; - let worker_id = &self.worker_id; + let actor_id = &self.actor_id; let db_path = &self.db_path; - info!("Sync Worker '{}', Job {}: Starting sequential processing", worker_id, job_id); + info!("Sync Actor '{}', Job {}: Starting sequential processing", actor_id, job_id); // Update job status to Started if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Started).await { - error!("Sync Worker '{}', Job {}: Failed to update status to Started: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to update status to Started: {}", + actor_id, job_id, e); return; } @@ -145,35 +145,35 @@ impl Worker for SyncWorker { match eval_script(&engine, &job.script) { Ok(result) => { let result_str = format!("{:?}", result); - info!("Sync Worker '{}', Job {}: Script executed successfully. Result: {}", - worker_id, job_id, result_str); + info!("Sync Actor '{}', Job {}: Script executed successfully. 
Result: {}", + actor_id, job_id, result_str); // Update job with success result if let Err(e) = Job::set_result(redis_conn, job_id, &result_str).await { - error!("Sync Worker '{}', Job {}: Failed to set result: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to set result: {}", + actor_id, job_id, e); return; } if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Finished).await { - error!("Sync Worker '{}', Job {}: Failed to update status to Finished: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to update status to Finished: {}", + actor_id, job_id, e); } } Err(e) => { let error_msg = format!("Script execution error: {}", e); - error!("Sync Worker '{}', Job {}: {}", worker_id, job_id, error_msg); + error!("Sync Actor '{}', Job {}: {}", actor_id, job_id, error_msg); // Update job with error if let Err(e) = Job::set_error(redis_conn, job_id, &error_msg).await { - error!("Sync Worker '{}', Job {}: Failed to set error: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to set error: {}", + actor_id, job_id, e); return; } if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Error).await { - error!("Sync Worker '{}', Job {}: Failed to update status to Error: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to update status to Error: {}", + actor_id, job_id, e); } } } @@ -181,22 +181,22 @@ impl Worker for SyncWorker { // Cleanup job if preserve_tasks is false if !self.preserve_tasks { if let Err(e) = Job::delete_from_redis(redis_conn, job_id).await { - error!("Sync Worker '{}', Job {}: Failed to cleanup job: {}", - worker_id, job_id, e); + error!("Sync Actor '{}', Job {}: Failed to cleanup job: {}", + actor_id, job_id, e); } else { - debug!("Sync Worker '{}', Job {}: Job cleaned up from Redis", worker_id, job_id); + debug!("Sync Actor '{}', Job {}: Job cleaned up from Redis", actor_id, job_id); } } - info!("Sync Worker '{}', Job {}: Sequential processing completed", worker_id, job_id); + info!("Sync Actor '{}', Job {}: Sequential processing completed", actor_id, job_id); } - fn worker_type(&self) -> &'static str { + fn actor_type(&self) -> &'static str { "Sync" } - fn worker_id(&self) -> &str { - &self.worker_id + fn actor_id(&self) -> &str { + &self.actor_id } fn redis_url(&self) -> &str { @@ -212,32 +212,32 @@ mod tests { use std::time::Duration; #[tokio::test] - async fn test_sync_worker_creation() { - let worker = SyncWorker::new(); - assert_eq!(worker.worker_type(), "Sync"); + async fn test_sync_actor_creation() { + let actor = SyncActor::new(); + assert_eq!(actor.actor_type(), "Sync"); } #[tokio::test] - async fn test_sync_worker_default() { - let worker = SyncWorker::default(); - assert_eq!(worker.worker_type(), "Sync"); + async fn test_sync_actor_default() { + let actor = SyncActor::default(); + assert_eq!(actor.actor_type(), "Sync"); } #[tokio::test] - async fn test_sync_worker_process_job_interface() { - let worker = SyncWorker::new(); + async fn test_sync_actor_process_job_interface() { + let actor = SyncActor::new(); let engine = create_heromodels_engine(); // Create a simple test job let job = Job::new( "test_caller".to_string(), "test_context".to_string(), - r#"print("Hello from sync worker test!"); 42"#.to_string(), + r#"print("Hello from sync actor test!"); 42"#.to_string(), ScriptType::OSIS, ); - let config = WorkerConfig::new( - "test_sync_worker".to_string(), + let config = ActorConfig::new( + "test_sync_actor".to_string(), "/tmp".to_string(), 
"redis://localhost:6379".to_string(), false, @@ -247,9 +247,9 @@ mod tests { // In a real test environment, you'd need a Redis instance or mock // The process_job method should be callable (interface test) - // worker.process_job(job, engine, &mut redis_conn, &config).await; + // actor.process_job(job, engine, &mut redis_conn, &config).await; - // For now, just verify the worker was created successfully - assert_eq!(worker.worker_type(), "Sync"); + // For now, just verify the actor was created successfully + assert_eq!(actor.actor_type(), "Sync"); } } diff --git a/_archive/core/examples/Cargo.toml b/_archive/core/examples/Cargo.toml index ded565e..af03dec 100644 --- a/_archive/core/examples/Cargo.toml +++ b/_archive/core/examples/Cargo.toml @@ -4,8 +4,8 @@ version = "0.1.0" edition = "2021" [[bin]] -name = "supervisor_worker_demo" -path = "supervisor_worker_demo.rs" +name = "supervisor_actor_demo" +path = "supervisor_actor_demo.rs" [dependencies] hero_supervisor = { path = "../supervisor" } diff --git a/_archive/core/examples/supervisor_worker_demo.rs b/_archive/core/examples/supervisor_worker_demo.rs index f1826a3..3f0dbdc 100644 --- a/_archive/core/examples/supervisor_worker_demo.rs +++ b/_archive/core/examples/supervisor_worker_demo.rs @@ -1,8 +1,8 @@ -//! Hero Supervisor Worker Demo +//! Hero Supervisor Actor Demo //! //! This example demonstrates the new Hero Supervisor API with: //! - Synchronous build() method -//! - Asynchronous start_workers() method +//! - Asynchronous start_actors() method //! - Proper cleanup on program exit //! - Signal handling for graceful shutdown @@ -18,21 +18,21 @@ async fn run_supervisor_demo() -> Result<(), Box> { // Build supervisor synchronously (no .await needed) let supervisor = SupervisorBuilder::new() .redis_url("redis://127.0.0.1:6379") - .osis_worker("/usr/local/bin/osis_worker") - .sal_worker("/usr/local/bin/sal_worker") - .v_worker("/usr/local/bin/v_worker") - .python_worker("/usr/local/bin/python_worker") - .worker_env_var("REDIS_URL", "redis://127.0.0.1:6379") - .worker_env_var("LOG_LEVEL", "info") + .osis_actor("/usr/local/bin/osis_actor") + .sal_actor("/usr/local/bin/sal_actor") + .v_actor("/usr/local/bin/v_actor") + .python_actor("/usr/local/bin/python_actor") + .actor_env_var("REDIS_URL", "redis://127.0.0.1:6379") + .actor_env_var("LOG_LEVEL", "info") .build()?; println!("{}", "โœ… Supervisor built successfully!".green()); - println!("{}", "Starting workers asynchronously...".yellow()); + println!("{}", "Starting actors asynchronously...".yellow()); - // Start workers asynchronously - supervisor.start_workers().await?; + // Start actors asynchronously + supervisor.start_actors().await?; - println!("{}", "โœ… All workers started successfully!".green()); + println!("{}", "โœ… All actors started successfully!".green()); // Demonstrate job creation and execution println!("{}", "\n๐Ÿ“‹ Creating and running test jobs...".cyan().bold()); @@ -43,7 +43,7 @@ async fn run_supervisor_demo() -> Result<(), Box> { // Submit and run the job match supervisor.new_job() .script_type(ScriptType::OSIS) - .script("println('Hello from OSIS worker!')") + .script("println('Hello from OSIS actor!')") .timeout(Duration::from_secs(30)) .await_response().await { Ok(result) => { @@ -60,7 +60,7 @@ async fn run_supervisor_demo() -> Result<(), Box> { println!("{}", "\n๐Ÿ›‘ Shutdown signal received, cleaning up...".yellow().bold()); - // Cleanup workers before exit + // Cleanup actors before exit supervisor.cleanup_and_shutdown().await?; println!("{}", "โœ… Cleanup 
completed. Goodbye!".green().bold()); diff --git a/_archive/core/supervisor/cmd/README.md b/_archive/core/supervisor/cmd/README.md index ad664e5..e1dbdaa 100644 --- a/_archive/core/supervisor/cmd/README.md +++ b/_archive/core/supervisor/cmd/README.md @@ -25,20 +25,20 @@ Where config is toml file with the following structure: [global] redis_url = "redis://localhost:6379" -[osis_worker] -binary_path = "/path/to/osis_worker" +[osis_actor] +binary_path = "/path/to/osis_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[sal_worker] -binary_path = "/path/to/sal_worker" +[sal_actor] +binary_path = "/path/to/sal_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[v_worker] -binary_path = "/path/to/v_worker" +[v_actor] +binary_path = "/path/to/v_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[python_worker] -binary_path = "/path/to/python_worker" +[python_actor] +binary_path = "/path/to/python_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } ``` @@ -46,7 +46,7 @@ env_vars = { "VAR1" = "value1", "VAR2" = "value2" } Lets have verbosity settings etc. CLI Offers a few commands: -workers: +actors: start stop restart @@ -63,4 +63,4 @@ jobs: logs list -repl: you can enter interactive mode to run scripts, however predefine caller_id, context_id and worker type so supervisor dispathces jobs accordingly \ No newline at end of file +repl: you can enter interactive mode to run scripts, however predefine caller_id, context_id and actor type so supervisor dispathces jobs accordingly \ No newline at end of file diff --git a/_archive/core/supervisor/cmd/hive_supervisor_tui_safe.rs b/_archive/core/supervisor/cmd/hive_supervisor_tui_safe.rs index edb1344..8d567b3 100644 --- a/_archive/core/supervisor/cmd/hive_supervisor_tui_safe.rs +++ b/_archive/core/supervisor/cmd/hive_supervisor_tui_safe.rs @@ -43,7 +43,7 @@ struct Args { struct Config { global: GlobalConfig, #[serde(flatten)] - workers: std::collections::HashMap, + actors: std::collections::HashMap, } #[derive(Debug, Deserialize)] @@ -52,7 +52,7 @@ struct GlobalConfig { } #[derive(Debug, Deserialize)] -struct WorkerConfigToml { +struct ActorConfigToml { binary_path: String, env_vars: Option>, } @@ -60,20 +60,20 @@ struct WorkerConfigToml { #[derive(Debug, Clone, PartialEq)] enum TabId { Dashboard, - Workers, + Actors, Jobs, Logs, } impl TabId { fn all() -> Vec { - vec![TabId::Dashboard, TabId::Workers, TabId::Jobs, TabId::Logs] + vec![TabId::Dashboard, TabId::Actors, TabId::Jobs, TabId::Logs] } fn title(&self) -> &str { match self { TabId::Dashboard => "Dashboard", - TabId::Workers => "Workers", + TabId::Actors => "Actors", TabId::Jobs => "Jobs", TabId::Logs => "Logs", } @@ -167,7 +167,7 @@ fn render_ui(f: &mut Frame, app: &mut App) { // Render content based on selected tab match app.current_tab { TabId::Dashboard => render_dashboard(f, chunks[1], app), - TabId::Workers => render_workers(f, chunks[1], app), + TabId::Actors => render_actors(f, chunks[1], app), TabId::Jobs => render_jobs(f, chunks[1], app), TabId::Logs => render_logs(f, chunks[1], app), } @@ -180,7 +180,7 @@ fn render_dashboard(f: &mut Frame, area: Rect, app: &App) { .split(area); // Status overview - supervisor is already running if we get here - let status_text = "Status: โœ“ Running\nWorkers: Started successfully\nJobs: Ready for processing\n\nPress 'q' to quit, Tab to navigate"; + let status_text = "Status: โœ“ Running\nActors: Started successfully\nJobs: Ready for processing\n\nPress 'q' to quit, Tab to navigate"; let status_paragraph = 
Paragraph::new(status_text) .block(Block::default().borders(Borders::ALL).title("System Status")) @@ -202,9 +202,9 @@ fn render_dashboard(f: &mut Frame, area: Rect, app: &App) { f.render_widget(logs_list, chunks[1]); } -fn render_workers(f: &mut Frame, area: Rect, _app: &App) { - let paragraph = Paragraph::new("Workers tab - Status checking not implemented yet to avoid system issues") - .block(Block::default().borders(Borders::ALL).title("Workers")) +fn render_actors(f: &mut Frame, area: Rect, _app: &App) { + let paragraph = Paragraph::new("Actors tab - Status checking not implemented yet to avoid system issues") + .block(Block::default().borders(Borders::ALL).title("Actors")) .wrap(Wrap { trim: true }); f.render_widget(paragraph, area); @@ -305,18 +305,18 @@ async fn main() -> Result<()> { let mut builder = SupervisorBuilder::new() .redis_url(&config.global.redis_url); - for (worker_name, worker_config) in &config.workers { - match worker_name.as_str() { - "osis_worker" => builder = builder.osis_worker(&worker_config.binary_path), - "sal_worker" => builder = builder.sal_worker(&worker_config.binary_path), - "v_worker" => builder = builder.v_worker(&worker_config.binary_path), - "python_worker" => builder = builder.python_worker(&worker_config.binary_path), - _ => log::warn!("Unknown worker type: {}", worker_name), + for (actor_name, actor_config) in &config.actors { + match actor_name.as_str() { + "osis_actor" => builder = builder.osis_actor(&actor_config.binary_path), + "sal_actor" => builder = builder.sal_actor(&actor_config.binary_path), + "v_actor" => builder = builder.v_actor(&actor_config.binary_path), + "python_actor" => builder = builder.python_actor(&actor_config.binary_path), + _ => log::warn!("Unknown actor type: {}", actor_name), } - if let Some(env_vars) = &worker_config.env_vars { + if let Some(env_vars) = &actor_config.env_vars { for (key, value) in env_vars { - builder = builder.worker_env_var(key, value); + builder = builder.actor_env_var(key, value); } } } @@ -325,11 +325,11 @@ async fn main() -> Result<()> { .map_err(|e| anyhow::anyhow!("Failed to build supervisor: {}", e))?); info!("โœ“ Supervisor built successfully"); - // Step 4: Start supervisor and workers - info!("Step 4/4: Starting supervisor and workers..."); - supervisor.start_workers().await - .map_err(|e| anyhow::anyhow!("Failed to start workers: {}", e))?; - info!("โœ“ All workers started successfully"); + // Step 4: Start supervisor and actors + info!("Step 4/4: Starting supervisor and actors..."); + supervisor.start_actors().await + .map_err(|e| anyhow::anyhow!("Failed to start actors: {}", e))?; + info!("โœ“ All actors started successfully"); // All initialization successful - now start TUI info!("Initialization complete - starting TUI..."); diff --git a/_archive/core/supervisor/cmd/supervisor.rs b/_archive/core/supervisor/cmd/supervisor.rs index 2b1564d..970da2b 100644 --- a/_archive/core/supervisor/cmd/supervisor.rs +++ b/_archive/core/supervisor/cmd/supervisor.rs @@ -73,7 +73,7 @@ async fn main() -> Result<(), Box> { // Validate script type match args.script_type.to_lowercase().as_str() { "osis" | "sal" | "v" | "python" => { - // Valid script types - no worker validation needed since we use hardcoded queues + // Valid script types - no actor validation needed since we use hardcoded queues } _ => { error!("โŒ Invalid script type: {}. 
Valid types: osis, sal, v, python", args.script_type); @@ -89,7 +89,7 @@ async fn main() -> Result<(), Box> { info!(" Script Type: {}", args.script_type); info!(" Redis URL: {}", args.redis_url); info!(" Timeout: {}s", args.timeout); - info!(" Using hardcoded worker queues for script type: {}", args.script_type); + info!(" Using hardcoded actor queues for script type: {}", args.script_type); info!(""); } diff --git a/_archive/core/worker/examples/osis_config.toml b/_archive/core/worker/examples/osis_config.toml deleted file mode 100644 index 9f9ecc1..0000000 --- a/_archive/core/worker/examples/osis_config.toml +++ /dev/null @@ -1,14 +0,0 @@ -# OSIS Worker Configuration -# Synchronous worker for system-level operations - -worker_id = "osis_worker_1" -redis_url = "redis://localhost:6379" -db_path = "/tmp/osis_worker_db" -preserve_tasks = false - -[worker_type] -type = "sync" - -[logging] -timestamps = true -level = "info" diff --git a/_archive/core/worker/examples/system_config.toml b/_archive/core/worker/examples/system_config.toml deleted file mode 100644 index 7d19ac6..0000000 --- a/_archive/core/worker/examples/system_config.toml +++ /dev/null @@ -1,15 +0,0 @@ -# System Worker Configuration -# Asynchronous worker for high-throughput concurrent processing - -worker_id = "system_worker_1" -redis_url = "redis://localhost:6379" -db_path = "/tmp/system_worker_db" -preserve_tasks = false - -[worker_type] -type = "async" -default_timeout_seconds = 300 # 5 minutes - -[logging] -timestamps = true -level = "info" diff --git a/cmd/config.toml b/cmd/config.toml index e03523b..fb23925 100644 --- a/cmd/config.toml +++ b/cmd/config.toml @@ -21,8 +21,7 @@ tls = false # "users" = ["04ghi789...", "04jkl012..."] # "ws" = [] # Public circle (no auth required) -# OSIS Worker Configuration +# OSIS Actor Configuration # Handles OSIS (HeroScript) execution -[osis_worker] +[osis_actor] binary_path = "../target/debug/osis" -env_vars = { "RUST_LOG" = "info", "WORKER_TYPE" = "osis", "MAX_CONCURRENT_JOBS" = "5" } diff --git a/cmd/main.rs b/cmd/main.rs index c999a30..1cbf6db 100644 --- a/cmd/main.rs +++ b/cmd/main.rs @@ -37,17 +37,17 @@ async fn main() -> Result<(), Box> { // Wrap supervisor in Arc for sharing across tasks let supervisor = Arc::new(supervisor); - // Extract worker configurations from TOML config - let worker_configs = supervisor.get_worker_configs()?; - info!("Loaded {} worker configurations from TOML", worker_configs.len()); + // Extract actor configurations from TOML config + let actor_configs = supervisor.get_actor_configs()?; + info!("Loaded {} actor configurations from TOML", actor_configs.len()); // Spawn the background lifecycle manager with 5-minute health check interval let health_check_interval = Duration::from_secs(5 * 60); // 5 minutes - let mut lifecycle_handle = supervisor.clone().spawn_lifecycle_manager(worker_configs, health_check_interval); + let mut lifecycle_handle = supervisor.clone().spawn_lifecycle_manager(actor_configs, health_check_interval); info!("Hero Supervisor started successfully!"); info!("Background lifecycle manager is running with 5-minute health checks."); - info!("Workers are being monitored and will be automatically restarted if they fail."); + info!("Actors are being monitored and will be automatically restarted if they fail."); // Start WebSocket server for job dispatching info!("Starting WebSocket server for job dispatching..."); diff --git a/core/worker/.DS_Store b/core/actor/.DS_Store similarity index 100% rename from core/worker/.DS_Store rename to 
core/actor/.DS_Store diff --git a/core/actor/.gitignore b/core/actor/.gitignore new file mode 100644 index 0000000..c6a536c --- /dev/null +++ b/core/actor/.gitignore @@ -0,0 +1,2 @@ +/target +actor_rhai_temp_db \ No newline at end of file diff --git a/core/worker/Cargo.lock b/core/actor/Cargo.lock similarity index 99% rename from core/worker/Cargo.lock rename to core/actor/Cargo.lock index 1bac0b4..a35c259 100644 --- a/core/worker/Cargo.lock +++ b/core/actor/Cargo.lock @@ -1302,7 +1302,7 @@ dependencies = [ ] [[package]] -name = "worker" +name = "actor" version = "0.1.0" dependencies = [ "chrono", diff --git a/core/worker/Cargo.toml b/core/actor/Cargo.toml similarity index 88% rename from core/worker/Cargo.toml rename to core/actor/Cargo.toml index 51f9df1..a384d32 100644 --- a/core/worker/Cargo.toml +++ b/core/actor/Cargo.toml @@ -1,10 +1,10 @@ [package] -name = "rhailib_worker" +name = "baobab_actor" version = "0.1.0" edition = "2021" [lib] -name = "rhailib_worker" # Can be different from package name, or same +name = "baobab_actor" # Can be different from package name, or same path = "src/lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -29,7 +29,6 @@ hero_job = { path = "../job" } heromodels = { git = "https://git.ourworld.tf/herocode/db.git" } heromodels_core = { git = "https://git.ourworld.tf/herocode/db.git" } heromodels-derive = { git = "https://git.ourworld.tf/herocode/db.git" } -rhailib_dsl = { git = "https://git.ourworld.tf/herocode/rhailib.git" } [features] default = ["calendar", "finance"] diff --git a/core/worker/README.md b/core/actor/README.md similarity index 53% rename from core/worker/README.md rename to core/actor/README.md index fa20ab0..c4deecd 100644 --- a/core/worker/README.md +++ b/core/actor/README.md @@ -1,6 +1,8 @@ -# Rhai Worker +# Actor -The `rhai_worker` crate implements a standalone worker service that listens for Rhai script execution tasks from a Redis queue, executes them, and posts results back to Redis. It is designed to be spawned as a separate OS process by an orchestrator like the `launcher` crate. +The `actor` crate defines the `Actor` trait. + +It also implements a standalone actor service that listens for Rhai script execution tasks from a Redis queue, executes them, and posts results back to Redis. It is designed to be spawned as a separate OS process by an orchestrator like the `launcher` crate. ## Features @@ -8,61 +10,59 @@ The `rhai_worker` crate implements a standalone worker service that listens for - **Rhai Script Execution**: Executes Rhai scripts retrieved from Redis based on task IDs. - **Task State Management**: Updates task status (`processing`, `completed`, `error`) and stores results in Redis hashes. - **Script Scope Injection**: Automatically injects two important constants into the Rhai script's scope: - - `CONTEXT_ID`: The public key of the worker's own circle. + - `CONTEXT_ID`: The public key of the actor's own circle. - `CALLER_ID`: The public key of the entity that requested the script execution. - **Asynchronous Operations**: Built with `tokio` for non-blocking Redis communication. - **Graceful Error Handling**: Captures errors during script execution and stores them for the client. ## Core Components -- **`worker_lib` (Library Crate)**: +- **`actor_lib` (Library Crate)**: - **`Args`**: A struct (using `clap`) for parsing command-line arguments: `--redis-url` and `--circle-public-key`. 
- - **`run_worker_loop(engine: Engine, args: Args)`**: The main asynchronous function that: + - **`run_actor_loop(engine: Engine, args: Args)`**: The main asynchronous function that: - Connects to Redis. - Continuously polls the designated Redis queue (`rhai_tasks:`) using `BLPOP`. - Upon receiving a `task_id`, it fetches the task details from a Redis hash. - It injects `CALLER_ID` and `CONTEXT_ID` into the script's scope. - It executes the script and updates the task status in Redis with the output or error. -- **`worker` (Binary Crate - `cmd/worker.rs`)**: - - The main executable entry point. It parses command-line arguments, initializes a Rhai engine, and invokes `run_worker_loop`. ## How It Works -1. The worker executable is launched by an external process (e.g., `launcher`), which passes the required command-line arguments. +1. The actor executable is launched by an external process (e.g., `launcher`), which passes the required command-line arguments. ```bash # This is typically done programmatically by a parent process. - /path/to/worker --redis-url redis://127.0.0.1/ --circle-public-key 02...abc + /path/to/actor --redis-url redis://127.0.0.1/ --circle-public-key 02...abc ``` -2. The `run_worker_loop` connects to Redis and starts listening to its designated task queue (e.g., `rhai_tasks:02...abc`). +2. The `run_actor_loop` connects to Redis and starts listening to its designated task queue (e.g., `rhai_tasks:02...abc`). 3. A `rhai_supervisor` submits a task by pushing a `task_id` to this queue and storing the script and other details in a Redis hash. -4. The worker's `BLPOP` command picks up the `task_id`. -5. The worker retrieves the script from the corresponding `rhai_task_details:` hash. +4. The actor's `BLPOP` command picks up the `task_id`. +5. The actor retrieves the script from the corresponding `rhai_task_details:` hash. 6. It updates the task's status to "processing". 7. The Rhai script is executed within a scope that contains both `CONTEXT_ID` and `CALLER_ID`. 8. After execution, the status is updated to "completed" (with output) or "error" (with an error message). -9. The worker then goes back to listening for the next task. +9. The actor then goes back to listening for the next task. ## Prerequisites -- A running Redis instance accessible by the worker. -- An orchestrator process (like `launcher`) to spawn the worker. +- A running Redis instance accessible by the actor. +- An orchestrator process (like `launcher`) to spawn the actor. - A `rhai_supervisor` (or another system) to populate the Redis queues. ## Building and Running -The worker is intended to be built as a dependency and run by another program. +The actor is intended to be built as a dependency and run by another program. -1. **Build the worker:** +1. **Build the actor:** ```bash - # From the root of the rhailib project - cargo build --package worker + # From the root of the baobab project + cargo build --package actor ``` - The binary will be located at `target/debug/worker`. + The binary will be located at `target/debug/actor`. -2. **Running the worker:** - The worker is not typically run manually. The `launcher` crate is responsible for spawning it with the correct arguments. If you need to run it manually for testing, you must provide the required arguments: +2. **Running the actor:** + The actor is not typically run manually. The `launcher` crate is responsible for spawning it with the correct arguments. 
If you need to run it manually for testing, you must provide the required arguments: ```bash - ./target/debug/worker --redis-url redis://127.0.0.1/ --circle-public-key + ./target/debug/actor --redis-url redis://127.0.0.1/ --circle-public-key ``` ## Dependencies diff --git a/core/worker/docs/ARCHITECTURE.md b/core/actor/docs/ARCHITECTURE.md similarity index 66% rename from core/worker/docs/ARCHITECTURE.md rename to core/actor/docs/ARCHITECTURE.md index 47ce958..f529514 100644 --- a/core/worker/docs/ARCHITECTURE.md +++ b/core/actor/docs/ARCHITECTURE.md @@ -1,12 +1,12 @@ -# Architecture of the `rhailib_worker` Crate +# Architecture of the `baobab_actor` Crate -The `rhailib_worker` crate implements a distributed task execution system for Rhai scripts, providing scalable, reliable script processing through Redis-based task queues. Workers are decoupled from contexts, allowing a single worker to process tasks for multiple contexts (circles). +The `baobab_actor` crate implements a distributed task execution system for Rhai scripts, providing scalable, reliable script processing through Redis-based task queues. Actors are decoupled from contexts, allowing a single actor to process tasks for multiple contexts (circles). ## Core Architecture ```mermaid graph TD - A[Worker Process] --> B[Task Queue Processing] + A[Actor Process] --> B[Task Queue Processing] A --> C[Script Execution Engine] A --> D[Result Management] @@ -31,12 +31,12 @@ graph TD - **Result Handling**: Comprehensive result and error management ### Engine Integration -- **Rhailib Engine**: Full integration with rhailib_engine for DSL access +- **baobab Engine**: Full integration with baobab_engine for DSL access - **Context Injection**: Proper authentication and database context setup - **Security**: Isolated execution environment with access controls ### Scalability Features -- **Horizontal Scaling**: Multiple worker instances for load distribution +- **Horizontal Scaling**: Multiple actor instances for load distribution - **Queue-based Architecture**: Reliable task distribution via Redis - **Fault Tolerance**: Robust error handling and recovery mechanisms @@ -50,4 +50,4 @@ graph TD ## Deployment Patterns -Workers can be deployed as standalone processes, containerized services, or embedded components, providing flexibility for various deployment scenarios from development to production. \ No newline at end of file +Actors can be deployed as standalone processes, containerized services, or embedded components, providing flexibility for various deployment scenarios from development to production. \ No newline at end of file diff --git a/core/worker/src/worker_trait.rs b/core/actor/src/actor_trait.rs similarity index 54% rename from core/worker/src/worker_trait.rs rename to core/actor/src/actor_trait.rs index bdabe1f..e8924f0 100644 --- a/core/worker/src/worker_trait.rs +++ b/core/actor/src/actor_trait.rs @@ -1,16 +1,16 @@ -//! # Worker Trait Abstraction +//! # Actor Trait Abstraction //! -//! This module provides a trait-based abstraction for Rhai workers that eliminates -//! code duplication between synchronous and asynchronous worker implementations. +//! This module provides a trait-based abstraction for Rhai actors that eliminates +//! code duplication between synchronous and asynchronous actor implementations. //! -//! The `Worker` trait defines the common interface and behavior, while specific +//! The `Actor` trait defines the common interface and behavior, while specific //! 
implementations handle job processing differently (sync vs async). //! //! ## Architecture //! //! ```text //! โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -//! โ”‚ SyncWorker โ”‚ โ”‚ AsyncWorker โ”‚ +//! โ”‚ SyncActor โ”‚ โ”‚ AsyncActor โ”‚ //! โ”‚ โ”‚ โ”‚ โ”‚ //! โ”‚ process_job() โ”‚ โ”‚ process_job() โ”‚ //! โ”‚ (sequential) โ”‚ โ”‚ (concurrent) โ”‚ @@ -19,7 +19,7 @@ //! โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ //! โ”‚ //! โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -//! โ”‚ Worker Trait โ”‚ +//! โ”‚ Actor Trait โ”‚ //! โ”‚ โ”‚ //! โ”‚ spawn() โ”‚ //! โ”‚ config โ”‚ @@ -38,26 +38,26 @@ use tokio::task::JoinHandle; use crate::{initialize_redis_connection, NAMESPACE_PREFIX, BLPOP_TIMEOUT_SECONDS}; -/// Configuration for worker instances +/// Configuration for actor instances #[derive(Debug, Clone)] -pub struct WorkerConfig { - pub worker_id: String, +pub struct ActorConfig { + pub actor_id: String, pub db_path: String, pub redis_url: String, pub preserve_tasks: bool, - pub default_timeout: Option, // Only used by async workers + pub default_timeout: Option, // Only used by async actors } -impl WorkerConfig { - /// Create a new worker configuration +impl ActorConfig { + /// Create a new actor configuration pub fn new( - worker_id: String, + actor_id: String, db_path: String, redis_url: String, preserve_tasks: bool, ) -> Self { Self { - worker_id, + actor_id, db_path, redis_url, preserve_tasks, @@ -65,164 +65,164 @@ impl WorkerConfig { } } - /// Set default timeout for async workers + /// Set default timeout for async actors pub fn with_default_timeout(mut self, timeout: Duration) -> Self { self.default_timeout = Some(timeout); self } } -/// Trait defining the common interface for Rhai workers +/// Trait defining the common interface for Rhai actors /// /// This trait abstracts the common functionality between synchronous and -/// asynchronous workers, allowing them to share the same spawn logic and +/// asynchronous actors, allowing them to share the same spawn logic and /// Redis polling loop while implementing different job processing strategies. 
#[async_trait::async_trait] -pub trait Worker: Send + Sync + 'static { +pub trait Actor: Send + Sync + 'static { /// Process a single job /// - /// This is the core method that differentiates worker implementations: - /// - Sync workers process jobs sequentially, one at a time - /// - Async workers spawn concurrent tasks for each job + /// This is the core method that differentiates actor implementations: + /// - Sync actors process jobs sequentially, one at a time + /// - Async actors spawn concurrent tasks for each job /// /// # Arguments /// /// * `job` - The job to process - /// * `engine` - Rhai engine for script execution /// * `redis_conn` - Redis connection for status updates + /// + /// Note: The engine is now owned by the actor implementation as a field async fn process_job( &self, job: Job, - engine: Engine, redis_conn: &mut redis::aio::MultiplexedConnection, ); - /// Get the worker type name for logging - fn worker_type(&self) -> &'static str; + /// Get the actor type name for logging + fn actor_type(&self) -> &'static str; - /// Get worker ID for this worker instance - fn worker_id(&self) -> &str; + /// Get actor ID for this actor instance + fn actor_id(&self) -> &str; - /// Get Redis URL for this worker instance + /// Get Redis URL for this actor instance fn redis_url(&self) -> &str; - /// Spawn the worker + /// Spawn the actor /// - /// This method provides the common worker loop implementation that both - /// sync and async workers can use. It handles: + /// This method provides the common actor loop implementation that both + /// sync and async actors can use. It handles: /// - Redis connection setup /// - Job polling from Redis queue /// - Shutdown signal handling /// - Delegating job processing to the implementation + /// + /// Note: The engine is now owned by the actor implementation as a field fn spawn( self: Arc, - engine: Engine, mut shutdown_rx: mpsc::Receiver<()>, ) -> JoinHandle>> { tokio::spawn(async move { - let worker_id = self.worker_id(); + let actor_id = self.actor_id(); let redis_url = self.redis_url(); - let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id); + let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id); info!( - "{} Worker '{}' starting. Connecting to Redis at {}. Listening on queue: {}", - self.worker_type(), - worker_id, + "{} Actor '{}' starting. Connecting to Redis at {}. Listening on queue: {}", + self.actor_type(), + actor_id, redis_url, queue_key ); - let mut redis_conn = initialize_redis_connection(worker_id, redis_url).await?; + let mut redis_conn = initialize_redis_connection(actor_id, redis_url).await?; loop { let blpop_keys = vec![queue_key.clone()]; tokio::select! { // Listen for shutdown signal _ = shutdown_rx.recv() => { - info!("{} Worker '{}': Shutdown signal received. Terminating loop.", - self.worker_type(), worker_id); + info!("{} Actor '{}': Shutdown signal received. Terminating loop.", + self.actor_type(), actor_id); break; } // Listen for tasks from Redis blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => { - debug!("{} Worker '{}': Attempting BLPOP on queue: {}", - self.worker_type(), worker_id, queue_key); + debug!("{} Actor '{}': Attempting BLPOP on queue: {}", + self.actor_type(), actor_id, queue_key); let response: Option<(String, String)> = match blpop_result { Ok(resp) => resp, Err(e) => { - error!("{} Worker '{}': Redis BLPOP error on queue {}: {}. 
Worker for this circle might stop.", - self.worker_type(), worker_id, queue_key, e); + error!("{} Actor '{}': Redis BLPOP error on queue {}: {}. Actor for this circle might stop.", + self.actor_type(), actor_id, queue_key, e); return Err(Box::new(e) as Box); } }; if let Some((_queue_name_recv, job_id)) = response { - info!("{} Worker '{}' received job_id: {} from queue: {}", - self.worker_type(), worker_id, job_id, _queue_name_recv); + info!("{} Actor '{}' received job_id: {} from queue: {}", + self.actor_type(), actor_id, job_id, _queue_name_recv); // Load the job from Redis - match crate::load_job_from_redis(&mut redis_conn, &job_id, worker_id).await { - Ok(mut job) => { + match crate::load_job_from_redis(&mut redis_conn, &job_id, actor_id).await { + Ok(job) => { // Check for ping job and handle it directly if job.script.trim() == "ping" { - info!("{} Worker '{}': Received ping job '{}', responding with pong", - self.worker_type(), worker_id, job_id); + info!("{} Actor '{}': Received ping job '{}', responding with pong", + self.actor_type(), actor_id, job_id); // Update job status to started if let Err(e) = hero_job::Job::update_status(&mut redis_conn, &job_id, hero_job::JobStatus::Started).await { - error!("{} Worker '{}': Failed to update ping job '{}' status to Started: {}", - self.worker_type(), worker_id, job_id, e); + error!("{} Actor '{}': Failed to update ping job '{}' status to Started: {}", + self.actor_type(), actor_id, job_id, e); } // Set result to "pong" and mark as finished if let Err(e) = hero_job::Job::set_result(&mut redis_conn, &job_id, "pong").await { - error!("{} Worker '{}': Failed to set ping job '{}' result: {}", - self.worker_type(), worker_id, job_id, e); + error!("{} Actor '{}': Failed to set ping job '{}' result: {}", + self.actor_type(), actor_id, job_id, e); } - info!("{} Worker '{}': Successfully responded to ping job '{}' with pong", - self.worker_type(), worker_id, job_id); + info!("{} Actor '{}': Successfully responded to ping job '{}' with pong", + self.actor_type(), actor_id, job_id); } else { - // Create a new engine for each job to avoid sharing state - let job_engine = crate::engine::create_heromodels_engine(); // Delegate job processing to the implementation - self.process_job(job, job_engine, &mut redis_conn).await; + // The engine is now owned by the actor implementation + self.process_job(job, &mut redis_conn).await; } } Err(e) => { - error!("{} Worker '{}': Failed to load job '{}': {}", - self.worker_type(), worker_id, job_id, e); + error!("{} Actor '{}': Failed to load job '{}': {}", + self.actor_type(), actor_id, job_id, e); } } } else { - debug!("{} Worker '{}': BLPOP timed out on queue {}. No new tasks.", - self.worker_type(), worker_id, queue_key); + debug!("{} Actor '{}': BLPOP timed out on queue {}. No new tasks.", + self.actor_type(), actor_id, queue_key); } } } } - info!("{} Worker '{}' has shut down.", self.worker_type(), worker_id); + info!("{} Actor '{}' has shut down.", self.actor_type(), actor_id); Ok(()) }) } } -/// Convenience function to spawn a worker with the trait-based interface +/// Convenience function to spawn a actor with the trait-based interface /// -/// This function provides a unified interface for spawning any worker implementation -/// that implements the Worker trait. +/// This function provides a unified interface for spawning any actor implementation +/// that implements the Actor trait. 
/// /// # Arguments /// -/// * `worker` - The worker implementation to spawn -/// * `config` - Worker configuration +/// * `actor` - The actor implementation to spawn +/// * `config` - Actor configuration /// * `engine` - Rhai engine for script execution /// * `shutdown_rx` - Channel receiver for shutdown signals /// /// # Returns /// -/// Returns a `JoinHandle` that can be awaited to wait for worker shutdown. +/// Returns a `JoinHandle` that can be awaited to wait for actor shutdown. /// /// # Example /// @@ -230,29 +230,28 @@ pub trait Worker: Send + Sync + 'static { /// use std::sync::Arc; /// use std::time::Duration; /// -/// let config = WorkerConfig::new( -/// "worker_1".to_string(), +/// let config = ActorConfig::new( +/// "actor_1".to_string(), /// "/path/to/db".to_string(), /// "redis://localhost:6379".to_string(), /// false, /// ); /// -/// let worker = Arc::new(SyncWorker::new()); +/// let actor = Arc::new(SyncActor::new()); /// let engine = create_heromodels_engine(); /// let (shutdown_tx, shutdown_rx) = mpsc::channel(1); /// -/// let handle = spawn_worker(worker, config, engine, shutdown_rx); +/// let handle = spawn_actor(actor, config, engine, shutdown_rx); /// -/// // Later, shutdown the worker +/// // Later, shutdown the actor /// shutdown_tx.send(()).await.unwrap(); /// handle.await.unwrap().unwrap(); /// ``` -pub fn spawn_worker( - worker: Arc, - engine: Engine, +pub fn spawn_actor( + actor: Arc, shutdown_rx: mpsc::Receiver<()>, ) -> JoinHandle>> { - worker.spawn(engine, shutdown_rx) + actor.spawn(shutdown_rx) } #[cfg(test)] @@ -260,26 +259,26 @@ mod tests { use super::*; use crate::engine::create_heromodels_engine; - // Mock worker for testing - struct MockWorker; + // Mock actor for testing + struct MockActor; #[async_trait::async_trait] - impl Worker for MockWorker { + impl Actor for MockActor { async fn process_job( &self, _job: Job, - _engine: Engine, _redis_conn: &mut redis::aio::MultiplexedConnection, ) { // Mock implementation - do nothing + // Engine would be owned by the actor implementation as a field } - fn worker_type(&self) -> &'static str { + fn actor_type(&self) -> &'static str { "Mock" } - fn worker_id(&self) -> &str { - "mock_worker" + fn actor_id(&self) -> &str { + "mock_actor" } fn redis_url(&self) -> &str { @@ -288,15 +287,15 @@ mod tests { } #[tokio::test] - async fn test_worker_config_creation() { - let config = WorkerConfig::new( - "test_worker".to_string(), + async fn test_actor_config_creation() { + let config = ActorConfig::new( + "test_actor".to_string(), "/tmp".to_string(), "redis://localhost:6379".to_string(), false, ); - assert_eq!(config.worker_id, "test_worker"); + assert_eq!(config.actor_id, "test_actor"); assert_eq!(config.db_path, "/tmp"); assert_eq!(config.redis_url, "redis://localhost:6379"); assert!(!config.preserve_tasks); @@ -304,10 +303,10 @@ mod tests { } #[tokio::test] - async fn test_worker_config_with_timeout() { + async fn test_actor_config_with_timeout() { let timeout = Duration::from_secs(300); - let config = WorkerConfig::new( - "test_worker".to_string(), + let config = ActorConfig::new( + "test_actor".to_string(), "/tmp".to_string(), "redis://localhost:6379".to_string(), false, @@ -317,23 +316,16 @@ mod tests { } #[tokio::test] - async fn test_spawn_worker_function() { + async fn test_spawn_actor_function() { let (_shutdown_tx, shutdown_rx) = mpsc::channel(1); - let config = WorkerConfig::new( - "test_worker".to_string(), - "/tmp".to_string(), - "redis://localhost:6379".to_string(), - false, - ); - let engine = 
create_heromodels_engine(); - let worker = Arc::new(MockWorker); + let actor = Arc::new(MockActor); - let handle = spawn_worker(worker, config, engine, shutdown_rx); + let handle = spawn_actor(actor, shutdown_rx); - // The worker should be created successfully + // The actor should be created successfully assert!(!handle.is_finished()); - // Abort the worker for cleanup + // Abort the actor for cleanup handle.abort(); } } diff --git a/core/worker/src/lib.rs b/core/actor/src/lib.rs similarity index 50% rename from core/worker/src/lib.rs rename to core/actor/src/lib.rs index bfda449..bf248f5 100644 --- a/core/worker/src/lib.rs +++ b/core/actor/src/lib.rs @@ -5,42 +5,30 @@ use rhai::{Dynamic, Engine}; use tokio::sync::mpsc; // For shutdown signal use tokio::task::JoinHandle; -/// Engine module containing Rhai engine creation and script execution utilities -pub mod engine; - -/// Worker trait abstraction for unified worker interface -pub mod worker_trait; - -/// Synchronous worker implementation -pub mod sync_worker; - -/// Asynchronous worker implementation with trait-based interface -pub mod async_worker_impl; - -/// Configuration module for TOML-based worker configuration -pub mod config; +/// Actor trait abstraction for unified actor interface +pub mod actor_trait; const NAMESPACE_PREFIX: &str = "hero:job:"; const BLPOP_TIMEOUT_SECONDS: usize = 5; -/// Initialize Redis connection for the worker +/// Initialize Redis connection for the actor pub(crate) async fn initialize_redis_connection( - worker_id: &str, + actor_id: &str, redis_url: &str, ) -> Result> { let redis_client = redis::Client::open(redis_url) .map_err(|e| { - error!("Worker for Worker ID '{}': Failed to open Redis client: {}", worker_id, e); + error!("Actor for Actor ID '{}': Failed to open Redis client: {}", actor_id, e); e })?; let redis_conn = redis_client.get_multiplexed_async_connection().await .map_err(|e| { - error!("Worker for Worker ID '{}': Failed to get Redis connection: {}", worker_id, e); + error!("Actor for Actor ID '{}': Failed to get Redis connection: {}", actor_id, e); e })?; - info!("Worker for Worker ID '{}' successfully connected to Redis.", worker_id); + info!("Actor for Actor ID '{}' successfully connected to Redis.", actor_id); Ok(redis_conn) } @@ -48,17 +36,17 @@ pub(crate) async fn initialize_redis_connection( pub(crate) async fn load_job_from_redis( redis_conn: &mut redis::aio::MultiplexedConnection, job_id: &str, - worker_id: &str, + actor_id: &str, ) -> Result> { - debug!("Worker '{}', Job {}: Loading job from Redis", worker_id, job_id); + debug!("Actor '{}', Job {}: Loading job from Redis", actor_id, job_id); match Job::load_from_redis(redis_conn, job_id).await { Ok(job) => { - debug!("Worker '{}', Job {}: Successfully loaded job", worker_id, job_id); + debug!("Actor '{}', Job {}: Successfully loaded job", actor_id, job_id); Ok(job) } Err(e) => { - error!("Worker '{}', Job {}: Failed to load job from Redis: {}", worker_id, job_id, e); + error!("Actor '{}', Job {}: Failed to load job from Redis: {}", actor_id, job_id, e); Err(Box::new(e)) } } @@ -77,7 +65,7 @@ async fn execute_script_and_update_status( db_config.insert("CONTEXT_ID".into(), job.context_id.clone().into()); engine.set_default_tag(Dynamic::from(db_config)); - debug!("Worker for Context ID '{}': Evaluating script with Rhai engine.", job.context_id); + debug!("Actor for Context ID '{}': Evaluating script with Rhai engine.", job.context_id); match engine.eval::(&job.script) { Ok(result) => { @@ -86,7 +74,7 @@ async fn 
execute_script_and_update_status( } else { result.to_string() }; - info!("Worker for Context ID '{}' job {} completed. Output: {}", job.context_id, job.id, output_str); + info!("Actor for Context ID '{}' job {} completed. Output: {}", job.context_id, job.id, output_str); // Update job status to finished and set result Job::update_status(redis_conn, &job.id, JobStatus::Finished).await @@ -105,7 +93,7 @@ async fn execute_script_and_update_status( } Err(e) => { let error_str = format!("{:?}", *e); - error!("Worker for Context ID '{}' job {} script evaluation failed. Error: {}", job.context_id, job.id, error_str); + error!("Actor for Context ID '{}' job {} script evaluation failed. Error: {}", job.context_id, job.id, error_str); // Update job status to error and set error message Job::update_status(redis_conn, &job.id, JobStatus::Error).await @@ -134,12 +122,12 @@ async fn cleanup_job( ) { if !preserve_tasks { if let Err(e) = Job::delete_from_redis(redis_conn, job_id).await { - error!("Worker for Context ID '{}', Job {}: Failed to delete job: {}", context_id, job_id, e); + error!("Actor for Context ID '{}', Job {}: Failed to delete job: {}", context_id, job_id, e); } else { - debug!("Worker for Context ID '{}', Job {}: Cleaned up job.", context_id, job_id); + debug!("Actor for Context ID '{}', Job {}: Cleaned up job.", context_id, job_id); } } else { - debug!("Worker for Context ID '{}', Job {}: Preserving job (preserve_tasks=true)", context_id, job_id); + debug!("Actor for Context ID '{}', Job {}: Preserving job (preserve_tasks=true)", context_id, job_id); } } @@ -147,33 +135,33 @@ async fn cleanup_job( async fn process_job( redis_conn: &mut redis::aio::MultiplexedConnection, job_id: &str, - worker_id: &str, + actor_id: &str, db_path: &str, engine: &mut Engine, preserve_tasks: bool, ) { - debug!("Worker '{}', Job {}: Processing started.", worker_id, job_id); + debug!("Actor '{}', Job {}: Processing started.", actor_id, job_id); // Load job from Redis - match load_job_from_redis(redis_conn, job_id, worker_id).await { + match load_job_from_redis(redis_conn, job_id, actor_id).await { Ok(job) => { - info!("Worker '{}' processing job_id: {}. Script: {:.50}...", job.context_id, job_id, job.script); + info!("Actor '{}' processing job_id: {}. 
Script: {:.50}...", job.context_id, job_id, job.script); // Update status to started - debug!("Worker for Context ID '{}', Job {}: Attempting to update status to 'started'.", job.context_id, job_id); + debug!("Actor for Context ID '{}', Job {}: Attempting to update status to 'started'.", job.context_id, job_id); if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Started).await { - error!("Worker for Context ID '{}', Job {}: Failed to update status to 'started': {}", job.context_id, job_id, e); + error!("Actor for Context ID '{}', Job {}: Failed to update status to 'started': {}", job.context_id, job_id, e); } else { - debug!("Worker for Context ID '{}', Job {}: Status updated to 'started'.", job.context_id, job_id); + debug!("Actor for Context ID '{}', Job {}: Status updated to 'started'.", job.context_id, job_id); } // Execute the script and update status if let Err(e) = execute_script_and_update_status(redis_conn, engine, &job, db_path).await { - error!("Worker for Context ID '{}', Job {}: Script execution failed: {}", job.context_id, job_id, e); + error!("Actor for Context ID '{}', Job {}: Script execution failed: {}", job.context_id, job_id, e); // Ensure job status is set to error if execution failed if let Err(status_err) = Job::update_status(redis_conn, job_id, JobStatus::Error).await { - error!("Worker for Context ID '{}', Job {}: Failed to update status to error after execution failure: {}", job.context_id, job_id, status_err); + error!("Actor for Context ID '{}', Job {}: Failed to update status to error after execution failure: {}", job.context_id, job_id, status_err); } } @@ -181,21 +169,21 @@ async fn process_job( cleanup_job(redis_conn, job_id, &job.context_id, preserve_tasks).await; } Err(e) => { - error!("Worker '{}', Job {}: Failed to load job: {}", worker_id, job_id, e); + error!("Actor '{}', Job {}: Failed to load job: {}", actor_id, job_id, e); // Clean up invalid job if needed if !preserve_tasks { if let Err(del_err) = Job::delete_from_redis(redis_conn, job_id).await { - error!("Worker '{}', Job {}: Failed to delete invalid job: {}", worker_id, job_id, del_err); + error!("Actor '{}', Job {}: Failed to delete invalid job: {}", actor_id, job_id, del_err); } } else { - debug!("Worker '{}', Job {}: Preserving invalid job (preserve_tasks=true)", worker_id, job_id); + debug!("Actor '{}', Job {}: Preserving invalid job (preserve_tasks=true)", actor_id, job_id); } } } } -pub fn spawn_rhai_worker( - worker_id: String, +pub fn spawn_rhai_actor( + actor_id: String, db_path: String, mut engine: Engine, redis_url: String, @@ -203,101 +191,48 @@ pub fn spawn_rhai_worker( preserve_tasks: bool, ) -> JoinHandle>> { tokio::spawn(async move { - let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id); + let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id); info!( - "Rhai Worker for Worker ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.", - worker_id, redis_url, queue_key + "Rhai Actor for Actor ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.", + actor_id, redis_url, queue_key ); - let mut redis_conn = initialize_redis_connection(&worker_id, &redis_url).await?; + let mut redis_conn = initialize_redis_connection(&actor_id, &redis_url).await?; loop { let blpop_keys = vec![queue_key.clone()]; tokio::select! { // Listen for shutdown signal _ = shutdown_rx.recv() => { - info!("Worker for Worker ID '{}': Shutdown signal received. 
Terminating loop.", worker_id); + info!("Actor for Actor ID '{}': Shutdown signal received. Terminating loop.", actor_id); break; } // Listen for tasks from Redis blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => { - debug!("Worker for Worker ID '{}': Attempting BLPOP on queue: {}", worker_id, queue_key); + debug!("Actor for Actor ID '{}': Attempting BLPOP on queue: {}", actor_id, queue_key); let response: Option<(String, String)> = match blpop_result { Ok(resp) => resp, Err(e) => { - error!("Worker '{}': Redis BLPOP error on queue {}: {}. Worker for this circle might stop.", worker_id, queue_key, e); + error!("Actor '{}': Redis BLPOP error on queue {}: {}. Actor for this circle might stop.", actor_id, queue_key, e); return Err(Box::new(e) as Box); } }; if let Some((_queue_name_recv, job_id)) = response { - info!("Worker '{}' received job_id: {} from queue: {}", worker_id, job_id, _queue_name_recv); - process_job(&mut redis_conn, &job_id, &worker_id, &db_path, &mut engine, preserve_tasks).await; + info!("Actor '{}' received job_id: {} from queue: {}", actor_id, job_id, _queue_name_recv); + process_job(&mut redis_conn, &job_id, &actor_id, &db_path, &mut engine, preserve_tasks).await; } else { - debug!("Worker '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", worker_id, queue_key); + debug!("Actor '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", actor_id, queue_key); } } } } - info!("Worker '{}' has shut down.", worker_id); + info!("Actor '{}' has shut down.", actor_id); Ok(()) }) } // Re-export the main trait-based interface for convenience -pub use worker_trait::{Worker, WorkerConfig, spawn_worker}; -pub use sync_worker::SyncWorker; -pub use async_worker_impl::AsyncWorker; +pub use actor_trait::{Actor, ActorConfig, spawn_actor}; -/// Convenience function to spawn a synchronous worker using the trait interface -/// -/// This function provides backward compatibility with the original sync worker API -/// while using the new trait-based implementation. -pub fn spawn_sync_worker( - worker_id: String, - db_path: String, - engine: rhai::Engine, - redis_url: String, - shutdown_rx: mpsc::Receiver<()>, - preserve_tasks: bool, -) -> JoinHandle>> { - use std::sync::Arc; - - let worker = Arc::new( - SyncWorker::builder() - .worker_id(worker_id) - .db_path(db_path) - .redis_url(redis_url) - .preserve_tasks(preserve_tasks) - .build() - .expect("Failed to build SyncWorker") - ); - spawn_worker(worker, engine, shutdown_rx) -} - -/// Convenience function to spawn an asynchronous worker using the trait interface -/// -/// This function provides a clean interface for the new async worker implementation -/// with timeout support. -pub fn spawn_async_worker( - worker_id: String, - db_path: String, - engine: rhai::Engine, - redis_url: String, - shutdown_rx: mpsc::Receiver<()>, - default_timeout: std::time::Duration, -) -> JoinHandle>> { - use std::sync::Arc; - - let worker = Arc::new( - AsyncWorker::builder() - .worker_id(worker_id) - .db_path(db_path) - .redis_url(redis_url) - .default_timeout(default_timeout) - .build() - .expect("Failed to build AsyncWorker") - ); - spawn_worker(worker, engine, shutdown_rx) -} diff --git a/core/docs/architecture.md b/core/docs/architecture.md index 33c8148..f753528 100644 --- a/core/docs/architecture.md +++ b/core/docs/architecture.md @@ -1,5 +1,5 @@ # Architecture -Supervisor runs actors and manages their lifecycle. 
Additionally supervisor dispatches jobs to workers and provides an API for job supervision. Jobs are dispatched to workers over a redis protocol. Jobs have a script which is the code that is to be executed by the worker. There are two script formats used: Rhai and HeroScript. Jobs also have params such as timeout and priority for job management, and context variables which are available to the script such as CALLER_ID and CONTEXT_ID. There are four different types of workers: OSIS, SAL, V and Python. OSIS and SAL workers use Rhai scripts, while V and Python workers use HeroScript. Each worker has its own queue and is responsible for processing jobs of its type. Each worker has a unique way of executing the script. +Supervisor runs actors and manages their lifecycle. Additionally supervisor dispatches jobs to actors and provides an API for job supervision. Jobs are dispatched to actors over a redis protocol. Jobs have a script which is the code that is to be executed by the actor. There are two script formats used: Rhai and HeroScript. Jobs also have params such as timeout and priority for job management, and context variables which are available to the script such as CALLER_ID and CONTEXT_ID. There are four different types of actors: OSIS, SAL, V and Python. OSIS and SAL actors use Rhai scripts, while V and Python actors use HeroScript. Each actor has its own queue and is responsible for processing jobs of its type. Each actor has a unique way of executing the script. -The OSIS worker executes non-blocking Rhai scripts one after another using the Rhai engine on a single thread. The SAL worker executes blocking asynchronous Rhai scripts concurrently: it spawns a new thread for each script evaluation. V and Python workers execute HeroScript scripts using a V or Python heroscript engine. \ No newline at end of file +The OSIS actor executes non-blocking Rhai scripts one after another using the Rhai engine on a single thread. The SAL actor executes blocking asynchronous Rhai scripts concurrently: it spawns a new thread for each script evaluation. V and Python actors execute HeroScript scripts using a V or Python heroscript engine. 
\ No newline at end of file diff --git a/core/job/README.md b/core/job/README.md index 7fea356..f812cdd 100644 --- a/core/job/README.md +++ b/core/job/README.md @@ -1,14 +1,14 @@ ### `Job` Represents a script execution request with: - Unique ID and timestamps -- Script content and target worker +- Script content and target actor - Execution settings (timeout, retries, concurrency) - Logging configuration ### `JobBuilder` Fluent builder for configuring jobs: - `script()` - Set the script content -- `worker_id()` - Target specific worker +- `actor_id()` - Target specific actor - `timeout()` - Set execution timeout - `build()` - Create the job - `submit()` - Fire-and-forget submission diff --git a/core/job/src/lib.rs b/core/job/src/lib.rs index 2ebc033..33e319b 100644 --- a/core/job/src/lib.rs +++ b/core/job/src/lib.rs @@ -11,22 +11,22 @@ mod builder; /// Redis namespace prefix for all Hero job-related keys pub const NAMESPACE_PREFIX: &str = "hero:job:"; -/// Script type enumeration for different worker types +/// Script type enumeration for different actor types #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum ScriptType { - /// OSIS - A worker that executes Rhai/HeroScript + /// OSIS - An actor that executes Rhai/HeroScript OSIS, - /// SAL - A worker that executes system abstraction layer functionalities in rhai + /// SAL - An actor that executes system abstraction layer functionalities in rhai SAL, - /// V - A worker that executes heroscript in V + /// V - An actor that executes heroscript in V V, - /// Python - A worker that executes heroscript in python + /// Python - An actor that executes heroscript in python Python, } impl ScriptType { - /// Get the worker queue suffix for this script type - pub fn worker_queue_suffix(&self) -> &'static str { + /// Get the actor queue suffix for this script type + pub fn actor_queue_suffix(&self) -> &'static str { match self { ScriptType::OSIS => "osis", ScriptType::SAL => "sal", @@ -81,7 +81,7 @@ impl JobStatus { /// Representation of a script execution request. /// /// This structure contains all the information needed to execute a script -/// on a worker service, including the script content, dependencies, and metadata. +/// on an actor service, including the script content, dependencies, and metadata. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Job { pub id: String, diff --git a/core/supervisor/LIFECYCLE.md b/core/supervisor/LIFECYCLE.md index 59c6bc3..f5f84dc 100644 --- a/core/supervisor/LIFECYCLE.md +++ b/core/supervisor/LIFECYCLE.md @@ -1,20 +1,20 @@ -# Worker Lifecycle Management +# Actor Lifecycle Management -The Hero Supervisor includes comprehensive worker lifecycle management functionality using [Zinit](https://github.com/threefoldtech/zinit) as the process manager. This enables the supervisor to manage worker processes, perform health monitoring, and implement load balancing. +The Hero Supervisor includes comprehensive actor lifecycle management functionality using [Zinit](https://github.com/threefoldtech/zinit) as the process manager. This enables the supervisor to manage actor processes, perform health monitoring, and implement load balancing. 
## Overview The lifecycle management system provides: -- **Worker Process Management**: Start, stop, restart, and monitor worker binaries -- **Health Monitoring**: Automatic ping jobs every 10 minutes for idle workers -- **Graceful Shutdown**: Clean termination of worker processes +- **Actor Process Management**: Start, stop, restart, and monitor actor binaries +- **Health Monitoring**: Automatic ping jobs every 10 minutes for idle actors +- **Graceful Shutdown**: Clean termination of actor processes ## Architecture ``` โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Supervisor โ”‚ โ”‚ WorkerLifecycle โ”‚ โ”‚ Zinit โ”‚ +โ”‚ Supervisor โ”‚ โ”‚ ActorLifecycle โ”‚ โ”‚ Zinit โ”‚ โ”‚ โ”‚โ—„โ”€โ”€โ–บโ”‚ Manager โ”‚โ—„โ”€โ”€โ–บโ”‚ (Process โ”‚ โ”‚ (Job Dispatch) โ”‚ โ”‚ โ”‚ โ”‚ Manager) โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ @@ -22,49 +22,49 @@ The lifecycle management system provides: โ”‚ โ”‚ โ”‚ โ–ผ โ–ผ โ–ผ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Redis โ”‚ โ”‚ Health Monitor โ”‚ โ”‚ Worker Binaries โ”‚ +โ”‚ Redis โ”‚ โ”‚ Health Monitor โ”‚ โ”‚ Actor Binaries โ”‚ โ”‚ (Job Queue) โ”‚ โ”‚ (Ping Jobs) โ”‚ โ”‚ (OSIS/SAL/V) โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ ``` ## Components -### WorkerConfig +### ActorConfig -Defines configuration for a worker binary: +Defines configuration for a actor binary: ```rust -use hero_supervisor::{WorkerConfig, ScriptType}; +use hero_supervisor::{ActorConfig, ScriptType}; use std::path::PathBuf; use std::collections::HashMap; -let config = WorkerConfig::new( - "osis_worker_0".to_string(), - PathBuf::from("/usr/local/bin/osis_worker"), +let config = ActorConfig::new( + "osis_actor_0".to_string(), + PathBuf::from("/usr/local/bin/osis_actor"), ScriptType::OSIS, ) .with_args(vec![ "--redis-url".to_string(), "redis://localhost:6379".to_string(), - "--worker-id".to_string(), - "osis_worker_0".to_string(), + "--actor-id".to_string(), + "osis_actor_0".to_string(), ]) .with_env({ let mut env = HashMap::new(); env.insert("RUST_LOG".to_string(), "info".to_string()); - env.insert("WORKER_TYPE".to_string(), "osis".to_string()); + env.insert("ACTOR_TYPE".to_string(), "osis".to_string()); env }) -.with_health_check("/usr/local/bin/osis_worker --health-check".to_string()) +.with_health_check("/usr/local/bin/osis_actor --health-check".to_string()) .with_dependencies(vec!["redis".to_string()]); ``` -### WorkerLifecycleManager +### ActorLifecycleManager -Main component for managing worker lifecycles: +Main component for managing actor lifecycles: ```rust -use hero_supervisor::{WorkerLifecycleManagerBuilder, Supervisor}; +use hero_supervisor::{ActorLifecycleManagerBuilder, Supervisor}; let supervisor = SupervisorBuilder::new() .redis_url("redis://localhost:6379") @@ -72,11 +72,11 @@ let supervisor = SupervisorBuilder::new() .context_id("production") .build()?; -let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new("/var/run/zinit.sock".to_string()) +let mut lifecycle_manager = 
ActorLifecycleManagerBuilder::new("/var/run/zinit.sock".to_string()) .with_supervisor(supervisor.clone()) - .add_worker(osis_worker_config) - .add_worker(sal_worker_config) - .add_worker(v_worker_config) + .add_actor(osis_actor_config) + .add_actor(sal_actor_config) + .add_actor(v_actor_config) .build(); ``` @@ -84,45 +84,45 @@ let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new("/var/run/zinit.s The lifecycle manager supports all Hero script types: -- **OSIS**: Rhai/HeroScript execution workers -- **SAL**: System Abstraction Layer workers +- **OSIS**: Rhai/HeroScript execution actors +- **SAL**: System Abstraction Layer actors - **V**: HeroScript execution in V language - **Python**: HeroScript execution in Python ## Key Features -### 1. Worker Management +### 1. Actor Management ```rust -// Start all configured workers -lifecycle_manager.start_all_workers().await?; +// Start all configured actors +lifecycle_manager.start_all_actors().await?; -// Stop all workers -lifecycle_manager.stop_all_workers().await?; +// Stop all actors +lifecycle_manager.stop_all_actors().await?; -// Restart specific worker -lifecycle_manager.restart_worker("osis_worker_0").await?; +// Restart specific actor +lifecycle_manager.restart_actor("osis_actor_0").await?; -// Get worker status -let status = lifecycle_manager.get_worker_status("osis_worker_0").await?; -println!("Worker state: {:?}, PID: {}", status.state, status.pid); +// Get actor status +let status = lifecycle_manager.get_actor_status("osis_actor_0").await?; +println!("Actor state: {:?}, PID: {}", status.state, status.pid); ``` ### 2. Health Monitoring -The system automatically monitors worker health: +The system automatically monitors actor health: -- Tracks last job execution time for each worker -- Sends ping jobs to workers idle for 10+ minutes -- Restarts workers that fail ping checks 3 times -- Updates job times when workers receive tasks +- Tracks last job execution time for each actor +- Sends ping jobs to actors idle for 10+ minutes +- Restarts actors that fail ping checks 3 times +- Updates job times when actors receive tasks ```rust // Manual health check -lifecycle_manager.monitor_worker_health().await?; +lifecycle_manager.monitor_actor_health().await?; // Update job time (called automatically by supervisor) -lifecycle_manager.update_worker_job_time("osis_worker_0"); +lifecycle_manager.update_actor_job_time("osis_actor_0"); // Start continuous health monitoring lifecycle_manager.start_health_monitoring().await; // Runs forever @@ -130,26 +130,26 @@ lifecycle_manager.start_health_monitoring().await; // Runs forever ### 3. Dynamic Scaling -Scale workers up or down based on demand: +Scale actors up or down based on demand: ```rust -// Scale OSIS workers to 5 instances -lifecycle_manager.scale_workers(&ScriptType::OSIS, 5).await?; +// Scale OSIS actors to 5 instances +lifecycle_manager.scale_actors(&ScriptType::OSIS, 5).await?; -// Scale down SAL workers to 1 instance -lifecycle_manager.scale_workers(&ScriptType::SAL, 1).await?; +// Scale down SAL actors to 1 instance +lifecycle_manager.scale_actors(&ScriptType::SAL, 1).await?; // Check current running count -let count = lifecycle_manager.get_running_worker_count(&ScriptType::V).await; -println!("Running V workers: {}", count); +let count = lifecycle_manager.get_running_actor_count(&ScriptType::V).await; +println!("Running V actors: {}", count); ``` ### 4. 
Service Dependencies -Workers can depend on other services: +Actors can depend on other services: ```rust -let config = WorkerConfig::new(name, binary, script_type) +let config = ActorConfig::new(name, binary, script_type) .with_dependencies(vec![ "redis".to_string(), "database".to_string(), @@ -157,25 +157,25 @@ let config = WorkerConfig::new(name, binary, script_type) ]); ``` -Zinit ensures dependencies start before the worker. +Zinit ensures dependencies start before the actor. ## Integration with Supervisor The lifecycle manager integrates seamlessly with the supervisor: ```rust -use hero_supervisor::{Supervisor, WorkerLifecycleManager}; +use hero_supervisor::{Supervisor, ActorLifecycleManager}; // Create supervisor and lifecycle manager let supervisor = SupervisorBuilder::new().build()?; -let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new(zinit_socket) +let mut lifecycle_manager = ActorLifecycleManagerBuilder::new(zinit_socket) .with_supervisor(supervisor.clone()) .build(); -// Start workers -lifecycle_manager.start_all_workers().await?; +// Start actors +lifecycle_manager.start_all_actors().await?; -// Create and execute jobs (supervisor automatically routes to workers) +// Create and execute jobs (supervisor automatically routes to actors) let job = supervisor .new_job() .script_type(ScriptType::OSIS) @@ -191,15 +191,15 @@ println!("Job result: {}", result); The lifecycle manager automatically creates Zinit service configurations: ```yaml -# Generated service config for osis_worker_0 -exec: "/usr/local/bin/osis_worker --redis-url redis://localhost:6379 --worker-id osis_worker_0" -test: "/usr/local/bin/osis_worker --health-check" +# Generated service config for osis_actor_0 +exec: "/usr/local/bin/osis_actor --redis-url redis://localhost:6379 --actor-id osis_actor_0" +test: "/usr/local/bin/osis_actor --health-check" oneshot: false # Restart on exit after: - redis env: RUST_LOG: "info" - WORKER_TYPE: "osis" + ACTOR_TYPE: "osis" ``` ## Error Handling @@ -209,10 +209,10 @@ The system provides comprehensive error handling: ```rust use hero_supervisor::SupervisorError; -match lifecycle_manager.start_worker(&config).await { - Ok(_) => println!("Worker started successfully"), - Err(SupervisorError::WorkerStartFailed(worker, reason)) => { - eprintln!("Failed to start {}: {}", worker, reason); +match lifecycle_manager.start_actor(&config).await { + Ok(_) => println!("Actor started successfully"), + Err(SupervisorError::ActorStartFailed(actor, reason)) => { + eprintln!("Failed to start {}: {}", actor, reason); } Err(e) => eprintln!("Other error: {}", e), } @@ -243,11 +243,11 @@ REDIS_URL=redis://localhost:6379 cargo run --example lifecycle_demo redis-server ``` -3. **Worker Binaries**: Compiled worker binaries for each script type - - `/usr/local/bin/osis_worker` - - `/usr/local/bin/sal_worker` - - `/usr/local/bin/v_worker` - - `/usr/local/bin/python_worker` +3. **Actor Binaries**: Compiled actor binaries for each script type + - `/usr/local/bin/osis_actor` + - `/usr/local/bin/sal_actor` + - `/usr/local/bin/v_actor` + - `/usr/local/bin/python_actor` ## Configuration Best Practices @@ -267,15 +267,15 @@ REDIS_URL=redis://localhost:6379 cargo run --example lifecycle_demo - Check socket permissions: `ls -la /var/run/zinit.sock` - Verify socket path in configuration -2. **Worker Start Failed** +2. **Actor Start Failed** - Check binary exists and is executable - Verify dependencies are running - Review Zinit logs: `zinit logs ` 3. 
**Health Check Failures** - - Implement proper health check endpoint in workers + - Implement proper health check endpoint in actors - Verify health check command syntax - - Check worker responsiveness + - Check actor responsiveness 4. **Redis Connection Issues** - Ensure Redis is running and accessible @@ -289,10 +289,10 @@ REDIS_URL=redis://localhost:6379 cargo run --example lifecycle_demo zinit list # View service logs -zinit logs osis_worker_0 +zinit logs osis_actor_0 # Check service status -zinit status osis_worker_0 +zinit status osis_actor_0 # Monitor Redis queues redis-cli keys "hero:job:*" @@ -300,20 +300,20 @@ redis-cli keys "hero:job:*" ## Performance Considerations -- **Scaling**: Start with minimal workers and scale based on queue depth +- **Scaling**: Start with minimal actors and scale based on queue depth - **Health Monitoring**: Adjust ping intervals based on workload patterns -- **Resource Usage**: Monitor CPU/memory usage of worker processes +- **Resource Usage**: Monitor CPU/memory usage of actor processes - **Queue Depth**: Monitor Redis queue lengths for scaling decisions ## Security - **Process Isolation**: Zinit provides process isolation -- **User Permissions**: Run workers with appropriate user permissions +- **User Permissions**: Run actors with appropriate user permissions - **Network Security**: Secure Redis and Zinit socket access -- **Binary Validation**: Verify worker binary integrity before deployment +- **Binary Validation**: Verify actor binary integrity before deployment ## Future -- **Load Balancing**: Dynamic scaling of workers based on demand +- **Load Balancing**: Dynamic scaling of actors based on demand - **Service Dependencies**: Proper startup ordering with dependency management \ No newline at end of file diff --git a/core/supervisor/README.md b/core/supervisor/README.md index 995e383..32be6bf 100644 --- a/core/supervisor/README.md +++ b/core/supervisor/README.md @@ -1,60 +1,60 @@ # Hero Supervisor -The **Hero Supervisor** is responsible for supervising the lifecycle of workers and dispatching jobs to them via Redis queues. +The **Hero Supervisor** is responsible for supervising the lifecycle of actors and dispatching jobs to them via Redis queues. ## Overview The system involves four primary actors: -1. **OSIS**: A worker that executes Rhai and HeroScript. -2. **SAL**: A worker that performs system abstraction layer functionalities using Rhai. -3. **V**: A worker that executes HeroScript in the V programming language. -4. **Python**: A worker that executes HeroScript in Python. +1. **OSIS**: An actor that executes Rhai and HeroScript. +2. **SAL**: An actor that performs system abstraction layer functionalities using Rhai. +3. **V**: An actor that executes HeroScript in the V programming language. +4. **Python**: An actor that executes HeroScript in Python. -The Supervisor utilizes **zinit** to start and monitor these workers, ensuring they are running correctly. +The Supervisor utilizes **zinit** to start and monitor these actors, ensuring they are running correctly. ### Key Features -- **Worker Lifecycle Supervision**: Oversee the lifecycle of workers, including starting, stopping, restarting, and load balancing based on job demand. -- **Job Supervision**: API for efficiently managing jobs dispatched to workers over Redis queues. +- **Actor Lifecycle Supervision**: Oversee the lifecycle of actors, including starting, stopping, restarting, and load balancing based on job demand.
+- **Job Supervision**: API for efficiently managing jobs dispatched to actors over Redis queues. -## Worker Lifecycle Supervision +## Actor Lifecycle Supervision -The Supervisor oversees the lifecycle of the workers, ensuring they are operational and efficiently allocated. Load balancing is implemented to dynamically adjust the number of active workers based on job demand. +The Supervisor oversees the lifecycle of the actors, ensuring they are operational and efficiently allocated. Load balancing is implemented to dynamically adjust the number of active actors based on job demand. -Additionally, the Supervisor implements health monitoring for worker engines: if a worker engine does not receive a job within 10 minutes, the Supervisor sends a ping job. The engine must respond immediately; if it fails to do so, the Supervisor restarts the requested job engine. +Additionally, the Supervisor implements health monitoring for actor engines: if an actor engine does not receive a job within 10 minutes, the Supervisor sends a ping job. The engine must respond immediately; if it fails to do so, the Supervisor restarts the requested job engine. ### Prerequisites -**Important**: Before running any lifecycle examples or using worker management features, you must start the Zinit daemon: +**Important**: Before running any lifecycle examples or using actor management features, you must start the Zinit daemon: ```bash -# Start Zinit daemon (required for worker lifecycle management) +# Start Zinit daemon (required for actor lifecycle management) sudo zinit init # Or start Zinit with a custom socket path sudo zinit --socket /var/run/zinit.sock init ``` -**Note**: The Supervisor uses Zinit as the process manager for worker lifecycle operations. The default socket path is `/var/run/zinit.sock`, but you can configure a custom path using the `SupervisorBuilder::zinit_socket_path()` method. +**Note**: The Supervisor uses Zinit as the process manager for actor lifecycle operations. The default socket path is `/var/run/zinit.sock`, but you can configure a custom path using the `SupervisorBuilder::zinit_socket_path()` method. **Troubleshooting**: If you get connection errors when running examples, ensure: 1. Zinit daemon is running (`zinit list` should work) 2. The socket path matches between Zinit and your Supervisor configuration 3. You have appropriate permissions to access the Zinit socket -### Supervisor API for Worker Lifecycle +### Supervisor API for Actor Lifecycle -The Supervisor provides the following methods for supervising the worker lifecycle: +The Supervisor provides the following methods for supervising the actor lifecycle: -- **`start_worker()`**: Initializes and starts a specified worker. -- **`stop_worker()`**: Gracefully stops a specified worker. -- **`restart_worker()`**: Restarts a specified worker to ensure it operates correctly. -- **`get_worker_status()`**: Checks the status of a specific worker. +- **`start_actor()`**: Initializes and starts a specified actor. +- **`stop_actor()`**: Gracefully stops a specified actor. +- **`restart_actor()`**: Restarts a specified actor to ensure it operates correctly. +- **`get_actor_status()`**: Checks the status of a specific actor. ## Job Supervision -Jobs are dispatched to workers through their designated Redis queues, and the Supervisor provides an API for comprehensive job supervision. +Jobs are dispatched to actors through their designated Redis queues, and the Supervisor provides an API for comprehensive job supervision.
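To make the dispatch flow concrete, here is a minimal sketch of submitting a single job through the Supervisor. It assumes the builder-style API shown in the repository examples (`new_job()`, `run_job_and_await_result()`); the actor id `osis_actor_1` and the exact method signatures are illustrative and may differ from the current implementation.

```rust
use std::time::Duration;
use hero_supervisor::{ScriptType, SupervisorBuilder};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect the supervisor to the same Redis instance the actors listen on.
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://localhost:6379")
        .build()
        .await?;

    // Build a job and dispatch it to the OSIS actor queue, blocking until the
    // result is pushed onto the job's dedicated reply queue (hero:reply:{job_id}).
    let job = supervisor
        .new_job()
        .script_type(ScriptType::OSIS)
        .script_content("40 + 2".to_string())
        .timeout(Duration::from_secs(30))
        .build()?;

    let result = supervisor
        .run_job_and_await_result(job, "osis_actor_1".to_string())
        .await?;
    println!("Job result: {}", result);

    Ok(())
}
```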
### Supervisor API for Job Supervision @@ -95,9 +95,9 @@ You can modify these in the example source code if your setup differs. Jobs are managed within the `hero:` namespace in Redis: - **`hero:job:{job_id}`**: Stores job parameters as a Redis hash. -- **`hero:work_queue:{worker_id}`**: Contains worker-specific job queues for dispatching jobs. +- **`hero:work_queue:{actor_id}`**: Contains actor-specific job queues for dispatching jobs. - **`hero:reply:{job_id}`**: Dedicated queues for job results. ## Prerequisites -- A Redis server must be accessible to both the Supervisor and the workers. \ No newline at end of file +- A Redis server must be accessible to both the Supervisor and the actors. \ No newline at end of file diff --git a/core/supervisor/docs/protocol.md b/core/supervisor/docs/protocol.md index 5ac4731..ddbbfcb 100644 --- a/core/supervisor/docs/protocol.md +++ b/core/supervisor/docs/protocol.md @@ -1,10 +1,10 @@ # Hero Supervisor Protocol -This document describes the Redis-based protocol used by the Hero Supervisor for job management and worker communication. +This document describes the Redis-based protocol used by the Hero Supervisor for job management and actor communication. ## Overview -The Hero Supervisor uses Redis as a message broker and data store for managing distributed job execution. Jobs are stored as Redis hashes, and communication with workers happens through Redis lists (queues). +The Hero Supervisor uses Redis as a message broker and data store for managing distributed job execution. Jobs are stored as Redis hashes, and communication with actors happens through Redis lists (queues). ## Redis Namespace @@ -22,7 +22,7 @@ hero:job:{job_id} **Job Hash Fields:** - `id`: Unique job identifier (UUID v4) - `caller_id`: Identifier of the client that created the job -- `worker_id`: Target worker identifier +- `actor_id`: Target actor identifier - `context_id`: Execution context identifier - `script`: Script content to execute (Rhai or HeroScript) - `timeout`: Execution timeout in seconds @@ -35,8 +35,8 @@ hero:job:{job_id} - `env_vars`: Environment variables as JSON object (optional) - `prerequisites`: JSON array of job IDs that must complete before this job (optional) - `dependents`: JSON array of job IDs that depend on this job completing (optional) -- `output`: Job execution result (set by worker) -- `error`: Error message if job failed (set by worker) +- `output`: Job execution result (set by actor) +- `error`: Error message if job failed (set by actor) - `dependencies`: List of job IDs that this job depends on ### Job Dependencies @@ -47,19 +47,19 @@ Jobs can have dependencies on other jobs, which are stored in the `dependencies` Jobs are queued for execution using Redis lists: ``` -hero:work_queue:{worker_id} +hero:work_queue:{actor_id} ``` -Workers listen on their specific queue using `BLPOP` for job IDs to process. +Actors listen on their specific queue using `BLPOP` for job IDs to process. ### Stop Queues Job stop requests are sent through dedicated stop queues: ``` -hero:stop_queue:{worker_id} +hero:stop_queue:{actor_id} ``` -Workers monitor these queues to receive stop requests for running jobs. +Actors monitor these queues to receive stop requests for running jobs. ### Reply Queues @@ -68,7 +68,7 @@ For synchronous job execution, dedicated reply queues are used: hero:reply:{job_id} ``` -Workers send results to these queues when jobs complete. +Actors send results to these queues when jobs complete. 
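For tooling that needs to talk to Redis directly (for example, a thin client or a test harness), the queue layout above can be exercised with the `redis` crate. The sketch below is illustrative rather than the Supervisor API: it writes a bare-bones job hash, pushes the job id onto an actor's work queue, and blocks on the reply queue. The field subset, the actor id, and the 30-second timeout are placeholder choices.

```rust
use redis::RedisResult;

/// Minimal sketch of the raw dispatch protocol (illustrative only).
fn dispatch_job_raw(job_id: &str, actor_id: &str, script: &str) -> RedisResult<Option<String>> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    // 1. Store the job parameters as a hash under hero:job:{job_id}.
    let _: i64 = redis::cmd("HSET")
        .arg(format!("hero:job:{}", job_id))
        .arg("id").arg(job_id)
        .arg("script").arg(script)
        .arg("timeout").arg(30)
        .query(&mut con)?;

    // 2. Submit the job id to the actor's work queue.
    let _: i64 = redis::cmd("LPUSH")
        .arg(format!("hero:work_queue:{}", actor_id))
        .arg(job_id)
        .query(&mut con)?;

    // 3. For synchronous execution, wait on the job's dedicated reply queue.
    //    BLPOP returns (queue_name, value) once the actor pushes a result,
    //    or nil if the timeout expires.
    let reply: Option<(String, String)> = redis::cmd("BLPOP")
        .arg(format!("hero:reply:{}", job_id))
        .arg(30)
        .query(&mut con)?;

    Ok(reply.map(|(_queue, result)| result))
}
```

An actor-side loop mirrors this from the other direction: `BLPOP` on `hero:work_queue:{actor_id}`, `HGETALL` on the job hash, then `HSET` of the final status and an `LPUSH` onto `hero:reply:{job_id}`, as described under Job Lifecycle below.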
## Job Lifecycle @@ -79,20 +79,20 @@ Client -> Redis: HSET hero:job:{job_id} {job_fields} ### 2. Job Submission ``` -Client -> Redis: LPUSH hero:work_queue:{worker_id} {job_id} +Client -> Redis: LPUSH hero:work_queue:{actor_id} {job_id} ``` ### 3. Job Processing ``` -Worker -> Redis: BLPOP hero:work_queue:{worker_id} -Worker -> Redis: HSET hero:job:{job_id} status "started" -Worker: Execute script -Worker -> Redis: HSET hero:job:{job_id} status "finished" output "{result}" +Actor -> Redis: BLPOP hero:work_queue:{actor_id} +Actor -> Redis: HSET hero:job:{job_id} status "started" +Actor: Execute script +Actor -> Redis: HSET hero:job:{job_id} status "finished" output "{result}" ``` ### 4. Job Completion (Async) ``` -Worker -> Redis: LPUSH hero:reply:{job_id} {result} +Actor -> Redis: LPUSH hero:reply:{job_id} {result} ``` ## API Operations @@ -110,7 +110,7 @@ supervisor.list_jobs() -> Vec supervisor.stop_job(job_id) -> Result<(), SupervisorError> ``` **Redis Operations:** -- `LPUSH hero:stop_queue:{worker_id} {job_id}` - Send stop request +- `LPUSH hero:stop_queue:{actor_id} {job_id}` - Send stop request ### Get Job Status ```rust @@ -131,20 +131,20 @@ supervisor.get_job_logs(job_id) -> Result, SupervisorError> ### Run Job and Await Result ```rust -supervisor.run_job_and_await_result(job, worker_id) -> Result +supervisor.run_job_and_await_result(job, actor_id) -> Result ``` **Redis Operations:** 1. `HSET hero:job:{job_id} {job_fields}` - Store job -2. `LPUSH hero:work_queue:{worker_id} {job_id}` - Submit job +2. `LPUSH hero:work_queue:{actor_id} {job_id}` - Submit job 3. `BLPOP hero:reply:{job_id} {timeout}` - Wait for result -## Worker Protocol +## Actor Protocol ### Job Processing Loop ```rust loop { // 1. Wait for job - job_id = BLPOP hero:work_queue:{worker_id} + job_id = BLPOP hero:work_queue:{actor_id} // 2. Get job details job_data = HGETALL hero:job:{job_id} @@ -153,8 +153,8 @@ loop { HSET hero:job:{job_id} status "started" // 4. 
Check for stop requests - if LLEN hero:stop_queue:{worker_id} > 0 { - stop_job_id = LPOP hero:stop_queue:{worker_id} + if LLEN hero:stop_queue:{actor_id} > 0 { + stop_job_id = LPOP hero:stop_queue:{actor_id} if stop_job_id == job_id { HSET hero:job:{job_id} status "error" error "stopped" continue @@ -175,15 +175,15 @@ loop { ``` ### Stop Request Handling -Workers should periodically check the stop queue during long-running jobs: +Actors should periodically check the stop queue during long-running jobs: ```rust -if LLEN hero:stop_queue:{worker_id} > 0 { - stop_requests = LRANGE hero:stop_queue:{worker_id} 0 -1 +if LLEN hero:stop_queue:{actor_id} > 0 { + stop_requests = LRANGE hero:stop_queue:{actor_id} 0 -1 if stop_requests.contains(current_job_id) { // Stop current job execution HSET hero:job:{current_job_id} status "error" error "stopped_by_request" // Remove stop request - LREM hero:stop_queue:{worker_id} 1 current_job_id + LREM hero:stop_queue:{actor_id} 1 current_job_id return } } @@ -193,17 +193,17 @@ if LLEN hero:stop_queue:{worker_id} > 0 { ### Job Timeouts - Client sets timeout when creating job -- Worker should respect timeout and stop execution +- Actor should respect timeout and stop execution - If timeout exceeded: `HSET hero:job:{job_id} status "error" error "timeout"` -### Worker Failures -- If worker crashes, job remains in "started" status +### Actor Failures +- If actor crashes, job remains in "started" status - Monitoring systems can detect stale jobs and retry -- Jobs can be requeued: `LPUSH hero:work_queue:{worker_id} {job_id}` +- Jobs can be requeued: `LPUSH hero:work_queue:{actor_id} {job_id}` ### Redis Connection Issues - Clients should implement retry logic with exponential backoff -- Workers should reconnect and resume processing +- Actors should reconnect and resume processing - Use Redis persistence to survive Redis restarts ## Monitoring and Observability @@ -211,10 +211,10 @@ if LLEN hero:stop_queue:{worker_id} > 0 { ### Queue Monitoring ```bash # Check work queue length -LLEN hero:work_queue:{worker_id} +LLEN hero:work_queue:{actor_id} # Check stop queue length -LLEN hero:stop_queue:{worker_id} +LLEN hero:stop_queue:{actor_id} # List all jobs KEYS hero:job:* @@ -228,7 +228,7 @@ HGETALL hero:job:{job_id} - Jobs completed per second - Average job execution time - Queue depths -- Worker availability +- Actor availability - Error rates by job type ## Security Considerations @@ -237,7 +237,7 @@ HGETALL hero:job:{job_id} - Use Redis AUTH for authentication - Enable TLS for Redis connections - Restrict Redis network access -- Use Redis ACLs to limit worker permissions +- Use Redis ACLs to limit actor permissions ### Job Security - Validate script content before execution @@ -265,8 +265,8 @@ HGETALL hero:job:{job_id} - Batch similar jobs when possible - Implement job prioritization if needed -### Worker Optimization -- Pool worker connections to Redis +### Actor Optimization +- Pool actor connections to Redis - Use async I/O for Redis operations - Implement graceful shutdown handling -- Monitor worker resource usage +- Monitor actor resource usage diff --git a/core/supervisor/examples/cli/README.md b/core/supervisor/examples/cli/README.md index 02ee17e..133402e 100644 --- a/core/supervisor/examples/cli/README.md +++ b/core/supervisor/examples/cli/README.md @@ -1,6 +1,6 @@ # Hero Supervisor CLI Example -This example demonstrates how to use the `hive-supervisor` CLI tool for managing workers and jobs in the Hero ecosystem. 
+This example demonstrates how to use the `hive-supervisor` CLI tool for managing actors and jobs in the Hero ecosystem. ## Prerequisites @@ -19,20 +19,20 @@ This example demonstrates how to use the `hive-supervisor` CLI tool for managing # Follow Zinit installation instructions for your platform ``` -3. **Worker Binaries**: The configuration references worker binaries that need to be available: - - `/usr/local/bin/osis_worker` - - `/usr/local/bin/sal_worker` - - `/usr/local/bin/v_worker` - - `/usr/local/bin/python_worker` +3. **Actor Binaries**: The configuration references actor binaries that need to be available: + - `/usr/local/bin/osis_actor` + - `/usr/local/bin/sal_actor` + - `/usr/local/bin/v_actor` + - `/usr/local/bin/python_actor` - For testing purposes, you can create mock worker binaries or update the paths in `config.toml` to point to existing binaries. + For testing purposes, you can create mock actor binaries or update the paths in `config.toml` to point to existing binaries. ## Configuration The `config.toml` file contains the supervisor configuration: - **Global settings**: Redis URL and Zinit socket path -- **Worker configurations**: Binary paths and environment variables for each worker type +- **Actor configurations**: Binary paths and environment variables for each actor type ## Usage Examples @@ -43,29 +43,29 @@ The `config.toml` file contains the supervisor configuration: cargo build --bin hive-supervisor --release ``` -### 2. Worker Management +### 2. Actor Management ```bash # Show help ./target/release/hive-supervisor --config examples/cli/config.toml --help -# List all configured workers -./target/release/hive-supervisor --config examples/cli/config.toml workers list +# List all configured actors +./target/release/hive-supervisor --config examples/cli/config.toml actors list -# Start all workers -./target/release/hive-supervisor --config examples/cli/config.toml workers start +# Start all actors +./target/release/hive-supervisor --config examples/cli/config.toml actors start -# Start specific workers -./target/release/hive-supervisor --config examples/cli/config.toml workers start osis_worker sal_worker +# Start specific actors +./target/release/hive-supervisor --config examples/cli/config.toml actors start osis_actor sal_actor -# Check worker status -./target/release/hive-supervisor --config examples/cli/config.toml workers status +# Check actor status +./target/release/hive-supervisor --config examples/cli/config.toml actors status -# Stop all workers -./target/release/hive-supervisor --config examples/cli/config.toml workers stop +# Stop all actors +./target/release/hive-supervisor --config examples/cli/config.toml actors stop -# Restart specific worker -./target/release/hive-supervisor --config examples/cli/config.toml workers restart osis_worker +# Restart specific actor +./target/release/hive-supervisor --config examples/cli/config.toml actors restart osis_actor ``` ### 3. 
Job Management @@ -73,7 +73,7 @@ cargo build --bin hive-supervisor --release ```bash # Create a job with inline script ./target/release/hive-supervisor --config examples/cli/config.toml jobs create \ - --script 'print("Hello from OSIS worker!");' \ + --script 'print("Hello from OSIS actor!");' \ --script-type osis \ --caller-id "user123" \ --context-id "session456" @@ -118,18 +118,18 @@ cargo build --bin hive-supervisor --release ```bash # Enable debug logging -./target/release/hive-supervisor --config examples/cli/config.toml -v workers status +./target/release/hive-supervisor --config examples/cli/config.toml -v actors status # Enable trace logging -./target/release/hive-supervisor --config examples/cli/config.toml -vv workers status +./target/release/hive-supervisor --config examples/cli/config.toml -vv actors status # Disable timestamps -./target/release/hive-supervisor --config examples/cli/config.toml --no-timestamp workers status +./target/release/hive-supervisor --config examples/cli/config.toml --no-timestamp actors status ``` ## Sample Scripts -The `sample_scripts/` directory contains example scripts for different worker types: +The `sample_scripts/` directory contains example scripts for different actor types: - `hello_osis.rhai` - Simple OSIS/HeroScript example - `system_sal.rhai` - SAL system operation example @@ -148,9 +148,9 @@ The `sample_scripts/` directory contains example scripts for different worker ty - Verify Zinit is running and the socket path is correct - Check permissions on the socket file -3. **Worker Binary Not Found** +3. **Actor Binary Not Found** - Update binary paths in `config.toml` to match your system - - Ensure worker binaries are executable + - Ensure actor binaries are executable 4. **Permission Denied** - Check file permissions on configuration and binary files @@ -161,7 +161,7 @@ The `sample_scripts/` directory contains example scripts for different worker ty Run with verbose logging to see detailed operation information: ```bash -RUST_LOG=debug ./target/release/hive-supervisor --config examples/cli/config.toml -vv workers status +RUST_LOG=debug ./target/release/hive-supervisor --config examples/cli/config.toml -vv actors status ``` ## Configuration Customization @@ -170,15 +170,15 @@ You can customize the configuration for your environment: 1. **Update Redis URL**: Change `redis_url` in the `[global]` section 2. **Update Zinit Socket**: Change `zinit_socket_path` for your Zinit installation -3. **Worker Paths**: Update binary paths in worker sections to match your setup -4. **Environment Variables**: Add or modify environment variables for each worker type +3. **Actor Paths**: Update binary paths in actor sections to match your setup +4. 
**Environment Variables**: Add or modify environment variables for each actor type ## Integration with Hero Ecosystem This CLI integrates with the broader Hero ecosystem: - **Job Queue**: Uses Redis for job queuing and status tracking -- **Process Management**: Uses Zinit for worker lifecycle management +- **Process Management**: Uses Zinit for actor lifecycle management - **Script Execution**: Supports multiple script types (OSIS, SAL, V, Python) - **Monitoring**: Provides real-time status and logging capabilities diff --git a/core/supervisor/examples/cli/config.toml b/core/supervisor/examples/cli/config.toml index b3ba184..493c1b5 100644 --- a/core/supervisor/examples/cli/config.toml +++ b/core/supervisor/examples/cli/config.toml @@ -1,19 +1,19 @@ # Hero Supervisor CLI Configuration Example # This configuration demonstrates how to set up the hive-supervisor CLI -# with different worker types for script execution. +# with different actor types for script execution. [global] # Redis connection URL for job queuing redis_url = "redis://localhost:6379" -# OSIS Worker Configuration +# OSIS Actor Configuration # Handles OSIS (HeroScript) execution -[osis_worker] +[osis_actor] binary_path = "../../../target/debug/osis" -env_vars = { "RUST_LOG" = "info", "WORKER_TYPE" = "osis", "MAX_CONCURRENT_JOBS" = "5" } +env_vars = { "RUST_LOG" = "info", "ACTOR_TYPE" = "osis", "MAX_CONCURRENT_JOBS" = "5" } -# SAL Worker Configuration +# SAL Actor Configuration # Handles System Abstraction Layer scripts -[sal_worker] +[sal_actor] binary_path = "../../../target/debug/sal" -env_vars = { "RUST_LOG" = "info", "WORKER_TYPE" = "sal", "MAX_CONCURRENT_JOBS" = "3" } \ No newline at end of file +env_vars = { "RUST_LOG" = "info", "ACTOR_TYPE" = "sal", "MAX_CONCURRENT_JOBS" = "3" } \ No newline at end of file diff --git a/core/supervisor/examples/cli/run_examples.sh b/core/supervisor/examples/cli/run_examples.sh index b4d7ffe..760c679 100755 --- a/core/supervisor/examples/cli/run_examples.sh +++ b/core/supervisor/examples/cli/run_examples.sh @@ -58,25 +58,25 @@ fi echo -e "${BLUE}=== CLI Help and Information ===${NC}" run_cli "Show main help" --help -echo -e "${BLUE}=== Worker Management Examples ===${NC}" -run_cli "List configured workers" workers list -run_cli "Show worker management help" workers --help +echo -e "${BLUE}=== Actor Management Examples ===${NC}" +run_cli "List configured actors" actors list +run_cli "Show actor management help" actors --help -# Note: These commands would require actual worker binaries and Zinit setup -echo -e "${YELLOW}Note: The following commands require actual worker binaries and Zinit setup${NC}" +# Note: These commands would require actual actor binaries and Zinit setup +echo -e "${YELLOW}Note: The following commands require actual actor binaries and Zinit setup${NC}" echo -e "${YELLOW}They are shown for demonstration but may fail without proper setup${NC}" echo # Uncomment these if you have the proper setup -# run_cli "Check worker status" workers status -# run_cli "Start all workers" workers start -# run_cli "Check worker status after start" workers status +# run_cli "Check actor status" actors status +# run_cli "Start all actors" actors start +# run_cli "Check actor status after start" actors status echo -e "${BLUE}=== Job Management Examples ===${NC}" run_cli "Show job management help" jobs --help -# Create sample jobs (these will also require workers to be running) -echo -e "${YELLOW}Sample job creation commands (require running workers):${NC}" +# Create sample jobs (these will 
also require actors to be running) +echo -e "${YELLOW}Sample job creation commands (require running actors):${NC}" echo echo "# Create OSIS job with inline script:" @@ -123,22 +123,22 @@ echo echo -e "${BLUE}=== Verbose Logging Examples ===${NC}" echo "# Debug logging:" -echo "$CLI_BINARY --config $CONFIG_FILE -v workers list" +echo "$CLI_BINARY --config $CONFIG_FILE -v actors list" echo echo "# Trace logging:" -echo "$CLI_BINARY --config $CONFIG_FILE -vv workers list" +echo "$CLI_BINARY --config $CONFIG_FILE -vv actors list" echo echo "# No timestamps:" -echo "$CLI_BINARY --config $CONFIG_FILE --no-timestamp workers list" +echo "$CLI_BINARY --config $CONFIG_FILE --no-timestamp actors list" echo echo -e "${GREEN}=== Example Runner Complete ===${NC}" echo -e "${YELLOW}To run actual commands, ensure you have:${NC}" echo "1. Redis server running on localhost:6379" echo "2. Zinit process manager installed and configured" -echo "3. Worker binaries available at the paths specified in config.toml" +echo "3. Actor binaries available at the paths specified in config.toml" echo echo -e "${YELLOW}For testing without full setup, you can:${NC}" echo "1. Update config.toml with paths to existing binaries" echo "2. Use the CLI help commands and configuration validation" -echo "3. Test the REPL mode (requires workers to be running)" +echo "3. Test the REPL mode (requires actors to be running)" diff --git a/core/supervisor/examples/cli/sample_scripts/data_python.py b/core/supervisor/examples/cli/sample_scripts/data_python.py index 797e875..5bd637e 100644 --- a/core/supervisor/examples/cli/sample_scripts/data_python.py +++ b/core/supervisor/examples/cli/sample_scripts/data_python.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ Sample Python script for demonstration -This script demonstrates Python worker functionality +This script demonstrates Python actor functionality """ import json @@ -9,7 +9,7 @@ import datetime from typing import List, Dict def main(): - print("=== Python Worker Demo ===") + print("=== Python Actor Demo ===") print("Python data processing operations") # Data structures diff --git a/core/supervisor/examples/cli/sample_scripts/hello_osis.rhai b/core/supervisor/examples/cli/sample_scripts/hello_osis.rhai index 403daf8..9157e26 100644 --- a/core/supervisor/examples/cli/sample_scripts/hello_osis.rhai +++ b/core/supervisor/examples/cli/sample_scripts/hello_osis.rhai @@ -1,8 +1,8 @@ // Sample OSIS/HeroScript for demonstration -// This script demonstrates basic OSIS worker functionality +// This script demonstrates basic OSIS actor functionality -print("=== OSIS Worker Demo ==="); -print("Hello from the OSIS worker!"); +print("=== OSIS Actor Demo ==="); +print("Hello from the OSIS actor!"); // Basic variable operations let name = "Hero"; diff --git a/core/supervisor/examples/cli/sample_scripts/math_v.v b/core/supervisor/examples/cli/sample_scripts/math_v.v index 1af7383..28f597f 100644 --- a/core/supervisor/examples/cli/sample_scripts/math_v.v +++ b/core/supervisor/examples/cli/sample_scripts/math_v.v @@ -1,12 +1,12 @@ // Sample V language script for demonstration -// This script demonstrates V worker functionality +// This script demonstrates V actor functionality module main import math fn main() { - println("=== V Worker Demo ===") + println("=== V Actor Demo ===") println("V language mathematical operations") // Basic arithmetic diff --git a/core/supervisor/examples/cli/sample_scripts/system_sal.rhai b/core/supervisor/examples/cli/sample_scripts/system_sal.rhai index 76cab09..5bfb1b6 100644 
--- a/core/supervisor/examples/cli/sample_scripts/system_sal.rhai +++ b/core/supervisor/examples/cli/sample_scripts/system_sal.rhai @@ -1,7 +1,7 @@ // Sample SAL (System Abstraction Layer) script for demonstration -// This script demonstrates system-level operations through SAL worker +// This script demonstrates system-level operations through SAL actor -print("=== SAL Worker Demo ==="); +print("=== SAL Actor Demo ==="); print("System Abstraction Layer operations"); // System information gathering diff --git a/core/supervisor/examples/lifecycle_demo.rs b/core/supervisor/examples/lifecycle_demo.rs index 79700ea..147e487 100644 --- a/core/supervisor/examples/lifecycle_demo.rs +++ b/core/supervisor/examples/lifecycle_demo.rs @@ -1,6 +1,6 @@ use hero_supervisor::{ - Supervisor, SupervisorBuilder, WorkerConfig, WorkerLifecycleManager, - WorkerLifecycleManagerBuilder, ScriptType + Supervisor, SupervisorBuilder, ActorConfig, ActorLifecycleManager, + ActorLifecycleManagerBuilder, ScriptType }; use log::{info, warn, error}; use std::collections::HashMap; @@ -13,7 +13,7 @@ async fn main() -> Result<(), Box> { // Initialize logging env_logger::init(); - info!("Starting Worker Lifecycle Management Demo"); + info!("Starting Actor Lifecycle Management Demo"); // Configuration let redis_url = "redis://localhost:6379"; @@ -25,154 +25,154 @@ async fn main() -> Result<(), Box> { .context_id("demo_context") .build()?; - // Configure workers for different script types - let mut worker_configs = Vec::new(); + // Configure actors for different script types + let mut actor_configs = Vec::new(); - // OSIS workers (Rhai/HeroScript) + // OSIS actors (Rhai/HeroScript) for i in 0..2 { - let config = WorkerConfig::new( - format!("osis_worker_{}", i), - PathBuf::from("/usr/local/bin/osis_worker"), + let config = ActorConfig::new( + format!("osis_actor_{}", i), + PathBuf::from("/usr/local/bin/osis_actor"), ScriptType::OSIS, ) .with_args(vec![ "--redis-url".to_string(), redis_url.to_string(), - "--worker-id".to_string(), - format!("osis_worker_{}", i), + "--actor-id".to_string(), + format!("osis_actor_{}", i), ]) .with_env({ let mut env = HashMap::new(); env.insert("RUST_LOG".to_string(), "info".to_string()); - env.insert("WORKER_TYPE".to_string(), "osis".to_string()); + env.insert("ACTOR_TYPE".to_string(), "osis".to_string()); env }) - .with_health_check("/usr/local/bin/osis_worker --health-check".to_string()) + .with_health_check("/usr/local/bin/osis_actor --health-check".to_string()) .with_dependencies(vec!["redis".to_string()]); - worker_configs.push(config); + actor_configs.push(config); } - // SAL workers (System Abstraction Layer) + // SAL actors (System Abstraction Layer) for i in 0..3 { - let config = WorkerConfig::new( - format!("sal_worker_{}", i), - PathBuf::from("/usr/local/bin/sal_worker"), + let config = ActorConfig::new( + format!("sal_actor_{}", i), + PathBuf::from("/usr/local/bin/sal_actor"), ScriptType::SAL, ) .with_args(vec![ "--redis-url".to_string(), redis_url.to_string(), - "--worker-id".to_string(), - format!("sal_worker_{}", i), + "--actor-id".to_string(), + format!("sal_actor_{}", i), ]) .with_env({ let mut env = HashMap::new(); env.insert("RUST_LOG".to_string(), "info".to_string()); - env.insert("WORKER_TYPE".to_string(), "sal".to_string()); + env.insert("ACTOR_TYPE".to_string(), "sal".to_string()); env }) - .with_health_check("/usr/local/bin/sal_worker --health-check".to_string()) + .with_health_check("/usr/local/bin/sal_actor --health-check".to_string()) 
.with_dependencies(vec!["redis".to_string()]); - worker_configs.push(config); + actor_configs.push(config); } - // V workers (HeroScript in V language) + // V actors (HeroScript in V language) for i in 0..2 { - let config = WorkerConfig::new( - format!("v_worker_{}", i), - PathBuf::from("/usr/local/bin/v_worker"), + let config = ActorConfig::new( + format!("v_actor_{}", i), + PathBuf::from("/usr/local/bin/v_actor"), ScriptType::V, ) .with_args(vec![ "--redis-url".to_string(), redis_url.to_string(), - "--worker-id".to_string(), - format!("v_worker_{}", i), + "--actor-id".to_string(), + format!("v_actor_{}", i), ]) .with_env({ let mut env = HashMap::new(); env.insert("RUST_LOG".to_string(), "info".to_string()); - env.insert("WORKER_TYPE".to_string(), "v".to_string()); + env.insert("ACTOR_TYPE".to_string(), "v".to_string()); env }) - .with_health_check("/usr/local/bin/v_worker --health-check".to_string()) + .with_health_check("/usr/local/bin/v_actor --health-check".to_string()) .with_dependencies(vec!["redis".to_string()]); - worker_configs.push(config); + actor_configs.push(config); } // Create lifecycle manager - let mut lifecycle_manager = WorkerLifecycleManagerBuilder::new(zinit_socket.to_string()) + let mut lifecycle_manager = ActorLifecycleManagerBuilder::new(zinit_socket.to_string()) .with_supervisor(supervisor.clone()); - // Add all worker configurations - for config in worker_configs { - lifecycle_manager = lifecycle_manager.add_worker(config); + // Add all actor configurations + for config in actor_configs { + lifecycle_manager = lifecycle_manager.add_actor(config); } let mut lifecycle_manager = lifecycle_manager.build(); // Demonstrate lifecycle operations - info!("=== Starting Worker Lifecycle Demo ==="); + info!("=== Starting Actor Lifecycle Demo ==="); - // 1. Start all workers - info!("1. Starting all workers..."); - match lifecycle_manager.start_all_workers().await { - Ok(_) => info!("โœ… All workers started successfully"), + // 1. Start all actors + info!("1. Starting all actors..."); + match lifecycle_manager.start_all_actors().await { + Ok(_) => info!("โœ… All actors started successfully"), Err(e) => { - error!("โŒ Failed to start workers: {}", e); + error!("โŒ Failed to start actors: {}", e); return Err(e.into()); } } - // Wait for workers to initialize + // Wait for actors to initialize sleep(Duration::from_secs(5)).await; - // 2. Check worker status - info!("2. Checking worker status..."); - match lifecycle_manager.get_all_worker_status().await { + // 2. Check actor status + info!("2. Checking actor status..."); + match lifecycle_manager.get_all_actor_status().await { Ok(status_map) => { - for (worker_name, status) in status_map { - info!(" Worker '{}': State={:?}, PID={}", worker_name, status.state, status.pid); + for (actor_name, status) in status_map { + info!(" Actor '{}': State={:?}, PID={}", actor_name, status.state, status.pid); } } - Err(e) => warn!("Failed to get worker status: {}", e), + Err(e) => warn!("Failed to get actor status: {}", e), } // 3. Demonstrate scaling - info!("3. Demonstrating worker scaling..."); + info!("3. 
Demonstrating actor scaling..."); - // Scale up OSIS workers - info!(" Scaling up OSIS workers to 3..."); - if let Err(e) = lifecycle_manager.scale_workers(&ScriptType::OSIS, 3).await { - warn!("Failed to scale OSIS workers: {}", e); + // Scale up OSIS actors + info!(" Scaling up OSIS actors to 3..."); + if let Err(e) = lifecycle_manager.scale_actors(&ScriptType::OSIS, 3).await { + warn!("Failed to scale OSIS actors: {}", e); } sleep(Duration::from_secs(3)).await; - // Scale down SAL workers - info!(" Scaling down SAL workers to 1..."); - if let Err(e) = lifecycle_manager.scale_workers(&ScriptType::SAL, 1).await { - warn!("Failed to scale SAL workers: {}", e); + // Scale down SAL actors + info!(" Scaling down SAL actors to 1..."); + if let Err(e) = lifecycle_manager.scale_actors(&ScriptType::SAL, 1).await { + warn!("Failed to scale SAL actors: {}", e); } sleep(Duration::from_secs(3)).await; - // 4. Check running worker counts - info!("4. Checking running worker counts after scaling..."); + // 4. Check running actor counts + info!("4. Checking running actor counts after scaling..."); for script_type in [ScriptType::OSIS, ScriptType::SAL, ScriptType::V] { - let count = lifecycle_manager.get_running_worker_count(&script_type).await; - info!(" {:?}: {} workers running", script_type, count); + let count = lifecycle_manager.get_running_actor_count(&script_type).await; + info!(" {:?}: {} actors running", script_type, count); } // 5. Demonstrate restart functionality - info!("5. Demonstrating worker restart..."); - if let Err(e) = lifecycle_manager.restart_worker("osis_worker_0").await { - warn!("Failed to restart worker: {}", e); + info!("5. Demonstrating actor restart..."); + if let Err(e) = lifecycle_manager.restart_actor("osis_actor_0").await { + warn!("Failed to restart actor: {}", e); } else { - info!(" โœ… Successfully restarted osis_worker_0"); + info!(" โœ… Successfully restarted osis_actor_0"); } sleep(Duration::from_secs(3)).await; @@ -180,12 +180,12 @@ async fn main() -> Result<(), Box> { // 6. Simulate job dispatch and health monitoring info!("6. Simulating job dispatch and health monitoring..."); - // Update job time for a worker (simulating job dispatch) - lifecycle_manager.update_worker_job_time("sal_worker_0"); - info!(" Updated job time for sal_worker_0"); + // Update job time for a actor (simulating job dispatch) + lifecycle_manager.update_actor_job_time("sal_actor_0"); + info!(" Updated job time for sal_actor_0"); // Perform health monitoring check - if let Err(e) = lifecycle_manager.monitor_worker_health().await { + if let Err(e) = lifecycle_manager.monitor_actor_health().await { warn!("Health monitoring failed: {}", e); } else { info!(" โœ… Health monitoring completed"); @@ -196,7 +196,7 @@ async fn main() -> Result<(), Box> { let test_job = supervisor .new_job() .script_type(ScriptType::OSIS) - .script_content("println!(\"Hello from worker!\");".to_string()) + .script_content("println!(\"Hello from actor!\");".to_string()) .timeout(Duration::from_secs(30)) .build()?; @@ -208,27 +208,27 @@ async fn main() -> Result<(), Box> { // 8. Demonstrate graceful shutdown info!("8. 
Demonstrating graceful shutdown..."); - // Stop specific workers - info!(" Stopping specific workers..."); - for worker_name in ["osis_worker_1", "v_worker_0"] { - if let Err(e) = lifecycle_manager.stop_worker(worker_name).await { - warn!("Failed to stop worker {}: {}", worker_name, e); + // Stop specific actors + info!(" Stopping specific actors..."); + for actor_name in ["osis_actor_1", "v_actor_0"] { + if let Err(e) = lifecycle_manager.stop_actor(actor_name).await { + warn!("Failed to stop actor {}: {}", actor_name, e); } else { - info!(" โœ… Stopped worker: {}", worker_name); + info!(" โœ… Stopped actor: {}", actor_name); } } sleep(Duration::from_secs(2)).await; - // Stop all remaining workers - info!(" Stopping all remaining workers..."); - if let Err(e) = lifecycle_manager.stop_all_workers().await { - error!("Failed to stop all workers: {}", e); + // Stop all remaining actors + info!(" Stopping all remaining actors..."); + if let Err(e) = lifecycle_manager.stop_all_actors().await { + error!("Failed to stop all actors: {}", e); } else { - info!(" โœ… All workers stopped successfully"); + info!(" โœ… All actors stopped successfully"); } - info!("=== Worker Lifecycle Demo Completed ==="); + info!("=== Actor Lifecycle Demo Completed ==="); // Optional: Start health monitoring loop (commented out for demo) // info!("Starting health monitoring loop (Ctrl+C to stop)..."); diff --git a/core/supervisor/examples/simple_lifecycle_demo.rs b/core/supervisor/examples/simple_lifecycle_demo.rs index 29383ac..df7320b 100644 --- a/core/supervisor/examples/simple_lifecycle_demo.rs +++ b/core/supervisor/examples/simple_lifecycle_demo.rs @@ -8,44 +8,44 @@ async fn main() -> Result<(), Box> { info!("Starting Hero Supervisor Lifecycle Demo"); - // Build supervisor with simplified worker configuration - // Workers are automatically launched during build + // Build supervisor with simplified actor configuration + // Actors are automatically launched during build let supervisor = SupervisorBuilder::new() .redis_url("redis://localhost:6379") - .osis_worker("/usr/local/bin/osis_worker") - .sal_worker("/usr/local/bin/sal_worker") - .v_worker("/usr/local/bin/v_worker") - .worker_env_var("REDIS_URL", "redis://localhost:6379") - .worker_env_var("LOG_LEVEL", "info") + .osis_actor("/usr/local/bin/osis_actor") + .sal_actor("/usr/local/bin/sal_actor") + .v_actor("/usr/local/bin/v_actor") + .actor_env_var("REDIS_URL", "redis://localhost:6379") + .actor_env_var("LOG_LEVEL", "info") .build().await?; - info!("Supervisor created and workers launched successfully"); + info!("Supervisor created and actors launched successfully"); - // Wait a moment for workers to start + // Wait a moment for actors to start sleep(Duration::from_secs(2)).await; - // Check worker status using the simplified API - info!("Checking worker status..."); - let workers = supervisor.get_workers(&[]).await; + // Check actor status using the simplified API + info!("Checking actor status..."); + let actors = supervisor.get_actors(&[]).await; - for worker in &workers { - let status_info = if worker.is_running { - format!("Running (PID: {})", worker.status.as_ref().map(|s| s.pid).unwrap_or(0)) + for actor in &actors { + let status_info = if actor.is_running { + format!("Running (PID: {})", actor.status.as_ref().map(|s| s.pid).unwrap_or(0)) } else { "Stopped".to_string() }; - info!(" Worker '{}' ({:?}): {}", worker.config.name, worker.config.script_type, status_info); + info!(" Actor '{}' ({:?}): {}", actor.config.name, actor.config.script_type, 
status_info); } // Demonstrate lifecycle operations with simplified API - info!("=== Worker Lifecycle Operations ==="); + info!("=== Actor Lifecycle Operations ==="); // 1. Demonstrate restart functionality - info!("1. Demonstrating worker restart..."); - if let Err(e) = supervisor.restart_worker("osis_worker_1").await { - error!("Failed to restart worker: {}", e); + info!("1. Demonstrating actor restart..."); + if let Err(e) = supervisor.restart_actor("osis_actor_1").await { + error!("Failed to restart actor: {}", e); } else { - info!(" โœ… Successfully restarted osis_worker_1"); + info!(" โœ… Successfully restarted osis_actor_1"); } sleep(Duration::from_secs(2)).await; @@ -61,11 +61,11 @@ async fn main() -> Result<(), Box> { // 3. Demonstrate graceful shutdown info!("3. Demonstrating graceful shutdown..."); - // Stop specific workers - if let Err(e) = supervisor.stop_worker("osis_worker_1").await { - error!("Failed to stop worker: {}", e); + // Stop specific actors + if let Err(e) = supervisor.stop_actor("osis_actor_1").await { + error!("Failed to stop actor: {}", e); } else { - info!(" โœ… Worker stopped successfully"); + info!(" โœ… Actor stopped successfully"); } info!("Demo completed successfully!"); diff --git a/core/supervisor/examples/supervisor_config.toml b/core/supervisor/examples/supervisor_config.toml index aa1ec0b..b3328bf 100644 --- a/core/supervisor/examples/supervisor_config.toml +++ b/core/supervisor/examples/supervisor_config.toml @@ -1,18 +1,18 @@ [global] redis_url = "redis://localhost:6379" -[osis_worker] -binary_path = "/path/to/osis_worker" +[osis_actor] +binary_path = "/path/to/osis_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[sal_worker] -binary_path = "/path/to/sal_worker" +[sal_actor] +binary_path = "/path/to/sal_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[v_worker] -binary_path = "/path/to/v_worker" +[v_actor] +binary_path = "/path/to/v_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } -[python_worker] -binary_path = "/path/to/python_worker" +[python_actor] +binary_path = "/path/to/python_actor" env_vars = { "VAR1" = "value1", "VAR2" = "value2" } diff --git a/core/supervisor/examples/timeout_example.rs b/core/supervisor/examples/timeout_example.rs index 8c2ef34..468729d 100644 --- a/core/supervisor/examples/timeout_example.rs +++ b/core/supervisor/examples/timeout_example.rs @@ -16,14 +16,14 @@ async fn main() -> Result<(), Box> { info!("Supervisor created."); let script_content = r#" - // This script will never be executed by a worker because the recipient does not exist. + // This script will never be executed by a actor because the recipient does not exist. let x = 10; let y = x + 32; y "#; - // The worker_id points to a worker queue that doesn't have a worker. - let non_existent_recipient = "non_existent_worker_for_timeout_test"; + // The actor_id points to a actor queue that doesn't have a actor. 
+ let non_existent_recipient = "non_existent_actor_for_timeout_test"; let very_short_timeout = Duration::from_secs(2); info!( diff --git a/core/supervisor/src/error.rs b/core/supervisor/src/error.rs index 3116876..e077467 100644 --- a/core/supervisor/src/error.rs +++ b/core/supervisor/src/error.rs @@ -21,12 +21,12 @@ pub enum SupervisorError { InvalidInput(String), /// Job operation error JobError(hero_job::JobError), - /// Worker lifecycle management errors - WorkerStartFailed(String, String), - WorkerStopFailed(String, String), - WorkerRestartFailed(String, String), - WorkerStatusFailed(String, String), - WorkerNotFound(String), + /// Actor lifecycle management errors + ActorStartFailed(String, String), + ActorStopFailed(String, String), + ActorRestartFailed(String, String), + ActorStatusFailed(String, String), + ActorNotFound(String), PingJobFailed(String, String), /// Zinit client operation error ZinitError(String), @@ -73,23 +73,23 @@ impl std::fmt::Display for SupervisorError { SupervisorError::JobError(e) => { write!(f, "Job error: {}", e) } - SupervisorError::WorkerStartFailed(worker, reason) => { - write!(f, "Failed to start worker '{}': {}", worker, reason) + SupervisorError::ActorStartFailed(actor, reason) => { + write!(f, "Failed to start actor '{}': {}", actor, reason) } - SupervisorError::WorkerStopFailed(worker, reason) => { - write!(f, "Failed to stop worker '{}': {}", worker, reason) + SupervisorError::ActorStopFailed(actor, reason) => { + write!(f, "Failed to stop actor '{}': {}", actor, reason) } - SupervisorError::WorkerRestartFailed(worker, reason) => { - write!(f, "Failed to restart worker '{}': {}", worker, reason) + SupervisorError::ActorRestartFailed(actor, reason) => { + write!(f, "Failed to restart actor '{}': {}", actor, reason) } - SupervisorError::WorkerStatusFailed(worker, reason) => { - write!(f, "Failed to get status for worker '{}': {}", worker, reason) + SupervisorError::ActorStatusFailed(actor, reason) => { + write!(f, "Failed to get status for actor '{}': {}", actor, reason) } - SupervisorError::WorkerNotFound(worker) => { - write!(f, "Worker '{}' not found", worker) + SupervisorError::ActorNotFound(actor) => { + write!(f, "Actor '{}' not found", actor) } - SupervisorError::PingJobFailed(worker, reason) => { - write!(f, "Ping job failed for worker '{}': {}", worker, reason) + SupervisorError::PingJobFailed(actor, reason) => { + write!(f, "Ping job failed for actor '{}': {}", actor, reason) } SupervisorError::ZinitError(msg) => { write!(f, "Zinit error: {}", msg) diff --git a/core/supervisor/src/lib.rs b/core/supervisor/src/lib.rs index 6ecc96b..e0f0d2f 100644 --- a/core/supervisor/src/lib.rs +++ b/core/supervisor/src/lib.rs @@ -16,7 +16,7 @@ mod lifecycle; pub use crate::error::SupervisorError; pub use crate::job::JobBuilder; -pub use crate::lifecycle::WorkerConfig; +pub use crate::lifecycle::ActorConfig; // Re-export types from hero_job for public API pub use hero_job::{Job, JobStatus, ScriptType}; @@ -28,22 +28,22 @@ pub struct Supervisor { pub struct SupervisorBuilder { redis_url: Option, - osis_worker: Option, - sal_worker: Option, - v_worker: Option, - python_worker: Option, - worker_env_vars: HashMap, + osis_actor: Option, + sal_actor: Option, + v_actor: Option, + python_actor: Option, + actor_env_vars: HashMap, websocket_config: Option, } -/// Helper struct to pass builder data to worker launch method +/// Helper struct to pass builder data to actor launch method #[derive(Clone)] struct SupervisorBuilderData { - osis_worker: Option, - sal_worker: 
Option, - v_worker: Option, - python_worker: Option, - worker_env_vars: HashMap, + osis_actor: Option, + sal_actor: Option, + v_actor: Option, + python_actor: Option, + actor_env_vars: HashMap, websocket_config: Option, } @@ -52,10 +52,10 @@ struct SupervisorBuilderData { pub struct SupervisorConfig { pub global: GlobalConfig, pub websocket_server: Option, - pub osis_worker: Option, - pub sal_worker: Option, - pub v_worker: Option, - pub python_worker: Option, + pub osis_actor: Option, + pub sal_actor: Option, + pub v_actor: Option, + pub python_actor: Option, } /// Global configuration section @@ -64,12 +64,10 @@ pub struct GlobalConfig { pub redis_url: String, } -/// Worker configuration section in TOML +/// Actor configuration section in TOML #[derive(Debug, Deserialize, Serialize)] -pub struct WorkerConfigToml { +pub struct ActorConfigToml { pub binary_path: String, - #[serde(default)] - pub env_vars: HashMap, } /// WebSocket server configuration section in TOML @@ -127,11 +125,11 @@ impl SupervisorBuilder { pub fn new() -> Self { Self { redis_url: None, - osis_worker: None, - sal_worker: None, - v_worker: None, - python_worker: None, - worker_env_vars: HashMap::new(), + osis_actor: None, + sal_actor: None, + v_actor: None, + python_actor: None, + actor_env_vars: HashMap::new(), websocket_config: None, } } @@ -147,25 +145,21 @@ impl SupervisorBuilder { let mut builder = Self::new() .redis_url(&config.global.redis_url); - // Configure workers based on TOML config - if let Some(osis_config) = config.osis_worker { - builder = builder.osis_worker(&osis_config.binary_path) - .worker_env_vars(osis_config.env_vars); + // Configure actors based on TOML config + if let Some(osis_config) = config.osis_actor { + builder = builder.osis_actor(&osis_config.binary_path); } - if let Some(sal_config) = config.sal_worker { - builder = builder.sal_worker(&sal_config.binary_path) - .worker_env_vars(sal_config.env_vars); + if let Some(sal_config) = config.sal_actor { + builder = builder.sal_actor(&sal_config.binary_path); } - if let Some(v_config) = config.v_worker { - builder = builder.v_worker(&v_config.binary_path) - .worker_env_vars(v_config.env_vars); + if let Some(v_config) = config.v_actor { + builder = builder.v_actor(&v_config.binary_path); } - if let Some(python_config) = config.python_worker { - builder = builder.python_worker(&python_config.binary_path) - .worker_env_vars(python_config.env_vars); + if let Some(python_config) = config.python_actor { + builder = builder.python_actor(&python_config.binary_path); } // Store WebSocket configuration for later use @@ -176,28 +170,28 @@ impl SupervisorBuilder { Ok(builder) } - /// Validate that all configured worker binaries exist and are executable - fn validate_worker_binaries(&self) -> Result<(), SupervisorError> { - let workers = [ - ("OSIS", &self.osis_worker), - ("SAL", &self.sal_worker), - ("V", &self.v_worker), - ("Python", &self.python_worker), + /// Validate that all configured actor binaries exist and are executable + fn validate_actor_binaries(&self) -> Result<(), SupervisorError> { + let actors = [ + ("OSIS", &self.osis_actor), + ("SAL", &self.sal_actor), + ("V", &self.v_actor), + ("Python", &self.python_actor), ]; - for (worker_type, binary_path) in workers { + for (actor_type, binary_path) in actors { if let Some(path) = binary_path { let path_obj = Path::new(path); if !path_obj.exists() { return Err(SupervisorError::ConfigError( - format!("{} worker binary does not exist: {}", worker_type, path) + format!("{} actor binary does not 
exist: {}", actor_type, path) )); } if !path_obj.is_file() { return Err(SupervisorError::ConfigError( - format!("{} worker path is not a file: {}", worker_type, path) + format!("{} actor path is not a file: {}", actor_type, path) )); } @@ -207,19 +201,19 @@ impl SupervisorBuilder { use std::os::unix::fs::PermissionsExt; let metadata = path_obj.metadata().map_err(|e| { SupervisorError::ConfigError( - format!("Failed to read metadata for {} worker binary {}: {}", worker_type, path, e) + format!("Failed to read metadata for {} actor binary {}: {}", actor_type, path, e) ) })?; let permissions = metadata.permissions(); if permissions.mode() & 0o111 == 0 { return Err(SupervisorError::ConfigError( - format!("{} worker binary is not executable: {}", worker_type, path) + format!("{} actor binary is not executable: {}", actor_type, path) )); } } - info!("Validated {} worker binary: {}", worker_type, path); + info!("Validated {} actor binary: {}", actor_type, path); } } @@ -231,48 +225,48 @@ impl SupervisorBuilder { self } - pub fn osis_worker(mut self, binary_path: &str) -> Self { - self.osis_worker = Some(binary_path.to_string()); + pub fn osis_actor(mut self, binary_path: &str) -> Self { + self.osis_actor = Some(binary_path.to_string()); self } - pub fn sal_worker(mut self, binary_path: &str) -> Self { - self.sal_worker = Some(binary_path.to_string()); + pub fn sal_actor(mut self, binary_path: &str) -> Self { + self.sal_actor = Some(binary_path.to_string()); self } - pub fn v_worker(mut self, binary_path: &str) -> Self { - self.v_worker = Some(binary_path.to_string()); + pub fn v_actor(mut self, binary_path: &str) -> Self { + self.v_actor = Some(binary_path.to_string()); self } - pub fn python_worker(mut self, binary_path: &str) -> Self { - self.python_worker = Some(binary_path.to_string()); + pub fn python_actor(mut self, binary_path: &str) -> Self { + self.python_actor = Some(binary_path.to_string()); self } - pub fn worker_env_var(mut self, key: &str, value: &str) -> Self { - self.worker_env_vars.insert(key.to_string(), value.to_string()); + pub fn actor_env_var(mut self, key: &str, value: &str) -> Self { + self.actor_env_vars.insert(key.to_string(), value.to_string()); self } - pub fn worker_env_vars(mut self, env_vars: HashMap) -> Self { - self.worker_env_vars.extend(env_vars); + pub fn actor_env_vars(mut self, env_vars: HashMap) -> Self { + self.actor_env_vars.extend(env_vars); self } /// Builds the final `Supervisor` instance synchronously. /// - /// This method validates the configuration, checks worker binary existence, - /// and creates the Redis client. Worker launching is deferred to the `start_workers()` method. + /// This method validates the configuration, checks actor binary existence, + /// and creates the Redis client. Actor launching is deferred to the `start_actors()` method. 
/// /// # Returns /// /// * `Ok(Supervisor)` - Successfully configured client with valid binaries /// * `Err(SupervisorError)` - Configuration, binary validation, or connection error pub async fn build(self) -> Result { - // Validate that all configured worker binaries exist first - Self::validate_worker_binaries(&self)?; + // Validate that all configured actor binaries exist first + Self::validate_actor_binaries(&self)?; let url = self.redis_url .unwrap_or_else(|| "redis://127.0.0.1/".to_string()); @@ -281,13 +275,13 @@ impl SupervisorBuilder { let zinit_client = ZinitClient::unix_socket("/tmp/zinit.sock").await .map_err(|e| SupervisorError::ZinitError(format!("Failed to create Zinit client: {}", e)))?; - // Store builder data for later use in start_workers() + // Store builder data for later use in start_actors() let builder_data = SupervisorBuilderData { - osis_worker: self.osis_worker, - sal_worker: self.sal_worker, - v_worker: self.v_worker, - python_worker: self.python_worker, - worker_env_vars: self.worker_env_vars, + osis_actor: self.osis_actor, + sal_actor: self.sal_actor, + v_actor: self.v_actor, + python_actor: self.python_actor, + actor_env_vars: self.actor_env_vars, websocket_config: self.websocket_config, }; @@ -302,10 +296,10 @@ impl SupervisorBuilder { } impl Supervisor { - /// Start all configured workers asynchronously. - /// This method should be called after build() to launch the workers. - pub async fn start_workers(&self) -> Result<(), SupervisorError> { - info!("Starting Hero Supervisor workers..."); + /// Start all configured actors asynchronously. + /// This method should be called after build() to launch the actors. + pub async fn start_actors(&self) -> Result<(), SupervisorError> { + info!("Starting Hero Supervisor actors..."); // Test Zinit connection first info!("Testing Zinit connection at /tmp/zinit.sock..."); @@ -319,102 +313,102 @@ impl Supervisor { } } - // Clean up any existing worker services first - info!("Cleaning up existing worker services..."); - self.cleanup_existing_workers().await?; + // Clean up any existing actor services first + info!("Cleaning up existing actor services..."); + self.cleanup_existing_actors().await?; - // Launch configured workers if builder data is available + // Launch configured actors if builder data is available if let Some(builder_data) = &self.builder_data { - info!("Launching configured workers..."); - self.launch_configured_workers(builder_data).await?; + info!("Launching configured actors..."); + self.launch_configured_actors(builder_data).await?; } else { - warn!("No builder data available, no workers to start"); + warn!("No builder data available, no actors to start"); } - info!("All workers started successfully!"); + info!("All actors started successfully!"); Ok(()) } - /// Clean up all worker services from zinit on program exit + /// Clean up all actor services from zinit on program exit pub async fn cleanup_and_shutdown(&self) -> Result<(), SupervisorError> { - info!("Cleaning up worker services before shutdown..."); + info!("Cleaning up actor services before shutdown..."); - let worker_names = vec![ - "osis_worker_1", - "sal_worker_1", - "v_worker_1", - "python_worker_1" + let actor_names = vec![ + "osis_actor_1", + "sal_actor_1", + "v_actor_1", + "python_actor_1" ]; - for worker_name in worker_names { - if let Err(e) = self.stop_and_delete_worker(worker_name).await { - warn!("Failed to cleanup worker {}: {}", worker_name, e); + for actor_name in actor_names { + if let Err(e) = 
self.stop_and_delete_actor(actor_name).await { + warn!("Failed to cleanup actor {}: {}", actor_name, e); } } - info!("Worker cleanup completed"); + info!("Actor cleanup completed"); Ok(()) } - /// Clean up any existing worker services on startup - async fn cleanup_existing_workers(&self) -> Result<(), SupervisorError> { - info!("Cleaning up any existing worker services..."); + /// Clean up any existing actor services on startup + async fn cleanup_existing_actors(&self) -> Result<(), SupervisorError> { + info!("Cleaning up any existing actor services..."); - let worker_names = vec![ - "osis_worker_1", - "sal_worker_1", - "v_worker_1", - "python_worker_1" + let actor_names = vec![ + "osis_actor_1", + "sal_actor_1", + "v_actor_1", + "python_actor_1" ]; - for worker_name in worker_names { + for actor_name in actor_names { // Try to stop and delete, but don't fail if they don't exist - info!("Attempting to cleanup worker: {}", worker_name); - match self.stop_and_delete_worker(worker_name).await { - Ok(_) => info!("Successfully cleaned up worker: {}", worker_name), - Err(e) => debug!("Failed to cleanup worker {}: {}", worker_name, e), + info!("Attempting to cleanup actor: {}", actor_name); + match self.stop_and_delete_actor(actor_name).await { + Ok(_) => info!("Successfully cleaned up actor: {}", actor_name), + Err(e) => debug!("Failed to cleanup actor {}: {}", actor_name, e), } } - info!("Existing worker cleanup completed"); + info!("Existing actor cleanup completed"); Ok(()) } - /// Stop and delete a worker service from zinit - async fn stop_and_delete_worker(&self, worker_name: &str) -> Result<(), SupervisorError> { - info!("Starting cleanup for worker: {}", worker_name); + /// Stop and delete a actor service from zinit + async fn stop_and_delete_actor(&self, actor_name: &str) -> Result<(), SupervisorError> { + info!("Starting cleanup for actor: {}", actor_name); - // First try to stop the worker - info!("Attempting to stop worker: {}", worker_name); - if let Err(e) = self.zinit_client.stop(worker_name).await { - debug!("Worker {} was not running or failed to stop: {}", worker_name, e); + // First try to stop the actor + info!("Attempting to stop actor: {}", actor_name); + if let Err(e) = self.zinit_client.stop(actor_name).await { + debug!("Actor {} was not running or failed to stop: {}", actor_name, e); } else { - info!("Successfully stopped worker: {}", worker_name); + info!("Successfully stopped actor: {}", actor_name); } // Then forget the service to stop monitoring it - info!("Attempting to forget worker: {}", worker_name); - if let Err(e) = self.zinit_client.forget(worker_name).await { - info!("Worker {} was not being monitored or failed to forget: {}", worker_name, e); + info!("Attempting to forget actor: {}", actor_name); + if let Err(e) = self.zinit_client.forget(actor_name).await { + info!("Actor {} was not being monitored or failed to forget: {}", actor_name, e); } else { - info!("Successfully forgot worker service: {}", worker_name); + info!("Successfully forgot actor service: {}", actor_name); } // Finally, delete the service configuration - info!("Attempting to delete service for worker: {}", worker_name); - if let Err(e) = self.zinit_client.delete_service(worker_name).await { - debug!("Worker {} service did not exist or failed to delete: {}", worker_name, e); + info!("Attempting to delete service for actor: {}", actor_name); + if let Err(e) = self.zinit_client.delete_service(actor_name).await { + debug!("Actor {} service did not exist or failed to delete: {}", actor_name, e); } 
else { - info!("Successfully deleted worker service: {}", worker_name); + info!("Successfully deleted actor service: {}", actor_name); } - info!("Completed cleanup for worker: {}", worker_name); + info!("Completed cleanup for actor: {}", actor_name); Ok(()) } - /// Get the hardcoded worker queue key for the script type - fn get_worker_queue_key(&self, script_type: &ScriptType) -> String { - format!("{}worker_queue:{}", NAMESPACE_PREFIX, script_type.worker_queue_suffix()) + /// Get the hardcoded actor queue key for the script type + fn get_actor_queue_key(&self, script_type: &ScriptType) -> String { + format!("{}actor_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix()) } pub fn new_job(&self) -> JobBuilder { @@ -432,63 +426,58 @@ impl Supervisor { }) } - /// Extract worker configurations from the supervisor's builder data - pub fn get_worker_configs(&self) -> Result, SupervisorError> { + /// Extract actor configurations from the supervisor's builder data + pub fn get_actor_configs(&self) -> Result, SupervisorError> { let builder_data = self.builder_data.as_ref().ok_or_else(|| { - SupervisorError::ConfigError("No builder data available for worker configs".to_string()) + SupervisorError::ConfigError("No builder data available for actor configs".to_string()) })?; let mut configs = Vec::new(); - let env_vars = builder_data.worker_env_vars.clone(); - if let Some(osis_path) = &builder_data.osis_worker { + if let Some(osis_path) = &builder_data.osis_actor { configs.push( - WorkerConfig::new("osis_worker_1".to_string(), PathBuf::from(osis_path), ScriptType::OSIS) - .with_env(env_vars.clone()) + ActorConfig::new("osis_actor_1".to_string(), PathBuf::from(osis_path), ScriptType::OSIS) ); } - if let Some(sal_path) = &builder_data.sal_worker { + if let Some(sal_path) = &builder_data.sal_actor { configs.push( - WorkerConfig::new("sal_worker_1".to_string(), PathBuf::from(sal_path), ScriptType::SAL) - .with_env(env_vars.clone()) + ActorConfig::new("sal_actor_1".to_string(), PathBuf::from(sal_path), ScriptType::SAL) ); } - if let Some(v_path) = &builder_data.v_worker { + if let Some(v_path) = &builder_data.v_actor { configs.push( - WorkerConfig::new("v_worker_1".to_string(), PathBuf::from(v_path), ScriptType::V) - .with_env(env_vars.clone()) + ActorConfig::new("v_actor_1".to_string(), PathBuf::from(v_path), ScriptType::V) ); } - if let Some(python_path) = &builder_data.python_worker { + if let Some(python_path) = &builder_data.python_actor { configs.push( - WorkerConfig::new("python_worker_1".to_string(), PathBuf::from(python_path), ScriptType::Python) - .with_env(env_vars.clone()) + ActorConfig::new("python_actor_1".to_string(), PathBuf::from(python_path), ScriptType::Python) ); } Ok(configs) } - /// Spawn a background lifecycle manager that continuously monitors and maintains worker health + /// Spawn a background lifecycle manager that continuously monitors and maintains actor health /// Returns a JoinHandle that can be used to stop the lifecycle manager pub fn spawn_lifecycle_manager( self: Arc, - worker_configs: Vec, + actor_configs: Vec, health_check_interval: Duration, ) -> tokio::task::JoinHandle> { let supervisor = self; tokio::spawn(async move { - info!("Starting background lifecycle manager with {} workers", worker_configs.len()); + info!("Starting background lifecycle manager with {} actors", actor_configs.len()); info!("Health check interval: {:?}", health_check_interval); - // Initial worker startup - info!("Performing initial worker startup..."); - if let Err(e) = 
supervisor.start_workers().await { - error!("Failed to start workers during initialization: {}", e); + // Initial actor startup + info!("Performing initial actor startup..."); + if let Err(e) = supervisor.start_actors().await { + error!("Failed to start actors during initialization: {}", e); return Err(e); } @@ -499,12 +488,12 @@ impl Supervisor { loop { interval.tick().await; - info!("Running periodic worker health check..."); + info!("Running periodic actor health check..."); - // Check each worker's health and restart if needed - for worker_config in &worker_configs { - if let Err(e) = supervisor.check_and_restart_worker(worker_config).await { - error!("Failed to check/restart worker {}: {}", worker_config.name, e); + // Check each actor's health and restart if needed + for actor_config in &actor_configs { + if let Err(e) = supervisor.check_and_restart_actor(actor_config).await { + error!("Failed to check/restart actor {}: {}", actor_config.name, e); } } @@ -513,59 +502,59 @@ impl Supervisor { }) } - /// Check a single worker's health and restart if needed - async fn check_and_restart_worker(&self, worker_config: &WorkerConfig) -> Result<(), SupervisorError> { - let worker_name = &worker_config.name; + /// Check a single actor's health and restart if needed + async fn check_and_restart_actor(&self, actor_config: &ActorConfig) -> Result<(), SupervisorError> { + let actor_name = &actor_config.name; - // Get worker status - match self.zinit_client.status(worker_name).await { + // Get actor status + match self.zinit_client.status(actor_name).await { Ok(status) => { let is_healthy = status.state == "running" && status.pid > 0; if is_healthy { - debug!("Worker {} is healthy (state: {}, pid: {})", worker_name, status.state, status.pid); + debug!("Actor {} is healthy (state: {}, pid: {})", actor_name, status.state, status.pid); // Optionally send a ping job for deeper health check - if let Err(e) = self.send_ping_job(worker_config.script_type.clone()).await { - warn!("Ping job failed for worker {}: {}", worker_name, e); + if let Err(e) = self.send_ping_job(actor_config.script_type.clone()).await { + warn!("Ping job failed for actor {}: {}", actor_name, e); // Note: We don't restart on ping failure as it might be temporary } } else { - warn!("Worker {} is unhealthy (state: {}, pid: {}), restarting...", - worker_name, status.state, status.pid); + warn!("Actor {} is unhealthy (state: {}, pid: {}), restarting...", + actor_name, status.state, status.pid); - // Attempt to restart the worker - if let Err(e) = self.restart_worker(worker_name).await { - error!("Failed to restart unhealthy worker {}: {}", worker_name, e); + // Attempt to restart the actor + if let Err(e) = self.restart_actor(actor_name).await { + error!("Failed to restart unhealthy actor {}: {}", actor_name, e); // If restart fails, try a full stop/start cycle - warn!("Attempting full stop/start cycle for worker: {}", worker_name); - if let Err(e) = self.stop_and_delete_worker(worker_name).await { - error!("Failed to stop worker {} during recovery: {}", worker_name, e); + warn!("Attempting full stop/start cycle for actor: {}", actor_name); + if let Err(e) = self.stop_and_delete_actor(actor_name).await { + error!("Failed to stop actor {} during recovery: {}", actor_name, e); } - if let Err(e) = self.start_worker(worker_config).await { - error!("Failed to start worker {} during recovery: {}", worker_name, e); + if let Err(e) = self.start_actor(actor_config).await { + error!("Failed to start actor {} during recovery: {}", actor_name, e); 
return Err(e); } - info!("Successfully recovered worker: {}", worker_name); + info!("Successfully recovered actor: {}", actor_name); } else { - info!("Successfully restarted worker: {}", worker_name); + info!("Successfully restarted actor: {}", actor_name); } } } Err(e) => { - warn!("Could not get status for worker {} (may not exist): {}", worker_name, e); + warn!("Could not get status for actor {} (may not exist): {}", actor_name, e); - // Worker doesn't exist, try to start it - info!("Attempting to start missing worker: {}", worker_name); - if let Err(e) = self.start_worker(worker_config).await { - error!("Failed to start missing worker {}: {}", worker_name, e); + // Actor doesn't exist, try to start it + info!("Attempting to start missing actor: {}", actor_name); + if let Err(e) = self.start_actor(actor_config).await { + error!("Failed to start missing actor {}: {}", actor_name, e); return Err(e); } - info!("Successfully started missing worker: {}", worker_name); + info!("Successfully started missing actor: {}", actor_name); } } @@ -597,18 +586,18 @@ impl Supervisor { job_id: String, script_type: &ScriptType ) -> Result<(), SupervisorError> { - let worker_queue_key = self.get_worker_queue_key(script_type); + let actor_queue_key = self.get_actor_queue_key(script_type); // lpush also infers its types, RV is typically i64 (length of list) or () depending on exact command variant // For `redis::AsyncCommands::lpush`, it's `RedisResult` where R: FromRedisValue // Often this is the length of the list. Let's allow inference or specify if needed. let _: redis::RedisResult = - conn.lpush(&worker_queue_key, job_id.clone()).await; + conn.lpush(&actor_queue_key, job_id.clone()).await; Ok(()) } - // Internal helper to await response from worker + // Internal helper to await response from actor async fn await_response_from_connection( &self, conn: &mut redis::aio::MultiplexedConnection, @@ -679,7 +668,7 @@ impl Supervisor { Ok(()) } - // New method using dedicated reply queue with automatic worker selection + // New method using dedicated reply queue with automatic actor selection pub async fn run_job_and_await_result( &self, job: &Job @@ -782,7 +771,7 @@ impl Supervisor { pub async fn stop_job(&self, job_id: &str) -> Result<(), SupervisorError> { let mut conn = self.redis_client.get_multiplexed_async_connection().await?; - // Get job details to determine script type and appropriate worker + // Get job details to determine script type and appropriate actor let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id); let job_data: std::collections::HashMap = conn.hgetall(&job_key).await?; @@ -798,7 +787,7 @@ impl Supervisor { .map_err(|e| SupervisorError::InvalidInput(format!("Invalid script type: {}", e)))?; // Use hardcoded stop queue key for this script type - let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, script_type.worker_queue_suffix()); + let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix()); // Push job ID to the stop queue conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?; @@ -931,7 +920,7 @@ impl Supervisor { /// Dispatch jobs that are ready (have all prerequisites completed) pub async fn dispatch_ready_jobs(&self, ready_job_ids: Vec) -> Result<(), SupervisorError> { for job_id in ready_job_ids { - // Get job data to determine script type and select worker + // Get job data to determine script type and select actor let mut conn = self.redis_client.get_multiplexed_async_connection().await?; let job_key = 
format!("{}job:{}", NAMESPACE_PREFIX, job_id); let job_data: std::collections::HashMap = conn.hgetall(&job_key).await?; diff --git a/core/supervisor/src/lifecycle.rs b/core/supervisor/src/lifecycle.rs index ebb668c..7f24bed 100644 --- a/core/supervisor/src/lifecycle.rs +++ b/core/supervisor/src/lifecycle.rs @@ -1,6 +1,6 @@ -//! Worker lifecycle management functionality for the Hero Supervisor +//! Actor lifecycle management functionality for the Hero Supervisor //! -//! This module provides worker process lifecycle management using Zinit as the process manager. +//! This module provides actor process lifecycle management using Zinit as the process manager. //! All functionality is implemented as methods on the Supervisor struct for a clean API. use log::{debug, error, info, warn}; @@ -12,28 +12,28 @@ use zinit_client::{Client as ZinitClient, Status}; use hero_job::ScriptType; use crate::{Supervisor, SupervisorError}; -/// Information about a worker including its configuration and current status +/// Information about a actor including its configuration and current status #[derive(Debug, Clone)] -pub struct WorkerInfo { - pub config: WorkerConfig, +pub struct ActorInfo { + pub config: ActorConfig, pub status: Option, pub is_running: bool, } -/// Configuration for a worker binary +/// Configuration for a actor binary #[derive(Debug, Clone)] -pub struct WorkerConfig { - /// Name of the worker service +pub struct ActorConfig { + /// Name of the actor service pub name: String, - /// Path to the worker binary + /// Path to the actor binary pub binary_path: PathBuf, - /// Script type this worker handles + /// Script type this actor handles pub script_type: ScriptType, - /// Command line arguments for the worker + /// Command line arguments for the actor pub args: Vec, - /// Environment variables for the worker + /// Environment variables for the actor pub env: HashMap, - /// Whether this worker should restart on exit + /// Whether this actor should restart on exit pub restart_on_exit: bool, /// Health check command (optional) pub health_check: Option, @@ -41,7 +41,7 @@ pub struct WorkerConfig { pub dependencies: Vec, } -impl WorkerConfig { +impl ActorConfig { pub fn new(name: String, binary_path: PathBuf, script_type: ScriptType) -> Self { Self { name, @@ -81,122 +81,122 @@ impl WorkerConfig { } } -/// Worker lifecycle management methods for Supervisor +/// Actor lifecycle management methods for Supervisor impl Supervisor { - /// Get all workers with their configuration and status - unified method - pub async fn get_workers(&self, worker_configs: &[WorkerConfig]) -> Vec { - let mut workers = Vec::new(); + /// Get all actors with their configuration and status - unified method + pub async fn get_actors(&self, actor_configs: &[ActorConfig]) -> Vec { + let mut actors = Vec::new(); - for config in worker_configs { + for config in actor_configs { let status = self.zinit_client.status(&config.name).await.ok(); let is_running = status.as_ref() .map(|s| s.state == "running" && s.pid > 0) .unwrap_or(false); - workers.push(WorkerInfo { + actors.push(ActorInfo { config: config.clone(), status, is_running, }); } - workers + actors } - /// Start a worker using Zinit - pub async fn start_worker( + /// Start a actor using Zinit + pub async fn start_actor( &self, - worker_config: &WorkerConfig, + actor_config: &ActorConfig, ) -> Result<(), SupervisorError> { - info!("Starting worker: {}", worker_config.name); + info!("Starting actor: {}", actor_config.name); // Create service configuration for Zinit - let 
service_config = self.create_service_config(worker_config); + let service_config = self.create_service_config(actor_config); // Create the service in Zinit - self.zinit_client.create_service(&worker_config.name, service_config).await + self.zinit_client.create_service(&actor_config.name, service_config).await .map_err(|e| SupervisorError::ZinitError(format!("Failed to create service: {}", e)))?; // Monitor the service so Zinit starts managing it - self.zinit_client.monitor(&worker_config.name).await + self.zinit_client.monitor(&actor_config.name).await .map_err(|e| SupervisorError::ZinitError(format!("Failed to monitor service: {}", e)))?; // Start the service - self.zinit_client.start(&worker_config.name).await - .map_err(|e| SupervisorError::ZinitError(format!("Failed to start worker: {}", e)))?; + self.zinit_client.start(&actor_config.name).await + .map_err(|e| SupervisorError::ZinitError(format!("Failed to start actor: {}", e)))?; - info!("Successfully started worker: {}", worker_config.name); + info!("Successfully started actor: {}", actor_config.name); Ok(()) } - /// Stop a worker using Zinit - pub async fn stop_worker( + /// Stop a actor using Zinit + pub async fn stop_actor( &self, - worker_name: &str, + actor_name: &str, ) -> Result<(), SupervisorError> { - info!("Stopping worker: {}", worker_name); + info!("Stopping actor: {}", actor_name); - match self.zinit_client.stop(worker_name).await { + match self.zinit_client.stop(actor_name).await { Ok(_) => { - info!("Successfully stopped worker: {}", worker_name); + info!("Successfully stopped actor: {}", actor_name); Ok(()) } Err(e) => { - error!("Failed to stop worker {}: {}", worker_name, e); - Err(SupervisorError::WorkerStopFailed(worker_name.to_string(), e.to_string())) + error!("Failed to stop actor {}: {}", actor_name, e); + Err(SupervisorError::ActorStopFailed(actor_name.to_string(), e.to_string())) } } } - /// Restart a worker using Zinit - pub async fn restart_worker( + /// Restart a actor using Zinit + pub async fn restart_actor( &self, - worker_name: &str, + actor_name: &str, ) -> Result<(), SupervisorError> { - info!("Restarting worker: {}", worker_name); + info!("Restarting actor: {}", actor_name); - match self.zinit_client.restart(worker_name).await { + match self.zinit_client.restart(actor_name).await { Ok(_) => { - info!("Successfully restarted worker: {}", worker_name); + info!("Successfully restarted actor: {}", actor_name); Ok(()) } Err(e) => { - error!("Failed to restart worker {}: {}", worker_name, e); - Err(SupervisorError::WorkerRestartFailed(worker_name.to_string(), e.to_string())) + error!("Failed to restart actor {}: {}", actor_name, e); + Err(SupervisorError::ActorRestartFailed(actor_name.to_string(), e.to_string())) } } } - /// Get status of a worker using Zinit - pub async fn get_worker_status( + /// Get status of a actor using Zinit + pub async fn get_actor_status( &self, - worker_name: &str, + actor_name: &str, zinit_client: &ZinitClient, ) -> Result { - match zinit_client.status(worker_name).await { + match zinit_client.status(actor_name).await { Ok(status) => Ok(status), Err(e) => { - error!("Failed to get status for worker {}: {}", worker_name, e); - Err(SupervisorError::WorkerStatusFailed(worker_name.to_string(), e.to_string())) + error!("Failed to get status for actor {}: {}", actor_name, e); + Err(SupervisorError::ActorStatusFailed(actor_name.to_string(), e.to_string())) } } } - /// Get status of all workers - pub async fn get_all_worker_status( + /// Get status of all actors + pub async fn 
get_all_actor_status( &self, - worker_configs: &[WorkerConfig], + actor_configs: &[ActorConfig], zinit_client: &ZinitClient, ) -> Result, SupervisorError> { let mut status_map = HashMap::new(); - for worker in worker_configs { - match zinit_client.status(&worker.name).await { + for actor in actor_configs { + match zinit_client.status(&actor.name).await { Ok(status) => { - status_map.insert(worker.name.clone(), status); + status_map.insert(actor.name.clone(), status); } Err(e) => { - warn!("Failed to get status for worker {}: {}", worker.name, e); + warn!("Failed to get status for actor {}: {}", actor.name, e); } } } @@ -206,32 +206,32 @@ impl Supervisor { - /// Stop multiple workers - pub async fn stop_workers( + /// Stop multiple actors + pub async fn stop_actors( &self, - worker_names: &[String], + actor_names: &[String], ) -> Result<(), SupervisorError> { - info!("Stopping {} workers", worker_names.len()); + info!("Stopping {} actors", actor_names.len()); - for worker_name in worker_names { - self.stop_worker(worker_name).await?; + for actor_name in actor_names { + self.stop_actor(actor_name).await?; } Ok(()) } - /// Get count of running workers for a script type - pub async fn get_running_worker_count( + /// Get count of running actors for a script type + pub async fn get_running_actor_count( &self, - worker_configs: &[WorkerConfig], + actor_configs: &[ActorConfig], script_type: &ScriptType, zinit_client: &ZinitClient, ) -> usize { let mut running_count = 0; - for worker in worker_configs { - if worker.script_type == *script_type { - if let Ok(status) = zinit_client.status(&worker.name).await { + for actor in actor_configs { + if actor.script_type == *script_type { + if let Ok(status) = zinit_client.status(&actor.name).await { if status.state == "running" { running_count += 1; } @@ -242,7 +242,7 @@ impl Supervisor { running_count } - /// Send a ping job to a worker for health checking + /// Send a ping job to a actor for health checking pub async fn send_ping_job( &self, script_type: ScriptType, @@ -268,8 +268,8 @@ impl Supervisor { } } - /// Create Zinit service configuration from worker config - fn create_service_config(&self, worker: &WorkerConfig) -> serde_json::Map { + /// Create Zinit service configuration from actor config + fn create_service_config(&self, actor: &ActorConfig) -> serde_json::Map { use serde_json::{Map, Value}; let mut config = Map::new(); @@ -277,117 +277,117 @@ impl Supervisor { config.insert( "exec".to_string(), Value::String(format!("{} {}", - worker.binary_path.display(), - worker.args.join(" ") + actor.binary_path.display(), + actor.args.join(" ") )) ); config.insert( "oneshot".to_string(), - Value::Bool(!worker.restart_on_exit) + Value::Bool(!actor.restart_on_exit) ); - if let Some(health_check) = &worker.health_check { + if let Some(health_check) = &actor.health_check { config.insert("test".to_string(), Value::String(health_check.clone())); } - if !worker.dependencies.is_empty() { - config.insert("after".to_string(), json!(worker.dependencies)); + if !actor.dependencies.is_empty() { + config.insert("after".to_string(), json!(actor.dependencies)); } // Add environment variables if any - if !worker.env.is_empty() { - config.insert("env".to_string(), json!(worker.env)); + if !actor.env.is_empty() { + config.insert("env".to_string(), json!(actor.env)); } config } - /// Launch workers based on SupervisorBuilder configuration - pub(crate) async fn launch_configured_workers(&self, builder: &crate::SupervisorBuilderData) -> Result<(), SupervisorError> { + /// 
Launch actors based on SupervisorBuilder configuration + pub(crate) async fn launch_configured_actors(&self, builder: &crate::SupervisorBuilderData) -> Result<(), SupervisorError> { use hero_job::ScriptType; use std::path::PathBuf; let mut errors = Vec::new(); - // Launch OSIS worker if configured - if let Some(binary_path) = &builder.osis_worker { - let worker_id = "osis_worker_1"; - let mut config = WorkerConfig::new( - worker_id.to_string(), + // Launch OSIS actor if configured + if let Some(binary_path) = &builder.osis_actor { + let actor_id = "osis_actor_1"; + let mut config = ActorConfig::new( + actor_id.to_string(), PathBuf::from(binary_path), ScriptType::OSIS ); - config.env.extend(builder.worker_env_vars.clone()); + config.env.extend(builder.actor_env_vars.clone()); - info!("Launching OSIS worker: {}", worker_id); - if let Err(e) = self.start_worker(&config).await { - let error_msg = format!("Failed to start OSIS worker: {}", e); + info!("Launching OSIS actor: {}", actor_id); + if let Err(e) = self.start_actor(&config).await { + let error_msg = format!("Failed to start OSIS actor: {}", e); warn!("{}", error_msg); errors.push(error_msg); } } - // Launch SAL worker if configured - if let Some(binary_path) = &builder.sal_worker { - let worker_id = "sal_worker_1"; - let mut config = WorkerConfig::new( - worker_id.to_string(), + // Launch SAL actor if configured + if let Some(binary_path) = &builder.sal_actor { + let actor_id = "sal_actor_1"; + let mut config = ActorConfig::new( + actor_id.to_string(), PathBuf::from(binary_path), ScriptType::SAL ); - config.env.extend(builder.worker_env_vars.clone()); + config.env.extend(builder.actor_env_vars.clone()); - info!("Launching SAL worker: {}", worker_id); - if let Err(e) = self.start_worker(&config).await { - let error_msg = format!("Failed to start SAL worker: {}", e); + info!("Launching SAL actor: {}", actor_id); + if let Err(e) = self.start_actor(&config).await { + let error_msg = format!("Failed to start SAL actor: {}", e); warn!("{}", error_msg); errors.push(error_msg); } } - // Launch V worker if configured - if let Some(binary_path) = &builder.v_worker { - let worker_id = "v_worker_1"; - let mut config = WorkerConfig::new( - worker_id.to_string(), + // Launch V actor if configured + if let Some(binary_path) = &builder.v_actor { + let actor_id = "v_actor_1"; + let mut config = ActorConfig::new( + actor_id.to_string(), PathBuf::from(binary_path), ScriptType::V ); - config.env.extend(builder.worker_env_vars.clone()); + config.env.extend(builder.actor_env_vars.clone()); - info!("Launching V worker: {}", worker_id); - if let Err(e) = self.start_worker(&config).await { - let error_msg = format!("Failed to start V worker: {}", e); + info!("Launching V actor: {}", actor_id); + if let Err(e) = self.start_actor(&config).await { + let error_msg = format!("Failed to start V actor: {}", e); warn!("{}", error_msg); errors.push(error_msg); } } - // Launch Python worker if configured - if let Some(binary_path) = &builder.python_worker { - let worker_id = "python_worker_1"; - let mut config = WorkerConfig::new( - worker_id.to_string(), + // Launch Python actor if configured + if let Some(binary_path) = &builder.python_actor { + let actor_id = "python_actor_1"; + let mut config = ActorConfig::new( + actor_id.to_string(), PathBuf::from(binary_path), ScriptType::Python ); - config.env.extend(builder.worker_env_vars.clone()); + config.env.extend(builder.actor_env_vars.clone()); - info!("Launching Python worker: {}", worker_id); - if let Err(e) = 
self.start_worker(&config).await { - let error_msg = format!("Failed to start Python worker: {}", e); + info!("Launching Python actor: {}", actor_id); + if let Err(e) = self.start_actor(&config).await { + let error_msg = format!("Failed to start Python actor: {}", e); warn!("{}", error_msg); errors.push(error_msg); } } - // Return result based on whether any workers started successfully + // Return result based on whether any actors started successfully if errors.is_empty() { - info!("All configured workers started successfully"); + info!("All configured actors started successfully"); Ok(()) } else { - let combined_error = format!("Some workers failed to start: {}", errors.join("; ")); + let combined_error = format!("Some actors failed to start: {}", errors.join("; ")); warn!("{}", combined_error); Err(SupervisorError::ZinitError(combined_error)) } diff --git a/core/worker/.gitignore b/core/worker/.gitignore deleted file mode 100644 index 6f6c663..0000000 --- a/core/worker/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/target -worker_rhai_temp_db \ No newline at end of file diff --git a/interfaces/wasm/README.md b/interfaces/wasm/README.md new file mode 100644 index 0000000..23f874d --- /dev/null +++ b/interfaces/wasm/README.md @@ -0,0 +1,3 @@ +# Baobab WASM App + +The Baobab WASM app is a simple web interface for interacting with the backend. It provides a simple way to supervise workers, view and manage jobs, and to execute scripts. \ No newline at end of file diff --git a/interfaces/websocket/server/README.md b/interfaces/websocket/server/README.md index 62f727d..6d113d5 100644 --- a/interfaces/websocket/server/README.md +++ b/interfaces/websocket/server/README.md @@ -5,7 +5,7 @@ An OpenRPC WebSocket Server to interface with the [cores](../../core) of authori - [OpenRPC Specification](openrpc.json) defines the API. - There are RPC Operations specified to authorize a websocket connection. - Authorized clients can manage jobs. -- The server uses the [supervisor] to dispatch [jobs] to the [workers]. +- The server uses the [supervisor] to dispatch [jobs] to the [actors]. 
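To make that dispatch path concrete, a rough sketch against the supervisor API in this patch; the JobBuilder setter names are assumptions, since only `new_job()`, `run_job_and_await_result()` and `stop_job()` appear here.

async fn run_one_job(supervisor: &Supervisor) -> Result<(), SupervisorError> {
    // Build a job; the setters below are hypothetical, the patch only shows
    // that new_job() returns a JobBuilder.
    let job = supervisor.new_job()
        .script_type(ScriptType::OSIS)
        .script("40 + 2")
        .build()?;

    // Internally this lpushes the job id onto the actor_queue:<suffix> key
    // under NAMESPACE_PREFIX and waits on a dedicated reply queue; the exact
    // payload type of the result is not shown in this section.
    let _result = supervisor.run_job_and_await_result(&job).await?;
    Ok(())
}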
## Circles diff --git a/interfaces/websocket/server/cmd/main.rs b/interfaces/websocket/server/cmd/main.rs index e07f3e1..fc5c98e 100644 --- a/interfaces/websocket/server/cmd/main.rs +++ b/interfaces/websocket/server/cmd/main.rs @@ -88,7 +88,6 @@ async fn main() -> std::io::Result<()> { cert: args.cert.clone(), key: args.key.clone(), tls_port: args.tls_port, - webhooks: args.webhooks, circles: std::collections::HashMap::new(), // Empty circles when using CLI } }; diff --git a/interfaces/websocket/server/docs/webhooks.md b/interfaces/websocket/server/docs/webhooks.md index 5b81685..0e026b6 100644 --- a/interfaces/websocket/server/docs/webhooks.md +++ b/interfaces/websocket/server/docs/webhooks.md @@ -30,7 +30,7 @@ graph TB subgraph "Backend" L[Redis] - M[Rhai Worker] + M[Rhai Actor] end A --> |POST /webhooks/stripe/{circle_pk}| E @@ -94,7 +94,7 @@ sequenceDiagram participant WV as Webhook Verifier participant SD as Script Supervisor participant RC as RhaiSupervisor - participant RW as Rhai Worker + participant RW as Rhai Actor WS->>CS: POST /webhooks/stripe/{circle_pk} CS->>CS: Extract circle_pk from URL diff --git a/interfaces/websocket/server/tests/basic_integration_test.rs b/interfaces/websocket/server/tests/basic_integration_test.rs index b1f1960..04ddb7a 100644 --- a/interfaces/websocket/server/tests/basic_integration_test.rs +++ b/interfaces/websocket/server/tests/basic_integration_test.rs @@ -1,8 +1,8 @@ use circle_ws_lib::{spawn_circle_server, ServerConfig}; -use rhailib_engine::create_heromodels_engine; +use baobab_engine::create_heromodels_engine; use futures_util::{SinkExt, StreamExt}; use heromodels::db::hero::OurDB; -use rhailib_worker::spawn_rhai_worker; +use baobab_actor::spawn_rhai_actor; use serde_json::json; use std::sync::Arc; use tokio::sync::mpsc; @@ -14,13 +14,13 @@ async fn test_server_startup_and_play() { let circle_pk = Uuid::new_v4().to_string(); let redis_url = "redis://127.0.0.1/"; - // --- Worker Setup --- + // --- Actor Setup --- let (shutdown_tx, shutdown_rx) = mpsc::channel(1); let db = Arc::new(OurDB::new("file:memdb_test_server?mode=memory&cache=shared", true).unwrap()); let engine = create_heromodels_engine(); - let worker_id = Uuid::new_v4().to_string(); - let worker_handle = spawn_rhai_worker( - worker_id, + let actor_id = Uuid::new_v4().to_string(); + let actor_handle = spawn_rhai_actor( + actor_id, circle_pk.to_string(), engine, redis_url.to_string(), @@ -37,7 +37,7 @@ async fn test_server_startup_and_play() { let (server_task, server_handle) = spawn_circle_server(config).unwrap(); let server_join_handle = tokio::spawn(server_task); - // Give server and worker a moment to start + // Give server and actor a moment to start tokio::time::sleep(std::time::Duration::from_millis(500)).await; // --- Client Connection and Test --- @@ -72,5 +72,5 @@ async fn test_server_startup_and_play() { server_handle.stop(true).await; let _ = server_join_handle.await; let _ = shutdown_tx.send(()).await; - let _ = worker_handle.await; + let _ = actor_handle.await; }
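For completeness, a sketch of wiring the renamed lifecycle pieces together. `get_actor_configs()`, `spawn_lifecycle_manager()` (which takes `self: Arc<Self>`) and `cleanup_and_shutdown()` are the methods shown above; the 30-second interval is an arbitrary example value.

use std::sync::Arc;
use std::time::Duration;

async fn supervise(supervisor: Supervisor) -> Result<(), SupervisorError> {
    let supervisor = Arc::new(supervisor);

    // Rebuild the ActorConfig list from the builder data captured in build().
    let configs = supervisor.get_actor_configs()?;

    // Starts the actors, then health-checks them on each tick, restarting
    // unhealthy or missing ones through Zinit.
    let lifecycle = supervisor
        .clone()
        .spawn_lifecycle_manager(configs, Duration::from_secs(30));

    // ... serve requests ...

    // On shutdown, stop monitoring and remove the Zinit services again.
    lifecycle.abort();
    supervisor.cleanup_and_shutdown().await?;
    Ok(())
}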