Updates
parent 04a1af2423
commit 0ebda7c1aa
Cargo.lock (generated, 946 lines changed)
File diff suppressed because it is too large
@@ -53,13 +53,13 @@ uuid = { version = "1.6", features = ["v4", "serde"] }

[workspace]
members = [
    "interfaces/unix/client",
    "interfaces/unix/server",
    "interfaces/websocket/client",
    "interfaces/websocket/server",
    "core/supervisor",
    "core/actor",
    "core/job", "interfaces/websocket/examples",
    "proxies/http",
    "interfaces/openrpc/client",
    "interfaces/openrpc/server",
]
resolver = "2" # Recommended for new workspaces
@@ -24,4 +24,4 @@ tls = false

# OSIS Actor Configuration
# Handles OSIS (HeroScript) execution
[osis_actor]
binary_path = "actor_osis"
binary_path = "/home/maxime/actor_osis/target/debug/actor_osis"
@@ -27,7 +28,8 @@
//! └───────────────┘
//! ```

use hero_job::Job;
use hero_job::{Job, ScriptType};
use hero_job::keys;
use log::{debug, error, info};
use redis::AsyncCommands;

@@ -36,7 +37,7 @@ use std::time::Duration;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;

use crate::{initialize_redis_connection, NAMESPACE_PREFIX, BLPOP_TIMEOUT_SECONDS};
use crate::{initialize_redis_connection, BLPOP_TIMEOUT_SECONDS};

/// Configuration for actor instances
#[derive(Debug, Clone)]

@@ -123,11 +124,14 @@ pub trait Actor: Send + Sync + 'static {
tokio::spawn(async move {
    let actor_id = self.actor_id();
    let redis_url = self.redis_url();
    let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id);
    // Canonical work queue based on script type (instance/group selection can be added later)
    let script_type = derive_script_type_from_actor_id(actor_id);
    let queue_key = keys::work_type(&script_type);
    info!(
        "{} Actor '{}' starting. Connecting to Redis at {}. Listening on queue: {}",
        "{} Actor '{}' starting. Type {:?}. Connecting to Redis at {}. Listening on queue: {}",
        self.actor_type(),
        actor_id,
        script_type,
        redis_url,
        queue_key
    );
@@ -254,78 +258,18 @@ pub fn spawn_actor<W: Actor>(
    actor.spawn(shutdown_rx)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::engine::create_heromodels_engine;

    // Mock actor for testing
    struct MockActor;

    #[async_trait::async_trait]
    impl Actor for MockActor {
        async fn process_job(
            &self,
            _job: Job,
            _redis_conn: &mut redis::aio::MultiplexedConnection,
        ) {
            // Mock implementation - do nothing
            // Engine would be owned by the actor implementation as a field
        }

        fn actor_type(&self) -> &'static str {
            "Mock"
        }

        fn actor_id(&self) -> &str {
            "mock_actor"
        }

        fn redis_url(&self) -> &str {
            "redis://localhost:6379"
        }

fn derive_script_type_from_actor_id(actor_id: &str) -> ScriptType {
    let lower = actor_id.to_lowercase();
    if lower.contains("sal") {
        ScriptType::SAL
    } else if lower.contains("osis") {
        ScriptType::OSIS
    } else if lower.contains("python") {
        ScriptType::Python
    } else if lower.contains("v") {
        ScriptType::V
    } else {
        // Default to OSIS when uncertain
        ScriptType::OSIS
    }

    #[tokio::test]
    async fn test_actor_config_creation() {
        let config = ActorConfig::new(
            "test_actor".to_string(),
            "/tmp".to_string(),
            "redis://localhost:6379".to_string(),
            false,
        );

        assert_eq!(config.actor_id, "test_actor");
        assert_eq!(config.db_path, "/tmp");
        assert_eq!(config.redis_url, "redis://localhost:6379");
        assert!(!config.preserve_tasks);
        assert!(config.default_timeout.is_none());
    }

    #[tokio::test]
    async fn test_actor_config_with_timeout() {
        let timeout = Duration::from_secs(300);
        let config = ActorConfig::new(
            "test_actor".to_string(),
            "/tmp".to_string(),
            "redis://localhost:6379".to_string(),
            false,
        ).with_default_timeout(timeout);

        assert_eq!(config.default_timeout, Some(timeout));
    }

    #[tokio::test]
    async fn test_spawn_actor_function() {
        let (_shutdown_tx, shutdown_rx) = mpsc::channel(1);
        let actor = Arc::new(MockActor);

        let handle = spawn_actor(actor, shutdown_rx);

        // The actor should be created successfully
        assert!(!handle.is_finished());

        // Abort the actor for cleanup
        handle.abort();
    }
}
}
@@ -1,4 +1,5 @@
use hero_job::{Job, JobStatus};
use hero_job::{Job, JobStatus, ScriptType};
use hero_job::keys;
use log::{debug, error, info};
use redis::AsyncCommands;
use rhai::{Dynamic, Engine};

@@ -217,10 +218,11 @@ pub fn spawn_rhai_actor(
    preserve_tasks: bool,
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
    tokio::spawn(async move {
        let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id);
        let script_type = derive_script_type_from_actor_id(&actor_id);
        let queue_key = keys::work_type(&script_type);
        info!(
            "Rhai Actor for Actor ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
            actor_id, redis_url, queue_key
            "Rhai Actor '{}' starting. Type {:?}. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
            actor_id, script_type, redis_url, queue_key
        );

        let mut redis_conn = initialize_redis_connection(&actor_id, &redis_url).await?;

@@ -259,6 +261,23 @@ pub fn spawn_rhai_actor(
    })
}

// Helper to derive script type from actor_id for canonical queue selection
fn derive_script_type_from_actor_id(actor_id: &str) -> ScriptType {
    let lower = actor_id.to_lowercase();
    if lower.contains("sal") {
        ScriptType::SAL
    } else if lower.contains("osis") {
        ScriptType::OSIS
    } else if lower.contains("python") {
        ScriptType::Python
    } else if lower == "v" || lower.contains(":v") || lower.contains(" v") {
        ScriptType::V
    } else {
        // Default to OSIS when uncertain
        ScriptType::OSIS
    }
}

// Re-export the main trait-based interface for convenience
pub use actor_trait::{Actor, ActorConfig, spawn_actor};
@@ -10,6 +10,7 @@ use crossterm::{
    execute,
};
use hero_job::{Job, JobStatus, ScriptType};
use hero_job::keys;

use ratatui::{
    backend::{Backend, CrosstermBackend},

@@ -457,9 +458,9 @@ impl App {
    let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
    job.store_in_redis(&mut conn).await?;

    // Add to work queue
    let queue_name = format!("hero:job:actor_queue:{}", self.actor_id.to_lowercase());
    let _: () = conn.lpush(&queue_name, &job_id).await?;
    // Add to work queue (canonical type queue)
    let queue_name = keys::work_type(&self.job_form.script_type);
    let _: () = conn.lpush(&queue_name, &job.id).await?;

    self.status_message = Some(format!("Job {} dispatched successfully", job_id));
@@ -387,3 +387,47 @@ impl Job {
        Ok(job_ids)
    }
}

// Canonical Redis key builders for queues and hashes
pub mod keys {
    use super::{NAMESPACE_PREFIX, ScriptType};

    // hero:job:{job_id}
    pub fn job_hash(job_id: &str) -> String {
        format!("{}{}", NAMESPACE_PREFIX, job_id)
    }

    // hero:q:reply:{job_id}
    pub fn reply(job_id: &str) -> String {
        format!("hero:q:reply:{}", job_id)
    }

    // hero:q:work:type:{script_type}
    pub fn work_type(script_type: &ScriptType) -> String {
        format!("hero:q:work:type:{}", script_type.actor_queue_suffix())
    }

    // hero:q:work:type:{script_type}:group:{group}
    pub fn work_group(script_type: &ScriptType, group: &str) -> String {
        format!(
            "hero:q:work:type:{}:group:{}",
            script_type.actor_queue_suffix(),
            group
        )
    }

    // hero:q:work:type:{script_type}:group:{group}:inst:{instance}
    pub fn work_instance(script_type: &ScriptType, group: &str, instance: &str) -> String {
        format!(
            "hero:q:work:type:{}:group:{}:inst:{}",
            script_type.actor_queue_suffix(),
            group,
            instance
        )
    }

    // hero:q:ctl:type:{script_type}
    pub fn stop_type(script_type: &ScriptType) -> String {
        format!("hero:q:ctl:type:{}", script_type.actor_queue_suffix())
    }
}
core/supervisor/examples/simple_job.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
use hero_supervisor::{SupervisorBuilder, ScriptType};
use hero_job::JobBuilder as CoreJobBuilder;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 1) Build a Supervisor
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://127.0.0.1/")
        .build()
        .await?;

    // 2) Build a Job (using core job builder to set caller_id, context_id)
    let job = CoreJobBuilder::new()
        .caller_id("02abc...caller") // required
        .context_id("02def...context") // required
        .script_type(ScriptType::OSIS) // select the OSIS actor (matches configured osis_actor_1)
        .script("40 + 3") // simple Rhai script
        .timeout(std::time::Duration::from_secs(10))
        .build()?; // returns hero_job::Job

    let job_id = job.id.clone();

    // 3a) Store the job in Redis
    supervisor.create_job(&job).await?;

    // 3b) Start the job (pushes ID to the actor’s Redis queue)
    supervisor.start_job(&job_id).await?;

    // 3c) Wait until finished, then fetch output
    use tokio::time::sleep;

    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(10);
    loop {
        let status = supervisor.get_job_status(&job_id).await?;
        if status == hero_supervisor::JobStatus::Finished {
            break;
        }
        if std::time::Instant::now() >= deadline {
            println!("Job {} timed out waiting for completion (status: {:?})", job_id, status);
            break;
        }
        sleep(std::time::Duration::from_millis(250)).await;
    }

    if let Some(output) = supervisor.get_job_output(&job_id).await? {
        println!("Job {} output: {}", job_id, output);
    } else {
        println!("Job {} completed with no output field set", job_id);
    }

    Ok(())
}
@@ -408,7 +408,8 @@ impl Supervisor {

    /// Get the hardcoded actor queue key for the script type
    fn get_actor_queue_key(&self, script_type: &ScriptType) -> String {
        format!("{}actor_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix())
        // Canonical type queue
        hero_job::keys::work_type(script_type)
    }

    pub fn new_job(&self) -> JobBuilder {

@@ -586,14 +587,9 @@ impl Supervisor {
        job_id: String,
        script_type: &ScriptType
    ) -> Result<(), SupervisorError> {
        let actor_queue_key = self.get_actor_queue_key(script_type);

        // lpush also infers its types, RV is typically i64 (length of list) or () depending on exact command variant
        // For `redis::AsyncCommands::lpush`, it's `RedisResult<R>` where R: FromRedisValue
        // Often this is the length of the list. Let's allow inference or specify if needed.
        let _: redis::RedisResult<i64> =
            conn.lpush(&actor_queue_key, job_id.clone()).await;

        // Canonical dispatch to type queue
        let actor_queue_key = hero_job::keys::work_type(script_type);
        let _: redis::RedisResult<i64> = conn.lpush(&actor_queue_key, job_id.clone()).await;
        Ok(())
    }

@@ -675,7 +671,8 @@ impl Supervisor {
    ) -> Result<String, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, job.id); // Derived from the passed job_id
        // Canonical reply queue
        let reply_queue_key = hero_job::keys::reply(&job.id);

        self.create_job_using_connection(
            &mut conn,

@@ -692,13 +689,48 @@ impl Supervisor {
            job.timeout
        );

        self.await_response_from_connection(
            &mut conn,
            &job.id,
            &reply_queue_key,
            job.timeout,
        )
        .await
        // Some actors update the job hash directly and do not use reply queues.
        // Poll the job hash for output until timeout to support both models.
        let start_time = std::time::Instant::now();

        loop {
            // If output is present in the job hash, return it immediately
            match self.get_job_output(&job.id).await {
                Ok(Some(output)) => {
                    // Optional: cleanup reply queue in case it was created
                    let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
                    return Ok(output);
                }
                Ok(None) => {
                    // Check for error state
                    match self.get_job_status(&job.id).await {
                        Ok(JobStatus::Error) => {
                            // Try to read the error field for context
                            let mut conn2 = self.redis_client.get_multiplexed_async_connection().await?;
                            let job_key = format!("{}{}", NAMESPACE_PREFIX, job.id);
                            let err: Option<String> = conn2.hget(&job_key, "error").await.ok();
                            return Err(SupervisorError::InvalidInput(
                                err.unwrap_or_else(|| "Job failed".to_string())
                            ));
                        }
                        _ => {
                            // keep polling
                        }
                    }
                }
                Err(_) => {
                    // Ignore transient read errors and continue polling
                }
            }

            if start_time.elapsed() >= job.timeout {
                // On timeout, ensure any reply queue is cleaned up and return a Timeout error
                let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
                return Err(SupervisorError::Timeout(job.id.clone()));
            }

            tokio::time::sleep(std::time::Duration::from_millis(200)).await;
        }
    }

    // Method to get job status

@@ -772,7 +804,7 @@ impl Supervisor {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // Get job details to determine script type and appropriate actor
        let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
        let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;

        if job_data.is_empty() {

@@ -787,7 +819,8 @@ impl Supervisor {
            .map_err(|e| SupervisorError::InvalidInput(format!("Invalid script type: {}", e)))?;

        // Use hardcoded stop queue key for this script type
        let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix());
        // Stop queue per protocol: hero:stop_queue:{suffix}
        let stop_queue_key = format!("hero:stop_queue:{}", script_type.actor_queue_suffix());

        // Push job ID to the stop queue
        conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?;

@@ -799,7 +832,7 @@ impl Supervisor {
    /// Get logs for a job by reading from its log file
    pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, SupervisorError> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
        let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);

        // Get the job data to find the log path
        let result_map: Option<std::collections::HashMap<String, String>> =

@@ -922,7 +955,7 @@ impl Supervisor {
        for job_id in ready_job_ids {
            // Get job data to determine script type and select actor
            let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
            let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
            let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
            let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;

            if let Some(script_type_str) = job_data.get("script_type") {
docs/JOBS_QUICKSTART.md (new file, 209 lines)
@@ -0,0 +1,209 @@
# Jobs Quickstart: Create and Send a Simple Job to the Supervisor

This guide shows how a new (simple) job looks, how to construct it, and how to submit it to the Supervisor. It covers:
- The minimal fields a job needs
- Picking an actor via script type
- Submitting a job using the Rust API
- Submitting a job via the OpenRPC server over Unix IPC (and WS)

Key references:
- [rust.ScriptType](core/job/src/lib.rs:16) determines the target actor queue
- [rust.Job](core/job/src/lib.rs:87) is the canonical job payload stored in Redis
- [rust.JobBuilder::new()](core/job/src/builder.rs:47), [rust.JobBuilder::caller_id()](core/job/src/builder.rs:79), [rust.JobBuilder::context_id()](core/job/src/builder.rs:74), [rust.JobBuilder::script_type()](core/job/src/builder.rs:69), [rust.JobBuilder::script()](core/job/src/builder.rs:84), [rust.JobBuilder::timeout()](core/job/src/builder.rs:94), [rust.JobBuilder::build()](core/job/src/builder.rs:158)
- [rust.SupervisorBuilder::new()](core/supervisor/src/lib.rs:124), [rust.SupervisorBuilder::build()](core/supervisor/src/lib.rs:267)
- [rust.Supervisor::create_job()](core/supervisor/src/lib.rs:642), [rust.Supervisor::start_job()](core/supervisor/src/lib.rs:658), [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672), [rust.Supervisor::get_job_output()](core/supervisor/src/lib.rs:740)
- Redis key namespace: [rust.NAMESPACE_PREFIX](core/job/src/lib.rs:13)


## 1) What is a “simple job”?

A simple job is the minimal unit of work that an actor can execute. At minimum, you must provide:
- caller_id: String (identifier of the requester; often a public key)
- context_id: String (the “circle” or execution context)
- script: String (the code to run; Rhai for OSIS/SAL; HeroScript for V/Python)
- script_type: ScriptType (OSIS | SAL | V | Python)
- timeout: Duration (optional; default used if not set)

The job’s script_type selects the actor and thus the queue. See [rust.ScriptType::actor_queue_suffix()](core/job/src/lib.rs:29) for the mapping.


## 2) Choosing the actor by ScriptType

- OSIS: Rhai script, sequential non-blocking
- SAL: Rhai script, blocking async, concurrent
- V: HeroScript via V engine
- Python: HeroScript via Python engine

Pick the script_type that matches your script/runtime requirements. See the design summary in [core/docs/architecture.md](core/docs/architecture.md).


## 3) Build and submit a job using the Rust API

This is the most direct, strongly-typed integration. You will:
1) Build a Supervisor
2) Construct a Job (using the “core” job builder for explicit caller_id/context_id)
3) Submit it with either:
   - create_job + start_job (two-step)
   - run_job_and_await_result (one-shot request-reply)

Note: We deliberately use the core job builder (hero_job) so we can set caller_id explicitly via [rust.JobBuilder::caller_id()](core/job/src/builder.rs:79).

Example Rhai script (returns 42):
```rhai
40 + 2
```

Rust example (two-step create + start + poll output):
```rust
use hero_supervisor::{SupervisorBuilder, ScriptType};
use hero_job::JobBuilder as CoreJobBuilder;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 1) Build a Supervisor
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://127.0.0.1/")
        .build()
        .await?;

    // 2) Build a Job (using core job builder to set caller_id, context_id)
    let job = CoreJobBuilder::new()
        .caller_id("02abc...caller") // required
        .context_id("02def...context") // required
        .script_type(ScriptType::SAL) // select the SAL actor
        .script("40 + 2") // simple Rhai script
        .timeout(std::time::Duration::from_secs(10))
        .build()?; // returns hero_job::Job

    let job_id = job.id.clone();

    // 3a) Store the job in Redis
    supervisor.create_job(&job).await?;

    // 3b) Start the job (pushes ID to the actor’s Redis queue)
    supervisor.start_job(&job_id).await?;

    // 3c) Fetch output when finished (or poll status via get_job_status)
    if let Some(output) = supervisor.get_job_output(&job_id).await? {
        println!("Job {} output: {}", job_id, output);
    } else {
        println!("Job {} has no output yet", job_id);
    }

    Ok(())
}
```
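Note that the final `get_job_output` call above may legitimately return `None` if the actor has not finished yet. The example shipped with this change, core/supervisor/examples/simple_job.rs, closes that gap by polling `get_job_status` until the job reports `Finished` (or a deadline passes) before reading the output. A minimal sketch of that polling step, reusing the `supervisor` and `job_id` bindings from the example above:

```rust
// Poll the job status until it reports Finished, or give up after 10 seconds.
let deadline = std::time::Instant::now() + std::time::Duration::from_secs(10);
loop {
    let status = supervisor.get_job_status(&job_id).await?;
    if status == hero_supervisor::JobStatus::Finished {
        break;
    }
    if std::time::Instant::now() >= deadline {
        println!("Job {} timed out waiting for completion (status: {:?})", job_id, status);
        break;
    }
    tokio::time::sleep(std::time::Duration::from_millis(250)).await;
}

// Only now is the output field expected to be populated in the job hash.
if let Some(output) = supervisor.get_job_output(&job_id).await? {
    println!("Job {} output: {}", job_id, output);
}
```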
Rust example (one-shot request-reply):
```rust
use hero_supervisor::{SupervisorBuilder, ScriptType};
use hero_job::JobBuilder as CoreJobBuilder;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://127.0.0.1/")
        .build()
        .await?;

    let job = CoreJobBuilder::new()
        .caller_id("02abc...caller")
        .context_id("02def...context")
        .script_type(ScriptType::SAL)
        .script("40 + 2")
        .timeout(std::time::Duration::from_secs(10))
        .build()?;

    // Creates the job, dispatches it to the correct actor queue,
    // and waits for a reply on the dedicated reply queue.
    let output = supervisor.run_job_and_await_result(&job).await?;
    println!("Synchronous output: {}", output);

    Ok(())
}
```

References used in this flow:
- [rust.SupervisorBuilder::new()](core/supervisor/src/lib.rs:124), [rust.SupervisorBuilder::build()](core/supervisor/src/lib.rs:267)
- [rust.JobBuilder::caller_id()](core/job/src/builder.rs:79), [rust.JobBuilder::context_id()](core/job/src/builder.rs:74), [rust.JobBuilder::script_type()](core/job/src/builder.rs:69), [rust.JobBuilder::script()](core/job/src/builder.rs:84), [rust.JobBuilder::timeout()](core/job/src/builder.rs:94), [rust.JobBuilder::build()](core/job/src/builder.rs:158)
- [rust.Supervisor::create_job()](core/supervisor/src/lib.rs:642), [rust.Supervisor::start_job()](core/supervisor/src/lib.rs:658), [rust.Supervisor::get_job_output()](core/supervisor/src/lib.rs:740)
- [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672)


## 4) Submit a job via the OpenRPC server (Unix IPC or WebSocket)

The OpenRPC server exposes JSON-RPC 2.0 methods which proxy to the Supervisor:
- Types: [rust.JobParams](interfaces/openrpc/server/src/types.rs:6)
- Methods registered in [interfaces/openrpc/server/src/lib.rs](interfaces/openrpc/server/src/lib.rs:117)

Unix IPC launcher and client:
- Server: [interfaces/unix/server/src/main.rs](interfaces/unix/server/src/main.rs)
- Client: [interfaces/unix/client/src/main.rs](interfaces/unix/client/src/main.rs)

Start the IPC server:
```bash
cargo run -p hero-unix-server -- \
  --socket /tmp/baobab.ipc \
  --db-path ./db
```

Create a job (JSON-RPC, IPC):
```bash
cargo run -p hero-unix-client -- \
  --socket /tmp/baobab.ipc \
  --method create_job \
  --params '{
    "script": "40 + 2",
    "script_type": "SAL",
    "caller_id": "02abc...caller",
    "context_id": "02def...context",
    "timeout": 10
  }'
```

This returns the job_id. Then start the job:
```bash
cargo run -p hero-unix-client -- \
  --socket /tmp/baobab.ipc \
  --method start_job \
  --params '["<job_id_from_create>"]'
```

Fetch output (optional):
```bash
cargo run -p hero-unix-client -- \
  --socket /tmp/baobab.ipc \
  --method get_job_output \
  --params '["<job_id_from_create>"]'
```

Notes:
- The “run_job” JSON-RPC method is present but not fully wired to the full request-reply flow; prefer create_job + start_job + get_job_output for now.
- JobParams fields are defined in [rust.JobParams](interfaces/openrpc/server/src/types.rs:6).


## 5) What happens under the hood

- The job is serialized to Redis under the namespace [rust.NAMESPACE_PREFIX](core/job/src/lib.rs:13)
- The Supervisor picks the actor queue from [rust.ScriptType::actor_queue_suffix()](core/job/src/lib.rs:29) and LPUSHes your job ID
- The actor BLPOPs its queue, loads the job, executes your script, and stores the result back into the Redis job hash
- For synchronous flows, the Supervisor waits on a dedicated reply queue until the result arrives via [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672)


## 6) Minimal scripts by actor type

- OSIS/SAL (Rhai):
  - "40 + 2"
  - "let x = 21; x * 2"
  - You can access injected context variables such as CALLER_ID and CONTEXT_ID (see the architecture doc in [core/docs/architecture.md](core/docs/architecture.md)).

- V/Python (HeroScript):
  - Provide a valid HeroScript snippet appropriate for the selected engine and your deployment.


## 7) Troubleshooting

- Ensure Redis is running and reachable at the configured URL
- SAL vs OSIS: pick SAL if your script is blocking/IO-heavy and needs concurrency; otherwise OSIS is fine for sequential non-blocking tasks
- If using OpenRPC IPC, ensure the socket path matches between server and client
- For the lifecycle of actors (starting/restarting/health checks), see [core/supervisor/README.md](core/supervisor/README.md)
docs/PROJECT_OVERVIEW.md (new file, 216 lines)
@@ -0,0 +1,216 @@
# Baobab Project Overview

This document explains the system architecture and execution model: what a supervisor is, what an actor is (including each actor type and how they are used), how jobs flow through Redis, and how the various interfaces expose functionality over WebSocket and Unix IPC.

References point directly into the codebase for quick lookup.


## 1. Core Concepts

- Supervisor
  - A long-lived orchestrator that:
    - Supervises actor lifecycles (start/restart/stop/health checks),
    - Dispatches jobs to actors via Redis queues,
    - Exposes a high-level API for creating, starting, running, and inspecting jobs.
  - Key types and entry points:
    - [Supervisor](core/supervisor/src/lib.rs:23)
    - [SupervisorBuilder](core/supervisor/src/lib.rs:29)
    - [SupervisorBuilder::from_toml()](core/supervisor/src/lib.rs:137)
    - [Supervisor::start_actors()](core/supervisor/src/lib.rs:299)
    - [Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672)
    - [Supervisor::start_job()](core/supervisor/src/lib.rs:658)
    - [Supervisor::get_job_status()](core/supervisor/src/lib.rs:705)
    - [Supervisor::get_job_output()](core/supervisor/src/lib.rs:740)
    - [Supervisor::list_jobs()](core/supervisor/src/lib.rs:761)
    - [Supervisor::stop_job()](core/supervisor/src/lib.rs:771)
    - [Supervisor::delete_job()](core/supervisor/src/lib.rs:831)
    - [Supervisor::clear_all_jobs()](core/supervisor/src/lib.rs:844)

- Actor
  - A worker service that pulls jobs from a Redis queue and executes the job’s script with the appropriate engine/runtime for its type.
  - Trait and common loop:
    - [Actor](core/actor/src/actor_trait.rs:80)
    - [ActorConfig](core/actor/src/actor_trait.rs:41)
    - [Actor::spawn() (common loop)](core/actor/src/actor_trait.rs:119)
    - [spawn_actor()](core/actor/src/actor_trait.rs:250)

- Job and Redis schema
  - A job encapsulates a unit of work: script, script type (which selects the actor queue), caller/context IDs, timeout, etc.
  - Canonical data and status types are re-exported by the supervisor:
    - [Job](core/supervisor/src/lib.rs:21)
    - [JobStatus](core/supervisor/src/lib.rs:21)
    - [ScriptType](core/supervisor/src/lib.rs:21)
  - The Redis schema used by the supervisor for job supervision is documented in:
    - [core/supervisor/README.md](core/supervisor/README.md)
    - Keys overview (jobs, actor work queues, reply queues): see lines 95–100 in that file.


## 2. Actors and Script Execution

The system defines four actor types. Each actor has its own queue and executes scripts differently, with standardized context variables injected into script execution (e.g., CALLER_ID, CONTEXT_ID).

- Design summary:
  - [core/docs/architecture.md](core/docs/architecture.md:3)
  - [core/docs/architecture.md](core/docs/architecture.md:5)

Actor types and behavior:

- OSIS (Rhai, non-blocking, sequential)
  - Executes Rhai scripts one after another on a single thread using the Rhai engine.
  - Intended for non-blocking tasks.

- SAL (Rhai, blocking async, concurrent)
  - Executes blocking asynchronous Rhai scripts concurrently by spawning a new thread per evaluation.
  - Intended for IO-bound or blocking tasks requiring concurrency.

- V (HeroScript via V engine) and Python (HeroScript via Python engine)
  - Execute HeroScript scripts in their respective engines.

Execution context:

- Both CALLER_ID and CONTEXT_ID are injected into scope for scripts. See the description at:
  - [core/docs/architecture.md](core/docs/architecture.md:3)

Actor implementation surface:

- Actors implement [Actor](core/actor/src/actor_trait.rs:80) and plug into the provided [Actor::spawn()](core/actor/src/actor_trait.rs:119) loop.
- The common loop:
  - Connects to Redis (per-actor id),
  - Blocks on the actor’s queue with BLPOP,
  - Handles a special “ping” script inline (health check),
  - Delegates other jobs to Actor::process_job().
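For orientation, the sketch below shows roughly what an implementation of that trait surface looks like. It mirrors the MockActor previously used in the crate’s tests; the crate name, method signatures, and return types are assumptions here, and the real trait definition in [Actor](core/actor/src/actor_trait.rs:80) is authoritative.

```rust
use hero_job::Job;
// Assumed re-exports; the actor crate exposes `pub use actor_trait::{Actor, ActorConfig, spawn_actor};`.
use hero_actor::{Actor, spawn_actor};

// Minimal, illustrative actor: the engine it uses would normally be a field on the struct.
struct MyOsisActor;

#[async_trait::async_trait]
impl Actor for MyOsisActor {
    async fn process_job(
        &self,
        job: Job,
        _redis_conn: &mut redis::aio::MultiplexedConnection,
    ) {
        // Execute job.script with this actor's engine and write the
        // status/output back into the hero:job:{job_id} hash.
        let _ = job;
    }

    fn actor_type(&self) -> &'static str { "OSIS" }
    fn actor_id(&self) -> &str { "osis_actor_1" }
    fn redis_url(&self) -> &str { "redis://127.0.0.1:6379" }
}

// The common loop then drives the actor until shutdown is signalled:
// let handle = spawn_actor(std::sync::Arc::new(MyOsisActor), shutdown_rx);
```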
## 3. Supervisor Responsibilities and Guarantees

- Lifecycle management
  - Starts/zinit-registers actors, monitors health, restarts them if unhealthy or unresponsive, and cleans up services on shutdown.
  - Health checking includes a ping job if idle (the actor must respond “pong” immediately).
  - Key entry points:
    - [Supervisor::start_actors()](core/supervisor/src/lib.rs:299)
    - Background lifecycle manager (health loop):
      - [Supervisor::spawn_lifecycle_manager()](core/supervisor/src/lib.rs:466)
    - Per-actor health handling and restart:
      - [Supervisor::check_and_restart_actor()](core/supervisor/src/lib.rs:506)
  - Uses zinit as the process manager; see the supervisor readme:
    - [core/supervisor/README.md](core/supervisor/README.md)

- Job supervision
  - Create, start, run-and-await, inspect, stop, delete jobs; dispatch based on script type using hardcoded per-type queues:
    - [Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410)
    - [Supervisor::create_job()](core/supervisor/src/lib.rs:642)
    - [Supervisor::start_job()](core/supervisor/src/lib.rs:658)
    - [Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672)
    - [Supervisor::get_job_status()](core/supervisor/src/lib.rs:705)
    - [Supervisor::get_job_output()](core/supervisor/src/lib.rs:740)
    - [Supervisor::list_jobs()](core/supervisor/src/lib.rs:761)
    - [Supervisor::stop_job()](core/supervisor/src/lib.rs:771)
    - [Supervisor::delete_job()](core/supervisor/src/lib.rs:831)
    - [Supervisor::clear_all_jobs()](core/supervisor/src/lib.rs:844)

- Job dependency utilities
  - Check prerequisites and update dependents upon completion:
    - [Supervisor::check_prerequisites_completed()](core/supervisor/src/lib.rs:862)
    - [Supervisor::update_job_status_and_check_dependents()](core/supervisor/src/lib.rs:884)
    - [Supervisor::dispatch_ready_jobs()](core/supervisor/src/lib.rs:920)

- Redis naming and keys (namespace “hero:”)
  - See the “Redis Schema” section:
    - [core/supervisor/README.md](core/supervisor/README.md)


## 4. Interfaces (APIs and Transports)

The project exposes two complementary ways to interact with the supervisor and job system.

A. OpenRPC Server (JSON-RPC 2.0 over WebSocket or Unix IPC)
- Core types:
  - [Transport](interfaces/openrpc/server/src/lib.rs:21)
  - [OpenRpcServer](interfaces/openrpc/server/src/lib.rs:37)
  - [OpenRpcApi](interfaces/openrpc/server/src/lib.rs:45)
- Server lifecycle:
  - [OpenRpcServer::new()](interfaces/openrpc/server/src/lib.rs:98)
  - [OpenRpcServer::start()](interfaces/openrpc/server/src/lib.rs:117)
- Methods exposed (selected):
  - Authentication: fetch_nonce, authenticate, whoami
  - Script execution: play
  - Job management: create_job, start_job, run_job, get_job_status, get_job_output, get_job_logs, list_jobs, stop_job, delete_job, clear_all_jobs
  - All are registered inside [OpenRpcServer::start()](interfaces/openrpc/server/src/lib.rs:117) using jsonrpsee.
- Transports:
  - WebSocket server binding is provided via jsonrpsee when using [Transport::WebSocket](interfaces/openrpc/server/src/lib.rs:21).
  - Unix Domain Socket (IPC) is implemented using reth-ipc when using [Transport::Unix](interfaces/openrpc/server/src/lib.rs:21).
- Launchers:
  - IPC server binary:
    - [interfaces/unix/server/src/main.rs](interfaces/unix/server/src/main.rs)
  - IPC client (manual testing tool):
    - [interfaces/unix/client/src/main.rs](interfaces/unix/client/src/main.rs)

B. WebSocket Server (Actix)
- A dedicated Actix-based WebSocket server that runs a multi-circle endpoint: each connected circle uses its path “/{circle_pk}”. Each connection is handled by a dedicated Actix actor.
- Server runtime and session actor:
  - [Server](interfaces/websocket/server/src/lib.rs:197)
  - Starts the HTTP/WS server, binds routes, and spawns the WS actor per connection:
    - [Server::spawn_circle_server()](interfaces/websocket/server/src/lib.rs:229)
  - Per-connection handler:
    - [ws_handler()](interfaces/websocket/server/src/lib.rs:688)
- Auth and flow:
  - Signature-based auth and session lifecycle are documented in:
    - [interfaces/websocket/server/docs/ARCHITECTURE.md](interfaces/websocket/server/docs/ARCHITECTURE.md)
  - Nonce issuing, signature verification, and circle membership checks gate protected actions (e.g., play).
- Integration with the supervisor:
  - The WS server issues job requests via the supervisor (e.g., a “play” call builds and runs a job through [Supervisor](core/supervisor/src/lib.rs:23)).


## 5. End-to-End Job Flow

- Creating and starting a job via the OpenRPC server
  - The client calls OpenRPC “create_job”, which builds a [Job](core/supervisor/src/lib.rs:21) and stores it in Redis via [Supervisor::create_job()](core/supervisor/src/lib.rs:642).
  - The client then calls “start_job”, which reads the job to determine its [ScriptType](core/supervisor/src/lib.rs:21), computes the actor queue via [Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410), and pushes the job ID to the actor’s Redis list via [Supervisor::start_job()](core/supervisor/src/lib.rs:658).

- Running-and-awaiting a job in one step
  - The client calls “run_job” or an equivalent flow; the server uses [Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:672):
    - Stores the job,
    - Pushes to the appropriate actor queue,
    - Waits for the result on the dedicated reply queue “hero:q:reply:{job_id}”.

- Actor processing loop
  - The actor BLPOPs its queue (with a timeout), receives a job ID, loads the job, handles “ping” inline, otherwise calls [Actor::process_job()](core/actor/src/actor_trait.rs:80) for execution, and writes status/output back to Redis.
  - The common loop is provided by [Actor::spawn()](core/actor/src/actor_trait.rs:119).

- Health checks
  - The supervisor periodically checks zinit state and may issue ping jobs if idle; failure to respond leads to a restart. See the lifecycle logic:
    - [Supervisor::spawn_lifecycle_manager()](core/supervisor/src/lib.rs:466)
    - [Supervisor::check_and_restart_actor()](core/supervisor/src/lib.rs:506)

- Redis schema pointers (namespace hero:)
  - See the section “Redis Schema for Job Supervision”:
    - [core/supervisor/README.md](core/supervisor/README.md)


## 6. How the Interfaces Fit Together

- The OpenRPC server provides a JSON-RPC 2.0 façade for programmatic control (automation, services).
  - Choose between WebSocket and Unix IPC transports via [Transport](interfaces/openrpc/server/src/lib.rs:21).
  - It wraps the [Supervisor](core/supervisor/src/lib.rs:23), delegating all job and lifecycle supervision calls.

- The WebSocket (Actix) server provides a multi-circle, session-based, interactive API well-suited for browser or persistent WS clients.
  - It authenticates users per-circle, then issues supervisor-backed job calls within the authenticated context.
  - Session isolation is per WS actor instance; see:
    - [interfaces/websocket/server/docs/ARCHITECTURE.md](interfaces/websocket/server/docs/ARCHITECTURE.md)

Both interfaces ultimately converge on the same core abstraction: the [Supervisor](core/supervisor/src/lib.rs:23) orchestrating jobs and actors over Redis with zinit-backed lifecycle guarantees.


## 7. Additional References

- Architecture summary for actor types and scripting:
  - [core/docs/architecture.md](core/docs/architecture.md)

- Supervisor documentation and prerequisites (Redis, zinit):
  - [core/supervisor/README.md](core/supervisor/README.md)

- TUI/CLI examples and lifecycle demos:
  - [core/supervisor/examples](core/supervisor/examples)

- Actor README (queue consumption, Rhai execution, context variables):
  - [core/actor/README.md](core/actor/README.md)
docs/REDIS_QUEUES_GUIDE.md (new file, 199 lines)
@@ -0,0 +1,199 @@
# Redis Queues Guide: Who Pushes Where, When, and How to Inspect

This guide documents the canonical queues used in the project, explains which component pushes to which queue at each step, and provides redis-cli commands to inspect state during development.

Canonical keys
- Job hash (immutable key shape):
  - hero:job:{job_id}
  - Builder: [rust.keys::job_hash()](core/job/src/lib.rs:396)
- Work queues (push here to dispatch work):
  - Type queue: hero:q:work:type:{script_type}
  - Builders:
    - [rust.keys::work_type()](core/job/src/lib.rs:405)
    - [rust.keys::work_group()](core/job/src/lib.rs:411)
    - [rust.keys::work_instance()](core/job/src/lib.rs:420)
- Reply queue (optional, for actors that send explicit replies):
  - hero:q:reply:{job_id}
  - Builder: [rust.keys::reply()](core/job/src/lib.rs:401)
- Control queue (optional stop/control per-type):
  - hero:q:ctl:type:{script_type}
  - Builder: [rust.keys::stop_type()](core/job/src/lib.rs:429)
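To see how these builders map onto concrete key strings, here is a small sketch. It assumes hero_job exposes `keys` and `ScriptType` (as added in this change) and that `ScriptType::OSIS.actor_queue_suffix()` yields `"osis"`; the job ID is purely illustrative.

```rust
use hero_job::{keys, ScriptType};

fn main() {
    let job_id = "7f2c-example"; // hypothetical job ID
    println!("{}", keys::job_hash(job_id));                              // hero:job:7f2c-example
    println!("{}", keys::reply(job_id));                                 // hero:q:reply:7f2c-example
    println!("{}", keys::work_type(&ScriptType::OSIS));                  // hero:q:work:type:osis
    println!("{}", keys::work_group(&ScriptType::OSIS, "default"));      // hero:q:work:type:osis:group:default
    println!("{}", keys::work_instance(&ScriptType::OSIS, "default", "1")); // ...:group:default:inst:1
    println!("{}", keys::stop_type(&ScriptType::OSIS));                  // hero:q:ctl:type:osis
}
```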
## 1) Who pushes where

A. Supervisor: creating, starting, and running jobs
- Create job (stores job hash):
  - [rust.Supervisor::create_job()](core/supervisor/src/lib.rs:660)
  - Persists hero:job:{job_id} via [rust.Job::store_in_redis()](core/job/src/lib.rs:147)
- Start job (dispatch to worker queue):
  - [rust.Supervisor::start_job()](core/supervisor/src/lib.rs:675) → [rust.Supervisor::start_job_using_connection()](core/supervisor/src/lib.rs:599)
  - LPUSH hero:q:work:type:{script_type} using [rust.keys::work_type()](core/job/src/lib.rs:405)
- Run-and-wait (one-shot):
  - [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
  - Stores hero:job:{job_id}, LPUSH hero:q:work:type:{script_type} (same as start)
  - Waits on hero:q:reply:{job_id} (via [rust.keys::reply()](core/job/src/lib.rs:401)) and also polls hero:job:{job_id} for output to support hash-only actors

B. Terminal UI: quick dispatch from the actor TUI
- Stores the job using Job::store_in_redis, then pushes to the type queue:
  - Dispatch code: [core/actor/src/terminal_ui.rs](core/actor/src/terminal_ui.rs:460)
  - LPUSH hero:q:work:type:{script_type} using [rust.keys::work_type()](core/job/src/lib.rs:405)

C. Actors: consuming and completing work
- Consume jobs:
  - Standalone Rhai actor: [rust.spawn_rhai_actor()](core/actor/src/lib.rs:211)
    - BLPOP hero:q:work:type:{script_type} (queue selection computed via [rust.derive_script_type_from_actor_id()](core/actor/src/lib.rs:262), then [rust.keys::work_type()](core/job/src/lib.rs:405))
  - Trait-based actor loop: [rust.Actor::spawn()](core/actor/src/actor_trait.rs:119)
    - BLPOP hero:q:work:type:{script_type} using [rust.keys::work_type()](core/job/src/lib.rs:405)
- Write results:
  - Hash-only (current default): [rust.Job::set_result()](core/job/src/lib.rs:322) updates hero:job:{job_id} with output and status=finished
  - Optional reply queue model: the actor may LPUSH hero:q:reply:{job_id} (if implemented)


## 2) End-to-end flows and the queues involved

Flow A: Two-step (create + start) with Supervisor
- Code path:
  - [rust.Supervisor::create_job()](core/supervisor/src/lib.rs:660)
  - [rust.Supervisor::start_job()](core/supervisor/src/lib.rs:675)
- Keys touched:
  - hero:job:{job_id} (created)
  - hero:q:work:type:{script_type} (LPUSH job_id)
- Expected actor behavior:
  - BLPOP hero:q:work:type:{script_type}
  - Execute the script, then [rust.Job::set_result()](core/job/src/lib.rs:322)
- How to inspect with redis-cli:
  - FLUSHALL (fresh dev) then run create and start
  - Verify job hash:
    - HGETALL hero:job:{job_id}
  - Verify queue length before consumption:
    - LLEN hero:q:work:type:osis
  - See pending items:
    - LRANGE hero:q:work:type:osis 0 -1
  - After the actor runs, verify the result in the job hash:
    - HGET hero:job:{job_id} status
    - HGET hero:job:{job_id} output

Flow B: One-shot (run and await result) with Supervisor
- Code path:
  - [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
  - Uses [rust.keys::reply()](core/job/src/lib.rs:401) and polls the hash for output
- Keys touched:
  - hero:job:{job_id}
  - hero:q:work:type:{script_type}
  - hero:q:reply:{job_id} (only if an actor uses reply queues)
- How to inspect with redis-cli:
  - While waiting:
    - LLEN hero:q:work:type:osis
    - HGET hero:job:{job_id} status
  - If an actor uses reply queues (optional):
    - LLEN hero:q:reply:{job_id}
    - LRANGE hero:q:reply:{job_id} 0 -1
  - After completion:
    - HGET hero:job:{job_id} output

Flow C: Dispatch from the Actor TUI (manual testing)
- Code path:
  - [core/actor/src/terminal_ui.rs](core/actor/src/terminal_ui.rs:460) stores the job and LPUSHes to [rust.keys::work_type()](core/job/src/lib.rs:405)
- Keys touched:
  - hero:job:{job_id}
  - hero:q:work:type:{script_type}
- How to inspect with redis-cli:
  - List all work queues:
    - KEYS hero:q:work:type:*
  - Show items in a specific type queue:
    - LRANGE hero:q:work:type:osis 0 -1
  - Read one pending job:
    - HGETALL hero:job:{job_id}
  - After the actor runs:
    - HGET hero:job:{job_id} status
    - HGET hero:job:{job_id} output


## 3) Example redis-cli sequences

A. Basic OSIS job lifecycle (two-step)
- Prepare
  - FLUSHALL
  - Create and start (via code or supervisor-cli)
- Inspect queue and job
  - KEYS hero:q:work:type:*
  - LLEN hero:q:work:type:osis
  - LRANGE hero:q:work:type:osis 0 -1
  - HGETALL hero:job:{job_id}
- After the actor consumes the job:
  - HGET hero:job:{job_id} status → finished
  - HGET hero:job:{job_id} output → script result
  - LLEN hero:q:work:type:osis → likely 0 if all consumed

B. One-shot run-and-wait (hash-only actor)
- Prepare
  - FLUSHALL
  - Submit via run_job_and_await_result()
- While the supervisor waits:
  - HGET hero:job:{job_id} status → started/finished
  - (Optional) LLEN hero:q:reply:{job_id} → typically 0 if the actor doesn’t use reply queues
- When done:
  - HGET hero:job:{job_id} output → result

C. Listing and cleanup helpers
- List jobs
  - KEYS hero:job:*
- Show a specific job
  - HGETALL hero:job:{job_id}
- Clear all keys (dev only)
  - FLUSHALL


## 4) Where the queue names are computed in code

- Builders for canonical keys:
  - [rust.keys::job_hash()](core/job/src/lib.rs:396)
  - [rust.keys::reply()](core/job/src/lib.rs:401)
  - [rust.keys::work_type()](core/job/src/lib.rs:405)
  - [rust.keys::work_group()](core/job/src/lib.rs:411)
  - [rust.keys::work_instance()](core/job/src/lib.rs:420)
- Supervisor routing and waiting:
  - Type queue selection: [rust.Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410)
  - LPUSH to type queue: [rust.Supervisor::start_job_using_connection()](core/supervisor/src/lib.rs:599)
  - One-shot run and wait: [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
- Actor consumption:
  - Standalone Rhai actor: [rust.spawn_rhai_actor()](core/actor/src/lib.rs:211)
    - Type queue computed via [rust.derive_script_type_from_actor_id()](core/actor/src/lib.rs:262) + [rust.keys::work_type()](core/job/src/lib.rs:405)
  - Trait-based actor loop: [rust.Actor::spawn()](core/actor/src/actor_trait.rs:119)
    - BLPOP type queue via [rust.keys::work_type()](core/job/src/lib.rs:405)


## 5) Quick checklist for debugging

- Nothing consumes from the type queue
  - Is at least one actor process running that BLPOPs hero:q:work:type:{script_type}?
  - LLEN hero:q:work:type:{script_type} greater than 0 means an unconsumed backlog
- Job “Dispatched” but never “Finished”
  - HGET hero:job:{job_id} status
  - Actor logs: check for script errors and verify it is connected to the same Redis
- “run-and-wait” timeout
  - Hash-only actors don’t push to reply queues; the supervisor will still return once it sees hero:job:{job_id}.output set by [rust.Job::set_result()](core/job/src/lib.rs:322)
- Mixed types:
  - Verify you targeted the correct type queue (e.g., osis vs sal): LLEN hero:q:work:type:osis, hero:q:work:type:sal


## 6) Canonical patterns to remember

- To dispatch a job:
  - LPUSH hero:q:work:type:{script_type} {job_id}
- To read job data:
  - HGETALL hero:job:{job_id}
- To wait for output (optional reply model):
  - BLPOP hero:q:reply:{job_id} {timeout_secs}
- To verify system state:
  - KEYS hero:q:*
  - KEYS hero:job:*


This guide reflects the canonical scheme implemented in:
- [rust.Supervisor](core/supervisor/src/lib.rs:1)
- [rust.keys](core/job/src/lib.rs:392)
- [core/actor/src/lib.rs](core/actor/src/lib.rs:1)
- [core/actor/src/actor_trait.rs](core/actor/src/actor_trait.rs:1)
- [core/actor/src/terminal_ui.rs](core/actor/src/terminal_ui.rs:1)
docs/REDIS_QUEUES_NAMING_PROPOSAL.md (new file, 231 lines)
@@ -0,0 +1,231 @@
# Redis Queue Naming Proposal (Multi-Actor, Multi-Type, Scalable)

Goal
- Define a consistent, future-proof Redis naming scheme that:
  - Supports multiple actor types (OSIS, SAL, V, Python)
  - Supports multiple pools/groups and instances per type
  - Enables fair load-balancing and targeted dispatch
  - Works with both “hash-output” actors and “reply-queue” actors
  - Keeps migration straightforward from the current keys

Motivation
- Today, multiple non-unified patterns exist:
  - Per-actor keys like "hero:job:{actor_id}" consumed by the in-crate Rhai actor
  - Per-type keys like "hero:job:actor_queue:{suffix}" used by other components
  - Protocol docs that reference "hero:work_queue:{actor_id}" and "hero:reply:{job_id}"
- This fragmentation causes stuck “Dispatched” jobs when the LPUSH target doesn’t match the BLPOP listener. We need one canonical scheme, with well-defined fallbacks.


## 1) Canonical Key Names

Prefix conventions
- Namespace prefix: hero:
- All queues collected under hero:q:* to separate them from job hashes hero:job:*
- All metadata under hero:meta:* for discoverability

Job and result keys
- Job hash (unchanged): hero:job:{job_id}
- Reply queue: hero:q:reply:{job_id}

Work queues (new canonical)
- Type queue (shared): hero:q:work:type:{script_type}
  - Examples:
    - hero:q:work:type:osis
    - hero:q:work:type:sal
    - hero:q:work:type:v
    - hero:q:work:type:python
- Group queue (optional, shared within a group): hero:q:work:type:{script_type}:group:{group}
  - Examples:
    - hero:q:work:type:osis:group:default
    - hero:q:work:type:sal:group:io
- Instance queue (most specific, used for targeted dispatch): hero:q:work:type:{script_type}:group:{group}:inst:{instance}
  - Examples:
    - hero:q:work:type:osis:group:default:inst:1
    - hero:q:work:type:sal:group:io:inst:3

Control queues (optional, future)
- Stop/control per-type: hero:q:ctl:type:{script_type}
- Stop/control per-instance: hero:q:ctl:type:{script_type}:group:{group}:inst:{instance}

Actor presence and metadata
- Instance presence (ephemeral, with TTL refresh): hero:meta:actor:inst:{script_type}:{group}:{instance}
  - Value: JSON { pid, hostname, started_at, version, capabilities, last_heartbeat }
  - Used by the supervisor to discover live consumers and to select targeted queueing


## 2) Dispatch Strategy

- Default: Push to the Type queue hero:q:work:type:{script_type}
  - Allows N instances to BLPOP the same shared queue (standard fan-out).
- Targeted: If the user or scheduler specifies a group and/or instance, push to the most specific queue
  - Instance queue (highest specificity):
    - hero:q:work:type:{script_type}:group:{group}:inst:{instance}
  - Else Group queue:
    - hero:q:work:type:{script_type}:group:{group}
  - Else Type queue (fallback):
    - hero:q:work:type:{script_type}
- Priority queues (optional extension):
  - Append :prio:{level} to any of the above
  - Actors BLPOP a list of queues in priority order

Example routing
- No group/instance specified:
  - LPUSH hero:q:work:type:osis {job_id}
- Group specified ("default"), no instance:
  - LPUSH hero:q:work:type:osis:group:default {job_id}
- Specific instance:
  - LPUSH hero:q:work:type:osis:group:default:inst:2 {job_id}


## 3) Actor Consumption Strategy

- The actor identifies itself with:
  - script_type (osis/sal/v/python)
  - group (defaults to "default")
  - instance number (unique within the group)
- The actor registers presence:
  - SET hero:meta:actor:inst:{script_type}:{group}:{instance} {...} EX 15
  - Periodically refreshed to act as a heartbeat
- Actor BLPOP order:
  1) Instance queue (most specific)
  2) Group queue
  3) Type queue
- This ensures targeted jobs are taken first (if any), otherwise the actor falls back to the group or shared type queue.
- Actors that implement reply-queue semantics will also LPUSH to hero:q:reply:{job_id} on completion. Others just update hero:job:{job_id} with status+output.
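A hedged sketch of what that consumption order could look like in an actor, using only the key shapes proposed above and the redis crate’s generic command builder. The function name and parameters are illustrative, not an existing API; BLPOP natively accepts multiple keys and checks them in the order given, which is exactly the instance → group → type priority described here.

```rust
// Sketch only: multi-key BLPOP in priority order (instance, then group, then type).
async fn pop_next_job(
    conn: &mut redis::aio::MultiplexedConnection,
    script_type: &str,
    group: &str,
    instance: &str,
) -> redis::RedisResult<Option<(String, String)>> {
    let queues = [
        format!("hero:q:work:type:{}:group:{}:inst:{}", script_type, group, instance),
        format!("hero:q:work:type:{}:group:{}", script_type, group),
        format!("hero:q:work:type:{}", script_type),
    ];
    // Returns (queue_name, job_id) when an item arrives, or None on timeout.
    redis::cmd("BLPOP")
        .arg(&queues[..])
        .arg(5) // timeout in seconds
        .query_async(conn)
        .await
}
```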
## 4) Backward Compatibility And Migration
|
||||
|
||||
- During transition, Supervisor can LPUSH to both:
|
||||
- New canonical queues (hero:q:work:type:...)
|
||||
- Selected legacy queues (hero:job:actor_queue:{suffix}, hero:job:{actor_id}, hero:work_queue:...)
|
||||
- Actors:
|
||||
- Update actors to BLPOP the canonical queues first, then legacy fallback
|
||||
- Phased plan:
|
||||
1) Introduce canonical queues alongside legacy; Supervisor pushes to both (compat mode)
|
||||
2) Switch actors to consume canonical first
|
||||
3) Deprecate legacy queues and remove dual-push
|
||||
- No change to job hashes hero:job:{job_id}
|
||||
|
||||
|
||||
## 5) Required Code Changes (by file)
|
||||
|
||||
Supervisor (routing and reply queue)
|
||||
- Replace queue computation with canonical builder:
|
||||
- [rust.Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410)
|
||||
- Change to build canonical keys given script_type (+ optional group/instance from Job or policy)
|
||||
- Update start logic to LPUSH to canonical queue(s):
|
||||
- [rust.Supervisor::start_job_using_connection()](core/supervisor/src/lib.rs:599)
|
||||
- Use only canonical queue(s). In migration phase, also LPUSH legacy queues.
|
||||
- Standardize reply queue name:
|
||||
- [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
|
||||
- Use hero:q:reply:{job_id}
|
||||
- Keep “poll job hash” fallback for actors that don’t use reply queues
|
||||
- Stop queue naming:
|
||||
- [rust.Supervisor::stop_job()](core/supervisor/src/lib.rs:789)
|
||||
- Use hero:q:ctl:type:{script_type} in canonical mode

Actor (consumption and presence)

- In-crate Rhai actor:
  - Queue key construction and BLPOP list:
    - [rust.spawn_rhai_actor()](core/actor/src/lib.rs:211)
    - Current queue_key at [core/actor/src/lib.rs:220]
  - Replace single-queue BLPOP with multi-key BLPOP in priority order:
    1) hero:q:work:type:{script_type}:group:{group}:inst:{instance}
    2) hero:q:work:type:{script_type}:group:{group}
    3) hero:q:work:type:{script_type}
  - For migration, optionally include legacy queues last.
- Presence registration (periodic SET with TTL):
  - Add at actor startup and refresh on each loop tick
- For actors that implement reply queues:
  - After finishing a job, LPUSH hero:q:reply:{job_id} {result}
- For hash-only actors, continue to call [rust.Job::set_result()](core/job/src/lib.rs:322)

Shared constants (avoid string drift)

- Introduce constants and helpers in a central crate (hero_job) to build keys consistently (see the sketch after this list):
  - fn job_hash_key(job_id) -> "hero:job:{job_id}"
  - fn reply_queue_key(job_id) -> "hero:q:reply:{job_id}"
  - fn work_queue_type(script_type) -> "hero:q:work:type:{type}"
  - fn work_queue_group(script_type, group) -> "hero:q:work:type:{type}:group:{group}"
  - fn work_queue_instance(script_type, group, inst) -> "hero:q:work:type:{type}:group:{group}:inst:{inst}"
- Replace open-coded strings in:
  - [rust.Supervisor](core/supervisor/src/lib.rs:1)
  - [rust.Actor code](core/actor/src/lib.rs:1)
  - Any CLI/TUI or interface components that reference queues
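A sketch of those helpers as they could live in hero_job; taking `&str` for the script type is a simplification (the crate's ScriptType enum would be the natural parameter), and the exact names remain proposals:

```rust
/// Proposed key builders for the canonical scheme, kept in one crate so the
/// Supervisor, actors, and interfaces never hand-format these strings.
pub mod keys {
    pub fn job_hash(job_id: &str) -> String {
        format!("hero:job:{job_id}")
    }

    pub fn reply_queue(job_id: &str) -> String {
        format!("hero:q:reply:{job_id}")
    }

    pub fn work_queue_type(script_type: &str) -> String {
        format!("hero:q:work:type:{script_type}")
    }

    pub fn work_queue_group(script_type: &str, group: &str) -> String {
        format!("{}:group:{group}", work_queue_type(script_type))
    }

    pub fn work_queue_instance(script_type: &str, group: &str, instance: u32) -> String {
        format!("{}:inst:{instance}", work_queue_group(script_type, group))
    }
}
```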

Interfaces

- OpenRPC/WebSocket servers do not need to know queue names; they call the Supervisor API. No changes except to follow the Supervisor’s behavior for “run-and-wait” vs “create+start+get_output” flows.
## 6) Example Scenarios

Scenario A: Single OSIS pool with two instances

- Actors:
  - osis group=default inst=1
  - osis group=default inst=2
- Incoming job (no targeting):
  - LPUSH hero:q:work:type:osis {job_id}
- Actors BLPOP order:
  - inst queue
  - group queue
  - type queue (this one will supply the job)
- Effective result: classic round-robin-like behavior; the two workers share the load.

Scenario B: SAL pool “io” with instance 3; targeted dispatch

- Job sets target group=io and instance=3
- Supervisor LPUSH hero:q:work:type:sal:group:io:inst:3 {job_id}
- Only that instance consumes it, enabling pinning to a specific worker.

Scenario C: Mixed old and new actors (migration window)

- Supervisor pushes to the canonical queue(s) and to a legacy queue hero:job:actor_queue:osis
- New actors consume canonical queues
- Legacy actors consume the legacy queue
- No job is stuck; both ecosystems coexist until the legacy path is removed.
## 7) Phased Migration Plan

Phase 0 (Docs + helpers)
- Add helpers in hero_job to compute keys (see “Shared constants”)
- Document the new scheme and consumption order (this file)

Phase 1 (Supervisor)
- Update [rust.Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410) and [rust.Supervisor::start_job_using_connection()](core/supervisor/src/lib.rs:599) to use canonical queues
- Keep dual-push to legacy queues behind a feature flag or config for rollout
- Standardize the reply queue to hero:q:reply:{job_id} in [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)

Phase 2 (Actors)
- Update [rust.spawn_rhai_actor()](core/actor/src/lib.rs:211) to BLPOP from canonical queues in priority order and to register presence keys
- Optionally emit a reply to hero:q:reply:{job_id} in addition to the hash-based result (feature flag)

Phase 3 (Cleanup)
- After all actors and Supervisor deployments are updated and stable, remove the legacy dual-push and fallback consume paths
## 8) Optional Enhancements

- Priority queues:
  - Suffix queues with :prio:{0|1|2}; actors BLPOP [inst prio0, group prio0, type prio0, inst prio1, group prio1, type prio1, ...]
- Rate limiting/back-pressure:
  - Use presence metadata to report busy state or in-flight job counts; the Supervisor can target instance queues accordingly.
- Resilience:
  - Move to Redis Streams for job event logs; lists remain fine for simple FIFO processing.
- Observability:
  - hero:meta:actor:* and hero:meta:queue:stats:* to keep simple metrics for dashboards.
## 9) Summary

- Canonicalize to hero:q:work:type:{...} (+ group, + instance), and hero:q:reply:{job_id}
- Actors consume instance → group → type
- Supervisor pushes to the most specific queue available, defaulting to type
- Provide helpers to build keys and remove ad-hoc string formatting
- Migrate with a dual-push (canonical + legacy) phase to avoid downtime

Proposed touchpoints to implement (clickable references)

- [rust.Supervisor::get_actor_queue_key()](core/supervisor/src/lib.rs:410)
- [rust.Supervisor::start_job_using_connection()](core/supervisor/src/lib.rs:599)
- [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
- [rust.spawn_rhai_actor()](core/actor/src/lib.rs:211)
- [core/actor/src/lib.rs](core/actor/src/lib.rs:220)
- [rust.Job::set_result()](core/job/src/lib.rs:322)
124
docs/RPC_IMPLEMENTATION.md
Normal file
124
docs/RPC_IMPLEMENTATION.md
Normal file
@ -0,0 +1,124 @@
# RPC Implementation (jsonrpsee) for Supervisor

Objective

- Provide an HTTP/WS JSON-RPC server with jsonrpsee that exposes all Supervisor job operations.
- Use the current Supervisor and job model directly; methods should map 1:1 to Supervisor APIs.
- Keep the implementation simple: a single transport (jsonrpsee::server::Server on a SocketAddr).

Canonical model

- Jobs are stored and updated in Redis under hero:job:{job_id}.
- Work is dispatched to type queues hero:q:work:type:{script_type}.
- Actors consume by script type and update the job hash status/output.
- Server-side types and queues are already aligned in code (see keys in [rust.keys](core/job/src/lib.rs:392)).

What exists today (summary)

- Server state and registry
  - [rust.OpenRpcServer](interfaces/openrpc/server/src/lib.rs:37) holds a Supervisor inside an RwLock.
  - Methods are registered manually with jsonrpsee::RpcModule in [rust.OpenRpcServer::start()](interfaces/openrpc/server/src/lib.rs:117).
- Methods wired vs. stubbed
  - Wired: create_job, start_job, get_job_status, get_job_output, stop_job, delete_job, clear_all_jobs.
  - Stubbed or partial: run_job (returns a formatted string), play (returns canned output), get_job_logs (mocked), list_jobs (returns fabricated Job objects from IDs).
- Transports
  - start() supports a Unix transport through reth-ipc and a WebSocket SocketAddr. We only need HTTP/WS via jsonrpsee::server::Server::builder().build(addr).
Target surface (final)

- Methods
  - fetch_nonce(pubkey: String) -> String [optional now]
  - authenticate(pubkey: String, signature: String, nonce: String) -> bool [optional now]
  - whoami() -> String [optional now]
  - play(script: String) -> PlayResult { output: String } [maps to run_job with a chosen default ScriptType]
  - create_job(job: JobParams) -> String (job_id)
  - start_job(job_id: String) -> { success: bool }
  - run_job(script: String, script_type: ScriptType, prerequisites?: Vec<String>) -> String (output)
  - get_job_status(job_id: String) -> JobStatus
  - get_job_output(job_id: String) -> String
  - get_job_logs(job_id: String) -> JobLogsResult { logs: String | null }
  - list_jobs() -> Vec<String>
  - stop_job(job_id: String) -> null
  - delete_job(job_id: String) -> null
  - clear_all_jobs() -> null
- Types
  - ScriptType = OSIS | SAL | V | Python ([rust.ScriptType](core/job/src/lib.rs:16))
  - JobParams: script, script_type, caller_id, context_id, timeout?, prerequisites?
  - JobStatus: Dispatched | WaitingForPrerequisites | Started | Error | Finished
  - DTOs in [rust.interfaces/openrpc/server/src/types.rs](interfaces/openrpc/server/src/types.rs:1)
Required changes

1) Transport: simplify to HTTP/WS on SocketAddr

- Remove Unix transport: in [rust.OpenRpcServer::start()](interfaces/openrpc/server/src/lib.rs:247), delete Transport::Unix and the reth-ipc usage.
- Use jsonrpsee::server::Server::builder().build(addr) and server.start(module), per the upstream examples (see the sketch below):
  - [rust.http](reference_jsonrpsee_crate_examples/http.rs:53)
  - [rust.ws](reference_jsonrpsee_crate_examples/ws.rs:55)
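A minimal sketch of that transport setup, following the bundled jsonrpsee examples referenced above; the placeholder method stands in for the real Supervisor method registrations:

```rust
use std::net::SocketAddr;

use jsonrpsee::server::{RpcModule, Server, ServerHandle};

/// Build one RpcModule, bind a single HTTP/WS server on `addr`, and return its handle.
async fn start_on(addr: SocketAddr) -> anyhow::Result<(SocketAddr, ServerHandle)> {
    let mut module = RpcModule::new(());
    // Placeholder; the real server registers all Supervisor-backed methods here.
    module.register_method("say_hello", |_, _, _| "lo")?;

    let server = Server::builder().build(addr).await?;
    let bound = server.local_addr()?;
    let handle = server.start(module);
    Ok((bound, handle))
}
```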

2) ScriptType consistency end-to-end

- Ensure ScriptType is hero_job::ScriptType (OSIS | SAL | V | Python) in request/response types (already used in [rust.JobParams](interfaces/openrpc/server/src/types.rs:6)). If openrpc.json is used to generate docs or clients, update its enum to match.

3) Implement run_job (one-shot)

- In [rust.OpenRpcApiServer::run_job](interfaces/openrpc/server/src/lib.rs:366):
  - Build a hero_job::JobBuilder with caller_id/context_id placeholders (or accept them as parameters later).
  - Set script, script_type, optional prerequisites, and a default timeout.
  - Call supervisor.run_job_and_await_result(&job) and return the output string.

4) Implement play as a thin wrapper

- In [rust.OpenRpcApiServer::play](interfaces/openrpc/server/src/lib.rs:304):
  - Choose a default ScriptType (recommendation: SAL), then delegate to run_job(script, SAL, None).
  - Return PlayResult { output }.

5) Implement get_job_logs via Supervisor

- Replace the mocked return in [rust.get_job_logs](interfaces/openrpc/server/src/lib.rs:400) with a call to:
  - supervisor.get_job_logs(&job_id) -> Option<String>, wrapped into JobLogsResult { logs }.

6) list_jobs should return Vec<String> (IDs only)

- Replace the placeholder construction in [rust.list_jobs](interfaces/openrpc/server/src/lib.rs:407) with:
  - supervisor.list_jobs() returning Vec<String> directly.
- Optionally add get_job(job_id) later if needed.

7) Error handling

- Map SupervisorError to jsonrpsee error codes (see the sketch below):
  - Invalid input → ErrorCode::InvalidParams
  - Timeout → a custom code or InvalidParams; optionally use -32002 as a custom timeout code.
  - Internal IO/Redis errors → ErrorCode::InternalError
- Keep server logs descriptive; return minimal error messages to clients.
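The mapping as this commit implements it in the server crate (the import paths shown here are an assumption; the function body mirrors interfaces/openrpc/server/src/lib.rs):

```rust
use hero_supervisor::SupervisorError;
use jsonrpsee::types::ErrorCode;

// Mirrors map_sup_error_to_rpc in interfaces/openrpc/server/src/lib.rs.
fn map_sup_error_to_rpc(e: &SupervisorError) -> ErrorCode {
    match e {
        SupervisorError::InvalidInput(_) | SupervisorError::JobError(_) => ErrorCode::InvalidParams,
        SupervisorError::Timeout(_) => ErrorCode::ServerError(-32002),
        _ => ErrorCode::InternalError,
    }
}
```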

8) Server lifecycle

- Keep OpenRpcServer::new() to build with TOML or builder defaults (see [rust.OpenRpcServer::new()](interfaces/openrpc/server/src/lib.rs:98)).
- Expose a “start_on(addr)” function that returns a ServerHandle (just like the upstream examples).
- Optional: expose Supervisor::start_rpc_server(host, port) to own the lifecycle from the Supervisor; or leave it in interfaces/openrpc with a thin cmd binary to start it.

Non-goals (for this phase)

- Unix IPC transport (reth-ipc).
- Advanced middleware (CORS, host filters, rate-limiting).
- RPC auth flows (fetch_nonce/authenticate/whoami) beyond placeholders.
- Pub/Sub over RPC.

Reference mapping (clickable)

- Server core and methods:
  - [rust.OpenRpcServer](interfaces/openrpc/server/src/lib.rs:37)
  - [rust.OpenRpcApi](interfaces/openrpc/server/src/lib.rs:45)
  - [rust.OpenRpcServer::start()](interfaces/openrpc/server/src/lib.rs:117)
  - [rust.JobParams](interfaces/openrpc/server/src/types.rs:6)
  - [rust.StartJobResult](interfaces/openrpc/server/src/types.rs:23)
  - [rust.JobLogsResult](interfaces/openrpc/server/src/types.rs:29)
- Supervisor backend:
  - [rust.Supervisor::create_job()](core/supervisor/src/lib.rs:660)
  - [rust.Supervisor::start_job()](core/supervisor/src/lib.rs:675)
  - [rust.Supervisor::run_job_and_await_result()](core/supervisor/src/lib.rs:689)
  - [rust.Supervisor::get_job_status()](core/supervisor/src/lib.rs:723)
  - [rust.Supervisor::get_job_output()](core/supervisor/src/lib.rs:758)
  - [rust.Supervisor::get_job_logs()](core/supervisor/src/lib.rs:817)
  - [rust.Supervisor::list_jobs()](core/supervisor/src/lib.rs:780)
  - [rust.Supervisor::stop_job()](core/supervisor/src/lib.rs:789)
  - [rust.Supervisor::delete_job()](core/supervisor/src/lib.rs:850)
  - [rust.Supervisor::clear_all_jobs()](core/supervisor/src/lib.rs:862)
- jsonrpsee examples to replicate transport and registration patterns:
  - HTTP: [rust.http example](reference_jsonrpsee_crate_examples/http.rs:53)
  - WS: [rust.ws example](reference_jsonrpsee_crate_examples/ws.rs:55)

Acceptance checklist

- Server starts on a host:port using jsonrpsee::server::Server.
- All Supervisor operations are callable over RPC, with a 1:1 mapping, returning the correct DTOs.
- ScriptType uses OSIS|SAL|V|Python.
- list_jobs returns Vec<String> and no fake job objects.
- run_job and play perform real execution and return actual outputs.
- No Unix IPC code path remains in start().
@ -265,11 +265,11 @@
"params": [],
"result": {
"name": "jobList",
"description": "List of all jobs.",
"description": "List of all job IDs.",
"schema": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Job"
"type": "string"
}
}
}
@ -343,7 +343,7 @@
},
"ScriptType": {
"type": "string",
"enum": ["HeroScript", "RhaiSAL", "RhaiDSL"],
"enum": ["OSIS", "SAL", "V", "Python"],
"description": "The type of script to execute."
},
"JobStatus": {
@ -38,12 +38,6 @@ enum Commands {
#[arg(long, default_value = "ws://127.0.0.1:9944")]
url: String,
},
/// Connect to Unix socket server
Unix {
/// Unix socket path
#[arg(long, default_value = "/tmp/hero-openrpc.sock")]
socket_path: PathBuf,
},
}

/// Available RPC methods with descriptions
@ -161,10 +155,6 @@ async fn main() -> Result<()> {
println!("{} {}", "Connecting to WebSocket server:".green(), url.cyan());
ClientTransport::WebSocket(url)
}
Commands::Unix { socket_path } => {
println!("{} {:?}", "Connecting to Unix socket server:".green(), socket_path);
ClientTransport::Unix(socket_path)
}
};

// Connect to the server
@ -282,15 +272,18 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.with_prompt("Signature (hex)")
.interact_text()?;

let nonce: String = Input::new()
.with_prompt("Nonce (hex) - fetch via fetch_nonce first")
.interact_text()?;

let result = client.authenticate(pubkey, signature, nonce).await?;
println!("{} {}", "Authentication result:".green().bold(),
println!("{} {}", "Authentication result:".green().bold(),
if result { "Success".green() } else { "Failed".red() });
}

"whoami" => {
let result = client.whoami().await?;
println!("{} {}", "User info:".green().bold(),
serde_json::to_string_pretty(&result)?.cyan());
println!("{} {}", "User info:".green().bold(), result.cyan());
}

"play" => {
@ -307,7 +300,7 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.with_prompt("Script content")
.interact_text()?;

let script_types = ["HeroScript", "RhaiSAL", "RhaiDSL"];
let script_types = ["OSIS", "SAL", "V", "Python"];
let script_type_selection = Select::new()
.with_prompt("Script type")
.items(&script_types)
@ -315,10 +308,10 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.interact()?;

let script_type = match script_type_selection {
0 => ScriptType::HeroScript,
1 => ScriptType::RhaiSAL,
2 => ScriptType::RhaiDSL,
_ => ScriptType::HeroScript,
0 => ScriptType::OSIS,
1 => ScriptType::SAL,
2 => ScriptType::V,
_ => ScriptType::Python,
};

let add_prerequisites = Confirm::new()
@ -335,9 +328,34 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
None
};

let caller_id: String = Input::new()
.with_prompt("Caller ID")
.interact_text()?;

let context_id: String = Input::new()
.with_prompt("Context ID")
.interact_text()?;

let specify_timeout = Confirm::new()
.with_prompt("Specify timeout (seconds)?")
.default(false)
.interact()?;

let timeout = if specify_timeout {
let t: u64 = Input::new()
.with_prompt("Timeout (seconds)")
.interact_text()?;
Some(t)
} else {
None
};

let job_params = JobParams {
script,
script_type,
caller_id,
context_id,
timeout,
prerequisites,
};

@ -360,7 +378,7 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.with_prompt("Script content")
.interact_text()?;

let script_types = ["HeroScript", "RhaiSAL", "RhaiDSL"];
let script_types = ["OSIS", "SAL", "V", "Python"];
let script_type_selection = Select::new()
.with_prompt("Script type")
.items(&script_types)
@ -368,10 +386,10 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.interact()?;

let script_type = match script_type_selection {
0 => ScriptType::HeroScript,
1 => ScriptType::RhaiSAL,
2 => ScriptType::RhaiDSL,
_ => ScriptType::HeroScript,
0 => ScriptType::OSIS,
1 => ScriptType::SAL,
2 => ScriptType::V,
_ => ScriptType::Python,
};

let add_prerequisites = Confirm::new()
@ -416,18 +434,17 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
.interact_text()?;

let result = client.get_job_logs(job_id).await?;
println!("{} {}", "Job logs:".green().bold(), result.logs.cyan());
match result.logs {
Some(logs) => println!("{} {}", "Job logs:".green().bold(), logs.cyan()),
None => println!("{} {}", "Job logs:".green().bold(), "(no logs)".yellow()),
}
}

"list_jobs" => {
let result = client.list_jobs().await?;
println!("{}", "Jobs:".green().bold());
for job in result {
println!(" {} - {} ({:?})",
job.id().yellow(),
job.script_type(),
job.status()
);
println!("{}", "Job IDs:".green().bold());
for id in result {
println!(" {}", id.yellow());
}
}

@ -1,6 +1,6 @@
use anyhow::Result;
use async_trait::async_trait;
use hero_job::{Job, JobStatus, ScriptType};
use hero_job::{JobStatus, ScriptType};
use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::ClientError;
use jsonrpsee::proc_macros::rpc;
@ -37,7 +37,7 @@ pub trait OpenRpcClient {
) -> Result<bool, ClientError>;

#[method(name = "whoami")]
async fn whoami(&self) -> Result<serde_json::Value, ClientError>;
async fn whoami(&self) -> Result<String, ClientError>;

// Script execution
#[method(name = "play")]
@ -68,7 +68,7 @@ pub trait OpenRpcClient {
async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ClientError>;

#[method(name = "list_jobs")]
async fn list_jobs(&self) -> Result<Vec<Job>, ClientError>;
async fn list_jobs(&self) -> Result<Vec<String>, ClientError>;

#[method(name = "stop_job")]
async fn stop_job(&self, job_id: String) -> Result<(), ClientError>;
@ -146,7 +146,7 @@ impl HeroOpenRpcClient {
}

/// Delegate to whoami on the underlying client
pub async fn whoami(&self) -> Result<serde_json::Value, ClientError> {
pub async fn whoami(&self) -> Result<String, ClientError> {
self.client.whoami().await
}

@ -191,7 +191,7 @@ impl HeroOpenRpcClient {
}

/// Delegate to list_jobs on the underlying client
pub async fn list_jobs(&self) -> Result<Vec<Job>, ClientError> {
pub async fn list_jobs(&self) -> Result<Vec<String>, ClientError> {
self.client.list_jobs().await
}

@ -1,11 +1,14 @@
use hero_job::ScriptType;
use serde::{Deserialize, Serialize};

/// Parameters for creating a job
/** Parameters for creating a job (must mirror server DTO) */
#[derive(Debug, Serialize, Deserialize)]
pub struct JobParams {
pub script: String,
pub script_type: ScriptType,
pub caller_id: String,
pub context_id: String,
pub timeout: Option<u64>, // seconds
pub prerequisites: Option<Vec<String>>,
}

@ -21,8 +24,8 @@ pub struct StartJobResult {
pub success: bool,
}

/// Result of getting job logs
/** Result of getting job logs */
#[derive(Debug, Serialize, Deserialize)]
pub struct JobLogsResult {
pub logs: String,
pub logs: Option<String>,
}

@ -19,10 +19,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
clap = { version = "4.0", features = ["derive"] }

# JSON-RPC dependencies
jsonrpsee = { version = "0.21", features = [
"server",
"macros"
] }
jsonrpsee = { version = "0.21", features = ["server", "macros"] }
jsonrpsee-types = "0.21"
uuid = { version = "1.6", features = ["v4", "serde"] }
chrono = { version = "0.4", features = ["serde"] }

@ -8,7 +8,7 @@ use tracing_subscriber;

#[derive(Parser)]
#[command(name = "hero-openrpc-server")]
#[command(about = "Hero OpenRPC Server - WebSocket and Unix socket JSON-RPC server")]
#[command(about = "Hero OpenRPC Server - JSON-RPC over HTTP/WS")]
struct Cli {
#[command(subcommand)]
command: Commands,
@ -34,12 +34,6 @@ enum Commands {
#[arg(long, default_value = "127.0.0.1:9944")]
addr: SocketAddr,
},
/// Start Unix socket server
Unix {
/// Unix socket path
#[arg(long, default_value = "/tmp/hero-openrpc.sock")]
socket_path: PathBuf,
},
}

#[tokio::main]
@ -65,14 +59,6 @@ async fn main() -> Result<()> {
info!("Starting WebSocket server on {}", addr);
Transport::WebSocket(addr)
}
Commands::Unix { socket_path } => {
info!("Starting Unix socket server on {:?}", socket_path);
// Remove existing socket file if it exists
if socket_path.exists() {
std::fs::remove_file(&socket_path)?;
}
Transport::Unix(socket_path)
}
};

let config = OpenRpcServerConfig {

@ -1,6 +1,6 @@
use anyhow::Result;
use hero_job::{Job, JobBuilder, JobStatus, ScriptType};
use hero_supervisor::{Supervisor, SupervisorBuilder};
use hero_supervisor::{Supervisor, SupervisorBuilder, SupervisorError};
use jsonrpsee::core::async_trait;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{ServerBuilder, ServerHandle};
@ -12,17 +12,24 @@ use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::error;

fn map_sup_error_to_rpc(e: &SupervisorError) -> ErrorCode {
match e {
SupervisorError::InvalidInput(_) | SupervisorError::JobError(_) => ErrorCode::InvalidParams,
SupervisorError::Timeout(_) => ErrorCode::ServerError(-32002),
_ => ErrorCode::InternalError,
}
}

mod auth;
pub mod types;

pub use auth::*;
pub use types::*;

/// Transport type for the OpenRPC server
/** Transport type for the OpenRPC server */
#[derive(Debug, Clone)]
pub enum Transport {
WebSocket(SocketAddr),
Unix(PathBuf),
}

/// OpenRPC server configuration
@ -82,7 +89,7 @@ pub trait OpenRpcApi {
async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ErrorCode>;

#[method(name = "list_jobs")]
async fn list_jobs(&self) -> Result<Vec<Job>, ErrorCode>;
async fn list_jobs(&self) -> Result<Vec<String>, ErrorCode>;

#[method(name = "stop_job")]
async fn stop_job(&self, job_id: String) -> Result<(), ErrorCode>;
@ -114,8 +121,8 @@ impl OpenRpcServer {
})
}

/// Start the OpenRPC server
pub async fn start(self, config: OpenRpcServerConfig) -> Result<ServerHandle> {
/// Start the OpenRPC server on the given SocketAddr (HTTP/WS only)
pub async fn start_on(self, addr: SocketAddr) -> Result<ServerHandle> {
let mut module = RpcModule::new(());

// Register all the RPC methods
@ -244,18 +251,17 @@ impl OpenRpcServer {
}
})?;

let server = ServerBuilder::default()
.build(addr)
.await?;
let handle = server.start(module);
Ok(handle)
}

/// Start the OpenRPC server (config wrapper)
pub async fn start(self, config: OpenRpcServerConfig) -> Result<ServerHandle> {
match config.transport {
Transport::WebSocket(addr) => {
let server = ServerBuilder::default()
.build(addr)
.await?;
let handle = server.start(module);
Ok(handle)
}
Transport::Unix(_path) => {
// Unix socket transport not yet implemented in jsonrpsee 0.21
return Err(anyhow::anyhow!("Unix socket transport not yet supported").into());
}
Transport::WebSocket(addr) => self.start_on(addr).await,
}
}
}
@ -295,12 +301,8 @@ impl OpenRpcApiServer for OpenRpcServer {
}

async fn play(&self, script: String) -> Result<PlayResult, ErrorCode> {
let _supervisor = self.supervisor.read().await;

// For now, return a simple result since we need to implement execute_script method
Ok(PlayResult {
output: format!("Script executed: {}", script)
})
let output = self.run_job(script, ScriptType::SAL, None).await?;
Ok(PlayResult { output })
}

async fn create_job(&self, job_params: JobParams) -> Result<String, ErrorCode> {
@ -360,10 +362,37 @@ impl OpenRpcApiServer for OpenRpcServer {
&self,
script: String,
script_type: ScriptType,
_prerequisites: Option<Vec<String>>,
prerequisites: Option<Vec<String>>,
) -> Result<String, ErrorCode> {
// For now, return a simple result
Ok(format!("Job executed with script: {} (type: {:?})", script, script_type))
let supervisor = self.supervisor.read().await;

// Build job with defaults and optional prerequisites
let mut builder = JobBuilder::new()
.caller_id("rpc-caller")
.context_id("rpc-context")
.script(&script)
.script_type(script_type)
.timeout(std::time::Duration::from_secs(30));

if let Some(prs) = prerequisites {
builder = builder.prerequisites(prs);
}

let job = match builder.build() {
Ok(j) => j,
Err(e) => {
error!("Failed to build job in run_job: {}", e);
return Err(ErrorCode::InvalidParams);
}
};

match supervisor.run_job_and_await_result(&job).await {
Ok(output) => Ok(output),
Err(e) => {
error!("run_job failed: {}", e);
Err(map_sup_error_to_rpc(&e))
}
}
}

async fn get_job_status(&self, job_id: String) -> Result<JobStatus, ErrorCode> {
@ -373,7 +402,7 @@ impl OpenRpcApiServer for OpenRpcServer {
Ok(status) => Ok(status),
Err(e) => {
error!("Failed to get job status for {}: {}", job_id, e);
Err(ErrorCode::InvalidParams)
Err(map_sup_error_to_rpc(&e))
}
}
}
@ -385,50 +414,29 @@ impl OpenRpcApiServer for OpenRpcServer {
Ok(output) => Ok(output.unwrap_or_else(|| "No output available".to_string())),
Err(e) => {
error!("Failed to get job output for {}: {}", job_id, e);
Err(ErrorCode::InvalidParams)
Err(map_sup_error_to_rpc(&e))
}
}
}

async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ErrorCode> {
// For now, return mock logs
Ok(JobLogsResult {
logs: format!("Logs for job {}", job_id),
})
let supervisor = self.supervisor.read().await;
match supervisor.get_job_logs(&job_id).await {
Ok(logs_opt) => Ok(JobLogsResult { logs: logs_opt }),
Err(e) => {
error!("Failed to get job logs for {}: {}", job_id, e);
Err(map_sup_error_to_rpc(&e))
}
}
}

async fn list_jobs(&self) -> Result<Vec<Job>, ErrorCode> {
async fn list_jobs(&self) -> Result<Vec<String>, ErrorCode> {
let supervisor = self.supervisor.read().await;

match supervisor.list_jobs().await {
Ok(job_ids) => {
// For now, create minimal Job objects with just the IDs
// In a real implementation, we'd need a supervisor.get_job() method
let jobs: Vec<Job> = job_ids.into_iter().map(|job_id| {
// Create a minimal job object - this is a temporary solution
// until supervisor.get_job() is implemented
Job {
id: job_id,
caller_id: "unknown".to_string(),
context_id: "unknown".to_string(),
script: "unknown".to_string(),
script_type: ScriptType::OSIS,
timeout: std::time::Duration::from_secs(30),
retries: 0,
concurrent: false,
log_path: None,
env_vars: std::collections::HashMap::new(),
prerequisites: Vec::new(),
dependents: Vec::new(),
created_at: chrono::Utc::now(),
updated_at: chrono::Utc::now(),
}
}).collect();
Ok(jobs)
},
Ok(job_ids) => Ok(job_ids),
Err(e) => {
error!("Failed to list jobs: {}", e);
Err(ErrorCode::InternalError)
Err(map_sup_error_to_rpc(&e))
}
}
}
@ -440,7 +448,7 @@ impl OpenRpcApiServer for OpenRpcServer {
Ok(_) => Ok(()),
Err(e) => {
error!("Failed to stop job {}: {}", job_id, e);
Err(ErrorCode::InvalidParams)
Err(map_sup_error_to_rpc(&e))
}
}
}
@ -452,7 +460,7 @@ impl OpenRpcApiServer for OpenRpcServer {
Ok(_) => Ok(()),
Err(e) => {
error!("Failed to delete job {}: {}", job_id, e);
Err(ErrorCode::InvalidParams)
Err(map_sup_error_to_rpc(&e))
}
}
}
@ -464,7 +472,7 @@ impl OpenRpcApiServer for OpenRpcServer {
Ok(_) => Ok(()),
Err(e) => {
error!("Failed to clear all jobs: {}", e);
Err(ErrorCode::InternalError)
Err(map_sup_error_to_rpc(&e))
}
}
}

@ -24,8 +24,8 @@ pub struct StartJobResult {
pub success: bool,
}

/// Result of getting job logs
/** Result of getting job logs */
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JobLogsResult {
pub logs: String,
pub logs: Option<String>,
}

@ -204,13 +204,13 @@ async fn test_list_jobs() {
let result = server.list_jobs().await;
assert!(result.is_ok());

let jobs = result.unwrap();
assert!(jobs.len() >= 3); // Should have at least the 3 jobs we created
let job_ids = result.unwrap();
assert!(job_ids.len() >= 3); // Should have at least the 3 jobs we created

// Verify job structure
for job in jobs {
assert!(!job.id.is_empty());
assert!(uuid::Uuid::parse_str(&job.id).is_ok());
// Verify job IDs are valid UUIDs
for id in job_ids {
assert!(!id.is_empty());
assert!(uuid::Uuid::parse_str(&id).is_ok());
}
}

@ -337,7 +337,10 @@ async fn test_get_job_logs() {
assert!(result.is_ok());

let logs_result = result.unwrap();
assert!(!logs_result.logs.is_empty());
match logs_result.logs {
Some(ref logs) => assert!(!logs.is_empty()),
None => {} // acceptable when no logs are available
}
}

#[tokio::test]

@ -1,6 +1,18 @@
|
||||
[package]
|
||||
name = "hero-client-unix"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0"
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
|
||||
|
||||
# JSON-RPC async client and params types
|
||||
jsonrpsee = { version = "0.21", features = ["macros", "async-client"] }
|
||||
jsonrpsee-types = "0.21"
|
||||
|
||||
# IPC transport
|
||||
reth-ipc = { git = "https://github.com/paradigmxyz/reth", package = "reth-ipc" }
|
||||
|
@ -1,3 +1,124 @@
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::Parser;
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::rpc_params;
|
||||
use reth_ipc::client::IpcClientBuilder;
|
||||
use serde_json::Value;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
/// Simple IPC (Unix socket) JSON-RPC client for manual testing.
|
||||
///
|
||||
/// Examples:
|
||||
/// - Call method without params:
|
||||
/// hero-client-unix --socket /tmp/baobab.ipc --method whoami
|
||||
///
|
||||
/// - Call method with positional params (as JSON array):
|
||||
/// hero-client-unix --socket /tmp/baobab.ipc --method authenticate --params '["pubkey","signature","nonce"]'
|
||||
///
|
||||
/// - Call method with single object param:
|
||||
/// hero-client-unix --socket /tmp/baobab.ipc --method create_job --params '{"job_id":"abc"}'
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "hero-client-unix", version, about = "IPC JSON-RPC client")]
|
||||
struct Args {
|
||||
/// Filesystem path to the Unix domain socket
|
||||
#[arg(long, default_value = "/tmp/baobab.ipc", env = "HERO_IPC_SOCKET")]
|
||||
socket: PathBuf,
|
||||
|
||||
/// JSON-RPC method name to call
|
||||
#[arg(long)]
|
||||
method: String,
|
||||
|
||||
/// JSON string for params. Either an array for positional params or an object for named params.
|
||||
/// Defaults to [] (no params).
|
||||
#[arg(long, default_value = "[]")]
|
||||
params: String,
|
||||
|
||||
/// Log filter (e.g., info, debug, trace)
|
||||
#[arg(long, default_value = "info", env = "RUST_LOG")]
|
||||
log: String,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let args = Args::parse();
|
||||
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(EnvFilter::new(args.log.clone()))
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let socket_str = args.socket.to_string_lossy().to_string();
|
||||
let client = IpcClientBuilder::default().build(&socket_str).await?;
|
||||
|
||||
let params_value: Value = serde_json::from_str(&args.params)?;
|
||||
|
||||
// We deserialize responses to serde_json::Value for generality.
|
||||
// You can set a concrete type instead if needed.
|
||||
let result: Value = match params_value {
|
||||
Value::Array(arr) => match arr.len() {
|
||||
0 => client.request(&args.method, rpc_params![]).await?,
|
||||
1 => client.request(&args.method, rpc_params![arr[0].clone()]).await?,
|
||||
2 => client.request(&args.method, rpc_params![arr[0].clone(), arr[1].clone()]).await?,
|
||||
3 => client
|
||||
.request(&args.method, rpc_params![arr[0].clone(), arr[1].clone(), arr[2].clone()])
|
||||
.await?,
|
||||
4 => client
|
||||
.request(
|
||||
&args.method,
|
||||
rpc_params![arr[0].clone(), arr[1].clone(), arr[2].clone(), arr[3].clone()],
|
||||
)
|
||||
.await?,
|
||||
5 => client
|
||||
.request(
|
||||
&args.method,
|
||||
rpc_params![
|
||||
arr[0].clone(),
|
||||
arr[1].clone(),
|
||||
arr[2].clone(),
|
||||
arr[3].clone(),
|
||||
arr[4].clone()
|
||||
],
|
||||
)
|
||||
.await?,
|
||||
6 => client
|
||||
.request(
|
||||
&args.method,
|
||||
rpc_params![
|
||||
arr[0].clone(),
|
||||
arr[1].clone(),
|
||||
arr[2].clone(),
|
||||
arr[3].clone(),
|
||||
arr[4].clone(),
|
||||
arr[5].clone()
|
||||
],
|
||||
)
|
||||
.await?,
|
||||
7 => client
|
||||
.request(
|
||||
&args.method,
|
||||
rpc_params![
|
||||
arr[0].clone(),
|
||||
arr[1].clone(),
|
||||
arr[2].clone(),
|
||||
arr[3].clone(),
|
||||
arr[4].clone(),
|
||||
arr[5].clone(),
|
||||
arr[6].clone()
|
||||
],
|
||||
)
|
||||
.await?,
|
||||
_ => {
|
||||
// Fallback: send entire array as a single param to avoid combinatorial explosion.
|
||||
// Adjust if your server expects strictly positional expansion beyond 7 items.
|
||||
client.request(&args.method, rpc_params![Value::Array(arr)]).await?
|
||||
}
|
||||
},
|
||||
// Single non-array param (object, string, number, etc.)
|
||||
other => client.request(&args.method, rpc_params![other]).await?,
|
||||
};
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&result)?);
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1,6 +1,14 @@
|
||||
[package]
|
||||
name = "hero-server-unix"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0"
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "signal"] }
|
||||
|
||||
# Reuse the OpenRPC server crate that registers all methods and now supports IPC
|
||||
hero-openrpc-server = { path = "../../openrpc/server" }
|
||||
|
@ -1,3 +1,64 @@
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::Parser;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
use hero_openrpc_server::{OpenRpcServer, OpenRpcServerConfig, Transport};
|
||||
|
||||
/// IPC (Unix socket) JSON-RPC server launcher.
|
||||
///
|
||||
/// This binary starts the OpenRPC server over a Unix domain socket using the reth-ipc transport.
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "hero-server-unix", version, about = "Start the JSON-RPC IPC server")]
|
||||
struct Args {
|
||||
/// Filesystem path to the Unix domain socket
|
||||
#[arg(long, default_value = "/tmp/baobab.ipc", env = "HERO_IPC_SOCKET")]
|
||||
socket_path: PathBuf,
|
||||
|
||||
/// Optional path to a supervisor configuration file
|
||||
#[arg(long)]
|
||||
supervisor_config: Option<PathBuf>,
|
||||
|
||||
/// Database path (reserved for future use)
|
||||
#[arg(long, default_value = "./db", env = "HERO_DB_PATH")]
|
||||
db_path: PathBuf,
|
||||
|
||||
/// Log filter (e.g., info, debug, trace)
|
||||
#[arg(long, default_value = "info", env = "RUST_LOG")]
|
||||
log: String,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize tracing with provided log filter
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(EnvFilter::new(args.log.clone()))
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let cfg = OpenRpcServerConfig {
|
||||
transport: Transport::Unix(args.socket_path.clone()),
|
||||
supervisor_config_path: args.supervisor_config.clone(),
|
||||
db_path: args.db_path.clone(),
|
||||
};
|
||||
|
||||
// Build server state
|
||||
let server = OpenRpcServer::new(cfg.clone()).await?;
|
||||
|
||||
// Start IPC server
|
||||
let handle = server.start(cfg).await?;
|
||||
|
||||
tracing::info!(
|
||||
"IPC server started on {} (press Ctrl+C to stop)",
|
||||
args.socket_path.display()
|
||||
);
|
||||
|
||||
// Run until stopped
|
||||
tokio::spawn(handle.stopped());
|
||||
tokio::signal::ctrl_c().await?;
|
||||
tracing::info!("Shutting down IPC server");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -0,0 +1,127 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures::{Stream, StreamExt};
|
||||
use jsonrpsee::core::DeserializeOwned;
|
||||
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use tokio_stream::wrappers::BroadcastStream;
|
||||
use tokio_stream::wrappers::errors::BroadcastStreamRecvError;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
|
||||
let sub: Subscription<i32> = client.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;
|
||||
|
||||
// drop oldest messages from subscription:
|
||||
let mut sub = drop_oldest_when_lagging(sub, 10);
|
||||
|
||||
// Simulate that polling takes a long time.
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// The subscription starts from zero but you can
|
||||
// notice that many items have been replaced
|
||||
// because the subscription wasn't polled.
|
||||
for _ in 0..10 {
|
||||
match sub.next().await.unwrap() {
|
||||
Ok(n) => {
|
||||
tracing::info!("recv={n}");
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::info!("{e}");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn drop_oldest_when_lagging<T: Clone + DeserializeOwned + Send + Sync + 'static>(
|
||||
mut sub: Subscription<T>,
|
||||
buffer_size: usize,
|
||||
) -> impl Stream<Item = Result<T, BroadcastStreamRecvError>> {
|
||||
let (tx, rx) = tokio::sync::broadcast::channel(buffer_size);
|
||||
|
||||
tokio::spawn(async move {
|
||||
// Poll the subscription which ignores errors.
|
||||
while let Some(n) = sub.next().await {
|
||||
let msg = match n {
|
||||
Ok(msg) => msg,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to decode the subscription message: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if tx.send(msg).is_err() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
BroadcastStream::new(rx)
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module
|
||||
.register_subscription("subscribe_hello", "s_hello", "unsubscribe_hello", |_, pending, _, _| async move {
|
||||
let sub = pending.accept().await.unwrap();
|
||||
|
||||
for i in 0..usize::MAX {
|
||||
let json = serde_json::value::to_raw_value(&i).unwrap();
|
||||
sub.send(json).await.unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let's it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
65
reference_jsonrpsee_crate_examples/core_client.rs
Normal file
65
reference_jsonrpsee_crate_examples/core_client.rs
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::client_transport::ws::{Url, WsTransportClientBuilder};
|
||||
use jsonrpsee::core::client::{ClientBuilder, ClientT};
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let uri = Url::parse(&format!("ws://{}", addr))?;
|
||||
|
||||
let (tx, rx) = WsTransportClientBuilder::default().build(uri).await?;
|
||||
let client = ClientBuilder::default().build_with_tokio(tx, rx);
|
||||
let response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
tracing::info!("response: {:?}", response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let's it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
104
reference_jsonrpsee_crate_examples/cors_server.rs
Normal file
104
reference_jsonrpsee_crate_examples/cors_server.rs
Normal file
@ -0,0 +1,104 @@
|
||||
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example adds upstream CORS layers to the RPC service,
|
||||
//! with access control allowing requests from all hosts.
|
||||
|
||||
use hyper::Method;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
use std::net::SocketAddr;
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
// Start up a JSON-RPC server that allows cross origin requests.
|
||||
let server_addr = run_server().await?;
|
||||
|
||||
// Print instructions for testing CORS from a browser.
|
||||
println!("Run the following snippet in the developer console in any Website.");
|
||||
println!(
|
||||
r#"
|
||||
fetch("http://{}", {{
|
||||
method: 'POST',
|
||||
mode: 'cors',
|
||||
headers: {{ 'Content-Type': 'application/json' }},
|
||||
body: JSON.stringify({{
|
||||
jsonrpc: '2.0',
|
||||
method: 'say_hello',
|
||||
id: 1
|
||||
}})
|
||||
}}).then(res => {{
|
||||
console.log("Response:", res);
|
||||
return res.text()
|
||||
}}).then(body => {{
|
||||
console.log("Response Body:", body)
|
||||
}});
|
||||
"#,
|
||||
server_addr
|
||||
);
|
||||
|
||||
futures::future::pending().await
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
// Add a CORS middleware for handling HTTP requests.
|
||||
// This middleware does affect the response, including appropriate
|
||||
// headers to satisfy CORS. Because any origins are allowed, the
|
||||
// "Access-Control-Allow-Origin: *" header is appended to the response.
|
||||
let cors = CorsLayer::new()
|
||||
// Allow `POST` when accessing the resource
|
||||
.allow_methods([Method::POST])
|
||||
// Allow requests from any origin
|
||||
.allow_origin(Any)
|
||||
.allow_headers([hyper::header::CONTENT_TYPE]);
|
||||
let middleware = tower::ServiceBuilder::new().layer(cors);
|
||||
|
||||
// The RPC exposes the access control for filtering and the middleware for
|
||||
// modifying requests / responses. These features are independent of one another
|
||||
// and can also be used separately.
|
||||
// In this example, we use both features.
|
||||
let server = Server::builder().set_http_middleware(middleware).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
|
||||
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| {
|
||||
println!("say_hello method called!");
|
||||
"Hello there!!"
|
||||
})?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let's it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
83
reference_jsonrpsee_crate_examples/host_filter_middleware.rs
Normal file
83
reference_jsonrpsee_crate_examples/host_filter_middleware.rs
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example shows how to configure `host filtering` by tower middleware on the jsonrpsee server.
|
||||
//!
|
||||
//! The server whitelist's only `example.com` and any call from localhost will be
|
||||
//! rejected both by HTTP and WebSocket transports.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::http_client::HttpClient;
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::middleware::http::HostFilterLayer;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("http://{}", addr);
|
||||
|
||||
// Use RPC client to get the response of `say_hello` method.
|
||||
let client = HttpClient::builder().build(&url)?;
|
||||
// This call will be denied because only `example.com` URIs/hosts are allowed by the host filter.
|
||||
let response = client.request::<String, _>("say_hello", rpc_params![]).await.unwrap_err();
|
||||
println!("[main]: response: {}", response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
// Custom tower service to handle the RPC requests
|
||||
let service_builder = tower::ServiceBuilder::new()
|
||||
// For this example we only want to permit requests from `example.com`
|
||||
// all other request are denied.
|
||||
//
|
||||
// `HostFilerLayer::new` only fails on invalid URIs..
|
||||
.layer(HostFilterLayer::new(["example.com"]).unwrap());
|
||||
|
||||
let server =
|
||||
Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo").unwrap();
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let's it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
65
reference_jsonrpsee_crate_examples/http.rs
Normal file
65
reference_jsonrpsee_crate_examples/http.rs
Normal file
@ -0,0 +1,65 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};
use tracing_subscriber::util::SubscriberInitExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
        .add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?);
    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    let server_addr = run_server().await?;
    let url = format!("http://{}", server_addr);

    let client = HttpClient::builder().build(url)?;
    let params = rpc_params![1_u64, 2, 3];
    let response: Result<String, _> = client.request("say_hello", params).await;
    tracing::info!("r: {:?}", response);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;

    let addr = server.local_addr()?;
    let handle = server.start(module);

    // In this example we don't care about shutdown, so we just let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
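The module above registers a synchronous handler; an async handler can sit next to it. A hedged sketch (not part of the upstream file) follows; it assumes `RpcModule::register_async_method` takes the same three-argument closure shape as the `register_method` call used in this version.

// Hypothetical addition to `run_server`: an async method alongside the sync one.
fn register_async_hello(module: &mut RpcModule<()>) -> anyhow::Result<()> {
    module.register_async_method("say_hello_async", |_params, _ctx, _ext| async {
        // Pretend to do some async work before answering.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
        "lo"
    })?;
    Ok(())
}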
129
reference_jsonrpsee_crate_examples/http_middleware.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! jsonrpsee supports two kinds of middleware: `http_middleware` and `rpc_middleware`.
//!
//! This example demonstrates how to use the `http_middleware`, which applies to each
//! HTTP request.
//!
//! A typical use-case for this is to apply a specific CORS policy, which then applies
//! both to HTTP and WebSocket.
//!
|
||||
|
||||
use hyper::Method;
|
||||
use hyper::body::Bytes;
|
||||
use hyper::http::HeaderValue;
|
||||
use jsonrpsee::rpc_params;
|
||||
use std::iter::once;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
use tower_http::LatencyUnit;
|
||||
use tower_http::compression::CompressionLayer;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
|
||||
use tower_http::trace::{DefaultMakeSpan, DefaultOnResponse, TraceLayer};
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::http_client::HttpClient;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
|
||||
// WebSocket.
|
||||
{
|
||||
let client = WsClientBuilder::default().build(format!("ws://{}", addr)).await?;
|
||||
let response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
println!("[main]: ws response: {:?}", response);
|
||||
let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||
let _ = client.request::<String, _>("say_hello", rpc_params![]).await?;
|
||||
}
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
|
||||
// HTTP.
|
||||
{
|
||||
let client = HttpClient::builder().build(format!("http://{}", addr))?;
|
||||
let response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
println!("[main]: http response: {:?}", response);
|
||||
let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||
let _ = client.request::<String, _>("say_hello", rpc_params![]).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let cors = CorsLayer::new()
|
||||
// Allow `POST` when accessing the resource
|
||||
.allow_methods([Method::POST])
|
||||
// Allow requests from any origin
|
||||
.allow_origin(HeaderValue::from_str("http://example.com").unwrap())
|
||||
.allow_headers([hyper::header::CONTENT_TYPE]);
|
||||
|
||||
// Custom tower service to handle the RPC requests
|
||||
let service_builder = tower::ServiceBuilder::new()
|
||||
// Add high level tracing/logging to all requests
|
||||
.layer(
|
||||
TraceLayer::new_for_http()
|
||||
.on_request(
|
||||
|request: &hyper::Request<_>, _span: &tracing::Span| tracing::info!(request = ?request, "on_request"),
|
||||
)
|
||||
.on_body_chunk(|chunk: &Bytes, latency: Duration, _: &tracing::Span| {
|
||||
tracing::info!(size_bytes = chunk.len(), latency = ?latency, "sending body chunk")
|
||||
})
|
||||
.make_span_with(DefaultMakeSpan::new().include_headers(true))
|
||||
.on_response(DefaultOnResponse::new().include_headers(true).latency_unit(LatencyUnit::Micros)),
|
||||
)
|
||||
// Mark the `Authorization` request header as sensitive so it doesn't show in logs
|
||||
.layer(SetSensitiveRequestHeadersLayer::new(once(hyper::header::AUTHORIZATION)))
|
||||
.layer(cors)
|
||||
.layer(CompressionLayer::new())
|
||||
.timeout(Duration::from_secs(2));
|
||||
|
||||
let server =
|
||||
Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo").unwrap();
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about shutdown, so we just let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
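If the single-origin restriction above is more than you need, tower-http also ships a wide-open policy; `CorsLayer::permissive()` is the one used by the `jsonrpsee_as_service` example later in this commit. A minimal sketch:

// Sketch: a permissive CORS layer for local experiments; usually too loose for production.
fn permissive_cors() -> CorsLayer {
    // Allows any origin, method and request headers.
    CorsLayer::permissive()
}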
109
reference_jsonrpsee_crate_examples/http_proxy_middleware.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example utilizes the `ProxyRequest` layer for redirecting
//! `GET /path` requests to internal RPC methods.
//!
//! The RPC server registers a method named `system_health` which
//! returns `serde_json::Value`. Any `GET /health` request is redirected
//! to the internal method, and only the method's response is returned
//! in the body (i.e., without any JSON-RPC 2.0 overhead).
//!
//! # Note
//!
//! This functionality is useful for services which would
//! like to query a certain URI path for statistics.
|
||||
|
||||
use hyper_util::client::legacy::Client;
|
||||
use hyper_util::rt::TokioExecutor;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::http_client::HttpClient;
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::middleware::http::ProxyGetRequestLayer;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
|
||||
type EmptyBody = http_body_util::Empty<hyper::body::Bytes>;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("http://{}", addr);
|
||||
|
||||
// Use the RPC client to get the response of the `say_hello` method.
|
||||
let client = HttpClient::builder().build(&url)?;
|
||||
let response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
println!("[main]: response: {:?}", response);
|
||||
|
||||
// Use hyper client to manually submit a `GET /health` request.
|
||||
let http_client = Client::builder(TokioExecutor::new()).build_http();
|
||||
let uri = format!("http://{}/health", addr);
|
||||
|
||||
let req = hyper::Request::builder().method("GET").uri(&uri).body(EmptyBody::new())?;
|
||||
println!("[main]: Submit proxy request: {:?}", req);
|
||||
let res = http_client.request(req).await?;
|
||||
println!("[main]: Received proxy response: {:?}", res);
|
||||
|
||||
// Interpret the response as String.
|
||||
let collected = http_body_util::BodyExt::collect(res.into_body()).await?;
|
||||
let out = String::from_utf8(collected.to_bytes().to_vec()).unwrap();
|
||||
println!("[main]: Interpret proxy response: {:?}", out);
|
||||
assert_eq!(out.as_str(), "{\"health\":true}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
// Custom tower service to handle the RPC requests
|
||||
let service_builder = tower::ServiceBuilder::new()
|
||||
// Proxy `GET /health` requests to internal `system_health` method.
|
||||
.layer(ProxyGetRequestLayer::new([("/health", "system_health")])?)
|
||||
.timeout(Duration::from_secs(2));
|
||||
|
||||
let server =
|
||||
Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo").unwrap();
|
||||
module.register_method("system_health", |_, _, _| serde_json::json!({ "health": true })).unwrap();
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about shutdown, so we just let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
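More GET paths can be proxied the same way. A hedged sketch of how the middleware in `run_server` could also map `GET /version` to a `system_version` method; the extra path and method name are illustrative, not part of the upstream example, and would need a matching `register_method` call on the module.

// Sketch: one `ProxyGetRequestLayer` can map several GET paths to internal methods.
fn proxy_layer() -> anyhow::Result<ProxyGetRequestLayer> {
    Ok(ProxyGetRequestLayer::new([("/health", "system_health"), ("/version", "system_version")])?)
}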
380
reference_jsonrpsee_crate_examples/jsonrpsee_as_service.rs
Normal file
@@ -0,0 +1,380 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example shows how to use the `jsonrpsee::server` as
//! a tower service, such that it's possible to get access
//! to HTTP-related things by launching a `hyper::service_fn`.
//!
//! The typical use-case for this is when one wants to have
//! access to HTTP-related things.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use futures::FutureExt;
|
||||
use hyper::HeaderMap;
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use jsonrpsee::core::async_trait;
|
||||
use jsonrpsee::core::middleware::{Batch, BatchEntry, BatchEntryErr, Notification, RpcServiceBuilder, RpcServiceT};
|
||||
use jsonrpsee::http_client::HttpClient;
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::middleware::http::{HostFilterLayer, ProxyGetRequestLayer};
|
||||
use jsonrpsee::server::{
|
||||
ServerConfig, ServerHandle, StopHandle, TowerServiceBuilder, serve_with_graceful_shutdown, stop_channel,
|
||||
};
|
||||
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned, Request};
|
||||
use jsonrpsee::ws_client::{HeaderValue, WsClientBuilder};
|
||||
use jsonrpsee::{MethodResponse, Methods};
|
||||
use tokio::net::TcpListener;
|
||||
use tower::Service;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct IdentityLayer;
|
||||
|
||||
impl<S> tower::Layer<S> for IdentityLayer
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type Service = Identity<S>;
|
||||
|
||||
fn layer(&self, inner: S) -> Self::Service {
|
||||
Identity(inner)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Identity<S>(S);
|
||||
|
||||
impl<S> RpcServiceT for Identity<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
self.0.batch(batch)
|
||||
}
|
||||
|
||||
fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
self.0.call(request)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.0.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
struct Metrics {
|
||||
opened_ws_connections: Arc<AtomicUsize>,
|
||||
closed_ws_connections: Arc<AtomicUsize>,
|
||||
http_calls: Arc<AtomicUsize>,
|
||||
success_http_calls: Arc<AtomicUsize>,
|
||||
}
|
||||
|
||||
fn auth_reject_error() -> ErrorObjectOwned {
|
||||
ErrorObject::owned(-32999, "HTTP Authorization header is missing", None::<()>)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct AuthorizationMiddleware<S> {
|
||||
headers: HeaderMap,
|
||||
inner: S,
|
||||
#[allow(unused)]
|
||||
transport_label: &'static str,
|
||||
}
|
||||
|
||||
impl<S> AuthorizationMiddleware<S> {
|
||||
/// Authorize the request by checking the `Authorization` header.
|
||||
///
/// In this example, for simplicity, the authorization value is not checked
/// or used, because it's just a toy example.
|
||||
fn auth_method_call(&self, req: &Request<'_>) -> bool {
|
||||
if req.method_name() == "trusted_call" {
|
||||
let Some(Ok(_)) = self.headers.get(AUTHORIZATION).map(|auth| auth.to_str()) else { return false };
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Authorize the notification by checking the `Authorization` header.
|
||||
///
|
||||
/// Because notifications are not expected to return a response, we
|
||||
/// return a `MethodResponse` by injecting an error into the extensions
|
||||
/// which could be read by other middleware or the server.
|
||||
fn auth_notif(&self, notif: &Notification<'_>) -> bool {
|
||||
if notif.method_name() == "trusted_call" {
|
||||
let Some(Ok(_)) = self.headers.get(AUTHORIZATION).map(|auth| auth.to_str()) else { return false };
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for AuthorizationMiddleware<S>
|
||||
where
|
||||
// We need to specify the concrete types here because otherwise we return an error or specific response
|
||||
// in the middleware implementation.
|
||||
S: RpcServiceT<MethodResponse = MethodResponse, BatchResponse = MethodResponse> + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
let this = self.clone();
|
||||
let auth_ok = this.auth_method_call(&req);
|
||||
|
||||
async move {
|
||||
// If the authorization header is missing, it's recommended
// to return the response as MethodResponse::error instead of
|
||||
// returning an error from the service.
|
||||
//
|
||||
// This way the error is returned as a JSON-RPC error
|
||||
if !auth_ok {
|
||||
return MethodResponse::error(req.id, auth_reject_error());
|
||||
}
|
||||
this.inner.call(req).await
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
// Check the authorization header for each entry in the batch.
|
||||
let entries: Vec<_> = batch
|
||||
.into_iter()
|
||||
.filter_map(|entry| match entry {
|
||||
Ok(BatchEntry::Call(req)) => {
|
||||
if self.auth_method_call(&req) {
|
||||
Some(Ok(BatchEntry::Call(req)))
|
||||
} else {
|
||||
// If the authorization header is missing, we return
|
||||
// a JSON-RPC error instead of an error from the service.
|
||||
Some(Err(BatchEntryErr::new(req.id, auth_reject_error())))
|
||||
}
|
||||
}
|
||||
Ok(BatchEntry::Notification(notif)) => {
|
||||
if self.auth_notif(¬if) {
|
||||
Some(Ok(BatchEntry::Notification(notif)))
|
||||
} else {
|
||||
// Just filter out the notification if the auth fails
|
||||
// because notifications are not expected to return a response.
|
||||
None
|
||||
}
|
||||
}
|
||||
// Errors which could happen such as invalid JSON-RPC call
|
||||
// or invalid JSON are just passed through.
|
||||
Err(err) => Some(Err(err)),
|
||||
})
|
||||
.collect();
|
||||
|
||||
self.inner.batch(Batch::from(entries))
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.inner.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[rpc(server, client)]
|
||||
pub trait Rpc {
|
||||
#[method(name = "trusted_call")]
|
||||
async fn trusted_call(&self) -> Result<String, ErrorObjectOwned>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer for () {
|
||||
async fn trusted_call(&self) -> Result<String, ErrorObjectOwned> {
|
||||
Ok("mysecret".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
|
||||
tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;
|
||||
|
||||
let metrics = Metrics::default();
|
||||
|
||||
let handle = run_server(metrics.clone()).await?;
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
{
|
||||
let client = HttpClient::builder().build("http://127.0.0.1:9944").unwrap();
|
||||
|
||||
// Fails because the authorization header is missing.
|
||||
let x = client.trusted_call().await.unwrap_err();
|
||||
tracing::info!("response: {x}");
|
||||
}
|
||||
|
||||
{
|
||||
let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await.unwrap();
|
||||
|
||||
// Fails because the authorization header is missing.
|
||||
let x = client.trusted_call().await.unwrap_err();
|
||||
tracing::info!("response: {x}");
|
||||
}
|
||||
|
||||
{
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(AUTHORIZATION, HeaderValue::from_static("don't care in this example"));
|
||||
|
||||
let client = HttpClient::builder().set_headers(headers).build("http://127.0.0.1:9944").unwrap();
|
||||
|
||||
let x = client.trusted_call().await.unwrap();
|
||||
tracing::info!("response: {x}");
|
||||
}
|
||||
|
||||
tracing::info!("{:?}", metrics);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server(metrics: Metrics) -> anyhow::Result<ServerHandle> {
|
||||
let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;
|
||||
|
||||
// This state is cloned for every connection;
// all these types are based on Arcs, so it should
// be relatively cheap to clone them.
//
// Make sure that nothing expensive is cloned here
// when doing this, or wrap it in an `Arc`.
|
||||
#[derive(Clone)]
|
||||
struct PerConnection<RpcMiddleware, HttpMiddleware> {
|
||||
methods: Methods,
|
||||
stop_handle: StopHandle,
|
||||
metrics: Metrics,
|
||||
svc_builder: TowerServiceBuilder<RpcMiddleware, HttpMiddleware>,
|
||||
}
|
||||
|
||||
// Each RPC call/connection gets its own `stop_handle`
// to be able to determine whether the server has been stopped or not.
//
// To keep the server running, the `server_handle`
// must be kept alive; it can also be used to stop the server.
|
||||
let (stop_handle, server_handle) = stop_channel();
|
||||
|
||||
let per_conn = PerConnection {
|
||||
methods: ().into_rpc().into(),
|
||||
stop_handle: stop_handle.clone(),
|
||||
metrics,
|
||||
svc_builder: jsonrpsee::server::Server::builder()
|
||||
.set_config(ServerConfig::builder().max_connections(33).build())
|
||||
.set_http_middleware(
|
||||
tower::ServiceBuilder::new()
|
||||
.layer(CorsLayer::permissive())
|
||||
.layer(ProxyGetRequestLayer::new(vec![("trusted_call", "foo")]).unwrap())
|
||||
.layer(HostFilterLayer::new(["example.com"]).unwrap()),
|
||||
)
|
||||
.to_service_builder(),
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
// The `tokio::select!` macro is used to wait for either of the
|
||||
// listeners to accept a new connection or for the server to be
|
||||
// stopped.
|
||||
let sock = tokio::select! {
|
||||
res = listener.accept() => {
|
||||
match res {
|
||||
Ok((stream, _remote_addr)) => stream,
|
||||
Err(e) => {
|
||||
tracing::error!("failed to accept v4 connection: {:?}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = per_conn.stop_handle.clone().shutdown() => break,
|
||||
};
|
||||
let per_conn2 = per_conn.clone();
|
||||
|
||||
let svc = tower::service_fn(move |req: hyper::Request<hyper::body::Incoming>| {
|
||||
let is_websocket = jsonrpsee::server::ws::is_upgrade_request(&req);
|
||||
let transport_label = if is_websocket { "ws" } else { "http" };
|
||||
let PerConnection { methods, stop_handle, metrics, svc_builder } = per_conn2.clone();
|
||||
|
||||
// NOTE: the RPC middleware must be initialized here so that it can be created once per connection,
// with data from the connection, such as the headers in this example.
|
||||
let headers = req.headers().clone();
|
||||
let rpc_middleware = RpcServiceBuilder::new()
|
||||
.rpc_logger(1024)
|
||||
.layer_fn(move |service| AuthorizationMiddleware {
|
||||
inner: service,
|
||||
headers: headers.clone(),
|
||||
transport_label,
|
||||
})
|
||||
.option_layer(Some(IdentityLayer));
|
||||
|
||||
let mut svc = svc_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle);
|
||||
|
||||
if is_websocket {
|
||||
// Utilize the session close future to know when the actual WebSocket
|
||||
// session was closed.
|
||||
let session_close = svc.on_session_closed();
|
||||
|
||||
// The API is a little awkward: the response to the HTTP request must be returned below,
// and we spawn a task to register when the session is closed.
|
||||
tokio::spawn(async move {
|
||||
session_close.await;
|
||||
tracing::info!("Closed WebSocket connection");
|
||||
metrics.closed_ws_connections.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
|
||||
async move {
|
||||
tracing::info!("Opened WebSocket connection");
|
||||
metrics.opened_ws_connections.fetch_add(1, Ordering::Relaxed);
|
||||
svc.call(req).await
|
||||
}
|
||||
.boxed()
|
||||
} else {
|
||||
// HTTP.
|
||||
async move {
|
||||
tracing::info!("Opened HTTP connection");
|
||||
metrics.http_calls.fetch_add(1, Ordering::Relaxed);
|
||||
let rp = svc.call(req).await;
|
||||
|
||||
if rp.is_ok() {
|
||||
metrics.success_http_calls.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
tracing::info!("Closed HTTP connection");
|
||||
rp
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
});
|
||||
|
||||
tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle.clone().shutdown()));
|
||||
}
|
||||
});
|
||||
|
||||
Ok(server_handle)
|
||||
}
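For comparison with the `AuthorizationMiddleware` above, here is a smaller, hedged sketch of an `RpcServiceT` middleware in the same shape as `Identity`: it only counts individual method calls and delegates everything else unchanged. The counter field is illustrative and does not exist in the upstream example.

// Sketch: a pass-through middleware that counts method calls.
#[derive(Clone)]
struct CallCounter<S> {
    inner: S,
    calls: Arc<AtomicUsize>,
}

impl<S> RpcServiceT for CallCounter<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type BatchResponse = S::BatchResponse;
    type NotificationResponse = S::NotificationResponse;

    fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        // Count every individual method call, then delegate to the inner service.
        self.calls.fetch_add(1, Ordering::Relaxed);
        self.inner.call(request)
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        // Batches and notifications are passed through untouched.
        self.inner.batch(batch)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.inner.notification(n)
    }
}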
@@ -0,0 +1,222 @@
|
||||
// Copyright 2024 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example shows how to use the low-level server API
//! in jsonrpsee and inject an `mpsc::Sender<()>` into the
//! request extensions to be able to close the connection from
//! an RPC handler (method call or subscription).
|
||||
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
|
||||
use futures::FutureExt;
|
||||
use jsonrpsee::core::middleware::RpcServiceBuilder;
|
||||
use jsonrpsee::core::{SubscriptionResult, async_trait};
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::{
|
||||
ConnectionGuard, ConnectionState, HttpRequest, ServerConfig, ServerHandle, StopHandle, http,
|
||||
serve_with_graceful_shutdown, stop_channel, ws,
|
||||
};
|
||||
use jsonrpsee::types::ErrorObjectOwned;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{Extensions, Methods, PendingSubscriptionSink};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
#[rpc(server, client)]
|
||||
pub trait Rpc {
|
||||
#[method(name = "closeConn", with_extensions)]
|
||||
async fn close_conn(&self) -> Result<(), ErrorObjectOwned>;
|
||||
|
||||
#[subscription(name = "subscribeCloseConn", item = String, with_extensions)]
|
||||
async fn close_conn_from_sub(&self) -> SubscriptionResult;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer for () {
|
||||
async fn close_conn(&self, ext: &Extensions) -> Result<(), ErrorObjectOwned> {
|
||||
let tx = ext.get::<mpsc::Sender<()>>().unwrap();
|
||||
tx.send(()).await.unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close_conn_from_sub(&self, _pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
|
||||
let tx = ext.get::<mpsc::Sender<()>>().unwrap();
|
||||
tx.send(()).await.unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
|
||||
tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;
|
||||
|
||||
let handle = run_server().await?;
|
||||
|
||||
{
|
||||
let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
|
||||
let _ = client.close_conn().await;
|
||||
client.on_disconnect().await;
|
||||
eprintln!("Connection closed from RPC call");
|
||||
}
|
||||
|
||||
{
|
||||
let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
|
||||
let _ = client.close_conn_from_sub().await;
|
||||
client.on_disconnect().await;
|
||||
eprintln!("Connection closed from RPC subscription");
|
||||
}
|
||||
|
||||
let _ = handle.stop();
|
||||
handle.stopped().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<ServerHandle> {
|
||||
// Construct our SocketAddr to listen on...
|
||||
let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;
|
||||
|
||||
// Each RPC call/connection gets its own `stop_handle`
// to be able to determine whether the server has been stopped or not.
//
// To keep the server running, the `server_handle`
// must be kept alive; it can also be used to stop the server.
|
||||
let (stop_handle, server_handle) = stop_channel();
|
||||
|
||||
// This state is cloned for every connection;
// all these types are based on Arcs, so it should
// be relatively cheap to clone them.
//
// Make sure that nothing expensive is cloned here
// when doing this, or wrap it in an `Arc`.
|
||||
#[derive(Clone)]
|
||||
struct PerConnection {
|
||||
methods: Methods,
|
||||
stop_handle: StopHandle,
|
||||
conn_id: Arc<AtomicU32>,
|
||||
conn_guard: ConnectionGuard,
|
||||
}
|
||||
|
||||
let per_conn = PerConnection {
|
||||
methods: ().into_rpc().into(),
|
||||
stop_handle: stop_handle.clone(),
|
||||
conn_id: Default::default(),
|
||||
conn_guard: ConnectionGuard::new(100),
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
// The `tokio::select!` macro is used to wait for either of the
|
||||
// listeners to accept a new connection or for the server to be
|
||||
// stopped.
|
||||
let (sock, _) = tokio::select! {
|
||||
res = listener.accept() => {
|
||||
match res {
|
||||
Ok(sock) => sock,
|
||||
Err(e) => {
|
||||
tracing::error!("failed to accept v4 connection: {:?}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = per_conn.stop_handle.clone().shutdown() => break,
|
||||
};
|
||||
let per_conn = per_conn.clone();
|
||||
|
||||
// Create a service handler.
|
||||
let stop_handle2 = per_conn.stop_handle.clone();
|
||||
let per_conn = per_conn.clone();
|
||||
let svc = tower::service_fn(move |mut req: HttpRequest<hyper::body::Incoming>| {
|
||||
let PerConnection { methods, stop_handle, conn_guard, conn_id } = per_conn.clone();
|
||||
let (tx, mut disconnect) = mpsc::channel::<()>(1);
|
||||
|
||||
// Insert the `tx` into the request extensions to be able to close the connection
|
||||
// from method or subscription handlers.
|
||||
req.extensions_mut().insert(tx.clone());
|
||||
|
||||
// jsonrpsee expects a `conn permit` for each connection.
//
// This may be omitted if you don't want to limit the number of connections
// to the server.
|
||||
let Some(conn_permit) = conn_guard.try_acquire() else {
|
||||
return async { Ok::<_, Infallible>(http::response::too_many_requests()) }.boxed();
|
||||
};
|
||||
|
||||
let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);
|
||||
|
||||
if ws::is_upgrade_request(&req) {
|
||||
let rpc_service = RpcServiceBuilder::new();
|
||||
|
||||
// Establishes the websocket connection
|
||||
async move {
|
||||
match ws::connect(req, ServerConfig::default(), methods, conn, rpc_service).await {
|
||||
Ok((rp, conn_fut)) => {
|
||||
tokio::spawn(async move {
|
||||
tokio::select! {
|
||||
_ = conn_fut => (),
|
||||
_ = disconnect.recv() => {
|
||||
eprintln!("Server closed connection");
|
||||
},
|
||||
}
|
||||
});
|
||||
Ok(rp)
|
||||
}
|
||||
Err(rp) => Ok(rp),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
} else if !ws::is_upgrade_request(&req) {
|
||||
// There is another API for making call with just a service as well.
|
||||
//
|
||||
// See [`jsonrpsee::server::http::call_with_service`]
|
||||
async move {
|
||||
tokio::select! {
|
||||
// RPC call finished successfully.
|
||||
res = http::call_with_service_builder(req, ServerConfig::default(), conn, methods, RpcServiceBuilder::new()) => Ok(res),
|
||||
// The connection was closed by a RPC handler
|
||||
_ = disconnect.recv() => Ok(http::response::denied()),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
} else {
|
||||
async { Ok(http::response::denied()) }.boxed()
|
||||
}
|
||||
});
|
||||
|
||||
// Upgrade the connection to a HTTP service.
|
||||
tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle2.shutdown()));
|
||||
}
|
||||
});
|
||||
|
||||
Ok(server_handle)
|
||||
}
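The same `Extensions` mechanism can carry other per-connection data into the handlers. A hedged sketch of a hypothetical helper (not part of the upstream example) that stores the remote peer address captured in the accept loop, which a `with_extensions` handler could later read via `ext.get::<SocketAddr>()`:

// Hypothetical helper: stash the peer address in the request extensions.
fn inject_peer_addr(req: &mut HttpRequest<hyper::body::Incoming>, remote_addr: SocketAddr) {
    // Any `Clone + Send + Sync + 'static` value can be stored in the request extensions.
    req.extensions_mut().insert(remote_addr);
}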
@@ -0,0 +1,349 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This example shows how to use the low-level server API
|
||||
//! in jsonrpsee.
|
||||
//!
|
||||
//! This particular example disconnects peers that
//! make more than ten RPC calls and bans their IP address.
//!
//! NOTE:
//!
//! Enabling tower middleware in this example doesn't work;
//! to do so, the low-level API in hyper must be used.
|
||||
//!
|
||||
//! See <https://docs.rs/hyper/latest/hyper/server/conn/index.html>
|
||||
//! for further information regarding the "low-level API" in hyper.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::convert::Infallible;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use futures::FutureExt;
|
||||
use jsonrpsee::core::async_trait;
|
||||
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
|
||||
use jsonrpsee::http_client::HttpClient;
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::{
|
||||
ConnectionGuard, ConnectionState, ServerConfig, ServerHandle, StopHandle, http, serve_with_graceful_shutdown,
|
||||
stop_channel, ws,
|
||||
};
|
||||
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned, Id, Request};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{MethodResponse, Methods};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::Mutex as AsyncMutex;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
/// This is just a counter to limit
|
||||
/// the number of calls per connection.
|
||||
/// Once the limit has been exceeded
|
||||
/// all future calls are rejected.
|
||||
#[derive(Clone)]
|
||||
struct CallLimit<S> {
|
||||
service: S,
|
||||
count: Arc<AsyncMutex<usize>>,
|
||||
state: mpsc::Sender<()>,
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for CallLimit<S>
|
||||
where
|
||||
S: RpcServiceT<
|
||||
MethodResponse = MethodResponse,
|
||||
BatchResponse = MethodResponse,
|
||||
NotificationResponse = MethodResponse,
|
||||
> + Send
|
||||
+ Sync
|
||||
+ Clone
|
||||
+ 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
let count = self.count.clone();
|
||||
let state = self.state.clone();
|
||||
let service = self.service.clone();
|
||||
|
||||
async move {
|
||||
let mut lock = count.lock().await;
|
||||
|
||||
if *lock >= 10 {
|
||||
let _ = state.try_send(());
|
||||
MethodResponse::error(req.id, ErrorObject::borrowed(-32000, "RPC rate limit", None))
|
||||
} else {
|
||||
let rp = service.call(req).await;
|
||||
*lock += 1;
|
||||
rp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
let count = self.count.clone();
|
||||
let state = self.state.clone();
|
||||
let service = self.service.clone();
|
||||
|
||||
async move {
|
||||
let mut lock = count.lock().await;
|
||||
let batch_len = batch.len();
|
||||
|
||||
if *lock >= 10 + batch_len {
|
||||
let _ = state.try_send(());
|
||||
MethodResponse::error(Id::Null, ErrorObject::borrowed(-32000, "RPC rate limit", None))
|
||||
} else {
|
||||
let rp = service.batch(batch).await;
|
||||
*lock += batch_len;
|
||||
rp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
let count = self.count.clone();
|
||||
let service = self.service.clone();
|
||||
|
||||
// A notification is not expected to return a response, so the result here doesn't matter;
// the only consequence is that other middleware may not be invoked once the limit is hit.
|
||||
async move { if *count.lock().await >= 10 { MethodResponse::notification() } else { service.notification(n).await } }
|
||||
}
|
||||
}
|
||||
|
||||
#[rpc(server, client)]
|
||||
pub trait Rpc {
|
||||
#[method(name = "say_hello")]
|
||||
async fn say_hello(&self) -> Result<String, ErrorObjectOwned>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer for () {
|
||||
async fn say_hello(&self) -> Result<String, ErrorObjectOwned> {
|
||||
Ok("lo".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
|
||||
tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;
|
||||
|
||||
// Make a bunch of WebSocket calls to be blacklisted by server.
|
||||
{
|
||||
let mut i = 0;
|
||||
let handle = run_server().await?;
|
||||
|
||||
let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await.unwrap();
|
||||
while client.is_connected() {
|
||||
let rp: Result<String, _> = client.say_hello().await;
|
||||
if rp.is_ok() {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// After the server has blacklisted the IP address, the connection is denied.
|
||||
assert!(WsClientBuilder::default().build("ws://127.0.0.1:9944").await.is_err());
|
||||
tracing::info!("WS client made {i} successful calls before getting blacklisted");
|
||||
|
||||
handle.stop().unwrap();
|
||||
handle.stopped().await;
|
||||
}
|
||||
|
||||
// Make a bunch of HTTP calls to be blacklisted by server.
|
||||
{
|
||||
let mut i = 0;
|
||||
let handle = run_server().await?;
|
||||
|
||||
let client = HttpClient::builder().build("http://127.0.0.1:9944").unwrap();
|
||||
while client.say_hello().await.is_ok() {
|
||||
i += 1;
|
||||
}
|
||||
tracing::info!("HTTP client made {i} successful calls before getting blacklisted");
|
||||
|
||||
handle.stop().unwrap();
|
||||
handle.stopped().await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<ServerHandle> {
|
||||
// Construct our SocketAddr to listen on...
|
||||
let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;
|
||||
|
||||
// Each RPC call/connection gets its own `stop_handle`
// to be able to determine whether the server has been stopped or not.
//
// To keep the server running, the `server_handle`
// must be kept alive; it can also be used to stop the server.
|
||||
let (stop_handle, server_handle) = stop_channel();
|
||||
|
||||
// This state is cloned for every connection;
// all these types are based on Arcs, so it should
// be relatively cheap to clone them.
//
// Make sure that nothing expensive is cloned here
// when doing this, or wrap it in an `Arc`.
|
||||
#[derive(Clone)]
|
||||
struct PerConnection {
|
||||
methods: Methods,
|
||||
stop_handle: StopHandle,
|
||||
conn_id: Arc<AtomicU32>,
|
||||
conn_guard: ConnectionGuard,
|
||||
blacklisted_peers: Arc<Mutex<HashSet<IpAddr>>>,
|
||||
// HTTP rate limit that is shared by all connections.
|
||||
//
|
||||
// This is just a toy example, and one should not "limit" HTTP connections
// like this, because the actual IP address of each request is not checked.
//
// As a consequence, it's possible to blacklist a peer which has only made one or
// a few calls.
|
||||
global_http_rate_limit: Arc<AsyncMutex<usize>>,
|
||||
}
|
||||
|
||||
let per_conn = PerConnection {
|
||||
methods: ().into_rpc().into(),
|
||||
stop_handle: stop_handle.clone(),
|
||||
conn_id: Default::default(),
|
||||
conn_guard: ConnectionGuard::new(100),
|
||||
blacklisted_peers: Default::default(),
|
||||
global_http_rate_limit: Default::default(),
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
// The `tokio::select!` macro is used to wait for either of the
|
||||
// listeners to accept a new connection or for the server to be
|
||||
// stopped.
|
||||
let (sock, remote_addr) = tokio::select! {
|
||||
res = listener.accept() => {
|
||||
match res {
|
||||
Ok(sock) => sock,
|
||||
Err(e) => {
|
||||
tracing::error!("failed to accept v4 connection: {:?}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = per_conn.stop_handle.clone().shutdown() => break,
|
||||
};
|
||||
let per_conn = per_conn.clone();
|
||||
|
||||
// Create a service handler.
|
||||
let stop_handle2 = per_conn.stop_handle.clone();
|
||||
let per_conn = per_conn.clone();
|
||||
let svc = tower::service_fn(move |req| {
|
||||
let PerConnection {
|
||||
methods,
|
||||
stop_handle,
|
||||
conn_guard,
|
||||
conn_id,
|
||||
blacklisted_peers,
|
||||
global_http_rate_limit,
|
||||
} = per_conn.clone();
|
||||
|
||||
// jsonrpsee expects a `conn permit` for each connection.
//
// This may be omitted if you don't want to limit the number of connections
// to the server.
|
||||
let Some(conn_permit) = conn_guard.try_acquire() else {
|
||||
return async { Ok::<_, Infallible>(http::response::too_many_requests()) }.boxed();
|
||||
};
|
||||
|
||||
// The IP addr was blacklisted.
|
||||
if blacklisted_peers.lock().unwrap().get(&remote_addr.ip()).is_some() {
|
||||
return async { Ok(http::response::denied()) }.boxed();
|
||||
}
|
||||
|
||||
if ws::is_upgrade_request(&req) {
|
||||
let (tx, mut disconnect) = mpsc::channel(1);
|
||||
let rpc_service = RpcServiceBuilder::new().layer_fn(move |service| CallLimit {
|
||||
service,
|
||||
count: Default::default(),
|
||||
state: tx.clone(),
|
||||
});
|
||||
|
||||
let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);
|
||||
|
||||
// Establishes the websocket connection
|
||||
// and if the `CallLimit` middleware triggers the hard limit
|
||||
// then the connection is closed i.e, the `conn_fut` is dropped.
|
||||
async move {
|
||||
match ws::connect(req, ServerConfig::default(), methods, conn, rpc_service).await {
|
||||
Ok((rp, conn_fut)) => {
|
||||
tokio::spawn(async move {
|
||||
tokio::select! {
|
||||
_ = conn_fut => (),
|
||||
_ = disconnect.recv() => {
|
||||
blacklisted_peers.lock().unwrap().insert(remote_addr.ip());
|
||||
},
|
||||
}
|
||||
});
|
||||
Ok(rp)
|
||||
}
|
||||
Err(rp) => Ok(rp),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
} else if !ws::is_upgrade_request(&req) {
|
||||
let (tx, mut disconnect) = mpsc::channel(1);
|
||||
|
||||
let rpc_service = RpcServiceBuilder::new().layer_fn(move |service| CallLimit {
|
||||
service,
|
||||
count: global_http_rate_limit.clone(),
|
||||
state: tx.clone(),
|
||||
});
|
||||
|
||||
let server_cfg = ServerConfig::default();
|
||||
let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);
|
||||
|
||||
// There is another API for making call with just a service as well.
|
||||
//
|
||||
// See [`jsonrpsee::server::http::call_with_service`]
|
||||
async move {
|
||||
tokio::select! {
|
||||
// Rpc call finished successfully.
|
||||
res = http::call_with_service_builder(req, server_cfg, conn, methods, rpc_service) => Ok(res),
|
||||
// Deny the call if the call limit is exceeded.
|
||||
_ = disconnect.recv() => Ok(http::response::denied()),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
} else {
|
||||
async { Ok(http::response::denied()) }.boxed()
|
||||
}
|
||||
});
|
||||
|
||||
// Upgrade the connection to a HTTP service.
|
||||
tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle2.shutdown()));
|
||||
}
|
||||
});
|
||||
|
||||
Ok(server_handle)
|
||||
}
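The `CallLimit` middleware hard-codes the threshold of ten calls. A hedged sketch of an assumed variant (not upstream) that carries the threshold as a field, so different connections or transports could get different budgets; `call` and `batch` would then compare against `self.max_calls` instead of the literal `10`.

// Hypothetical variant of `CallLimit` with a configurable threshold.
#[derive(Clone)]
struct ConfigurableCallLimit<S> {
    service: S,
    count: Arc<AsyncMutex<usize>>,
    max_calls: usize,
    state: mpsc::Sender<()>,
}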
123
reference_jsonrpsee_crate_examples/proc_macro.rs
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::core::{SubscriptionResult, async_trait, client::Subscription};
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::{PendingSubscriptionSink, Server};
|
||||
use jsonrpsee::types::ErrorObjectOwned;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
|
||||
type ExampleHash = [u8; 32];
|
||||
type ExampleStorageKey = Vec<u8>;
|
||||
|
||||
#[rpc(server, client, namespace = "state")]
|
||||
pub trait Rpc<Hash, StorageKey>
|
||||
where
|
||||
Hash: std::fmt::Debug,
|
||||
{
|
||||
/// Async method call example.
|
||||
#[method(name = "getKeys")]
|
||||
async fn storage_keys(
|
||||
&self,
|
||||
storage_key: StorageKey,
|
||||
hash: Option<Hash>,
|
||||
) -> Result<Vec<StorageKey>, ErrorObjectOwned>;
|
||||
|
||||
/// Subscription that takes a `StorageKey` as input and produces a `Vec<Hash>`.
|
||||
#[subscription(name = "subscribeStorage" => "override", item = Vec<Hash>)]
|
||||
async fn subscribe_storage(&self, keys: Option<Vec<StorageKey>>) -> SubscriptionResult;
|
||||
|
||||
#[subscription(name = "subscribeSync" => "sync", item = Vec<Hash>)]
|
||||
fn s(&self, keys: Option<Vec<StorageKey>>);
|
||||
}
|
||||
|
||||
pub struct RpcServerImpl;
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer<ExampleHash, ExampleStorageKey> for RpcServerImpl {
|
||||
async fn storage_keys(
|
||||
&self,
|
||||
storage_key: ExampleStorageKey,
|
||||
_hash: Option<ExampleHash>,
|
||||
) -> Result<Vec<ExampleStorageKey>, ErrorObjectOwned> {
|
||||
Ok(vec![storage_key])
|
||||
}
|
||||
|
||||
async fn subscribe_storage(
|
||||
&self,
|
||||
pending: PendingSubscriptionSink,
|
||||
_keys: Option<Vec<ExampleStorageKey>>,
|
||||
) -> SubscriptionResult {
|
||||
let sink = pending.accept().await?;
|
||||
let json = serde_json::value::to_raw_value(&vec![[0; 32]])?;
|
||||
sink.send(json).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn s(&self, pending: PendingSubscriptionSink, _keys: Option<Vec<ExampleStorageKey>>) {
|
||||
tokio::spawn(async move {
|
||||
let sink = pending.accept().await.unwrap();
|
||||
let json = serde_json::value::to_raw_value(&vec![[0; 32]]).unwrap();
|
||||
sink.send(json).await.unwrap();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let server_addr = run_server().await?;
|
||||
let url = format!("ws://{}", server_addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
assert_eq!(client.storage_keys(vec![1, 2, 3, 4], None::<ExampleHash>).await.unwrap(), vec![vec![1, 2, 3, 4]]);
|
||||
|
||||
let mut sub: Subscription<Vec<ExampleHash>> =
|
||||
RpcClient::<ExampleHash, ExampleStorageKey>::subscribe_storage(&client, None).await.unwrap();
|
||||
assert_eq!(Some(vec![[0; 32]]), sub.next().await.transpose().unwrap());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(RpcServerImpl.into_rpc());
|
||||
|
||||
// In this example we don't care about shutdown, so we just let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
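Because the trait uses `namespace = "state"`, the generated wire method name is `state_getKeys`. A hedged sketch (not part of the upstream file) of making the same call through the untyped client API; the helper name is illustrative.

// Hypothetical helper: call the macro-generated method by its wire name.
async fn call_untyped(client: &jsonrpsee::ws_client::WsClient) -> anyhow::Result<Vec<ExampleStorageKey>> {
    use jsonrpsee::core::client::ClientT;
    use jsonrpsee::rpc_params;

    // `getKeys` in the `state` namespace becomes `state_getKeys` on the wire.
    let keys: Vec<ExampleStorageKey> =
        client.request("state_getKeys", rpc_params![vec![1u8, 2, 3, 4], None::<ExampleHash>]).await?;
    Ok(keys)
}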
95
reference_jsonrpsee_crate_examples/proc_macro_bounds.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::core::async_trait;
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::Server;
|
||||
use jsonrpsee::types::ErrorObjectOwned;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
type ExampleHash = [u8; 32];
|
||||
|
||||
pub trait Config {
|
||||
type Hash: Send + Sync + 'static;
|
||||
}
|
||||
|
||||
impl Config for ExampleHash {
|
||||
type Hash = Self;
|
||||
}
|
||||
|
||||
/// The RPC macro requires `DeserializeOwned` for output types for the client implementation, while the
|
||||
/// server implementation requires output types to be bounded by `Serialize`.
|
||||
///
|
||||
/// In this example, we don't want the `Conf` to be bounded by default to
|
||||
/// `Conf : Send + Sync + 'static + jsonrpsee::core::DeserializeOwned` for client implementation and
|
||||
/// `Conf : Send + Sync + 'static + jsonrpsee::core::Serialize` for server implementation.
|
||||
///
|
||||
/// Instead, we explicitly specify client and server bounds to handle the `Serialize` and `DeserializeOwned` cases
/// just for the `Conf::hash` part.
|
||||
#[rpc(server, client, namespace = "foo", client_bounds(T::Hash: jsonrpsee::core::DeserializeOwned), server_bounds(T::Hash: jsonrpsee::core::Serialize + Clone))]
|
||||
pub trait Rpc<T: Config> {
|
||||
#[method(name = "bar")]
|
||||
fn method(&self) -> Result<T::Hash, ErrorObjectOwned>;
|
||||
}
|
||||
|
||||
pub struct RpcServerImpl;
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer<ExampleHash> for RpcServerImpl {
|
||||
fn method(&self) -> Result<<ExampleHash as Config>::Hash, ErrorObjectOwned> {
|
||||
Ok([0u8; 32])
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let server_addr = run_server().await?;
|
||||
let url = format!("ws://{}", server_addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
assert_eq!(RpcClient::<ExampleHash>::method(&client).await.unwrap(), [0u8; 32]);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(RpcServerImpl.into_rpc());
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
@ -0,0 +1,84 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::Server;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{ResponsePayload, rpc_params};
|
||||
|
||||
#[rpc(client, server, namespace = "state")]
|
||||
pub trait Rpc {
|
||||
/// Async method call example.
|
||||
#[method(name = "getKeys")]
|
||||
fn storage_keys(&self) -> ResponsePayload<'static, String>;
|
||||
}
|
||||
|
||||
pub struct RpcServerImpl;
|
||||
|
||||
impl RpcServer for RpcServerImpl {
|
||||
fn storage_keys(&self) -> ResponsePayload<'static, String> {
|
||||
let (rp, rp_future) = ResponsePayload::success("ehheeheh".to_string()).notify_on_completion();
|
||||
|
||||
tokio::spawn(async move {
|
||||
rp_future.await.unwrap();
|
||||
println!("Method response to `state_getKeys` finished");
|
||||
});
|
||||
|
||||
rp
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let server_addr = run_server().await?;
|
||||
let url = format!("ws://{}", server_addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
assert_eq!("ehheeheh".to_string(), client.request::<String, _>("state_getKeys", rpc_params![]).await.unwrap());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(RpcServerImpl.into_rpc());
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
265
reference_jsonrpsee_crate_examples/rpc_middleware.rs
Normal file
@ -0,0 +1,265 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! jsonrpsee supports two kinds of middlewares `http_middleware` and `rpc_middleware`.
|
||||
//!
|
||||
//! This example demonstrates how to use the `rpc_middleware`, which is applied to each
//! JSON-RPC method call; batch requests may invoke the middleware more than once.
//!
//! A typical use-case for this is to implement rate-limiting based on the actual
//! number of JSON-RPC method calls, where a request could be made
//! over either HTTP or WebSocket, which this middleware is agnostic to.
//!
//! By contrast, the HTTP middleware only applies per HTTP request and
//! may be handy in some scenarios such as CORS, but if you want access
//! to the actual JSON-RPC details this is the middleware to use.
//!
//! This example enables the same middleware for both the server and the client, which
//! can be confusing when one runs this, but it is just to demonstrate that
//! the middleware is applied to the server and client in the same way.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
use jsonrpsee::types::Request;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
|
||||
#[derive(Clone)]
|
||||
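/// A no-op middleware layer: it wraps the inner service and forwards every call,
/// batch and notification unchanged. It exists to show how an optional layer can
/// be plugged in via `option_layer` further down in `run_server`.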
struct IdentityLayer;
|
||||
|
||||
impl<S> tower::Layer<S> for IdentityLayer
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type Service = Identity<S>;
|
||||
|
||||
fn layer(&self, inner: S) -> Self::Service {
|
||||
Identity(inner)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Identity<S>(S);
|
||||
|
||||
impl<S> RpcServiceT for Identity<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
self.0.batch(batch)
|
||||
}
|
||||
|
||||
fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
self.0.call(request)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.0.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
// It's possible to access the connection ID
|
||||
// by using the low-level API.
|
||||
#[derive(Clone)]
|
||||
pub struct CallsPerConn<S> {
|
||||
service: S,
|
||||
count: Arc<AtomicUsize>,
|
||||
role: &'static str,
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for CallsPerConn<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
let count = self.count.clone();
|
||||
let service = self.service.clone();
|
||||
let role = self.role;
|
||||
|
||||
async move {
|
||||
let rp = service.call(req).await;
|
||||
count.fetch_add(1, Ordering::SeqCst);
|
||||
println!("{role} processed calls={} on the connection", count.load(Ordering::SeqCst));
|
||||
rp
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
let len = batch.len();
|
||||
self.count.fetch_add(len, Ordering::SeqCst);
|
||||
println!("{} processed calls={} on the connection", self.role, self.count.load(Ordering::SeqCst));
|
||||
self.service.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.service.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct GlobalCalls<S> {
|
||||
service: S,
|
||||
count: Arc<AtomicUsize>,
|
||||
role: &'static str,
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for GlobalCalls<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
let count = self.count.clone();
|
||||
let service = self.service.clone();
|
||||
let role = self.role;
|
||||
|
||||
async move {
|
||||
let rp = service.call(req).await;
|
||||
count.fetch_add(1, Ordering::SeqCst);
|
||||
println!("{role} processed calls={} in total", count.load(Ordering::SeqCst));
|
||||
|
||||
rp
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
let len = batch.len();
|
||||
self.count.fetch_add(len, Ordering::SeqCst);
|
||||
println!("{}, processed calls={} in total", self.role, self.count.load(Ordering::SeqCst));
|
||||
self.service.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.service.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Logger<S> {
|
||||
service: S,
|
||||
role: &'static str,
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for Logger<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
println!("{} logger middleware: method `{}`", self.role, req.method);
|
||||
self.service.call(req)
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
println!("{} logger middleware: batch {batch}", self.role);
|
||||
self.service.batch(batch)
|
||||
}
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.service.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
for _ in 0..2 {
|
||||
let global_cnt = Arc::new(AtomicUsize::new(0));
|
||||
let rpc_middleware = RpcServiceBuilder::new()
|
||||
.layer_fn(|service| Logger { service, role: "client" })
|
||||
// This state is created per connection.
|
||||
.layer_fn(|service| CallsPerConn { service, count: Default::default(), role: "client" })
|
||||
// This state is shared by all connections.
|
||||
.layer_fn(move |service| GlobalCalls { service, count: global_cnt.clone(), role: "client" });
|
||||
let client = WsClientBuilder::new().set_rpc_middleware(rpc_middleware).build(&url).await?;
|
||||
let response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
println!("response: {:?}", response);
|
||||
let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||
let _: String = client.request("say_hello", rpc_params![]).await?;
|
||||
let _: String = client.request("thready", rpc_params![4]).await?;
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let global_cnt = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let rpc_middleware = RpcServiceBuilder::new()
|
||||
.layer_fn(|service| Logger { service, role: "server" })
|
||||
// This state is created per connection.
|
||||
.layer_fn(|service| CallsPerConn { service, count: Default::default(), role: "server" })
|
||||
// This state is shared by all connections.
|
||||
.layer_fn(move |service| GlobalCalls { service, count: global_cnt.clone(), role: "server" })
|
||||
// Optional layer that does nothing, just an example of how an optional layer can be plugged in.
|
||||
.option_layer(Some(IdentityLayer));
|
||||
let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
module.register_method("thready", |params, _, _| {
|
||||
let thread_count: usize = params.one().unwrap();
|
||||
for _ in 0..thread_count {
|
||||
std::thread::spawn(|| std::thread::sleep(std::time::Duration::from_secs(1)));
|
||||
}
|
||||
""
|
||||
})?;
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
173
reference_jsonrpsee_crate_examples/rpc_middleware_client.rs
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! jsonrpsee supports two kinds of middlewares `http_middleware` and `rpc_middleware`.
|
||||
//!
|
||||
//! This example demonstrates how to use the `rpc_middleware`, which is applied to each
//! JSON-RPC method call, notification and batch request.
//!
//! It shows how to use the `rpc_middleware` on the client side, where you may benefit
//! from specifying the response type as `core::client::MethodResponse`
//! to inspect the actual response instead of the serialized JSON-RPC response.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use jsonrpsee::core::client::{ClientT, MiddlewareMethodResponse, error::Error};
|
||||
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::{RpcModule, Server};
|
||||
use jsonrpsee::types::{ErrorCode, ErrorObject, Request};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
|
||||
#[derive(Default)]
|
||||
struct InnerMetrics {
|
||||
method_calls_success: usize,
|
||||
method_calls_failure: usize,
|
||||
notifications: usize,
|
||||
batch_calls: usize,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
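/// Middleware that counts successful and failed method calls, notifications and
/// batch calls in a shared `Arc<Mutex<InnerMetrics>>`; in this example it is
/// installed on the client.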
pub struct Metrics<S> {
|
||||
service: S,
|
||||
metrics: Arc<Mutex<InnerMetrics>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for InnerMetrics {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("InnerMetrics")
|
||||
.field("method_calls_success", &self.method_calls_success)
|
||||
.field("method_calls_failure", &self.method_calls_failure)
|
||||
.field("notifications", &self.notifications)
|
||||
.field("batch_calls", &self.batch_calls)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Metrics<S> {
|
||||
pub fn new(service: S) -> Self {
|
||||
Self { service, metrics: Arc::new(Mutex::new(InnerMetrics::default())) }
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: We are using MethodResponse as the response type here to be able to inspect the response
|
||||
// and not just the serialized JSON-RPC response. This is not necessary if you only care about
|
||||
// the serialized JSON-RPC response.
|
||||
impl<S> RpcServiceT for Metrics<S>
|
||||
where
|
||||
S: RpcServiceT<MethodResponse = Result<MiddlewareMethodResponse, Error>> + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = Result<MiddlewareMethodResponse, Error>;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
let m = self.metrics.clone();
|
||||
let service = self.service.clone();
|
||||
|
||||
async move {
|
||||
let rp = service.call(req).await;
|
||||
|
||||
// Access to inner response via the deref implementation.
|
||||
match &rp {
|
||||
Ok(rp) => {
|
||||
if rp.is_success() {
|
||||
m.lock().unwrap().method_calls_success += 1;
|
||||
} else {
|
||||
m.lock().unwrap().method_calls_failure += 1;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
m.lock().unwrap().method_calls_failure += 1;
|
||||
tracing::error!("Error: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
rp
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
self.metrics.lock().unwrap().batch_calls += 1;
|
||||
self.service.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
self.metrics.lock().unwrap().notifications += 1;
|
||||
self.service.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let metrics = Arc::new(Mutex::new(InnerMetrics::default()));
|
||||
|
||||
for _ in 0..2 {
|
||||
let metrics = metrics.clone();
|
||||
let rpc_middleware =
|
||||
RpcServiceBuilder::new().layer_fn(move |s| Metrics { service: s, metrics: metrics.clone() });
|
||||
let client = WsClientBuilder::new().set_rpc_middleware(rpc_middleware).build(&url).await?;
|
||||
let _: Result<String, _> = client.request("say_hello", rpc_params![]).await;
|
||||
let _: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||
let _: Result<String, _> = client.request("thready", rpc_params![4]).await;
|
||||
let _: Result<String, _> = client.request("mul", rpc_params![4]).await;
|
||||
let _: Result<String, _> = client.request("err", rpc_params![4]).await;
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
println!("Metrics: {:?}", metrics.lock().unwrap());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
module.register_method("mul", |params, _, _| {
|
||||
let count: usize = params.one().unwrap();
|
||||
count * 2
|
||||
})?;
|
||||
module.register_method("error", |_, _, _| ErrorObject::from(ErrorCode::InternalError))?;
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
@ -0,0 +1,139 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::core::middleware::{Batch, BatchEntry, Notification, RpcServiceBuilder, RpcServiceT};
|
||||
use jsonrpsee::server::Server;
|
||||
use jsonrpsee::types::Request;
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{RpcModule, rpc_params};
|
||||
use std::borrow::Cow as StdCow;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
fn modify_method_call(req: &mut Request<'_>) {
|
||||
// Example how to modify the params in the call.
|
||||
if req.method == "say_hello" {
|
||||
// It's a bit awkward to create new params in the request
|
||||
// but this shows how to do it.
|
||||
let raw_value = serde_json::value::to_raw_value("myparams").unwrap();
|
||||
req.params = Some(StdCow::Owned(raw_value));
|
||||
}
|
||||
// Redirect all calls that aren't `say_hello` to `say_goodbye`
|
||||
else if req.method != "say_hello" {
|
||||
req.method = "say_goodbye".into();
|
||||
}
|
||||
}
|
||||
|
||||
fn modify_notif(n: &mut Notification<'_>) {
|
||||
// Example how to modify the params in the notification.
|
||||
if n.method == "say_hello" {
|
||||
// It's a bit awkward to create new params in the request
|
||||
// but this shows how to do it.
|
||||
let raw_value = serde_json::value::to_raw_value("myparams").unwrap();
|
||||
n.params = Some(StdCow::Owned(raw_value));
|
||||
}
|
||||
// Redirect all notifications that aren't `say_hello` to `say_goodbye`
|
||||
else if n.method != "say_hello" {
|
||||
n.method = "say_goodbye".into();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
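/// Middleware that rewrites method calls and notifications (via `modify_method_call`
/// and `modify_notif` above) before forwarding them to the inner service.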
pub struct ModifyRequestIf<S>(S);
|
||||
|
||||
impl<S> RpcServiceT for ModifyRequestIf<S>
|
||||
where
|
||||
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, mut req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
modify_method_call(&mut req);
|
||||
self.0.call(req)
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, mut batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
for call in batch.iter_mut() {
|
||||
match call {
|
||||
Ok(BatchEntry::Call(call)) => {
|
||||
modify_method_call(call);
|
||||
}
|
||||
Ok(BatchEntry::Notification(n)) => {
|
||||
modify_notif(n);
|
||||
}
|
||||
// Invalid request, we don't care about it.
|
||||
Err(_err) => {}
|
||||
}
|
||||
}
|
||||
|
||||
self.0.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(
|
||||
&self,
|
||||
mut n: Notification<'a>,
|
||||
) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
modify_notif(&mut n);
|
||||
self.0.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
let _response: String = client.request("say_hello", rpc_params![]).await?;
|
||||
let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||
let _: String = client.request("say_hello", rpc_params![]).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let rpc_middleware = RpcServiceBuilder::new().layer_fn(ModifyRequestIf);
|
||||
let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
module.register_method("say_goodbye", |_, _, _| "goodbye")?;
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
@ -0,0 +1,218 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Example middleware to rate limit based on the number of
//! JSON-RPC calls.
//!
//! As demonstrated in this example, any state must be
//! stored in something that provides interior mutability,
//! such as `Arc<Mutex>`.
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::core::middleware::{
|
||||
Batch, BatchEntry, BatchEntryErr, Notification, ResponseFuture, RpcServiceBuilder, RpcServiceT,
|
||||
};
|
||||
use jsonrpsee::server::Server;
|
||||
use jsonrpsee::types::{ErrorObject, Request};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{MethodResponse, RpcModule, rpc_params};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
struct Rate {
|
||||
num: u64,
|
||||
period: Duration,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
enum State {
|
||||
Deny { until: Instant },
|
||||
Allow { until: Instant, rem: u64 },
|
||||
}
|
||||
|
||||
/// Depending on how the rate limit is instantiated,
/// it's possible to select whether the rate limit
/// is applied per connection or shared by
/// all connections.
///
/// Have a look at `async fn run_server` below, which
/// shows how to do it.
|
||||
#[derive(Clone)]
|
||||
pub struct RateLimit<S> {
|
||||
service: S,
|
||||
state: Arc<Mutex<State>>,
|
||||
rate: Rate,
|
||||
}
|
||||
|
||||
impl<S> RateLimit<S> {
|
||||
fn new(service: S, rate: Rate) -> Self {
|
||||
let period = rate.period;
|
||||
let num = rate.num;
|
||||
|
||||
Self {
|
||||
service,
|
||||
rate,
|
||||
state: Arc::new(Mutex::new(State::Allow { until: Instant::now() + period, rem: num + 1 })),
|
||||
}
|
||||
}
|
||||
|
||||
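/// Returns `true` if the current call should be rejected.
///
/// The state keeps a deadline (`until`) and a remaining budget (`rem`): once the
/// deadline has passed a fresh window is started with `rate.num - 1` calls left
/// (the current call counts as the first one); otherwise each call consumes one
/// unit of budget, and when the budget runs out the state flips to `Deny`.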
fn rate_limit_deny(&self) -> bool {
|
||||
let now = Instant::now();
|
||||
let mut lock = self.state.lock().unwrap();
|
||||
let next_state = match *lock {
|
||||
State::Deny { until } => {
|
||||
if now > until {
|
||||
State::Allow { until: now + self.rate.period, rem: self.rate.num - 1 }
|
||||
} else {
|
||||
State::Deny { until }
|
||||
}
|
||||
}
|
||||
State::Allow { until, rem } => {
|
||||
if now > until {
|
||||
State::Allow { until: now + self.rate.period, rem: self.rate.num - 1 }
|
||||
} else {
|
||||
let n = rem - 1;
|
||||
if n > 0 { State::Allow { until: now + self.rate.period, rem: n } } else { State::Deny { until } }
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
*lock = next_state;
|
||||
matches!(next_state, State::Deny { .. })
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> RpcServiceT for RateLimit<S>
|
||||
where
|
||||
S: RpcServiceT<
|
||||
MethodResponse = MethodResponse,
|
||||
BatchResponse = MethodResponse,
|
||||
NotificationResponse = MethodResponse,
|
||||
> + Send
|
||||
+ Sync
|
||||
+ Clone
|
||||
+ 'static,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
if self.rate_limit_deny() {
|
||||
ResponseFuture::ready(MethodResponse::error(req.id, ErrorObject::borrowed(-32000, "RPC rate limit", None)))
|
||||
} else {
|
||||
ResponseFuture::future(self.service.call(req))
|
||||
}
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, mut batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
// If the rate limit is reached then we modify each entry
|
||||
// in the batch to be a request with an error.
|
||||
//
|
||||
// This makes sure that the client will receive an error
|
||||
// for each request in the batch.
|
||||
if self.rate_limit_deny() {
|
||||
for entry in batch.iter_mut() {
|
||||
let id = match entry {
|
||||
Ok(BatchEntry::Call(req)) => req.id.clone(),
|
||||
Ok(BatchEntry::Notification(_)) => continue,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
// This will create a new error response for batch and replace the method call
|
||||
*entry = Err(BatchEntryErr::new(id, ErrorObject::borrowed(-32000, "RPC rate limit", None)));
|
||||
}
|
||||
}
|
||||
|
||||
self.service.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
if self.rate_limit_deny() {
|
||||
// Notifications are not expected to return a response so just ignore
|
||||
// if the rate limit is reached.
|
||||
ResponseFuture::ready(MethodResponse::notification())
|
||||
} else {
|
||||
ResponseFuture::future(self.service.notification(n))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let client1 = WsClientBuilder::default().build(&url).await?;
|
||||
let _response: String = client1.request("say_hello", rpc_params![]).await?;
|
||||
|
||||
// The rate limit should trigger an error here.
|
||||
let _response = client1.request::<String, _>("unknown_method", rpc_params![]).await.unwrap_err();
|
||||
|
||||
// Make a new connection and the server will allow it because our `RateLimit`
|
||||
// applies per connection and not globally on the server.
|
||||
let client2 = WsClientBuilder::default().build(&url).await?;
|
||||
let _response: String = client2.request("say_hello", rpc_params![]).await?;
|
||||
|
||||
// The first connection should allow a call now again.
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
let _response: String = client1.request("say_hello", rpc_params![]).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
// This will create a new `RateLimit` per connection.
|
||||
//
|
||||
// In this particular example the server will only
|
||||
// allow one RPC call per second.
|
||||
//
|
||||
// Have a look at the `rpc_middleware` example if you want to see how to
// share the state of the "middleware" across all connections on the server.
|
||||
let rpc_middleware = RpcServiceBuilder::new()
|
||||
.layer_fn(|service| RateLimit::new(service, Rate { num: 1, period: Duration::from_secs(1) }));
|
||||
|
||||
let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
module.register_method("say_goodbye", |_, _, _| "goodbye")?;
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
@ -0,0 +1,156 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use jsonrpsee::core::middleware::{Batch, Notification, Request, RpcServiceT};
|
||||
use jsonrpsee::core::{SubscriptionResult, async_trait};
|
||||
use jsonrpsee::proc_macros::rpc;
|
||||
use jsonrpsee::server::PendingSubscriptionSink;
|
||||
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{ConnectionId, Extensions};
|
||||
|
||||
#[rpc(server, client)]
|
||||
pub trait Rpc {
|
||||
/// method with connection ID.
|
||||
#[method(name = "connectionIdMethod", with_extensions)]
|
||||
async fn method(&self) -> Result<usize, ErrorObjectOwned>;
|
||||
|
||||
#[subscription(name = "subscribeConnectionId", item = usize, with_extensions)]
|
||||
async fn sub(&self) -> SubscriptionResult;
|
||||
|
||||
#[subscription(name = "subscribeSyncConnectionId", item = usize, with_extensions)]
|
||||
fn sub2(&self) -> SubscriptionResult;
|
||||
}
|
||||
|
||||
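/// Middleware that logs every request, batch and notification it sees and asserts
/// that the server has injected a `ConnectionId` into the request extensions.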
struct LoggingMiddleware<S>(S);
|
||||
|
||||
impl<S> RpcServiceT for LoggingMiddleware<S>
|
||||
where
|
||||
S: RpcServiceT,
|
||||
{
|
||||
type MethodResponse = S::MethodResponse;
|
||||
type NotificationResponse = S::NotificationResponse;
|
||||
type BatchResponse = S::BatchResponse;
|
||||
|
||||
fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||
tracing::info!("Received request: {:?}", request);
|
||||
assert!(request.extensions().get::<ConnectionId>().is_some());
|
||||
|
||||
self.0.call(request)
|
||||
}
|
||||
|
||||
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||
tracing::info!("Received batch: {:?}", batch);
|
||||
self.0.batch(batch)
|
||||
}
|
||||
|
||||
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||
tracing::info!("Received notif: {:?}", n);
|
||||
self.0.notification(n)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RpcServerImpl;
|
||||
|
||||
#[async_trait]
|
||||
impl RpcServer for RpcServerImpl {
|
||||
async fn method(&self, ext: &Extensions) -> Result<usize, ErrorObjectOwned> {
|
||||
let conn_id = ext
|
||||
.get::<ConnectionId>()
|
||||
.cloned()
|
||||
.ok_or_else(|| ErrorObject::owned(0, "No connection details found", None::<()>))?;
|
||||
|
||||
Ok(conn_id.0)
|
||||
}
|
||||
|
||||
async fn sub(&self, pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
|
||||
let sink = pending.accept().await?;
|
||||
let conn_id = ext
|
||||
.get::<ConnectionId>()
|
||||
.cloned()
|
||||
.ok_or_else(|| ErrorObject::owned(0, "No connection details found", None::<()>))?;
|
||||
let json = serde_json::value::to_raw_value(&conn_id)
|
||||
.map_err(|e| ErrorObject::owned(0, format!("Failed to serialize connection ID: {e}"), None::<()>))?;
|
||||
sink.send(json).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sub2(&self, pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
|
||||
let conn_id = ext.get::<ConnectionId>().cloned().unwrap();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let sink = pending.accept().await.unwrap();
|
||||
let json = serde_json::value::to_raw_value(&conn_id).unwrap();
|
||||
sink.send(json).await.unwrap();
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let server_addr = run_server().await?;
|
||||
let url = format!("ws://{}", server_addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
let connection_id_first = client.method().await.unwrap();
|
||||
|
||||
// Second call from the same connection ID.
|
||||
assert_eq!(client.method().await.unwrap(), connection_id_first);
|
||||
|
||||
// Second client will increment the connection ID.
|
||||
let client2 = WsClientBuilder::default().build(&url).await?;
|
||||
let connection_id_second = client2.method().await.unwrap();
|
||||
assert_ne!(connection_id_first, connection_id_second);
|
||||
|
||||
let mut sub = client.sub().await.unwrap();
|
||||
assert_eq!(connection_id_first, sub.next().await.transpose().unwrap().unwrap());
|
||||
|
||||
let mut sub = client2.sub().await.unwrap();
|
||||
assert_eq!(connection_id_second, sub.next().await.transpose().unwrap().unwrap());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
let rpc_middleware = jsonrpsee::server::middleware::rpc::RpcServiceBuilder::new().layer_fn(LoggingMiddleware);
|
||||
|
||||
let server = jsonrpsee::server::Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||
let addr = server.local_addr()?;
|
||||
|
||||
let handle = server.start(RpcServerImpl.into_rpc());
|
||||
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
69
reference_jsonrpsee_crate_examples/tokio_console.rs
Normal file
@ -0,0 +1,69 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Example of how to use `tokio-console` to debug async tasks in `jsonrpsee`.
//! For further information see https://docs.rs/console-subscriber.
//!
//! To run it:
//! `$ cargo install --locked tokio-console`
//! `$ RUSTFLAGS="--cfg tokio_unstable" cargo run --example tokio_console`
//! `$ tokio-console`
//!
//! It will start a server on http://127.0.0.1:6669 for `tokio-console` to connect to.

use std::net::SocketAddr;

use jsonrpsee::RpcModule;
use jsonrpsee::server::Server;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    console_subscriber::init();

    let _ = run_server().await?;

    futures::future::pending().await
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:9944").await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;
    module.register_method("memory_call", |_, _, _| "A".repeat(1024 * 1024))?;
    module.register_async_method("sleep", |_, _, _| async {
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        "lo"
    })?;

    let addr = server.local_addr()?;
    let handle = server.start(module);

    // In this example we don't care about stopping the server so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
67
reference_jsonrpsee_crate_examples/ws.rs
Normal file
@ -0,0 +1,67 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::middleware::RpcServiceBuilder;
use jsonrpsee::server::Server;
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use jsonrpsee::{RpcModule, rpc_params};
use tracing_subscriber::util::SubscriberInitExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
        .add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?);

    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    let addr = run_server().await?;
    let url = format!("ws://{}", addr);

    let client: WsClient = WsClientBuilder::new().build(&url).await?;
    let response: String = client.request("say_hello", rpc_params![]).await?;
    tracing::info!("response: {:?}", response);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let rpc_middleware = RpcServiceBuilder::new().rpc_logger(1024);
    let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;
    let addr = server.local_addr()?;

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
122
reference_jsonrpsee_crate_examples/ws_dual_stack.rs
Normal file
@ -0,0 +1,122 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use jsonrpsee::core::client::ClientT;
|
||||
use jsonrpsee::server::{ServerHandle, serve_with_graceful_shutdown, stop_channel};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{RpcModule, rpc_params};
|
||||
use std::net::SocketAddr;
|
||||
use tokio::net::TcpListener;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
|
||||
.add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?)
|
||||
.add_directive("jsonrpsee-client=trace".parse()?);
|
||||
|
||||
tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;
|
||||
|
||||
let (_server_hdl, addrs) = run_server().await?;
|
||||
let url_v4 = format!("ws://{}", addrs.v4);
|
||||
let url_v6 = format!("ws://{}", addrs.v6);
|
||||
|
||||
let client_v4 = WsClientBuilder::default().build(&url_v4).await?;
|
||||
let client_v6 = WsClientBuilder::default().build(&url_v6).await?;
|
||||
|
||||
let response_v4: String = client_v4.request("say_hello", rpc_params![]).await?;
|
||||
let response_v6: String = client_v6.request("say_hello", rpc_params![]).await?;
|
||||
|
||||
tracing::info!("response V4: {:?}", response_v4);
|
||||
tracing::info!("response V6: {:?}", response_v6);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<(ServerHandle, Addrs)> {
|
||||
let port = 9944;
|
||||
// V4 address
|
||||
let v4_addr = SocketAddr::from(([127, 0, 0, 1], port));
|
||||
// V6 address
|
||||
let v6_addr = SocketAddr::new("::1".parse().unwrap(), port);
|
||||
|
||||
let mut module = RpcModule::new(());
|
||||
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||
|
||||
// Bind to both IPv4 and IPv6 addresses.
|
||||
let listener_v4 = TcpListener::bind(&v4_addr).await?;
|
||||
let listener_v6 = TcpListener::bind(&v6_addr).await?;
|
||||
|
||||
// Each RPC call/connection gets its own `stop_handle`
// to be able to determine whether the server has been stopped or not.
|
||||
//
|
||||
// To keep the server running, the `server_handle`
// must be kept alive; it can also be used to stop the server.
|
||||
let (stop_hdl, server_hdl) = stop_channel();
|
||||
|
||||
// Create and finalize a server configuration from a TowerServiceBuilder
|
||||
// given an RpcModule and the stop handle.
|
||||
let svc = jsonrpsee::server::Server::builder().to_service_builder().build(module, stop_hdl.clone());
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
// The `tokio::select!` macro is used to wait for either of the
|
||||
// listeners to accept a new connection or for the server to be
|
||||
// stopped.
|
||||
let stream = tokio::select! {
|
||||
res = listener_v4.accept() => {
|
||||
match res {
|
||||
Ok((stream, _remote_addr)) => stream,
|
||||
Err(e) => {
|
||||
tracing::error!("failed to accept v4 connection: {:?}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
res = listener_v6.accept() => {
|
||||
match res {
|
||||
Ok((stream, _remote_addr)) => stream,
|
||||
Err(e) => {
|
||||
tracing::error!("failed to accept v6 connection: {:?}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = stop_hdl.clone().shutdown() => break,
|
||||
};
|
||||
|
||||
// Spawn a new task to serve each respective (Hyper) connection.
|
||||
tokio::spawn(serve_with_graceful_shutdown(stream, svc.clone(), stop_hdl.clone().shutdown()));
|
||||
}
|
||||
});
|
||||
|
||||
Ok((server_hdl, Addrs { v4: v4_addr, v6: v6_addr }))
|
||||
}
|
||||
|
||||
struct Addrs {
|
||||
v4: SocketAddr,
|
||||
v6: SocketAddr,
|
||||
}
|
149
reference_jsonrpsee_crate_examples/ws_pubsub_broadcast.rs
Normal file
@ -0,0 +1,149 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Example that shows how to broadcast to all active subscriptions using `tokio::sync::broadcast`.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use futures::StreamExt;
|
||||
use futures::future::{self, Either};
|
||||
use jsonrpsee::PendingSubscriptionSink;
|
||||
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
|
||||
use jsonrpsee::core::middleware::RpcServiceBuilder;
|
||||
use jsonrpsee::rpc_params;
|
||||
use jsonrpsee::server::{RpcModule, Server, ServerConfig};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio_stream::wrappers::BroadcastStream;
|
||||
|
||||
const NUM_SUBSCRIPTION_RESPONSES: usize = 5;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let client1 =
|
||||
WsClientBuilder::default().set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024)).build(&url).await?;
|
||||
let client2 =
|
||||
WsClientBuilder::default().set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024)).build(&url).await?;
|
||||
let sub1: Subscription<i32> = client1.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;
|
||||
let sub2: Subscription<i32> = client2.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;
|
||||
|
||||
let fut1 = sub1.take(NUM_SUBSCRIPTION_RESPONSES).for_each(|r| async move { tracing::info!("sub1 rx: {:?}", r) });
|
||||
let fut2 = sub2.take(NUM_SUBSCRIPTION_RESPONSES).for_each(|r| async move { tracing::info!("sub2 rx: {:?}", r) });
|
||||
|
||||
future::join(fut1, fut2).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
// let's configure the server to only hold 5 messages in memory.
|
||||
let config = ServerConfig::builder().set_message_buffer_capacity(5).build();
|
||||
let server = Server::builder()
|
||||
.set_config(config)
|
||||
.set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024))
|
||||
.build("127.0.0.1:0")
|
||||
.await?;
|
||||
let (tx, _rx) = broadcast::channel::<usize>(16);
|
||||
|
||||
let mut module = RpcModule::new(tx.clone());
|
||||
|
||||
std::thread::spawn(move || produce_items(tx));
|
||||
|
||||
module
|
||||
.register_subscription("subscribe_hello", "s_hello", "unsubscribe_hello", |_, pending, tx, _| async move {
|
||||
let rx = tx.subscribe();
|
||||
let stream = BroadcastStream::new(rx);
|
||||
pipe_from_stream_with_bounded_buffer(pending, stream).await?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown, so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
async fn pipe_from_stream_with_bounded_buffer(
|
||||
pending: PendingSubscriptionSink,
|
||||
stream: BroadcastStream<usize>,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let sink = pending.accept().await?;
|
||||
let closed = sink.closed();
|
||||
|
||||
futures::pin_mut!(closed, stream);
|
||||
|
||||
loop {
|
||||
match future::select(closed, stream.next()).await {
|
||||
// subscription closed.
|
||||
Either::Left((_, _)) => break Ok(()),
|
||||
|
||||
// received new item from the stream.
|
||||
Either::Right((Some(Ok(item)), c)) => {
|
||||
let msg = serde_json::value::to_raw_value(&item)?;
|
||||
|
||||
// NOTE: this will block until there is a spot in the queue
|
||||
// and you might want to do something smarter if it's
|
||||
// critical that "the most recent item" must be sent when it is produced.
|
||||
if sink.send(msg).await.is_err() {
|
||||
break Ok(());
|
||||
}
|
||||
|
||||
closed = c;
|
||||
}
|
||||
|
||||
// Send back the error.
|
||||
Either::Right((Some(Err(e)), _)) => break Err(e.into()),
|
||||
|
||||
// Stream is closed.
|
||||
Either::Right((None, _)) => break Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Naive example that broadcasts the produced values to all active subscribers.
|
||||
fn produce_items(tx: broadcast::Sender<usize>) {
|
||||
for c in 1..=100 {
|
||||
std::thread::sleep(std::time::Duration::from_millis(1));
|
||||
|
||||
// This might fail if no receivers are alive, which can occur if no subscriptions are active...
|
||||
// Also be aware that this will succeed when at least one receiver is alive
|
||||
// Thus, clients connecting at different points in time will not receive
|
||||
// the items sent before the subscription got established.
|
||||
let _ = tx.send(c);
|
||||
}
|
||||
}
|
138
reference_jsonrpsee_crate_examples/ws_pubsub_with_params.rs
Normal file
@ -0,0 +1,138 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures::{Stream, StreamExt};
|
||||
use jsonrpsee::core::Serialize;
|
||||
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
|
||||
use jsonrpsee::server::{RpcModule, Server, ServerConfig, TrySendError};
|
||||
use jsonrpsee::ws_client::WsClientBuilder;
|
||||
use jsonrpsee::{PendingSubscriptionSink, rpc_params};
|
||||
use tokio::time::interval;
|
||||
use tokio_stream::wrappers::IntervalStream;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.try_init()
|
||||
.expect("setting default subscriber failed");
|
||||
|
||||
let addr = run_server().await?;
|
||||
let url = format!("ws://{}", addr);
|
||||
|
||||
let client = WsClientBuilder::default().build(&url).await?;
|
||||
|
||||
// Subscription with a single parameter
|
||||
let mut sub_params_one: Subscription<Option<char>> =
|
||||
client.subscribe("sub_one_param", rpc_params![3], "unsub_one_param").await?;
|
||||
tracing::info!("subscription with one param: {:?}", sub_params_one.next().await);
|
||||
|
||||
// Subscription with multiple parameters
|
||||
let mut sub_params_two: Subscription<String> =
|
||||
client.subscribe("sub_params_two", rpc_params![2, 5], "unsub_params_two").await?;
|
||||
tracing::info!("subscription with two params: {:?}", sub_params_two.next().await);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||
const LETTERS: &str = "abcdefghijklmnopqrstuvxyz";
|
||||
let config = ServerConfig::builder().set_message_buffer_capacity(10).build();
|
||||
let server = Server::builder().set_config(config).build("127.0.0.1:0").await?;
|
||||
let mut module = RpcModule::new(());
|
||||
module
|
||||
.register_subscription(
|
||||
"sub_one_param",
|
||||
"sub_one_param",
|
||||
"unsub_one_param",
|
||||
|params, pending, _, _| async move {
|
||||
// we do it this verbose way to get a customized reject error on the subscription.
|
||||
let idx = match params.one::<usize>() {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
let _ = pending.reject(e).await;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let item = LETTERS.chars().nth(idx);
|
||||
|
||||
let interval = interval(Duration::from_millis(200));
|
||||
let stream = IntervalStream::new(interval).map(move |_| item);
|
||||
|
||||
pipe_from_stream_and_drop(pending, stream).await.map_err(Into::into)
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
module
|
||||
.register_subscription("sub_params_two", "params_two", "unsub_params_two", |params, pending, _, _| async move {
|
||||
let (one, two) = params.parse::<(usize, usize)>()?;
|
||||
|
||||
let item = &LETTERS[one..two];
|
||||
let interval = interval(Duration::from_millis(200));
|
||||
let stream = IntervalStream::new(interval).map(move |_| item);
|
||||
pipe_from_stream_and_drop(pending, stream).await.map_err(Into::into)
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let addr = server.local_addr()?;
|
||||
let handle = server.start(module);
|
||||
|
||||
// In this example we don't care about doing shutdown, so let it run forever.
|
||||
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||
tokio::spawn(handle.stopped());
|
||||
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
pub async fn pipe_from_stream_and_drop<T: Serialize>(
|
||||
pending: PendingSubscriptionSink,
|
||||
mut stream: impl Stream<Item = T> + Unpin,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let mut sink = pending.accept().await?;
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = sink.closed() => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||
maybe_item = stream.next() => {
|
||||
let item = match maybe_item {
|
||||
Some(item) => item,
|
||||
None => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||
};
|
||||
let msg = serde_json::value::to_raw_value(&item)?;
|
||||
match sink.try_send(msg) {
|
||||
Ok(_) => (),
|
||||
Err(TrySendError::Closed(_)) => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||
// channel is full, let's be naive and just drop the message.
|
||||
Err(TrySendError::Full(_)) => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
54
reference_osis_actor/Cargo.toml
Normal file
@ -0,0 +1,54 @@
|
||||
[package]
|
||||
name = "actor_osis"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[lib]
|
||||
name = "actor_osis" # Can be different from package name, or same
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "actor_osis"
|
||||
path = "cmd/actor_osis.rs"
|
||||
|
||||
[[example]]
|
||||
name = "engine"
|
||||
path = "examples/engine.rs"
|
||||
|
||||
[[example]]
|
||||
name = "actor"
|
||||
path = "examples/actor.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
redis = { version = "0.25.0", features = ["tokio-comp"] }
|
||||
rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
|
||||
log = "0.4"
|
||||
env_logger = "0.10"
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
uuid = { version = "1.6", features = ["v4", "serde"] } # Though task_id is string, uuid might be useful
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
toml = "0.8"
|
||||
thiserror = "1.0"
|
||||
async-trait = "0.1"
|
||||
hero_job = { git = "https://git.ourworld.tf/herocode/baobab.git"}
|
||||
baobab_actor = { git = "https://git.ourworld.tf/herocode/baobab.git"}
|
||||
heromodels = { git = "https://git.ourworld.tf/herocode/db.git" }
|
||||
heromodels_core = { git = "https://git.ourworld.tf/herocode/db.git" }
|
||||
heromodels-derive = { git = "https://git.ourworld.tf/herocode/db.git" }
|
||||
rhailib_dsl = { git = "https://git.ourworld.tf/herocode/rhailib.git" }
|
||||
hero_logger = { git = "https://git.ourworld.tf/herocode/baobab.git", branch = "logger" }
|
||||
tracing = "0.1.41"
|
||||
|
||||
[features]
|
||||
default = ["calendar", "finance"]
|
||||
calendar = []
|
||||
finance = []
|
||||
flow = []
|
||||
legal = []
|
||||
projects = []
|
||||
biz = []
|
79
reference_osis_actor/README.md
Normal file
@ -0,0 +1,79 @@
|
||||
# Object Storage and Indexing System (OSIS) Actor
|
||||
|
||||
The OSIS Actor is responsible for storing and indexing objects in the system. It implements the actor interface to process jobs in a **blocking, synchronized manner**.
|
||||
|
||||
## Job Processing Behavior
|
||||
|
||||
The OSISActor processes jobs sequentially with the following characteristics:
|
||||
|
||||
- **Blocking Processing**: Each job is processed completely before the next job begins
|
||||
- **Synchronized Execution**: Jobs are executed one at a time in the order they are received
|
||||
- **No Concurrency**: Unlike async actors, OSIS ensures no parallel job execution
|
||||
- **Deterministic Order**: Job completion follows the exact order of job submission
|
||||
|
||||
This design ensures data consistency and prevents race conditions when performing storage and indexing operations.
|
||||
|
||||
## Usage
|
||||
|
||||
```rust
|
||||
use actor_osis::{OSISActor, spawn_osis_actor};
|
||||
|
||||
// Create an OSIS actor with builder pattern
|
||||
let actor = OSISActor::builder()
|
||||
.db_path("/path/to/database")
|
||||
.redis_url("redis://localhost:6379")
|
||||
.build()
|
||||
.expect("Failed to build OSISActor");
|
||||
|
||||
// Or spawn directly with convenience function
|
||||
let handle = spawn_osis_actor(
|
||||
"/path/to/database".to_string(),
|
||||
"redis://localhost:6379".to_string(),
|
||||
shutdown_rx,
|
||||
);
|
||||
```
|
||||
|
||||
## Actor Properties
|
||||
|
||||
- **Actor ID**: `"osis"` (constant)
|
||||
- **Actor Type**: `"OSIS"`
|
||||
- **Processing Model**: Sequential, blocking
|
||||
- **Script Engine**: Rhai with OSIS-specific DSL extensions
|
||||
## Canonical Redis queues and verification
|
||||
|
||||
The project uses canonical dispatch queues per script type. For OSIS, the work queue is:
|
||||
- hero:q:work:type:osis
|
||||
|
||||
Consumer behavior:
|
||||
- The in-repo actor derives ScriptType=OSIS from its actor_id containing "osis" and BLPOPs hero:q:work:type:osis.
|
||||
- This repo’s OSIS actor has been updated so its actor_id is "osis", ensuring it consumes the canonical queue.
|
||||
|
||||
Quick verification (redis-cli):
|
||||
- List work queues:
|
||||
- KEYS hero:q:work:type:*
|
||||
- Check OSIS queue length:
|
||||
- LLEN hero:q:work:type:osis
|
||||
- Inspect a specific job (replace {job_id} with the printed id):
|
||||
- HGET hero:job:{job_id} status
|
||||
- HGET hero:job:{job_id} output
|
||||
|
||||
Run options:
|
||||
- Option A: Run the example which spawns the OSIS actor and dispatches jobs to the canonical queue.
|
||||
1) Start Redis (if not already): redis-server
|
||||
2) In this repo:
|
||||
- cargo run --example actor
|
||||
3) Observe the console: job IDs will be printed as they are created and dispatched.
|
||||
4) In a separate terminal, verify with redis-cli:
|
||||
- LLEN hero:q:work:type:osis (will briefly increment, then return to 0 as the actor consumes)
|
||||
- HGET hero:job:{job_id} status (should transition to started then finished)
|
||||
- HGET hero:job:{job_id} output (should contain the script result)
|
||||
|
||||
- Option B: Run the standalone actor binary and dispatch from another process that pushes to the canonical type queue.
|
||||
1) Start the actor:
|
||||
- cargo run --bin actor_osis
|
||||
2) From any producer, LPUSH hero:q:work:type:osis {job_id} after persisting the job hash hero:job:{job_id} (see the producer sketch at the end of this README).
|
||||
3) Use the same redis-cli checks above to confirm consumption and completion.
|
||||
|
||||
Notes:
|
||||
- Hash-only result model is the default. The job result is written to hero:job:{job_id}.output and status=finished.
|
||||
- Reply queues (hero:q:reply:{job_id}) are optional and not required for OSIS to function.
|
60
reference_osis_actor/cmd/actor_osis.rs
Normal file
@ -0,0 +1,60 @@
|
||||
use actor_osis::OSISActor;
|
||||
use clap::Parser;
|
||||
use log::info;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "actor_osis")]
|
||||
#[command(about = "OSIS Actor - Synchronous job processing actor")]
|
||||
struct Args {
|
||||
/// Database path
|
||||
#[arg(short, long, default_value = "/tmp/osis_db")]
|
||||
db_path: String,
|
||||
|
||||
/// Redis URL
|
||||
#[arg(short, long, default_value = "redis://localhost:6379")]
|
||||
redis_url: String,
|
||||
|
||||
/// Preserve completed tasks in Redis
|
||||
#[arg(short, long)]
|
||||
preserve_tasks: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
env_logger::init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
info!("Starting OSIS Actor");
|
||||
|
||||
// Create shutdown channel
|
||||
let (shutdown_tx, shutdown_rx) = mpsc::channel(1);
|
||||
|
||||
// Setup signal handler for graceful shutdown
|
||||
let shutdown_tx_clone = shutdown_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
|
||||
info!("Received Ctrl+C, initiating shutdown...");
|
||||
let _ = shutdown_tx_clone.send(()).await;
|
||||
});
|
||||
|
||||
// Create and start the actor
|
||||
let actor = Arc::new(
|
||||
OSISActor::builder()
|
||||
.db_path(args.db_path)
|
||||
.redis_url(args.redis_url)
|
||||
.build()?
|
||||
);
|
||||
|
||||
let handle = baobab_actor::spawn_actor(actor, shutdown_rx);
|
||||
|
||||
info!("OSIS Actor started, waiting for jobs...");
|
||||
|
||||
// Wait for the actor to complete
|
||||
handle.await??;
|
||||
|
||||
info!("OSIS Actor shutdown complete");
|
||||
Ok(())
|
||||
}
|
179
reference_osis_actor/src/engine.rs
Normal file
@ -0,0 +1,179 @@
|
||||
//! # Rhailib Domain-Specific Language (DSL) Engine
|
||||
//!
|
||||
//! This module provides a comprehensive Domain-Specific Language implementation for the Rhai
|
||||
//! scripting engine, exposing business domain models and operations through a fluent,
|
||||
//! chainable API.
|
||||
//!
|
||||
//! ## Overview
|
||||
//!
|
||||
//! The DSL is organized into business domain modules, each providing Rhai-compatible
|
||||
//! functions for creating, manipulating, and persisting domain entities. All operations
|
||||
//! include proper authorization checks and type safety.
|
||||
//!
|
||||
//! ## Available Domains
|
||||
//!
|
||||
//! - **Business Operations** (`biz`): Companies, products, sales, shareholders
|
||||
//! - **Financial Models** (`finance`): Accounts, assets, marketplace operations
|
||||
//! - **Content Management** (`library`): Collections, images, PDFs, books, slideshows
|
||||
//! - **Workflow Management** (`flow`): Flows, steps, signature requirements
|
||||
//! - **Community Management** (`circle`): Circles, themes, membership
|
||||
//! - **Contact Management** (`contact`): Contact information and relationships
|
||||
//! - **Access Control** (`access`): Security and permissions
|
||||
//! - **Time Management** (`calendar`): Calendar and scheduling
|
||||
//! - **Core Utilities** (`core`): Comments and fundamental operations
|
||||
//! - **Generic Objects** (`object`): Generic object manipulation
|
||||
//!
|
||||
//! ## Usage Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! use rhai::Engine;
|
||||
//! use crate::engine::register_dsl_modules;
|
||||
//!
|
||||
//! let mut engine = Engine::new();
|
||||
//! register_dsl_modules(&mut engine);
|
||||
//!
|
||||
//! // Now the engine can execute scripts like:
|
||||
//! // let company = new_company().name("Acme Corp").email("contact@acme.com");
|
||||
//! // let saved = save_company(company);
|
||||
//! ```
|
||||
|
||||
use rhai::Engine;
|
||||
use rhailib_dsl;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
/// Engine factory for creating and sharing Rhai engines.
|
||||
pub struct EngineFactory {
|
||||
engine: Arc<Engine>,
|
||||
}
|
||||
|
||||
impl EngineFactory {
|
||||
/// Create a new engine factory with a configured Rhai engine.
|
||||
pub fn new() -> Self {
|
||||
let mut engine = Engine::new();
|
||||
register_dsl_modules(&mut engine);
|
||||
// Logger
|
||||
hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "osis_actor");
|
||||
|
||||
Self {
|
||||
engine: Arc::new(engine),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a shared reference to the engine.
|
||||
pub fn get_engine(&self) -> Arc<Engine> {
|
||||
Arc::clone(&self.engine)
|
||||
}
|
||||
|
||||
/// Get the global singleton engine factory.
|
||||
pub fn global() -> &'static EngineFactory {
|
||||
static FACTORY: OnceLock<EngineFactory> = OnceLock::new();
|
||||
FACTORY.get_or_init(|| EngineFactory::new())
|
||||
}
|
||||
}
|
||||
|
||||
/// Register basic object functions directly in the engine.
|
||||
/// This provides object functionality without relying on the problematic rhailib_dsl object module.
|
||||
fn register_object_functions(engine: &mut Engine) {
|
||||
use heromodels::models::object::Object;
|
||||
|
||||
// Register the Object type
|
||||
engine.register_type_with_name::<Object>("Object");
|
||||
|
||||
// Register constructor function
|
||||
engine.register_fn("new_object", || Object::new());
|
||||
|
||||
// Register setter functions
|
||||
engine.register_fn("object_title", |obj: &mut Object, title: String| {
|
||||
obj.title = title;
|
||||
obj.clone()
|
||||
});
|
||||
|
||||
engine.register_fn(
|
||||
"object_description",
|
||||
|obj: &mut Object, description: String| {
|
||||
obj.description = description;
|
||||
obj.clone()
|
||||
},
|
||||
);
|
||||
|
||||
// Register getter functions
|
||||
engine.register_fn("get_object_id", |obj: &mut Object| obj.id() as i64);
|
||||
engine.register_fn("get_object_title", |obj: &mut Object| obj.title.clone());
|
||||
engine.register_fn("get_object_description", |obj: &mut Object| {
|
||||
obj.description.clone()
|
||||
});
|
||||
}
|
||||
|
||||
/// Registers all DSL modules with the provided Rhai engine.
|
||||
///
|
||||
/// This function is the main entry point for integrating the rhailib DSL with a Rhai engine.
|
||||
/// It registers all business domain modules, making their functions available to Rhai scripts.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `engine` - A mutable reference to the Rhai engine to register modules with
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use rhai::Engine;
|
||||
/// use crate::engine::register_dsl_modules;
|
||||
///
|
||||
/// let mut engine = Engine::new();
|
||||
/// register_dsl_modules(&mut engine);
|
||||
///
|
||||
/// // Engine now has access to all DSL functions
|
||||
/// let result = engine.eval::<String>(r#"
|
||||
/// let company = new_company().name("Test Corp");
|
||||
/// company.name
|
||||
/// "#).unwrap();
|
||||
/// assert_eq!(result, "Test Corp");
|
||||
/// ```
|
||||
///
|
||||
/// # Registered Modules
|
||||
///
|
||||
/// This function registers the following domain modules:
|
||||
/// - Access control functions
|
||||
/// - Business operation functions (companies, products, sales, shareholders)
|
||||
/// - Calendar and scheduling functions
|
||||
/// - Circle and community management functions
|
||||
/// - Company management functions
|
||||
/// - Contact management functions
|
||||
/// - Core utility functions
|
||||
/// - Financial operation functions (accounts, assets, marketplace)
|
||||
/// - Workflow management functions (flows, steps, signatures)
|
||||
/// - Library and content management functions
|
||||
/// - Generic object manipulation functions (custom implementation)
|
||||
pub fn register_dsl_modules(engine: &mut Engine) {
|
||||
rhailib_dsl::access::register_access_rhai_module(engine);
|
||||
rhailib_dsl::biz::register_biz_rhai_module(engine);
|
||||
rhailib_dsl::calendar::register_calendar_rhai_module(engine);
|
||||
rhailib_dsl::circle::register_circle_rhai_module(engine);
|
||||
rhailib_dsl::company::register_company_rhai_module(engine);
|
||||
rhailib_dsl::contact::register_contact_rhai_module(engine);
|
||||
rhailib_dsl::core::register_core_rhai_module(engine);
|
||||
rhailib_dsl::finance::register_finance_rhai_modules(engine);
|
||||
// rhailib_dsl::flow::register_flow_rhai_modules(engine);
|
||||
rhailib_dsl::library::register_library_rhai_module(engine);
|
||||
// Skip problematic object module for now - can be implemented separately if needed
|
||||
// rhailib_dsl::object::register_object_fns(engine);
|
||||
rhailib_dsl::payment::register_payment_rhai_module(engine);
|
||||
|
||||
// Register basic object functionality directly
|
||||
register_object_functions(engine);
|
||||
|
||||
println!("Rhailib Domain Specific Language modules registered successfully.");
|
||||
}
|
||||
|
||||
/// Create a shared heromodels engine using the factory.
|
||||
pub fn create_osis_engine() -> Arc<Engine> {
|
||||
EngineFactory::global().get_engine()
|
||||
}
|
||||
|
||||
/// Evaluate a Rhai script string.
|
||||
pub fn eval_script(
|
||||
engine: &Engine,
|
||||
script: &str,
|
||||
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
|
||||
engine.eval(script)
|
||||
}
|
332
reference_osis_actor/src/lib.rs
Normal file
@ -0,0 +1,332 @@
|
||||
mod engine;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use baobab_actor::execute_job_with_engine;
|
||||
use hero_job::{Job, JobStatus, ScriptType};
|
||||
use hero_logger::{create_job_logger, create_job_logger_with_guard};
|
||||
use log::{error, info};
|
||||
use redis::AsyncCommands;
|
||||
use rhai::Engine;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::task::JoinHandle;
|
||||
use tracing::subscriber::with_default;
|
||||
|
||||
use baobab_actor::{actor_trait::Actor, spawn_actor};
|
||||
|
||||
/// Constant actor ID for OSIS actor
|
||||
const OSIS: &str = "osis";
|
||||
|
||||
/// Builder for OSISActor
|
||||
#[derive(Debug)]
|
||||
pub struct OSISActorBuilder {
|
||||
engine: Option<Arc<Engine>>,
|
||||
db_path: Option<String>,
|
||||
redis_url: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for OSISActorBuilder {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
engine: None,
|
||||
db_path: None,
|
||||
redis_url: Some("redis://localhost:6379".to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl OSISActorBuilder {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn engine(mut self, engine: Engine) -> Self {
|
||||
self.engine = Some(Arc::new(engine));
|
||||
self
|
||||
}
|
||||
|
||||
pub fn shared_engine(mut self, engine: Arc<Engine>) -> Self {
|
||||
self.engine = Some(engine);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn db_path<S: Into<String>>(mut self, db_path: S) -> Self {
|
||||
self.db_path = Some(db_path.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn redis_url<S: Into<String>>(mut self, redis_url: S) -> Self {
|
||||
self.redis_url = Some(redis_url.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Result<OSISActor, String> {
|
||||
let engine = self
|
||||
.engine
|
||||
.unwrap_or_else(|| crate::engine::create_osis_engine());
|
||||
|
||||
Ok(OSISActor {
|
||||
engine,
|
||||
db_path: self.db_path.ok_or("db_path is required")?,
|
||||
redis_url: self
|
||||
.redis_url
|
||||
.unwrap_or("redis://localhost:6379".to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// OSIS actor that processes jobs in a blocking, synchronized manner
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OSISActor {
|
||||
pub engine: Arc<Engine>,
|
||||
pub db_path: String,
|
||||
pub redis_url: String,
|
||||
}
|
||||
|
||||
impl OSISActor {
|
||||
/// Create a new OSISActorBuilder
|
||||
pub fn builder() -> OSISActorBuilder {
|
||||
OSISActorBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for OSISActor {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
engine: crate::engine::create_osis_engine(),
|
||||
db_path: "/tmp".to_string(),
|
||||
redis_url: "redis://localhost:6379".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Actor for OSISActor {
|
||||
async fn process_job(&self, job: Job, redis_conn: &mut redis::aio::MultiplexedConnection) {
|
||||
let job_id = &job.id;
|
||||
let _db_path = &self.db_path;
|
||||
|
||||
// Debug: Log job details
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Processing job with context_id: {}, script length: {}",
|
||||
OSIS, job_id, job.context_id, job.script.len()
|
||||
);
|
||||
|
||||
// Create job-specific logger
|
||||
let (job_logger, guard) = match create_job_logger_with_guard("logs", "osis", job_id) {
|
||||
Ok((logger, guard)) => {
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Job logger created successfully",
|
||||
OSIS, job_id
|
||||
);
|
||||
(logger, guard)
|
||||
},
|
||||
Err(e) => {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to create job logger: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Starting sequential processing",
|
||||
OSIS, job_id
|
||||
);
|
||||
|
||||
// Update job status to Started
|
||||
if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Started).await {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to update status to Started: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Execute ALL job processing within logging context
|
||||
let job_result = with_default(job_logger, || {
|
||||
tracing::info!(target: "osis_actor", "Job {} started", job_id);
|
||||
|
||||
// Move the Rhai script execution inside this scope
|
||||
// IMPORTANT: Create a new engine and configure Rhai logging for this job context
|
||||
let mut job_engine = Engine::new();
|
||||
register_dsl_modules(&mut job_engine);
|
||||
// Configure Rhai logging integration for this engine instance
|
||||
hero_logger::rhai_integration::configure_rhai_logging(&mut job_engine, "osis_actor");
|
||||
|
||||
// Execute the script within the job logger context
|
||||
let script_result = tokio::task::block_in_place(|| {
|
||||
tokio::runtime::Handle::current().block_on(async {
|
||||
execute_job_with_engine(&mut job_engine, &job, &self.db_path).await
|
||||
})
|
||||
});
|
||||
|
||||
tracing::info!(target: "osis_actor", "Job {} completed", job_id);
|
||||
|
||||
script_result // Return the result
|
||||
});
|
||||
|
||||
// Handle the result outside the logging context
|
||||
match job_result {
|
||||
Ok(result) => {
|
||||
let result_str = format!("{:?}", result);
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Script executed successfully. Result: {}",
|
||||
OSIS, job_id, result_str
|
||||
);
|
||||
|
||||
// Update job with success result (stores in job hash output field)
|
||||
if let Err(e) = Job::set_result(redis_conn, job_id, &result_str).await {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to set result: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Also push result to result queue for retrieval
|
||||
let result_queue_key = format!("hero:job:{}:result", job_id);
|
||||
if let Err(e) = redis_conn
|
||||
.lpush::<_, _, ()>(&result_queue_key, &result_str)
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to push result to queue {}: {}",
|
||||
OSIS, job_id, result_queue_key, e
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Result pushed to queue: {}",
|
||||
OSIS, job_id, result_queue_key
|
||||
);
|
||||
}
|
||||
|
||||
if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Finished).await {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to update status to Finished: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
let error_msg = format!("Script execution error: {}", e);
|
||||
error!("OSIS Actor '{}', Job {}: {}", OSIS, job_id, error_msg);
|
||||
|
||||
// Update job with error (stores in job hash error field)
|
||||
if let Err(e) = Job::set_error(redis_conn, job_id, &error_msg).await {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to set error: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
}
|
||||
|
||||
// Also push error to error queue for retrieval
|
||||
let error_queue_key = format!("hero:job:{}:error", job_id);
|
||||
if let Err(e) = redis_conn
|
||||
.lpush::<_, _, ()>(&error_queue_key, &error_msg)
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to push error to queue {}: {}",
|
||||
OSIS, job_id, error_queue_key, e
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Error pushed to queue: {}",
|
||||
OSIS, job_id, error_queue_key
|
||||
);
|
||||
}
|
||||
|
||||
if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Error).await {
|
||||
error!(
|
||||
"OSIS Actor '{}', Job {}: Failed to update status to Error: {}",
|
||||
OSIS, job_id, e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Force flush logs before dropping guard
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
|
||||
// Keep the guard alive until after processing
|
||||
drop(guard);
|
||||
|
||||
info!(
|
||||
"OSIS Actor '{}', Job {}: Sequential processing completed",
|
||||
OSIS, job_id
|
||||
);
|
||||
}
|
||||
|
||||
fn actor_type(&self) -> &'static str {
|
||||
"OSIS"
|
||||
}
|
||||
|
||||
fn actor_id(&self) -> &str {
|
||||
// Actor ID contains "osis" so the runtime derives ScriptType=OSIS and consumes the canonical type queue.
|
||||
"osis"
|
||||
}
|
||||
|
||||
fn redis_url(&self) -> &str {
|
||||
&self.redis_url
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience function to spawn an OSIS actor using the trait interface
|
||||
///
|
||||
/// This function provides backward compatibility with the original actor API
|
||||
/// while using the new trait-based implementation.
|
||||
pub fn spawn_osis_actor(
|
||||
db_path: String,
|
||||
redis_url: String,
|
||||
shutdown_rx: mpsc::Receiver<()>,
|
||||
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
|
||||
let actor = Arc::new(
|
||||
OSISActor::builder()
|
||||
.db_path(db_path)
|
||||
.redis_url(redis_url)
|
||||
.build()
|
||||
.expect("Failed to build OSISActor"),
|
||||
);
|
||||
spawn_actor(actor, shutdown_rx)
|
||||
}
|
||||
|
||||
// Re-export engine functions for examples and external use
|
||||
pub use crate::engine::{create_osis_engine, register_dsl_modules};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_osis_actor_creation() {
|
||||
let actor = OSISActor::builder().build().unwrap();
|
||||
assert_eq!(actor.actor_type(), "OSIS");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_osis_actor_default() {
|
||||
let actor = OSISActor::default();
|
||||
assert_eq!(actor.actor_type(), "OSIS");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_osis_actor_process_job_interface() {
|
||||
let actor = OSISActor::default();
|
||||
|
||||
// Create a simple test job
|
||||
let _job = Job::new(
|
||||
"test_caller".to_string(),
|
||||
"test_context".to_string(),
|
||||
r#"print("Hello from sync actor test!"); 42"#.to_string(),
|
||||
ScriptType::OSIS,
|
||||
);
|
||||
|
||||
// Note: This test doesn't actually connect to Redis, it just tests the interface
|
||||
// In a real test environment, you'd need a Redis instance or mock
|
||||
|
||||
// For now, just verify the actor was created successfully
|
||||
assert_eq!(actor.actor_type(), "OSIS");
|
||||
}
|
||||
}
|
109
tools/gen_auth.py
Normal file
@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate secp256k1 keypair and sign a nonce in the exact format the server expects.
|
||||
|
||||
Install dependencies once:
|
||||
python3 -m pip install -r tools/requirements.txt
|
||||
|
||||
Usage examples:
|
||||
# Generate a new keypair and sign a nonce (prints PRIVATE_HEX, PUBLIC_HEX, SIGNATURE_HEX)
|
||||
python tools/gen_auth.py --nonce "PASTE_NONCE_FROM_fetch_nonce"
|
||||
|
||||
# Sign with an existing private key (64 hex chars)
|
||||
python tools/gen_auth.py --nonce "PASTE_NONCE" --priv "YOUR_PRIVATE_KEY_HEX"
|
||||
|
||||
# Output JSON instead of key=value lines
|
||||
python tools/gen_auth.py --nonce "PASTE_NONCE" --json
|
||||
|
||||
Notes:
|
||||
- Public key is compressed (33 bytes) hex, starting with 02/03 (66 hex chars total).
|
||||
- Signature is compact ECDSA (r||s) 64 bytes (128 hex chars).
|
||||
- The nonce should be the exact ASCII string returned by fetch_nonce().
|
||||
- The message signed is sha256(nonce_ascii) to match client/server behavior:
|
||||
- [rust.AuthHelper::sign_message()](interfaces/openrpc/client/src/auth.rs:55)
|
||||
- [rust.AuthManager::verify_signature()](interfaces/openrpc/server/src/auth.rs:85)
|
||||
"""
|
||||
import argparse
|
||||
import hashlib
|
||||
import json
|
||||
import sys
|
||||
from typing import Dict, Tuple, Optional
|
||||
|
||||
try:
|
||||
from ecdsa import SigningKey, VerifyingKey, SECP256k1, util
|
||||
except Exception as e:
|
||||
print("Missing dependency 'ecdsa'. Install with:", file=sys.stderr)
|
||||
print(" python3 -m pip install -r tools/requirements.txt", file=sys.stderr)
|
||||
raise
|
||||
|
||||
|
||||
def sha256_ascii(s: str) -> bytes:
|
||||
return hashlib.sha256(s.encode()).digest()
|
||||
|
||||
|
||||
def to_compact_signature_hex(sk: SigningKey, nonce_ascii: str) -> str:
|
||||
digest = sha256_ascii(nonce_ascii)
|
||||
sig = sk.sign_digest(digest, sigencode=util.sigencode_string) # 64 bytes r||s
|
||||
return sig.hex()
|
||||
|
||||
|
||||
def compressed_pubkey_hex(vk: VerifyingKey) -> str:
|
||||
# Prefer compressed output if library supports it directly (ecdsa>=0.18)
|
||||
try:
|
||||
return vk.to_string("compressed").hex()
|
||||
except TypeError:
|
||||
# Manual compression (02/03 + X)
|
||||
p = vk.pubkey.point
|
||||
x = p.x()
|
||||
y = p.y()
|
||||
prefix = b"\x02" if (y % 2 == 0) else b"\x03"
|
||||
return (prefix + x.to_bytes(32, "big")).hex()
|
||||
|
||||
|
||||
def generate_or_load_sk(priv_hex: Optional[str]) -> Tuple[SigningKey, bool]:
|
||||
if priv_hex:
|
||||
if len(priv_hex) != 64:
|
||||
raise ValueError("Provided --priv must be 64 hex chars (32 bytes).")
|
||||
return SigningKey.from_string(bytes.fromhex(priv_hex), curve=SECP256k1), False
|
||||
return SigningKey.generate(curve=SECP256k1), True
|
||||
|
||||
|
||||
def run(nonce: str, priv_hex: Optional[str], as_json: bool) -> int:
|
||||
sk, generated = generate_or_load_sk(priv_hex)
|
||||
vk = sk.get_verifying_key()
|
||||
|
||||
out: Dict[str, str] = {
|
||||
"PUBLIC_HEX": compressed_pubkey_hex(vk),
|
||||
"NONCE": nonce,
|
||||
"SIGNATURE_HEX": to_compact_signature_hex(sk, nonce),
|
||||
}
|
||||
# Always print the private key for convenience (either generated or provided)
|
||||
out["PRIVATE_HEX"] = sk.to_string().hex()
|
||||
|
||||
if as_json:
|
||||
print(json.dumps(out, separators=(",", ":")))
|
||||
else:
|
||||
# key=value form for easy copy/paste
|
||||
print(f"PRIVATE_HEX={out['PRIVATE_HEX']}")
|
||||
print(f"PUBLIC_HEX={out['PUBLIC_HEX']}")
|
||||
print(f"NONCE={out['NONCE']}")
|
||||
print(f"SIGNATURE_HEX={out['SIGNATURE_HEX']}")
|
||||
return 0
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(description="Generate secp256k1 auth material and signature for a nonce.")
|
||||
parser.add_argument("--nonce", required=True, help="Nonce string returned by fetch_nonce (paste as-is)")
|
||||
parser.add_argument("--priv", help="Existing private key hex (64 hex chars). If omitted, a new keypair is generated.")
|
||||
parser.add_argument("--json", action="store_true", help="Output JSON instead of key=value lines.")
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
return run(args.nonce, args.priv, args.json)
|
||||
except Exception as e:
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
124
tools/gen_auth.sh
Executable file
@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
usage() {
|
||||
cat <<'USAGE'
|
||||
Usage:
|
||||
gen_auth.sh --nonce "<nonce_string>" [--priv <private_key_hex>] [--json]
|
||||
|
||||
Options:
|
||||
--nonce The nonce string returned by fetch_nonce (paste as-is).
|
||||
--priv Optional private key hex (64 hex chars). If omitted, a new key is generated.
|
||||
--json Output JSON instead of plain KEY=VALUE lines.
|
||||
|
||||
Outputs:
|
||||
PRIVATE_HEX Private key hex (only when generated, or echoed back if provided)
|
||||
PUBLIC_HEX Compressed secp256k1 public key hex (33 bytes, 66 hex chars)
|
||||
NONCE The nonce string you passed in
|
||||
SIGNATURE_HEX Compact ECDSA signature hex (64 bytes, 128 hex chars)
|
||||
|
||||
Notes:
|
||||
- The signature is produced by signing sha256(nonce_ascii) and encoded as compact r||s (64 bytes),
|
||||
which matches the server/client behavior ([interfaces/openrpc/client/src/auth.rs](interfaces/openrpc/client/src/auth.rs:55), [interfaces/openrpc/server/src/auth.rs](interfaces/openrpc/server/src/auth.rs:85)).
|
||||
USAGE
|
||||
}
|
||||
|
||||
NONCE=""
|
||||
PRIV_HEX=""
|
||||
OUT_JSON=0
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--nonce)
|
||||
NONCE="${2:-}"; shift 2 ;;
|
||||
--priv)
|
||||
PRIV_HEX="${2:-}"; shift 2 ;;
|
||||
--json)
|
||||
OUT_JSON=1; shift ;;
|
||||
-h|--help)
|
||||
usage; exit 0 ;;
|
||||
*)
|
||||
echo "Unknown arg: $1" >&2; usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "$NONCE" ]]; then
|
||||
echo "Error: --nonce is required" >&2
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v python3 >/dev/null 2>&1; then
|
||||
echo "Error: python3 not found. Install Python 3 (e.g., sudo pacman -S python) and retry." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure 'ecdsa' module is available; install to user site if missing.
|
||||
if ! python3 - <<'PY' >/dev/null 2>&1
|
||||
import importlib; importlib.import_module("ecdsa")
|
||||
PY
|
||||
then
|
||||
echo "Installing Python 'ecdsa' package in user site..." >&2
|
||||
if ! python3 -m pip install --user --quiet ecdsa; then
|
||||
echo "Error: failed to install 'ecdsa'. Install manually: python3 -m pip install --user ecdsa" >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Now run Python to generate/derive keys and sign the nonce (ASCII) with compact ECDSA.
|
||||
python3 - "$NONCE" "$PRIV_HEX" "$OUT_JSON" <<'PY'
|
||||
import sys, json, hashlib
|
||||
from ecdsa import SigningKey, VerifyingKey, SECP256k1, util
|
||||
|
||||
NONCE = sys.argv[1]
|
||||
PRIV_HEX = sys.argv[2]
|
||||
OUT_JSON = int(sys.argv[3]) == 1
|
||||
|
||||
def to_compact_signature(sk: SigningKey, msg_ascii: str) -> bytes:
|
||||
digest = hashlib.sha256(msg_ascii.encode()).digest()
|
||||
return sk.sign_digest(digest, sigencode=util.sigencode_string) # 64 bytes r||s
|
||||
|
||||
def compressed_pubkey(vk: VerifyingKey) -> bytes:
|
||||
try:
|
||||
return vk.to_string("compressed")
|
||||
except TypeError:
|
||||
p = vk.pubkey.point
|
||||
x = p.x()
|
||||
y = p.y()
|
||||
prefix = b'\x02' if (y % 2 == 0) else b'\x03'
|
||||
return prefix + x.to_bytes(32, "big")
|
||||
|
||||
generated = False
|
||||
if PRIV_HEX:
|
||||
if len(PRIV_HEX) != 64:
|
||||
print("ERROR: Provided --priv must be 64 hex chars", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
sk = SigningKey.from_string(bytes.fromhex(PRIV_HEX), curve=SECP256k1)
|
||||
else:
|
||||
sk = SigningKey.generate(curve=SECP256k1)
|
||||
generated = True
|
||||
|
||||
vk = sk.get_verifying_key()
|
||||
pub_hex = compressed_pubkey(vk).hex()
|
||||
sig_hex = to_compact_signature(sk, NONCE).hex()
|
||||
priv_hex = sk.to_string().hex()
|
||||
|
||||
out = {
|
||||
"PUBLIC_HEX": pub_hex,
|
||||
"NONCE": NONCE,
|
||||
"SIGNATURE_HEX": sig_hex,
|
||||
}
|
||||
if generated or PRIV_HEX:
|
||||
out["PRIVATE_HEX"] = priv_hex
|
||||
|
||||
if OUT_JSON:
|
||||
print(json.dumps(out, separators=(",", ":")))
|
||||
else:
|
||||
if "PRIVATE_HEX" in out:
|
||||
print(f"PRIVATE_HEX={out['PRIVATE_HEX']}")
|
||||
print(f"PUBLIC_HEX={out['PUBLIC_HEX']}")
|
||||
print(f"NONCE={out['NONCE']}")
|
||||
print(f"SIGNATURE_HEX={out['SIGNATURE_HEX']}")
|
||||
PY
|
||||
|
||||
# End
|
2
tools/requirements.txt
Normal file
@ -0,0 +1,2 @@
|
||||
ecdsa==0.18.0
|
||||
requests==2.32.3
|
204
tools/rpc_smoke_test.py
Normal file
@ -0,0 +1,204 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Non-destructive JSON-RPC smoke tests against the OpenRPC server.
|
||||
|
||||
Installs:
|
||||
python3 -m pip install -r tools/requirements.txt
|
||||
|
||||
Usage:
|
||||
# Default URL http://127.0.0.1:9944
|
||||
python tools/rpc_smoke_test.py
|
||||
|
||||
# Specify a different URL
|
||||
python tools/rpc_smoke_test.py --url http://127.0.0.1:9944
|
||||
|
||||
# Provide a specific pubkey for fetch_nonce (compressed 33-byte hex)
|
||||
python tools/rpc_smoke_test.py --pubkey 02deadbeef...
|
||||
|
||||
# Lookup details for first N jobs returned by list_jobs
|
||||
python tools/rpc_smoke_test.py --limit 5
|
||||
|
||||
What it tests (non-destructive):
|
||||
- fetch_nonce(pubkey) -> returns a nonce string from the server auth manager
|
||||
- whoami() -> returns a JSON string with basic server info
|
||||
- list_jobs() -> returns job IDs only (no mutation)
|
||||
- get_job_status(id) -> reads status (for up to --limit items)
|
||||
- get_job_output(id) -> reads output (for up to --limit items)
|
||||
- get_job_logs(id) -> reads logs (for up to --limit items)
|
||||
|
||||
Notes:
|
||||
- If you don't pass --pubkey, this script will generate a random secp256k1 keypair
|
||||
and derive a compressed public key (no persistence, just for testing fetch_nonce).
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except Exception:
|
||||
print("Missing dependency 'requests'. Install with:\n python3 -m pip install -r tools/requirements.txt", file=sys.stderr)
|
||||
raise
|
||||
|
||||
try:
|
||||
from ecdsa import SigningKey, SECP256k1
|
||||
except Exception:
|
||||
# ecdsa is optional here; only used to generate a test pubkey if --pubkey is absent
|
||||
SigningKey = None # type: ignore
|
||||
|
||||
|
||||
def ensure_http_url(url: str) -> str:
|
||||
if url.startswith("http://") or url.startswith("https://"):
|
||||
return url
|
||||
# Accept ws:// scheme too; convert to http for JSON-RPC over HTTP
|
||||
if url.startswith("ws://"):
|
||||
return "http://" + url[len("ws://") :]
|
||||
if url.startswith("wss://"):
|
||||
return "https://" + url[len("wss://") :]
|
||||
# Default to http if no scheme
|
||||
return "http://" + url
|
||||
|
||||
|
||||
class JsonRpcClient:
|
||||
def __init__(self, url: str):
|
||||
self.url = ensure_http_url(url)
|
||||
self._id = int(time.time() * 1000)
|
||||
|
||||
def call(self, method: str, params: Any) -> Any:
|
||||
self._id += 1
|
||||
payload = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": self._id,
|
||||
"method": method,
|
||||
"params": params,
|
||||
}
|
||||
resp = requests.post(self.url, json=payload, timeout=30)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
if "error" in data and data["error"] is not None:
|
||||
raise RuntimeError(f"RPC error for {method}: {data['error']}")
|
||||
return data.get("result")
|
||||
|
||||
|
||||
def random_compressed_pubkey_hex() -> str:
|
||||
"""
|
||||
Generate a random secp256k1 keypair and return compressed public key hex.
|
||||
Requires 'ecdsa'. If unavailable, raise an informative error.
|
||||
"""
|
||||
if SigningKey is None:
|
||||
raise RuntimeError(
|
||||
"ecdsa not installed; either install with:\n"
|
||||
" python3 -m pip install -r tools/requirements.txt\n"
|
||||
"or pass --pubkey explicitly."
|
||||
)
|
||||
sk = SigningKey.generate(curve=SECP256k1)
|
||||
vk = sk.get_verifying_key()
|
||||
try:
|
||||
comp = vk.to_string("compressed")
|
||||
except TypeError:
|
||||
# Manual compression
|
||||
p = vk.pubkey.point
|
||||
x = p.x()
|
||||
y = p.y()
|
||||
prefix = b"\x02" if (y % 2 == 0) else b"\x03"
|
||||
comp = prefix + x.to_bytes(32, "big")
|
||||
return comp.hex()
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(description="Non-destructive RPC smoke tests")
|
||||
parser.add_argument("--url", default=os.environ.get("RPC_URL", "http://127.0.0.1:9944"),
|
||||
help="RPC server URL (http[s]://host:port or ws[s]://host:port)")
|
||||
parser.add_argument("--pubkey", help="Compressed secp256k1 public key hex (33 bytes, 66 hex chars)")
|
||||
parser.add_argument("--limit", type=int, default=3, help="Number of job IDs to detail from list_jobs()")
|
||||
args = parser.parse_args()
|
||||
|
||||
client = JsonRpcClient(args.url)
|
||||
|
||||
print(f"[rpc] URL: {client.url}")
|
||||
|
||||
# 1) fetch_nonce
|
||||
pubkey = args.pubkey or random_compressed_pubkey_hex()
|
||||
print(f"[rpc] fetch_nonce(pubkey={pubkey[:10]}...):", end=" ")
|
||||
try:
|
||||
nonce = client.call("fetch_nonce", [pubkey])
|
||||
print("OK")
|
||||
print(f" nonce: {nonce}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
return 1
|
||||
|
||||
# 2) whoami
|
||||
print("[rpc] whoami():", end=" ")
|
||||
try:
|
||||
who = client.call("whoami", [])
|
||||
print("OK")
|
||||
print(f" whoami: {who}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
return 1
|
||||
|
||||
# 3) list_jobs
|
||||
print("[rpc] list_jobs():", end=" ")
|
||||
try:
|
||||
job_ids: List[str] = client.call("list_jobs", [])
|
||||
print("OK")
|
||||
print(f" total: {len(job_ids)}")
|
||||
for i, jid in enumerate(job_ids[: max(0, args.limit)]):
|
||||
print(f" [{i}] {jid}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
return 1
|
||||
|
||||
# 4) For a few jobs, query status/output/logs
|
||||
detail_count = 0
|
||||
for jid in job_ids[: max(0, args.limit)] if 'job_ids' in locals() else []:
|
||||
print(f"[rpc] get_job_status({jid}):", end=" ")
|
||||
try:
|
||||
st = client.call("get_job_status", [jid])
|
||||
print("OK")
|
||||
print(f" status: {st}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
|
||||
print(f"[rpc] get_job_output({jid}):", end=" ")
|
||||
try:
|
||||
out = client.call("get_job_output", [jid])
|
||||
print("OK")
|
||||
snippet = (out if isinstance(out, str) else json.dumps(out))[:120]
|
||||
print(f" output: {snippet}{'...' if len(snippet)==120 else ''}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
|
||||
print(f"[rpc] get_job_logs({jid}):", end=" ")
|
||||
try:
|
||||
logs_obj = client.call("get_job_logs", [jid]) # { logs: String | null }
|
||||
print("OK")
|
||||
logs = logs_obj.get("logs") if isinstance(logs_obj, dict) else None
|
||||
if logs is None:
|
||||
print(" logs: (no logs)")
|
||||
else:
|
||||
snippet = logs[:120]
|
||||
print(f" logs: {snippet}{'...' if len(snippet)==120 else ''}")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
|
||||
detail_count += 1
|
||||
|
||||
print("\nSmoke tests complete.")
|
||||
print("Summary:")
|
||||
print(f" whoami tested")
|
||||
print(f" fetch_nonce tested (pubkey provided/generated)")
|
||||
print(f" list_jobs tested (count printed)")
|
||||
print(f" detailed queries for up to {detail_count} job(s) (status/output/logs)")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|