Compare commits: 0df79e78c6...main (9 commits)

Commits:
- 121eee3ccd
- 0749a423bd
- 9a509f95cc
- c8fbc6680b
- 54b1b0adf5
- 0ebda7c1aa
- 04a1af2423
- 337ec2f660
- 69e612e521
Cargo.lock (generated): 1457 changed lines; file diff suppressed because it is too large.
@@ -53,13 +53,14 @@ uuid = { version = "1.6", features = ["v4", "serde"] }
 [workspace]
 members = [
-    "interfaces/unix/client",
-    "interfaces/unix/server",
     "interfaces/websocket/client",
     "interfaces/websocket/server",
     "core/supervisor",
     "core/actor",
     "core/job", "interfaces/websocket/examples",
     "proxies/http",
+    "interfaces/openrpc/client",
+    "interfaces/openrpc/server",
+    "frontend/baobap-frontend"
 ]
 resolver = "2" # Recommended for new workspaces
 
auth_keys_rpc_test_flow.md (new file, 87 lines)

1. Generate a keypair locally (public key is safe to share)
   - `python tools/gen_auth.py --nonce init`
   - Copy PUBLIC_HEX (compressed 33-byte hex, 66 chars). PRIVATE_HEX is your secret; keep it safe.
   - Example output:

```
PRIVATE_HEX=5d38d57c83ef1845032fdee1c954958b66912218744ea31d0bc61a07115b6b93
PUBLIC_HEX=0270c0fe3599e82f7142d349fc88e47b07077a43fa00b0fe218ee7bdef4b42d316
NONCE=init
SIGNATURE_HEX=1b109a464c8a6326e66e7bd2caf4c537611f24c6e5e74b0003dc2d5025b6cd6ed180417eacf540938fb306d46d8ebeeed1e6e6c6b69f536d62144baf4a13a139
```

2. Fetch a real nonce from the server
   - In the hero-openrpc-client menu, choose fetch_nonce
   - Paste PUBLIC_HEX when prompted
   - Copy the returned nonce string (the exact ASCII hex string)
   - Example output:

```
7428f639c215b5ab655283632a39fbd8dc713805cc3b7b0a84c99a5f0e7d5465
```

3. Sign the nonce locally
   - `python tools/gen_auth.py --nonce "PASTE_NONCE" --priv "YOUR_PRIVATE_HEX"`
   - Copy SIGNATURE_HEX
   - Example output:

```
PRIVATE_HEX=5d38d57c83ef1845032fdee1c954958b66912218744ea31d0bc61a07115b6b93
PUBLIC_HEX=0270c0fe3599e82f7142d349fc88e47b07077a43fa00b0fe218ee7bdef4b42d316
NONCE=7428f639c215b5ab655283632a39fbd8dc713805cc3b7b0a84c99a5f0e7d5465
SIGNATURE_HEX=47dca63f191f328ca9404843a1b3229e4e2affb85ff41dad8125320be3ee07507222c809876d5faa93bfafebdff9e9aef9e17d0b7792d7fcac4d19c92a4b303f
```

4. Authenticate
   - In the hero-openrpc-client menu, choose authenticate
   - Public key (hex): PUBLIC_HEX
   - Signature (hex): SIGNATURE_HEX
   - Nonce (hex): PASTE_NONCE

   After success, whoami should return an authenticated state (basic placeholder in this phase; see interfaces/openrpc/server/src/lib.rs).

5. Run `python tools/rpc_smoke_test.py`
   - Example output:

```
[rpc] URL: http://127.0.0.1:9944
[rpc] fetch_nonce(pubkey=03fc656cda...): OK
  nonce: 4317af6ef04605c7e61ec4759611345f7288497564784cc08afc158553e5ecf1
[rpc] whoami(): OK
  whoami: {"authenticated":true,"user_id":"anonymous"}
[rpc] list_jobs(): OK
  total: 3
  [0] 5f8b4951-35de-4568-8906-a5e9598729e1
  [1] 8a0ee6ea-c053-4b72-807a-568c959f5188
  [2] 1f929972-3aa5-40c6-af46-6cb81f5a0bae
[rpc] get_job_status(5f8b4951-35de-4568-8906-a5e9598729e1): OK
  status: Finished
[rpc] get_job_output(5f8b4951-35de-4568-8906-a5e9598729e1): OK
  output: 17
[rpc] get_job_logs(5f8b4951-35de-4568-8906-a5e9598729e1): OK
  logs: (no logs)
[rpc] get_job_status(8a0ee6ea-c053-4b72-807a-568c959f5188): OK
  status: Finished
[rpc] get_job_output(8a0ee6ea-c053-4b72-807a-568c959f5188): OK
  output: 43
[rpc] get_job_logs(8a0ee6ea-c053-4b72-807a-568c959f5188): OK
  logs: (no logs)
[rpc] get_job_status(1f929972-3aa5-40c6-af46-6cb81f5a0bae): OK
  status: Finished
[rpc] get_job_output(1f929972-3aa5-40c6-af46-6cb81f5a0bae): OK
  output: 43
[rpc] get_job_logs(1f929972-3aa5-40c6-af46-6cb81f5a0bae): OK
  logs: (no logs)

Smoke tests complete.
Summary:
  whoami tested
  fetch_nonce tested (pubkey provided/generated)
  list_jobs tested (count printed)
  detailed queries for up to 3 job(s) (status/output/logs)
```
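The same flow can be scripted against the OpenRPC client crate introduced in this change set. The sketch below is illustrative only: `authenticate(pubkey, signature, nonce)` and `whoami()` match the client trait in interfaces/openrpc/client, but the crate path, the `connect` constructor, and the `fetch_nonce` client signature are assumptions, and the signing step happens out of band (tools/gen_auth.py in the manual flow).

```rust
// Illustrative sketch; see the hedges in the paragraph above.
use hero_openrpc_client::{ClientTransport, HeroOpenRpcClient}; // crate path assumed

async fn auth_flow(pubkey_hex: String, sign_nonce: impl Fn(&str) -> String) -> anyhow::Result<()> {
    // Constructor name assumed; the client binary builds ClientTransport::WebSocket(url) the same way.
    let client = HeroOpenRpcClient::connect(ClientTransport::WebSocket(
        "ws://127.0.0.1:9944".to_string(),
    ))
    .await?;

    // Step 2: fetch a nonce bound to this public key (fetch_nonce client signature assumed).
    let nonce: String = client.fetch_nonce(pubkey_hex.clone()).await?;

    // Step 3: sign the nonce out of band, e.g. with tools/gen_auth.py.
    let signature_hex = sign_nonce(&nonce);

    // Step 4: authenticate, then confirm with whoami (returns a JSON string in this phase).
    let authenticated = client.authenticate(pubkey_hex, signature_hex, nonce).await?;
    println!("authenticated: {}", authenticated);
    println!("whoami: {}", client.whoami().await?);
    Ok(())
}
```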
@@ -24,4 +24,4 @@ tls = false
 # OSIS Actor Configuration
 # Handles OSIS (HeroScript) execution
 [osis_actor]
-binary_path = "actor_osis"
+binary_path = "/home/maxime/actor_osis/target/debug/actor_osis"
 
@@ -27,7 +27,8 @@
 //! └───────────────┘
 //! ```
 
-use hero_job::Job;
+use hero_job::{Job, ScriptType};
+use hero_job::keys;
 use log::{debug, error, info};
 use redis::AsyncCommands;
 

@@ -36,7 +37,7 @@ use std::time::Duration;
 use tokio::sync::mpsc;
 use tokio::task::JoinHandle;
 
-use crate::{initialize_redis_connection, NAMESPACE_PREFIX, BLPOP_TIMEOUT_SECONDS};
+use crate::{initialize_redis_connection, BLPOP_TIMEOUT_SECONDS};
 
 /// Configuration for actor instances
 #[derive(Debug, Clone)]

@@ -44,7 +45,6 @@ pub struct ActorConfig {
     pub actor_id: String,
     pub db_path: String,
     pub redis_url: String,
-    pub preserve_tasks: bool,
     pub default_timeout: Option<Duration>, // Only used by async actors
 }
 

@@ -54,13 +54,11 @@ impl ActorConfig {
         actor_id: String,
         db_path: String,
         redis_url: String,
-        preserve_tasks: bool,
     ) -> Self {
         Self {
             actor_id,
             db_path,
             redis_url,
-            preserve_tasks,
             default_timeout: None,
         }
     }

@@ -123,11 +121,14 @@ pub trait Actor: Send + Sync + 'static {
         tokio::spawn(async move {
             let actor_id = self.actor_id();
             let redis_url = self.redis_url();
-            let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id);
+            // Canonical work queue based on script type (instance/group selection can be added later)
+            let script_type = derive_script_type_from_actor_id(actor_id);
+            let queue_key = keys::work_type(&script_type);
             info!(
-                "{} Actor '{}' starting. Connecting to Redis at {}. Listening on queue: {}",
+                "{} Actor '{}' starting. Type {:?}. Connecting to Redis at {}. Listening on queue: {}",
                 self.actor_type(),
                 actor_id,
+                script_type,
                 redis_url,
                 queue_key
             );

@@ -254,78 +255,18 @@ pub fn spawn_actor<W: Actor>(
     actor.spawn(shutdown_rx)
 }
 
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::engine::create_heromodels_engine;
-
-    // Mock actor for testing
-    struct MockActor;
-
-    #[async_trait::async_trait]
-    impl Actor for MockActor {
-        async fn process_job(
-            &self,
-            _job: Job,
-            _redis_conn: &mut redis::aio::MultiplexedConnection,
-        ) {
-            // Mock implementation - do nothing
-            // Engine would be owned by the actor implementation as a field
-        }
-
-        fn actor_type(&self) -> &'static str {
-            "Mock"
-        }
-
-        fn actor_id(&self) -> &str {
-            "mock_actor"
-        }
-
-        fn redis_url(&self) -> &str {
-            "redis://localhost:6379"
-        }
-    }
-
-    #[tokio::test]
-    async fn test_actor_config_creation() {
-        let config = ActorConfig::new(
-            "test_actor".to_string(),
-            "/tmp".to_string(),
-            "redis://localhost:6379".to_string(),
-            false,
-        );
-
-        assert_eq!(config.actor_id, "test_actor");
-        assert_eq!(config.db_path, "/tmp");
-        assert_eq!(config.redis_url, "redis://localhost:6379");
-        assert!(!config.preserve_tasks);
-        assert!(config.default_timeout.is_none());
-    }
-
-    #[tokio::test]
-    async fn test_actor_config_with_timeout() {
-        let timeout = Duration::from_secs(300);
-        let config = ActorConfig::new(
-            "test_actor".to_string(),
-            "/tmp".to_string(),
-            "redis://localhost:6379".to_string(),
-            false,
-        ).with_default_timeout(timeout);
-
-        assert_eq!(config.default_timeout, Some(timeout));
-    }
-
-    #[tokio::test]
-    async fn test_spawn_actor_function() {
-        let (_shutdown_tx, shutdown_rx) = mpsc::channel(1);
-        let actor = Arc::new(MockActor);
-
-        let handle = spawn_actor(actor, shutdown_rx);
-
-        // The actor should be created successfully
-        assert!(!handle.is_finished());
-
-        // Abort the actor for cleanup
-        handle.abort();
-    }
-}
+fn derive_script_type_from_actor_id(actor_id: &str) -> ScriptType {
+    let lower = actor_id.to_lowercase();
+    if lower.contains("sal") {
+        ScriptType::SAL
+    } else if lower.contains("osis") {
+        ScriptType::OSIS
+    } else if lower.contains("python") {
+        ScriptType::Python
+    } else if lower.contains("v") {
+        ScriptType::V
+    } else {
+        // Default to OSIS when uncertain
+        ScriptType::OSIS
+    }
+}
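Two copies of `derive_script_type_from_actor_id` land in this change: the trait-based actor above maps any ID containing the letter "v" to `ScriptType::V`, while the Rhai actor version (below) only matches `"v"`, `":v"`, or `" v"`. Either way, the queue is then derived from the type alone, roughly as in this sketch; the literal suffix strings come from `ScriptType::actor_queue_suffix()`, which is not shown in this diff, so the values in the comments are assumptions.

```rust
// Sketch only: derive_script_type_from_actor_id is private to the actor module,
// and the queue suffixes ("osis", "sal", ...) are assumed lowercase type names.
fn queue_for_actor(actor_id: &str) -> String {
    let script_type = derive_script_type_from_actor_id(actor_id);
    // e.g. "osis_actor_1" -> ScriptType::OSIS -> "hero:q:work:type:osis" (suffix assumed)
    hero_job::keys::work_type(&script_type)
}
```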
@@ -1,4 +1,5 @@
-use hero_job::{Job, JobStatus};
+use hero_job::{Job, JobStatus, ScriptType};
+use hero_job::keys;
 use log::{debug, error, info};
 use redis::AsyncCommands;
 use rhai::{Dynamic, Engine};

@@ -217,10 +218,11 @@ pub fn spawn_rhai_actor(
     preserve_tasks: bool,
 ) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
     tokio::spawn(async move {
-        let queue_key = format!("{}{}", NAMESPACE_PREFIX, actor_id);
+        let script_type = derive_script_type_from_actor_id(&actor_id);
+        let queue_key = keys::work_type(&script_type);
         info!(
-            "Rhai Actor for Actor ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
-            actor_id, redis_url, queue_key
+            "Rhai Actor '{}' starting. Type {:?}. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
+            actor_id, script_type, redis_url, queue_key
         );
 
         let mut redis_conn = initialize_redis_connection(&actor_id, &redis_url).await?;

@@ -259,6 +261,23 @@ pub fn spawn_rhai_actor(
     })
 }
 
+// Helper to derive script type from actor_id for canonical queue selection
+fn derive_script_type_from_actor_id(actor_id: &str) -> ScriptType {
+    let lower = actor_id.to_lowercase();
+    if lower.contains("sal") {
+        ScriptType::SAL
+    } else if lower.contains("osis") {
+        ScriptType::OSIS
+    } else if lower.contains("python") {
+        ScriptType::Python
+    } else if lower == "v" || lower.contains(":v") || lower.contains(" v") {
+        ScriptType::V
+    } else {
+        // Default to OSIS when uncertain
+        ScriptType::OSIS
+    }
+}
+
 // Re-export the main trait-based interface for convenience
 pub use actor_trait::{Actor, ActorConfig, spawn_actor};
 
@@ -10,7 +10,8 @@ use crossterm::{
     execute,
 };
 use hero_job::{Job, JobStatus, ScriptType};
-use log::{error, info};
+use hero_job::keys;
+
 use ratatui::{
     backend::{Backend, CrosstermBackend},
     layout::{Alignment, Constraint, Direction, Layout, Rect},

@@ -215,9 +216,7 @@ impl App {
         };
 
         if let Some(dir) = example_dir {
-            if let Err(e) = app.load_examples_tree(dir) {
-                error!("Failed to load examples tree: {}", e);
-            }
+            let _ = app.load_examples_tree(dir);
         }
 
         Ok(app)

@@ -225,32 +224,12 @@ impl App {
 
     pub fn load_examples_tree(&mut self, dir: PathBuf) -> Result<()> {
         if !dir.exists() {
-            log::warn!("Examples directory does not exist: {:?}", dir);
             return Ok(());
         }
 
-        log::info!("Loading examples tree from: {:?}", dir);
-
         // Load hierarchical tree structure
-        match self.load_example_tree(&dir) {
-            Ok(tree) => {
-                self.example_tree = tree;
-                log::info!("Successfully loaded {} top-level tree nodes", self.example_tree.len());
-            }
-            Err(e) => {
-                log::error!("Failed to load example tree: {}", e);
-                // Create a simple fallback structure
-                self.example_tree = vec![
-                    ExampleTreeNode::File {
-                        name: "Error loading examples".to_string(),
-                        path: dir.join("error.rhai"),
-                    }
-                ];
-            }
-        }
+        self.example_tree = self.load_example_tree(&dir)?;
 
         self.rebuild_tree_items();
-        log::info!("Rebuilt tree items: {} total items", self.example_tree_items.len());
 
         if !self.example_tree_items.is_empty() {
             self.example_tree_state.select(Some(0));

@@ -271,13 +250,9 @@ impl App {
     fn load_example_tree(&self, dir: &PathBuf) -> Result<Vec<ExampleTreeNode>> {
         let mut nodes = Vec::new();
 
-        log::debug!("Loading directory: {:?}", dir);
-
         let mut entries: Vec<_> = fs::read_dir(dir)?
             .collect::<Result<Vec<_>, _>>()?;
 
-        log::debug!("Found {} entries in {:?}", entries.len(), dir);
-
         // Sort entries: directories first, then files, both alphabetically
         entries.sort_by(|a, b| {
             let a_is_dir = a.path().is_dir();

@@ -295,17 +270,14 @@ impl App {
             let name = entry.file_name().to_string_lossy().to_string();
 
             if path.is_dir() {
-                log::debug!("Loading folder: {}", name);
                 let children = self.load_example_tree(&path)?;
-                log::debug!("Folder '{}' has {} children", name, children.len());
                 nodes.push(ExampleTreeNode::Folder {
                     name,
                     path,
                     children,
-                    expanded: true, // Expand folders by default to show hierarchy
+                    expanded: false, // Folders collapsed by default
                 });
             } else if path.extension().map_or(false, |ext| ext == "rhai") {
-                log::debug!("Loading file: {}", name);
                 nodes.push(ExampleTreeNode::File {
                     name: path.file_stem()
                         .and_then(|s| s.to_str())

@@ -319,55 +291,17 @@ impl App {
         Ok(nodes)
     }
 
-    fn load_flat_examples(&mut self, dir: &PathBuf) -> Result<()> {
-        let mut examples = Vec::new();
-        self.collect_all_rhai_files(&mut examples, dir)?;
-        self.examples = examples;
-
-        if !self.examples.is_empty() {
-            self.example_list_state.select(Some(0));
-            if self.selected_example.is_none() {
-                self.selected_example = Some(self.examples[0].clone());
-            }
-        }
-
-        Ok(())
-    }
-
-    fn collect_all_rhai_files(&self, examples: &mut Vec<ExampleScript>, dir: &PathBuf) -> Result<()> {
-        for entry in fs::read_dir(dir)? {
-            let entry = entry?;
-            let path = entry.path();
-
-            if path.is_dir() {
-                self.collect_all_rhai_files(examples, &path)?;
-            } else if path.extension().map_or(false, |ext| ext == "rhai") {
-                if let Some(name) = path.file_stem().and_then(|s| s.to_str()) {
-                    examples.push(ExampleScript {
-                        name: name.to_string(),
-                        path,
-                    });
-                }
-            }
-        }
-        Ok(())
-    }
-
     fn rebuild_tree_items(&mut self) {
         self.example_tree_items.clear();
-        log::info!("Rebuilding tree items from {} root nodes", self.example_tree.len());
         let mut index = 0;
         let tree_clone = self.example_tree.clone();
         for node in &tree_clone {
-            log::info!("Processing root node: {:?}", node.name());
             self.add_tree_items_recursive(node, 0, &mut index);
         }
-        log::info!("Final tree items count: {}", self.example_tree_items.len());
     }
 
     fn add_tree_items_recursive(&mut self, node: &ExampleTreeNode, depth: usize, index: &mut usize) {
         // Always add the current node to the flattened list
-        log::debug!("Adding tree item: {} at depth {}", node.name(), depth);
         self.example_tree_items.push(ExampleTreeItem {
             node: node.clone(),
             depth,

@@ -377,7 +311,6 @@ impl App {
 
         // For folders, add children only if the folder is expanded
         if let ExampleTreeNode::Folder { children, expanded, .. } = node {
-            log::debug!("Folder '{}' has {} children, expanded: {}", node.name(), children.len(), expanded);
             if *expanded {
                 for child in children {
                     self.add_tree_items_recursive(child, depth + 1, index);

@@ -525,9 +458,9 @@ impl App {
         let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
         job.store_in_redis(&mut conn).await?;
 
-        // Add to work queue
-        let queue_name = format!("hero:job:actor_queue:{}", self.actor_id.to_lowercase());
-        let _: () = conn.lpush(&queue_name, &job_id).await?;
+        // Add to work queue (canonical type queue)
+        let queue_name = keys::work_type(&self.job_form.script_type);
+        let _: () = conn.lpush(&queue_name, &job.id).await?;
 
         self.status_message = Some(format!("Job {} dispatched successfully", job_id));
 

@@ -765,9 +698,7 @@ impl App {
 /// Setup terminal and run the TUI application
 pub async fn setup_and_run_tui(mut app: App) -> Result<()> {
     // Initial job refresh
-    if let Err(e) = app.refresh_jobs().await {
-        error!("Failed to refresh jobs: {}", e);
-    }
+    let _ = app.refresh_jobs().await;
 
     // Setup terminal
     enable_raw_mode()?;

@@ -1395,6 +1326,8 @@ async fn run_app<B: Backend>(terminal: &mut Terminal<B>, mut app: App) -> Result
 
         if last_tick.elapsed() >= tick_rate {
             last_tick = Instant::now();
+            // Refresh job data to show real-time updates
+            let _ = app.refresh_jobs().await;
         }
 
         if app.should_quit {
@@ -387,3 +387,47 @@ impl Job {
         Ok(job_ids)
     }
 }
+
+// Canonical Redis key builders for queues and hashes
+pub mod keys {
+    use super::{NAMESPACE_PREFIX, ScriptType};
+
+    // hero:job:{job_id}
+    pub fn job_hash(job_id: &str) -> String {
+        format!("{}{}", NAMESPACE_PREFIX, job_id)
+    }
+
+    // hero:q:reply:{job_id}
+    pub fn reply(job_id: &str) -> String {
+        format!("hero:q:reply:{}", job_id)
+    }
+
+    // hero:q:work:type:{script_type}
+    pub fn work_type(script_type: &ScriptType) -> String {
+        format!("hero:q:work:type:{}", script_type.actor_queue_suffix())
+    }
+
+    // hero:q:work:type:{script_type}:group:{group}
+    pub fn work_group(script_type: &ScriptType, group: &str) -> String {
+        format!(
+            "hero:q:work:type:{}:group:{}",
+            script_type.actor_queue_suffix(),
+            group
+        )
+    }
+
+    // hero:q:work:type:{script_type}:group:{group}:inst:{instance}
+    pub fn work_instance(script_type: &ScriptType, group: &str, instance: &str) -> String {
+        format!(
+            "hero:q:work:type:{}:group:{}:inst:{}",
+            script_type.actor_queue_suffix(),
+            group,
+            instance
+        )
+    }
+
+    // hero:q:ctl:type:{script_type}
+    pub fn stop_type(script_type: &ScriptType) -> String {
+        format!("hero:q:ctl:type:{}", script_type.actor_queue_suffix())
+    }
+}
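Taken together, the builders above define the canonical Redis layout this change migrates to. A hedged usage sketch follows: the `{suffix}` parts come from `ScriptType::actor_queue_suffix()` (not shown here), and `NAMESPACE_PREFIX` appears to be `hero:job:` judging by the `hero:job:{job_id}` comment.

```rust
use hero_job::{keys, ScriptType};

fn show_key_layout(job_id: &str) {
    println!("{}", keys::job_hash(job_id));                                 // hero:job:{job_id} hash (script, output, error, ...)
    println!("{}", keys::reply(job_id));                                    // hero:q:reply:{job_id} optional reply queue
    println!("{}", keys::work_type(&ScriptType::OSIS));                     // hero:q:work:type:{suffix} shared work queue
    println!("{}", keys::work_group(&ScriptType::OSIS, "default"));         // narrower group queue (reserved for later routing)
    println!("{}", keys::work_instance(&ScriptType::OSIS, "default", "1")); // per-instance queue
    println!("{}", keys::stop_type(&ScriptType::OSIS));                     // hero:q:ctl:type:{suffix} control queue
}
```

Note that the supervisor's stop path in this same change still pushes to `hero:stop_queue:{suffix}` rather than `keys::stop_type`, so the control-queue builder is not yet wired in everywhere.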
core/supervisor/examples/simple_job.rs (new file, 52 lines)

use hero_supervisor::{SupervisorBuilder, ScriptType};
use hero_job::JobBuilder as CoreJobBuilder;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 1) Build a Supervisor
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://127.0.0.1/")
        .build()
        .await?;

    // 2) Build a Job (using core job builder to set caller_id, context_id)
    let job = CoreJobBuilder::new()
        .caller_id("02abc...caller") // required
        .context_id("02def...context") // required
        .script_type(ScriptType::OSIS) // select the OSIS actor (matches configured osis_actor_1)
        .script("40 + 3") // simple Rhai script
        .timeout(std::time::Duration::from_secs(10))
        .build()?; // returns hero_job::Job

    let job_id = job.id.clone();

    // 3a) Store the job in Redis
    supervisor.create_job(&job).await?;

    // 3b) Start the job (pushes ID to the actor's Redis queue)
    supervisor.start_job(&job_id).await?;

    // 3c) Wait until finished, then fetch output
    use tokio::time::sleep;

    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(10);
    loop {
        let status = supervisor.get_job_status(&job_id).await?;
        if status == hero_supervisor::JobStatus::Finished {
            break;
        }
        if std::time::Instant::now() >= deadline {
            println!("Job {} timed out waiting for completion (status: {:?})", job_id, status);
            break;
        }
        sleep(std::time::Duration::from_millis(250)).await;
    }

    if let Some(output) = supervisor.get_job_output(&job_id).await? {
        println!("Job {} output: {}", job_id, output);
    } else {
        println!("Job {} completed with no output field set", job_id);
    }

    Ok(())
}
@@ -408,7 +408,8 @@ impl Supervisor {
 
     /// Get the hardcoded actor queue key for the script type
     fn get_actor_queue_key(&self, script_type: &ScriptType) -> String {
-        format!("{}actor_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix())
+        // Canonical type queue
+        hero_job::keys::work_type(script_type)
     }
 
     pub fn new_job(&self) -> JobBuilder {

@@ -586,14 +587,9 @@ impl Supervisor {
         job_id: String,
         script_type: &ScriptType
     ) -> Result<(), SupervisorError> {
-        let actor_queue_key = self.get_actor_queue_key(script_type);
-        // lpush also infers its types, RV is typically i64 (length of list) or () depending on exact command variant
-        // For `redis::AsyncCommands::lpush`, it's `RedisResult<R>` where R: FromRedisValue
-        // Often this is the length of the list. Let's allow inference or specify if needed.
-        let _: redis::RedisResult<i64> =
-            conn.lpush(&actor_queue_key, job_id.clone()).await;
+        // Canonical dispatch to type queue
+        let actor_queue_key = hero_job::keys::work_type(script_type);
+        let _: redis::RedisResult<i64> = conn.lpush(&actor_queue_key, job_id.clone()).await;
 
         Ok(())
     }

@@ -675,7 +671,8 @@ impl Supervisor {
     ) -> Result<String, SupervisorError> {
         let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
 
-        let reply_queue_key = format!("{}:reply:{}", NAMESPACE_PREFIX, job.id); // Derived from the passed job_id
+        // Canonical reply queue
+        let reply_queue_key = hero_job::keys::reply(&job.id);
 
         self.create_job_using_connection(
             &mut conn,

@@ -692,13 +689,48 @@ impl Supervisor {
             job.timeout
         );
 
-        self.await_response_from_connection(
-            &mut conn,
-            &job.id,
-            &reply_queue_key,
-            job.timeout,
-        )
-        .await
+        // Some actors update the job hash directly and do not use reply queues.
+        // Poll the job hash for output until timeout to support both models.
+        let start_time = std::time::Instant::now();
+
+        loop {
+            // If output is present in the job hash, return it immediately
+            match self.get_job_output(&job.id).await {
+                Ok(Some(output)) => {
+                    // Optional: cleanup reply queue in case it was created
+                    let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
+                    return Ok(output);
+                }
+                Ok(None) => {
+                    // Check for error state
+                    match self.get_job_status(&job.id).await {
+                        Ok(JobStatus::Error) => {
+                            // Try to read the error field for context
+                            let mut conn2 = self.redis_client.get_multiplexed_async_connection().await?;
+                            let job_key = format!("{}{}", NAMESPACE_PREFIX, job.id);
+                            let err: Option<String> = conn2.hget(&job_key, "error").await.ok();
+                            return Err(SupervisorError::InvalidInput(
+                                err.unwrap_or_else(|| "Job failed".to_string())
+                            ));
+                        }
+                        _ => {
+                            // keep polling
+                        }
+                    }
+                }
+                Err(_) => {
+                    // Ignore transient read errors and continue polling
+                }
+            }
+
+            if start_time.elapsed() >= job.timeout {
+                // On timeout, ensure any reply queue is cleaned up and return a Timeout error
+                let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
+                return Err(SupervisorError::Timeout(job.id.clone()));
+            }
+
+            tokio::time::sleep(std::time::Duration::from_millis(200)).await;
+        }
     }
 
     // Method to get job status

@@ -772,7 +804,7 @@ impl Supervisor {
         let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
 
         // Get job details to determine script type and appropriate actor
-        let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
+        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
         let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
 
         if job_data.is_empty() {

@@ -787,7 +819,8 @@ impl Supervisor {
             .map_err(|e| SupervisorError::InvalidInput(format!("Invalid script type: {}", e)))?;
 
         // Use hardcoded stop queue key for this script type
-        let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, script_type.actor_queue_suffix());
+        // Stop queue per protocol: hero:stop_queue:{suffix}
+        let stop_queue_key = format!("hero:stop_queue:{}", script_type.actor_queue_suffix());
 
         // Push job ID to the stop queue
         conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?;

@@ -799,7 +832,7 @@ impl Supervisor {
     /// Get logs for a job by reading from its log file
     pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, SupervisorError> {
         let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
-        let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
+        let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
 
         // Get the job data to find the log path
         let result_map: Option<std::collections::HashMap<String, String>> =

@@ -922,7 +955,7 @@ impl Supervisor {
         for job_id in ready_job_ids {
             // Get job data to determine script type and select actor
             let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
-            let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
+            let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
             let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
 
             if let Some(script_type_str) = job_data.get("script_type") {
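The polling variant of `run_job_and_await_result` above only terminates when the job hash carries a result, so actors are expected to write their outcome into that hash. A minimal sketch of that contract, assuming the fields the supervisor reads (`output`, `error`) plus a `status` field whose exact name is not shown in this diff:

```rust
use redis::AsyncCommands;

// Hypothetical helper an actor could use so the supervisor's polling loop
// picks the result up; only "output" and "error" are confirmed by this diff.
async fn finish_job(
    conn: &mut redis::aio::MultiplexedConnection,
    job_id: &str,
    output: &str,
) -> redis::RedisResult<()> {
    let job_key = hero_job::keys::job_hash(job_id); // "hero:job:{job_id}"
    let _: () = conn.hset(&job_key, "output", output).await?;
    let _: () = conn.hset(&job_key, "status", "Finished").await?; // field name assumed
    Ok(())
}
```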
@@ -265,11 +265,11 @@
       "params": [],
       "result": {
         "name": "jobList",
-        "description": "List of all jobs.",
+        "description": "List of all job IDs.",
         "schema": {
           "type": "array",
           "items": {
-            "$ref": "#/components/schemas/Job"
+            "type": "string"
           }
         }
       }

@@ -343,7 +343,7 @@
       },
       "ScriptType": {
         "type": "string",
-        "enum": ["HeroScript", "RhaiSAL", "RhaiDSL"],
+        "enum": ["OSIS", "SAL", "V", "Python"],
         "description": "The type of script to execute."
       },
       "JobStatus": {
@@ -38,12 +38,6 @@ enum Commands {
         #[arg(long, default_value = "ws://127.0.0.1:9944")]
         url: String,
     },
-    /// Connect to Unix socket server
-    Unix {
-        /// Unix socket path
-        #[arg(long, default_value = "/tmp/hero-openrpc.sock")]
-        socket_path: PathBuf,
-    },
 }
 
 /// Available RPC methods with descriptions

@@ -161,10 +155,6 @@ async fn main() -> Result<()> {
             println!("{} {}", "Connecting to WebSocket server:".green(), url.cyan());
             ClientTransport::WebSocket(url)
         }
-        Commands::Unix { socket_path } => {
-            println!("{} {:?}", "Connecting to Unix socket server:".green(), socket_path);
-            ClientTransport::Unix(socket_path)
-        }
     };
 
     // Connect to the server

@@ -282,15 +272,18 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .with_prompt("Signature (hex)")
                 .interact_text()?;
 
+            let nonce: String = Input::new()
+                .with_prompt("Nonce (hex) - fetch via fetch_nonce first")
+                .interact_text()?;
+
             let result = client.authenticate(pubkey, signature, nonce).await?;
             println!("{} {}", "Authentication result:".green().bold(),
                 if result { "Success".green() } else { "Failed".red() });
         }
 
         "whoami" => {
             let result = client.whoami().await?;
-            println!("{} {}", "User info:".green().bold(),
-                serde_json::to_string_pretty(&result)?.cyan());
+            println!("{} {}", "User info:".green().bold(), result.cyan());
         }
 
         "play" => {

@@ -307,7 +300,7 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .with_prompt("Script content")
                 .interact_text()?;
 
-            let script_types = ["HeroScript", "RhaiSAL", "RhaiDSL"];
+            let script_types = ["OSIS", "SAL", "V", "Python"];
             let script_type_selection = Select::new()
                 .with_prompt("Script type")
                 .items(&script_types)

@@ -315,10 +308,10 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .interact()?;
 
             let script_type = match script_type_selection {
-                0 => ScriptType::HeroScript,
-                1 => ScriptType::RhaiSAL,
-                2 => ScriptType::RhaiDSL,
-                _ => ScriptType::HeroScript,
+                0 => ScriptType::OSIS,
+                1 => ScriptType::SAL,
+                2 => ScriptType::V,
+                _ => ScriptType::Python,
             };
 
             let add_prerequisites = Confirm::new()

@@ -335,9 +328,34 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 None
             };
 
+            let caller_id: String = Input::new()
+                .with_prompt("Caller ID")
+                .interact_text()?;
+
+            let context_id: String = Input::new()
+                .with_prompt("Context ID")
+                .interact_text()?;
+
+            let specify_timeout = Confirm::new()
+                .with_prompt("Specify timeout (seconds)?")
+                .default(false)
+                .interact()?;
+
+            let timeout = if specify_timeout {
+                let t: u64 = Input::new()
+                    .with_prompt("Timeout (seconds)")
+                    .interact_text()?;
+                Some(t)
+            } else {
+                None
+            };
+
             let job_params = JobParams {
                 script,
                 script_type,
+                caller_id,
+                context_id,
+                timeout,
                 prerequisites,
             };
 

@@ -360,7 +378,7 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .with_prompt("Script content")
                 .interact_text()?;
 
-            let script_types = ["HeroScript", "RhaiSAL", "RhaiDSL"];
+            let script_types = ["OSIS", "SAL", "V", "Python"];
             let script_type_selection = Select::new()
                 .with_prompt("Script type")
                 .items(&script_types)

@@ -368,10 +386,10 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .interact()?;
 
             let script_type = match script_type_selection {
-                0 => ScriptType::HeroScript,
-                1 => ScriptType::RhaiSAL,
-                2 => ScriptType::RhaiDSL,
-                _ => ScriptType::HeroScript,
+                0 => ScriptType::OSIS,
+                1 => ScriptType::SAL,
+                2 => ScriptType::V,
+                _ => ScriptType::Python,
             };
 
             let add_prerequisites = Confirm::new()

@@ -416,18 +434,17 @@ async fn execute_method(client: &HeroOpenRpcClient, method_name: &str) -> Result
                 .interact_text()?;
 
             let result = client.get_job_logs(job_id).await?;
-            println!("{} {}", "Job logs:".green().bold(), result.logs.cyan());
+            match result.logs {
+                Some(logs) => println!("{} {}", "Job logs:".green().bold(), logs.cyan()),
+                None => println!("{} {}", "Job logs:".green().bold(), "(no logs)".yellow()),
+            }
         }
 
         "list_jobs" => {
             let result = client.list_jobs().await?;
-            println!("{}", "Jobs:".green().bold());
-            for job in result {
-                println!("  {} - {} ({:?})",
-                    job.id().yellow(),
-                    job.script_type(),
-                    job.status()
-                );
+            println!("{}", "Job IDs:".green().bold());
+            for id in result {
+                println!("  {}", id.yellow());
             }
         }
 
@@ -1,6 +1,6 @@
 use anyhow::Result;
 use async_trait::async_trait;
-use hero_job::{Job, JobStatus, ScriptType};
+use hero_job::{JobStatus, ScriptType};
 use jsonrpsee::core::client::ClientT;
 use jsonrpsee::core::ClientError;
 use jsonrpsee::proc_macros::rpc;

@@ -37,7 +37,7 @@ pub trait OpenRpcClient {
     ) -> Result<bool, ClientError>;
 
     #[method(name = "whoami")]
-    async fn whoami(&self) -> Result<serde_json::Value, ClientError>;
+    async fn whoami(&self) -> Result<String, ClientError>;
 
     // Script execution
     #[method(name = "play")]

@@ -68,7 +68,7 @@ pub trait OpenRpcClient {
     async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ClientError>;
 
     #[method(name = "list_jobs")]
-    async fn list_jobs(&self) -> Result<Vec<Job>, ClientError>;
+    async fn list_jobs(&self) -> Result<Vec<String>, ClientError>;
 
     #[method(name = "stop_job")]
     async fn stop_job(&self, job_id: String) -> Result<(), ClientError>;

@@ -146,7 +146,7 @@ impl HeroOpenRpcClient {
     }
 
     /// Delegate to whoami on the underlying client
-    pub async fn whoami(&self) -> Result<serde_json::Value, ClientError> {
+    pub async fn whoami(&self) -> Result<String, ClientError> {
         self.client.whoami().await
     }
 

@@ -191,7 +191,7 @@ impl HeroOpenRpcClient {
     }
 
     /// Delegate to list_jobs on the underlying client
-    pub async fn list_jobs(&self) -> Result<Vec<Job>, ClientError> {
+    pub async fn list_jobs(&self) -> Result<Vec<String>, ClientError> {
         self.client.list_jobs().await
     }
 
@@ -1,11 +1,14 @@
 use hero_job::ScriptType;
 use serde::{Deserialize, Serialize};
 
-/// Parameters for creating a job
+/** Parameters for creating a job (must mirror server DTO) */
 #[derive(Debug, Serialize, Deserialize)]
 pub struct JobParams {
     pub script: String,
     pub script_type: ScriptType,
+    pub caller_id: String,
+    pub context_id: String,
+    pub timeout: Option<u64>, // seconds
     pub prerequisites: Option<Vec<String>>,
 }
 

@@ -21,8 +24,8 @@ pub struct StartJobResult {
     pub success: bool,
 }
 
-/// Result of getting job logs
+/** Result of getting job logs */
 #[derive(Debug, Serialize, Deserialize)]
 pub struct JobLogsResult {
-    pub logs: String,
+    pub logs: Option<String>,
 }
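With the extra fields, a client-side `create_job` payload now has to carry caller/context identifiers and an optional timeout in seconds. A minimal construction sketch (field names taken from the struct above; the import path and placeholder values are assumptions):

```rust
use hero_job::ScriptType;
use hero_openrpc_client::JobParams; // import path assumed

fn example_job_params() -> JobParams {
    JobParams {
        script: "40 + 2".to_string(),
        script_type: ScriptType::OSIS,
        caller_id: "02abc...caller".to_string(),   // placeholder, mirroring simple_job.rs
        context_id: "02def...context".to_string(), // placeholder
        timeout: Some(30),                         // seconds
        prerequisites: None,
    }
}
```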
@@ -19,10 +19,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 clap = { version = "4.0", features = ["derive"] }
 
 # JSON-RPC dependencies
-jsonrpsee = { version = "0.21", features = [
-    "server",
-    "macros"
-] }
+jsonrpsee = { version = "0.21", features = ["server", "macros"] }
 jsonrpsee-types = "0.21"
 uuid = { version = "1.6", features = ["v4", "serde"] }
 chrono = { version = "0.4", features = ["serde"] }
@@ -8,7 +8,7 @@ use tracing_subscriber;
 
 #[derive(Parser)]
 #[command(name = "hero-openrpc-server")]
-#[command(about = "Hero OpenRPC Server - WebSocket and Unix socket JSON-RPC server")]
+#[command(about = "Hero OpenRPC Server - JSON-RPC over HTTP/WS")]
 struct Cli {
     #[command(subcommand)]
     command: Commands,

@@ -34,12 +34,6 @@ enum Commands {
         #[arg(long, default_value = "127.0.0.1:9944")]
         addr: SocketAddr,
     },
-    /// Start Unix socket server
-    Unix {
-        /// Unix socket path
-        #[arg(long, default_value = "/tmp/hero-openrpc.sock")]
-        socket_path: PathBuf,
-    },
 }
 
 #[tokio::main]

@@ -65,14 +59,6 @@ async fn main() -> Result<()> {
             info!("Starting WebSocket server on {}", addr);
             Transport::WebSocket(addr)
         }
-        Commands::Unix { socket_path } => {
-            info!("Starting Unix socket server on {:?}", socket_path);
-            // Remove existing socket file if it exists
-            if socket_path.exists() {
-                std::fs::remove_file(&socket_path)?;
-            }
-            Transport::Unix(socket_path)
-        }
     };
 
     let config = OpenRpcServerConfig {
@@ -1,6 +1,6 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use hero_job::{Job, JobBuilder, JobStatus, ScriptType};
|
use hero_job::{Job, JobBuilder, JobStatus, ScriptType};
|
||||||
use hero_supervisor::{Supervisor, SupervisorBuilder};
|
use hero_supervisor::{Supervisor, SupervisorBuilder, SupervisorError};
|
||||||
use jsonrpsee::core::async_trait;
|
use jsonrpsee::core::async_trait;
|
||||||
use jsonrpsee::proc_macros::rpc;
|
use jsonrpsee::proc_macros::rpc;
|
||||||
use jsonrpsee::server::{ServerBuilder, ServerHandle};
|
use jsonrpsee::server::{ServerBuilder, ServerHandle};
|
||||||
@@ -12,17 +12,24 @@ use std::sync::Arc;
|
|||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
use tracing::error;
|
use tracing::error;
|
||||||
|
|
||||||
|
fn map_sup_error_to_rpc(e: &SupervisorError) -> ErrorCode {
|
||||||
|
match e {
|
||||||
|
SupervisorError::InvalidInput(_) | SupervisorError::JobError(_) => ErrorCode::InvalidParams,
|
||||||
|
SupervisorError::Timeout(_) => ErrorCode::ServerError(-32002),
|
||||||
|
_ => ErrorCode::InternalError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
mod auth;
|
mod auth;
|
||||||
pub mod types;
|
pub mod types;
|
||||||
|
|
||||||
pub use auth::*;
|
pub use auth::*;
|
||||||
pub use types::*;
|
pub use types::*;
|
||||||
|
|
||||||
/// Transport type for the OpenRPC server
|
/** Transport type for the OpenRPC server */
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum Transport {
|
pub enum Transport {
|
||||||
WebSocket(SocketAddr),
|
WebSocket(SocketAddr),
|
||||||
Unix(PathBuf),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// OpenRPC server configuration
|
/// OpenRPC server configuration
|
||||||
@@ -82,7 +89,7 @@ pub trait OpenRpcApi {
|
|||||||
async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ErrorCode>;
|
async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ErrorCode>;
|
||||||
|
|
||||||
#[method(name = "list_jobs")]
|
#[method(name = "list_jobs")]
|
||||||
async fn list_jobs(&self) -> Result<Vec<Job>, ErrorCode>;
|
async fn list_jobs(&self) -> Result<Vec<String>, ErrorCode>;
|
||||||
|
|
||||||
#[method(name = "stop_job")]
|
#[method(name = "stop_job")]
|
||||||
async fn stop_job(&self, job_id: String) -> Result<(), ErrorCode>;
|
async fn stop_job(&self, job_id: String) -> Result<(), ErrorCode>;
|
||||||
@@ -114,8 +121,8 @@ impl OpenRpcServer {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Start the OpenRPC server
|
/// Start the OpenRPC server on the given SocketAddr (HTTP/WS only)
|
||||||
pub async fn start(self, config: OpenRpcServerConfig) -> Result<ServerHandle> {
|
pub async fn start_on(self, addr: SocketAddr) -> Result<ServerHandle> {
|
||||||
let mut module = RpcModule::new(());
|
let mut module = RpcModule::new(());
|
||||||
|
|
||||||
// Register all the RPC methods
|
// Register all the RPC methods
|
||||||
@@ -209,10 +216,10 @@ impl OpenRpcServer {
|
|||||||
})?;
|
})?;
|
||||||
|
|
||||||
let server_clone = self.clone();
|
let server_clone = self.clone();
|
||||||
module.register_async_method("list_jobs", move |params, _| {
|
module.register_async_method("list_jobs", move |_params, _| {
|
||||||
let server = server_clone.clone();
|
let server = server_clone.clone();
|
||||||
async move {
|
async move {
|
||||||
let _: () = params.parse().map_err(|_| ErrorCode::InvalidParams)?;
|
// No parameters expected; ignore any provided params for robustness
|
||||||
server.list_jobs().await
|
server.list_jobs().await
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
@@ -244,18 +251,17 @@ impl OpenRpcServer {
|
|||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
|
let server = ServerBuilder::default()
|
||||||
|
.build(addr)
|
||||||
|
.await?;
|
||||||
|
let handle = server.start(module);
|
||||||
|
Ok(handle)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start the OpenRPC server (config wrapper)
|
||||||
|
pub async fn start(self, config: OpenRpcServerConfig) -> Result<ServerHandle> {
|
||||||
match config.transport {
|
match config.transport {
|
||||||
Transport::WebSocket(addr) => {
|
Transport::WebSocket(addr) => self.start_on(addr).await,
|
||||||
let server = ServerBuilder::default()
|
|
||||||
.build(addr)
|
|
||||||
.await?;
|
|
||||||
let handle = server.start(module);
|
|
||||||
Ok(handle)
|
|
||||||
}
|
|
||||||
Transport::Unix(_path) => {
|
|
||||||
// Unix socket transport not yet implemented in jsonrpsee 0.21
|
|
||||||
return Err(anyhow::anyhow!("Unix socket transport not yet supported").into());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -295,12 +301,8 @@ impl OpenRpcApiServer for OpenRpcServer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn play(&self, script: String) -> Result<PlayResult, ErrorCode> {
|
async fn play(&self, script: String) -> Result<PlayResult, ErrorCode> {
|
||||||
let _supervisor = self.supervisor.read().await;
|
let output = self.run_job(script, ScriptType::SAL, None).await?;
|
||||||
|
Ok(PlayResult { output })
|
||||||
// For now, return a simple result since we need to implement execute_script method
|
|
||||||
Ok(PlayResult {
|
|
||||||
output: format!("Script executed: {}", script)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn create_job(&self, job_params: JobParams) -> Result<String, ErrorCode> {
|
async fn create_job(&self, job_params: JobParams) -> Result<String, ErrorCode> {
|
||||||
@@ -360,10 +362,37 @@ impl OpenRpcApiServer for OpenRpcServer {
|
|||||||
&self,
|
&self,
|
||||||
script: String,
|
script: String,
|
||||||
script_type: ScriptType,
|
script_type: ScriptType,
|
||||||
_prerequisites: Option<Vec<String>>,
|
prerequisites: Option<Vec<String>>,
|
||||||
) -> Result<String, ErrorCode> {
|
) -> Result<String, ErrorCode> {
|
||||||
// For now, return a simple result
|
let supervisor = self.supervisor.read().await;
|
||||||
Ok(format!("Job executed with script: {} (type: {:?})", script, script_type))
|
|
||||||
|
// Build job with defaults and optional prerequisites
|
||||||
|
let mut builder = JobBuilder::new()
|
||||||
|
.caller_id("rpc-caller")
|
||||||
|
.context_id("rpc-context")
|
||||||
|
.script(&script)
|
||||||
|
.script_type(script_type)
|
||||||
|
.timeout(std::time::Duration::from_secs(30));
|
||||||
|
|
||||||
|
if let Some(prs) = prerequisites {
|
||||||
|
builder = builder.prerequisites(prs);
|
||||||
|
}
|
||||||
|
|
||||||
|
let job = match builder.build() {
|
||||||
|
Ok(j) => j,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to build job in run_job: {}", e);
|
||||||
|
return Err(ErrorCode::InvalidParams);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match supervisor.run_job_and_await_result(&job).await {
|
||||||
|
Ok(output) => Ok(output),
|
||||||
|
Err(e) => {
|
||||||
|
error!("run_job failed: {}", e);
|
||||||
|
Err(map_sup_error_to_rpc(&e))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_job_status(&self, job_id: String) -> Result<JobStatus, ErrorCode> {
|
async fn get_job_status(&self, job_id: String) -> Result<JobStatus, ErrorCode> {
|
||||||
@@ -373,7 +402,7 @@ impl OpenRpcApiServer for OpenRpcServer {
|
|||||||
Ok(status) => Ok(status),
|
Ok(status) => Ok(status),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!("Failed to get job status for {}: {}", job_id, e);
|
error!("Failed to get job status for {}: {}", job_id, e);
|
||||||
Err(ErrorCode::InvalidParams)
|
Err(map_sup_error_to_rpc(&e))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -385,50 +414,29 @@ impl OpenRpcApiServer for OpenRpcServer {
             Ok(output) => Ok(output.unwrap_or_else(|| "No output available".to_string())),
             Err(e) => {
                 error!("Failed to get job output for {}: {}", job_id, e);
-                Err(ErrorCode::InvalidParams)
+                Err(map_sup_error_to_rpc(&e))
             }
         }
     }

     async fn get_job_logs(&self, job_id: String) -> Result<JobLogsResult, ErrorCode> {
-        // For now, return mock logs
-        Ok(JobLogsResult {
-            logs: format!("Logs for job {}", job_id),
-        })
+        let supervisor = self.supervisor.read().await;
+        match supervisor.get_job_logs(&job_id).await {
+            Ok(logs_opt) => Ok(JobLogsResult { logs: logs_opt }),
+            Err(e) => {
+                error!("Failed to get job logs for {}: {}", job_id, e);
+                Err(map_sup_error_to_rpc(&e))
+            }
+        }
     }

-    async fn list_jobs(&self) -> Result<Vec<Job>, ErrorCode> {
+    async fn list_jobs(&self) -> Result<Vec<String>, ErrorCode> {
         let supervisor = self.supervisor.read().await;

         match supervisor.list_jobs().await {
-            Ok(job_ids) => {
-                // For now, create minimal Job objects with just the IDs
-                // In a real implementation, we'd need a supervisor.get_job() method
-                let jobs: Vec<Job> = job_ids.into_iter().map(|job_id| {
-                    // Create a minimal job object - this is a temporary solution
-                    // until supervisor.get_job() is implemented
-                    Job {
-                        id: job_id,
-                        caller_id: "unknown".to_string(),
-                        context_id: "unknown".to_string(),
-                        script: "unknown".to_string(),
-                        script_type: ScriptType::OSIS,
-                        timeout: std::time::Duration::from_secs(30),
-                        retries: 0,
-                        concurrent: false,
-                        log_path: None,
-                        env_vars: std::collections::HashMap::new(),
-                        prerequisites: Vec::new(),
-                        dependents: Vec::new(),
-                        created_at: chrono::Utc::now(),
-                        updated_at: chrono::Utc::now(),
-                    }
-                }).collect();
-                Ok(jobs)
-            },
+            Ok(job_ids) => Ok(job_ids),
             Err(e) => {
                 error!("Failed to list jobs: {}", e);
-                Err(ErrorCode::InternalError)
+                Err(map_sup_error_to_rpc(&e))
             }
         }
     }
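`list_jobs` now returns bare job IDs rather than placeholder `Job` objects, so callers should decode the response as a string array. A hedged client-side sketch, assuming the method is registered under the name `list_jobs` and using the jsonrpsee client API seen in the reference examples later in this compare:

```rust
use jsonrpsee::core::client::ClientT;
use jsonrpsee::rpc_params;

// Sketch: `client` is any connected jsonrpsee client (HTTP, WS, or IPC).
async fn print_job_ids(client: &impl ClientT) -> anyhow::Result<()> {
    let job_ids: Vec<String> = client.request("list_jobs", rpc_params![]).await?;
    for id in job_ids {
        println!("{id}");
    }
    Ok(())
}
```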
@@ -440,7 +448,7 @@ impl OpenRpcApiServer for OpenRpcServer {
             Ok(_) => Ok(()),
             Err(e) => {
                 error!("Failed to stop job {}: {}", job_id, e);
-                Err(ErrorCode::InvalidParams)
+                Err(map_sup_error_to_rpc(&e))
             }
         }
     }
@@ -452,7 +460,7 @@ impl OpenRpcApiServer for OpenRpcServer {
             Ok(_) => Ok(()),
             Err(e) => {
                 error!("Failed to delete job {}: {}", job_id, e);
-                Err(ErrorCode::InvalidParams)
+                Err(map_sup_error_to_rpc(&e))
             }
         }
     }
@@ -464,7 +472,7 @@ impl OpenRpcApiServer for OpenRpcServer {
             Ok(_) => Ok(()),
             Err(e) => {
                 error!("Failed to clear all jobs: {}", e);
-                Err(ErrorCode::InternalError)
+                Err(map_sup_error_to_rpc(&e))
             }
         }
     }
@@ -24,8 +24,8 @@ pub struct StartJobResult {
|
|||||||
pub success: bool,
|
pub success: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Result of getting job logs
|
/** Result of getting job logs */
|
||||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||||
pub struct JobLogsResult {
|
pub struct JobLogsResult {
|
||||||
pub logs: String,
|
pub logs: Option<String>,
|
||||||
}
|
}
|
||||||
|
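Because `logs` is now `Option<String>`, the field serializes as either a string or `null`, and callers must handle the missing case explicitly (the updated `test_get_job_logs` below does exactly that). A small hedged sketch of caller-side handling:

```rust
// Sketch: JobLogsResult is the struct defined in the hunk above.
fn render_logs(result: &JobLogsResult) -> &str {
    match result.logs.as_deref() {
        Some(logs) if !logs.is_empty() => logs,
        _ => "no logs available",
    }
}
```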
@@ -204,13 +204,13 @@ async fn test_list_jobs() {
     let result = server.list_jobs().await;
     assert!(result.is_ok());

-    let jobs = result.unwrap();
-    assert!(jobs.len() >= 3); // Should have at least the 3 jobs we created
+    let job_ids = result.unwrap();
+    assert!(job_ids.len() >= 3); // Should have at least the 3 jobs we created

-    // Verify job structure
-    for job in jobs {
-        assert!(!job.id.is_empty());
-        assert!(uuid::Uuid::parse_str(&job.id).is_ok());
+    // Verify job IDs are valid UUIDs
+    for id in job_ids {
+        assert!(!id.is_empty());
+        assert!(uuid::Uuid::parse_str(&id).is_ok());
     }
 }
@@ -337,7 +337,10 @@ async fn test_get_job_logs() {
     assert!(result.is_ok());

     let logs_result = result.unwrap();
-    assert!(!logs_result.logs.is_empty());
+    match logs_result.logs {
+        Some(ref logs) => assert!(!logs.is_empty()),
+        None => {} // acceptable when no logs are available
+    }
 }

 #[tokio::test]
@@ -1,6 +1,18 @@
 [package]
 name = "hero-client-unix"
 version = "0.1.0"
-edition = "2024"
+edition = "2021"

 [dependencies]
+anyhow = "1.0"
+clap = { version = "4.5", features = ["derive"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
+
+# JSON-RPC async client and params types
+jsonrpsee = { version = "0.21", features = ["macros", "async-client"] }
+jsonrpsee-types = "0.21"
+
+# IPC transport
+reth-ipc = { git = "https://github.com/paradigmxyz/reth", package = "reth-ipc" }
@@ -1,3 +1,124 @@
-fn main() {
-    println!("Hello, world!");
+use std::path::PathBuf;
+
+use anyhow::Result;
+use clap::Parser;
+use jsonrpsee::core::client::ClientT;
+use jsonrpsee::rpc_params;
+use reth_ipc::client::IpcClientBuilder;
+use serde_json::Value;
+use tracing_subscriber::EnvFilter;
+
+/// Simple IPC (Unix socket) JSON-RPC client for manual testing.
+///
+/// Examples:
+/// - Call method without params:
+///   hero-client-unix --socket /tmp/baobab.ipc --method whoami
+///
+/// - Call method with positional params (as JSON array):
+///   hero-client-unix --socket /tmp/baobab.ipc --method authenticate --params '["pubkey","signature","nonce"]'
+///
+/// - Call method with single object param:
+///   hero-client-unix --socket /tmp/baobab.ipc --method create_job --params '{"job_id":"abc"}'
+#[derive(Parser, Debug)]
+#[command(name = "hero-client-unix", version, about = "IPC JSON-RPC client")]
+struct Args {
+    /// Filesystem path to the Unix domain socket
+    #[arg(long, default_value = "/tmp/baobab.ipc", env = "HERO_IPC_SOCKET")]
+    socket: PathBuf,
+
+    /// JSON-RPC method name to call
+    #[arg(long)]
+    method: String,
+
+    /// JSON string for params. Either an array for positional params or an object for named params.
+    /// Defaults to [] (no params).
+    #[arg(long, default_value = "[]")]
+    params: String,
+
+    /// Log filter (e.g., info, debug, trace)
+    #[arg(long, default_value = "info", env = "RUST_LOG")]
+    log: String,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = Args::parse();
+
+    tracing_subscriber::FmtSubscriber::builder()
+        .with_env_filter(EnvFilter::new(args.log.clone()))
+        .try_init()
+        .expect("setting default subscriber failed");
+
+    let socket_str = args.socket.to_string_lossy().to_string();
+    let client = IpcClientBuilder::default().build(&socket_str).await?;
+
+    let params_value: Value = serde_json::from_str(&args.params)?;
+
+    // We deserialize responses to serde_json::Value for generality.
+    // You can set a concrete type instead if needed.
+    let result: Value = match params_value {
+        Value::Array(arr) => match arr.len() {
+            0 => client.request(&args.method, rpc_params![]).await?,
+            1 => client.request(&args.method, rpc_params![arr[0].clone()]).await?,
+            2 => client.request(&args.method, rpc_params![arr[0].clone(), arr[1].clone()]).await?,
+            3 => client
+                .request(&args.method, rpc_params![arr[0].clone(), arr[1].clone(), arr[2].clone()])
+                .await?,
+            4 => client
+                .request(
+                    &args.method,
+                    rpc_params![arr[0].clone(), arr[1].clone(), arr[2].clone(), arr[3].clone()],
+                )
+                .await?,
+            5 => client
+                .request(
+                    &args.method,
+                    rpc_params![
+                        arr[0].clone(),
+                        arr[1].clone(),
+                        arr[2].clone(),
+                        arr[3].clone(),
+                        arr[4].clone()
+                    ],
+                )
+                .await?,
+            6 => client
+                .request(
+                    &args.method,
+                    rpc_params![
+                        arr[0].clone(),
+                        arr[1].clone(),
+                        arr[2].clone(),
+                        arr[3].clone(),
+                        arr[4].clone(),
+                        arr[5].clone()
+                    ],
+                )
+                .await?,
+            7 => client
+                .request(
+                    &args.method,
+                    rpc_params![
+                        arr[0].clone(),
+                        arr[1].clone(),
+                        arr[2].clone(),
+                        arr[3].clone(),
+                        arr[4].clone(),
+                        arr[5].clone(),
+                        arr[6].clone()
+                    ],
+                )
+                .await?,
+            _ => {
+                // Fallback: send entire array as a single param to avoid combinatorial explosion.
+                // Adjust if your server expects strictly positional expansion beyond 7 items.
+                client.request(&args.method, rpc_params![Value::Array(arr)]).await?
+            }
+        },
+        // Single non-array param (object, string, number, etc.)
+        other => client.request(&args.method, rpc_params![other]).await?,
+    };
+
+    println!("{}", serde_json::to_string_pretty(&result)?);
+    Ok(())
 }
@@ -1,6 +1,14 @@
 [package]
 name = "hero-server-unix"
 version = "0.1.0"
-edition = "2024"
+edition = "2021"

 [dependencies]
+anyhow = "1.0"
+clap = { version = "4.5", features = ["derive"] }
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+tokio = { version = "1", features = ["macros", "rt-multi-thread", "signal"] }
+
+# Reuse the OpenRPC server crate that registers all methods and now supports IPC
+hero-openrpc-server = { path = "../../openrpc/server" }
@@ -1,3 +1,64 @@
-fn main() {
-    println!("Hello, world!");
+use std::path::PathBuf;
+
+use clap::Parser;
+use tracing_subscriber::EnvFilter;
+
+use hero_openrpc_server::{OpenRpcServer, OpenRpcServerConfig, Transport};
+
+/// IPC (Unix socket) JSON-RPC server launcher.
+///
+/// This binary starts the OpenRPC server over a Unix domain socket using the reth-ipc transport.
+#[derive(Parser, Debug)]
+#[command(name = "hero-server-unix", version, about = "Start the JSON-RPC IPC server")]
+struct Args {
+    /// Filesystem path to the Unix domain socket
+    #[arg(long, default_value = "/tmp/baobab.ipc", env = "HERO_IPC_SOCKET")]
+    socket_path: PathBuf,
+
+    /// Optional path to a supervisor configuration file
+    #[arg(long)]
+    supervisor_config: Option<PathBuf>,
+
+    /// Database path (reserved for future use)
+    #[arg(long, default_value = "./db", env = "HERO_DB_PATH")]
+    db_path: PathBuf,
+
+    /// Log filter (e.g., info, debug, trace)
+    #[arg(long, default_value = "info", env = "RUST_LOG")]
+    log: String,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let args = Args::parse();
+
+    // Initialize tracing with provided log filter
+    tracing_subscriber::FmtSubscriber::builder()
+        .with_env_filter(EnvFilter::new(args.log.clone()))
+        .try_init()
+        .expect("setting default subscriber failed");
+
+    let cfg = OpenRpcServerConfig {
+        transport: Transport::Unix(args.socket_path.clone()),
+        supervisor_config_path: args.supervisor_config.clone(),
+        db_path: args.db_path.clone(),
+    };
+
+    // Build server state
+    let server = OpenRpcServer::new(cfg.clone()).await?;
+
+    // Start IPC server
+    let handle = server.start(cfg).await?;
+
+    tracing::info!(
+        "IPC server started on {} (press Ctrl+C to stop)",
+        args.socket_path.display()
+    );
+
+    // Run until stopped
+    tokio::spawn(handle.stopped());
+    tokio::signal::ctrl_c().await?;
+    tracing::info!("Shutting down IPC server");
+
+    Ok(())
 }
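Together, the two new binaries form a local IPC loop: start `hero-server-unix` on a socket path, then point `hero-client-unix` at the same path (see the CLI examples in its doc comment above). The same round trip can be sketched programmatically; the `whoami` call and the literal construction of `OpenRpcServerConfig` are assumptions based only on the code shown in this compare.

```rust
use std::path::PathBuf;

use hero_openrpc_server::{OpenRpcServer, OpenRpcServerConfig, Transport};
use jsonrpsee::core::client::ClientT;
use jsonrpsee::rpc_params;
use reth_ipc::client::IpcClientBuilder;

// Sketch only: assumes OpenRpcServerConfig can be built with exactly the fields
// used by hero-server-unix above, and that `whoami` takes no parameters.
async fn ipc_round_trip() -> anyhow::Result<()> {
    let socket = PathBuf::from("/tmp/baobab.ipc");

    let cfg = OpenRpcServerConfig {
        transport: Transport::Unix(socket.clone()),
        supervisor_config_path: None,
        db_path: PathBuf::from("./db"),
    };
    let server = OpenRpcServer::new(cfg.clone()).await?;
    let handle = server.start(cfg).await?;
    tokio::spawn(handle.stopped());

    let socket_str = socket.to_string_lossy().to_string();
    let client = IpcClientBuilder::default().build(&socket_str).await?;
    let me: serde_json::Value = client.request("whoami", rpc_params![]).await?;
    println!("{me}");
    Ok(())
}
```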
@@ -0,0 +1,127 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;
use std::time::Duration;

use futures::{Stream, StreamExt};
use jsonrpsee::core::DeserializeOwned;
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};
use jsonrpsee::ws_client::WsClientBuilder;
use tokio_stream::wrappers::BroadcastStream;
use tokio_stream::wrappers::errors::BroadcastStreamRecvError;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;
    let url = format!("ws://{}", addr);

    let client = WsClientBuilder::default().build(&url).await?;

    let sub: Subscription<i32> = client.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;

    // drop oldest messages from subscription:
    let mut sub = drop_oldest_when_lagging(sub, 10);

    // Simulate that polling takes a long time.
    tokio::time::sleep(Duration::from_secs(1)).await;

    // The subscription starts from zero but you can
    // notice that many items have been replaced
    // because the subscription wasn't polled.
    for _ in 0..10 {
        match sub.next().await.unwrap() {
            Ok(n) => {
                tracing::info!("recv={n}");
            }
            Err(e) => {
                tracing::info!("{e}");
            }
        };
    }

    Ok(())
}

fn drop_oldest_when_lagging<T: Clone + DeserializeOwned + Send + Sync + 'static>(
    mut sub: Subscription<T>,
    buffer_size: usize,
) -> impl Stream<Item = Result<T, BroadcastStreamRecvError>> {
    let (tx, rx) = tokio::sync::broadcast::channel(buffer_size);

    tokio::spawn(async move {
        // Poll the subscription which ignores errors.
        while let Some(n) = sub.next().await {
            let msg = match n {
                Ok(msg) => msg,
                Err(e) => {
                    tracing::error!("Failed to decode the subscription message: {e}");
                    continue;
                }
            };

            if tx.send(msg).is_err() {
                return;
            }
        }
    });

    BroadcastStream::new(rx)
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0").await?;
    let mut module = RpcModule::new(());
    module
        .register_subscription("subscribe_hello", "s_hello", "unsubscribe_hello", |_, pending, _, _| async move {
            let sub = pending.accept().await.unwrap();

            for i in 0..usize::MAX {
                let json = serde_json::value::to_raw_value(&i).unwrap();
                sub.send(json).await.unwrap();
                tokio::time::sleep(Duration::from_millis(10)).await;
            }

            Ok(())
        })
        .unwrap();
    let addr = server.local_addr()?;

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/core_client.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

use std::net::SocketAddr;

use jsonrpsee::client_transport::ws::{Url, WsTransportClientBuilder};
use jsonrpsee::core::client::{ClientBuilder, ClientT};
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;
    let uri = Url::parse(&format!("ws://{}", addr))?;

    let (tx, rx) = WsTransportClientBuilder::default().build(uri).await?;
    let client = ClientBuilder::default().build_with_tokio(tx, rx);
    let response: String = client.request("say_hello", rpc_params![]).await?;
    tracing::info!("response: {:?}", response);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0").await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;
    let addr = server.local_addr()?;

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/cors_server.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

//! This example adds upstream CORS layers to the RPC service,
//! with access control allowing requests from all hosts.

use hyper::Method;
use jsonrpsee::server::{RpcModule, Server};
use std::net::SocketAddr;
use tower_http::cors::{Any, CorsLayer};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    // Start up a JSON-RPC server that allows cross origin requests.
    let server_addr = run_server().await?;

    // Print instructions for testing CORS from a browser.
    println!("Run the following snippet in the developer console in any Website.");
    println!(
        r#"
        fetch("http://{}", {{
            method: 'POST',
            mode: 'cors',
            headers: {{ 'Content-Type': 'application/json' }},
            body: JSON.stringify({{
                jsonrpc: '2.0',
                method: 'say_hello',
                id: 1
            }})
        }}).then(res => {{
            console.log("Response:", res);
            return res.text()
        }}).then(body => {{
            console.log("Response Body:", body)
        }});
        "#,
        server_addr
    );

    futures::future::pending().await
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    // Add a CORS middleware for handling HTTP requests.
    // This middleware does affect the response, including appropriate
    // headers to satisfy CORS. Because any origins are allowed, the
    // "Access-Control-Allow-Origin: *" header is appended to the response.
    let cors = CorsLayer::new()
        // Allow `POST` when accessing the resource
        .allow_methods([Method::POST])
        // Allow requests from any origin
        .allow_origin(Any)
        .allow_headers([hyper::header::CONTENT_TYPE]);
    let middleware = tower::ServiceBuilder::new().layer(cors);

    // The RPC exposes the access control for filtering and the middleware for
    // modifying requests / responses. These features are independent of one another
    // and can also be used separately.
    // In this example, we use both features.
    let server = Server::builder().set_http_middleware(middleware).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;

    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| {
        println!("say_hello method called!");
        "Hello there!!"
    })?;

    let addr = server.local_addr()?;
    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/host_filter_middleware.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

//! This example shows how to configure `host filtering` by tower middleware on the jsonrpsee server.
//!
//! The server whitelist's only `example.com` and any call from localhost will be
//! rejected both by HTTP and WebSocket transports.

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::rpc_params;
use jsonrpsee::server::middleware::http::HostFilterLayer;
use jsonrpsee::server::{RpcModule, Server};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;
    let url = format!("http://{}", addr);

    // Use RPC client to get the response of `say_hello` method.
    let client = HttpClient::builder().build(&url)?;
    // This call will be denied because only `example.com` URIs/hosts are allowed by the host filter.
    let response = client.request::<String, _>("say_hello", rpc_params![]).await.unwrap_err();
    println!("[main]: response: {}", response);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    // Custom tower service to handle the RPC requests
    let service_builder = tower::ServiceBuilder::new()
        // For this example we only want to permit requests from `example.com`
        // all other request are denied.
        //
        // `HostFilerLayer::new` only fails on invalid URIs..
        .layer(HostFilterLayer::new(["example.com"]).unwrap());

    let server =
        Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;

    let addr = server.local_addr()?;

    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo").unwrap();

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/http.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};
use tracing_subscriber::util::SubscriberInitExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
        .add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?);
    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    let server_addr = run_server().await?;
    let url = format!("http://{}", server_addr);

    let client = HttpClient::builder().build(url)?;
    let params = rpc_params![1_u64, 2, 3];
    let response: Result<String, _> = client.request("say_hello", params).await;
    tracing::info!("r: {:?}", response);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0".parse::<SocketAddr>()?).await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;

    let addr = server.local_addr()?;
    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/http_middleware.rs (new file, 129 lines)
@@ -0,0 +1,129 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

//! jsonrpsee supports two kinds of middlewares `http_middleware` and `rpc_middleware`.
//!
//! This example demonstrates how to use the `http_middleware` which applies for each
//! HTTP request.
//!
//! A typical use-case for this it to apply a specific CORS policy which applies both
//! for HTTP and WebSocket.
//!

use hyper::Method;
use hyper::body::Bytes;
use hyper::http::HeaderValue;
use jsonrpsee::rpc_params;
use std::iter::once;
use std::net::SocketAddr;
use std::time::Duration;
use tower_http::LatencyUnit;
use tower_http::compression::CompressionLayer;
use tower_http::cors::CorsLayer;
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
use tower_http::trace::{DefaultMakeSpan, DefaultOnResponse, TraceLayer};

use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::server::{RpcModule, Server};
use jsonrpsee::ws_client::WsClientBuilder;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;

    // WebSocket.
    {
        let client = WsClientBuilder::default().build(format!("ws://{}", addr)).await?;
        let response: String = client.request("say_hello", rpc_params![]).await?;
        println!("[main]: ws response: {:?}", response);
        let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
        let _ = client.request::<String, _>("say_hello", rpc_params![]).await?;
    }

    tokio::time::sleep(std::time::Duration::from_secs(1)).await;

    // HTTP.
    {
        let client = HttpClient::builder().build(format!("http://{}", addr))?;
        let response: String = client.request("say_hello", rpc_params![]).await?;
        println!("[main]: http response: {:?}", response);
        let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
        let _ = client.request::<String, _>("say_hello", rpc_params![]).await?;
    }

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let cors = CorsLayer::new()
        // Allow `POST` when accessing the resource
        .allow_methods([Method::POST])
        // Allow requests from any origin
        .allow_origin(HeaderValue::from_str("http://example.com").unwrap())
        .allow_headers([hyper::header::CONTENT_TYPE]);

    // Custom tower service to handle the RPC requests
    let service_builder = tower::ServiceBuilder::new()
        // Add high level tracing/logging to all requests
        .layer(
            TraceLayer::new_for_http()
                .on_request(
                    |request: &hyper::Request<_>, _span: &tracing::Span| tracing::info!(request = ?request, "on_request"),
                )
                .on_body_chunk(|chunk: &Bytes, latency: Duration, _: &tracing::Span| {
                    tracing::info!(size_bytes = chunk.len(), latency = ?latency, "sending body chunk")
                })
                .make_span_with(DefaultMakeSpan::new().include_headers(true))
                .on_response(DefaultOnResponse::new().include_headers(true).latency_unit(LatencyUnit::Micros)),
        )
        // Mark the `Authorization` request header as sensitive so it doesn't show in logs
        .layer(SetSensitiveRequestHeadersLayer::new(once(hyper::header::AUTHORIZATION)))
        .layer(cors)
        .layer(CompressionLayer::new())
        .timeout(Duration::from_secs(2));

    let server =
        Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;

    let addr = server.local_addr()?;

    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo").unwrap();

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/http_proxy_middleware.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

//! This example utilizes the `ProxyRequest` layer for redirecting
//! `GET /path` requests to internal RPC methods.
//!
//! The RPC server registers a method named `system_health` which
//! returns `serde_json::Value`. Redirect any `GET /health`
//! requests to the internal method, and return only the method's
//! response in the body (ie, without any jsonRPC 2.0 overhead).
//!
//! # Note
//!
//! This functionality is useful for services which would
//! like to query a certain `URI` path for statistics.

use hyper_util::client::legacy::Client;
use hyper_util::rt::TokioExecutor;
use std::net::SocketAddr;
use std::time::Duration;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::rpc_params;
use jsonrpsee::server::middleware::http::ProxyGetRequestLayer;
use jsonrpsee::server::{RpcModule, Server};

type EmptyBody = http_body_util::Empty<hyper::body::Bytes>;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;
    let url = format!("http://{}", addr);

    // Use RPC client to get the response of `say_hello` method.
    let client = HttpClient::builder().build(&url)?;
    let response: String = client.request("say_hello", rpc_params![]).await?;
    println!("[main]: response: {:?}", response);

    // Use hyper client to manually submit a `GET /health` request.
    let http_client = Client::builder(TokioExecutor::new()).build_http();
    let uri = format!("http://{}/health", addr);

    let req = hyper::Request::builder().method("GET").uri(&uri).body(EmptyBody::new())?;
    println!("[main]: Submit proxy request: {:?}", req);
    let res = http_client.request(req).await?;
    println!("[main]: Received proxy response: {:?}", res);

    // Interpret the response as String.
    let collected = http_body_util::BodyExt::collect(res.into_body()).await?;
    let out = String::from_utf8(collected.to_bytes().to_vec()).unwrap();
    println!("[main]: Interpret proxy response: {:?}", out);
    assert_eq!(out.as_str(), "{\"health\":true}");

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    // Custom tower service to handle the RPC requests
    let service_builder = tower::ServiceBuilder::new()
        // Proxy `GET /health` requests to internal `system_health` method.
        .layer(ProxyGetRequestLayer::new([("/health", "system_health")])?)
        .timeout(Duration::from_secs(2));

    let server =
        Server::builder().set_http_middleware(service_builder).build("127.0.0.1:0".parse::<SocketAddr>()?).await?;

    let addr = server.local_addr()?;

    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo").unwrap();
    module.register_method("system_health", |_, _, _| serde_json::json!({ "health": true })).unwrap();

    let handle = server.start(module);

    // In this example we don't care about doing shutdown so let's it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/jsonrpsee_as_service.rs (new file, 380 lines; excerpt ends mid-file below)
@@ -0,0 +1,380 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// (MIT license header identical to the example file above; omitted here.)

//! This example shows how to use the `jsonrpsee::server` as
//! a tower service such that it's possible to get access
//! HTTP related things by launching a `hyper::service_fn`.
//!
//! The typical use-case for this is when one wants to have
//! access to HTTP related things.

use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

use futures::FutureExt;
use hyper::HeaderMap;
use hyper::header::AUTHORIZATION;
use jsonrpsee::core::async_trait;
use jsonrpsee::core::middleware::{Batch, BatchEntry, BatchEntryErr, Notification, RpcServiceBuilder, RpcServiceT};
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::middleware::http::{HostFilterLayer, ProxyGetRequestLayer};
use jsonrpsee::server::{
    ServerConfig, ServerHandle, StopHandle, TowerServiceBuilder, serve_with_graceful_shutdown, stop_channel,
};
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned, Request};
use jsonrpsee::ws_client::{HeaderValue, WsClientBuilder};
use jsonrpsee::{MethodResponse, Methods};
use tokio::net::TcpListener;
use tower::Service;
use tower_http::cors::CorsLayer;
use tracing_subscriber::util::SubscriberInitExt;

#[derive(Clone)]
struct IdentityLayer;

impl<S> tower::Layer<S> for IdentityLayer
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type Service = Identity<S>;

    fn layer(&self, inner: S) -> Self::Service {
        Identity(inner)
    }
}

#[derive(Clone)]
struct Identity<S>(S);

impl<S> RpcServiceT for Identity<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type BatchResponse = S::BatchResponse;
    type NotificationResponse = S::NotificationResponse;

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        self.0.batch(batch)
    }

    fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        self.0.call(request)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.0.notification(n)
    }
}

#[derive(Default, Clone, Debug)]
struct Metrics {
    opened_ws_connections: Arc<AtomicUsize>,
    closed_ws_connections: Arc<AtomicUsize>,
    http_calls: Arc<AtomicUsize>,
    success_http_calls: Arc<AtomicUsize>,
}

fn auth_reject_error() -> ErrorObjectOwned {
    ErrorObject::owned(-32999, "HTTP Authorization header is missing", None::<()>)
}

#[derive(Clone)]
struct AuthorizationMiddleware<S> {
    headers: HeaderMap,
    inner: S,
    #[allow(unused)]
    transport_label: &'static str,
}

impl<S> AuthorizationMiddleware<S> {
    /// Authorize the request by checking the `Authorization` header.
    ///
    ///
    /// In this example for simplicity, the authorization value is not checked
    // and used because it's just a toy example.
    fn auth_method_call(&self, req: &Request<'_>) -> bool {
        if req.method_name() == "trusted_call" {
            let Some(Ok(_)) = self.headers.get(AUTHORIZATION).map(|auth| auth.to_str()) else { return false };
        }

        true
    }

    /// Authorize the notification by checking the `Authorization` header.
    ///
    /// Because notifications are not expected to return a response, we
    /// return a `MethodResponse` by injecting an error into the extensions
    /// which could be read by other middleware or the server.
    fn auth_notif(&self, notif: &Notification<'_>) -> bool {
        if notif.method_name() == "trusted_call" {
            let Some(Ok(_)) = self.headers.get(AUTHORIZATION).map(|auth| auth.to_str()) else { return false };
        }

        true
    }
}

impl<S> RpcServiceT for AuthorizationMiddleware<S>
where
    // We need to specify the concrete types here because otherwise we return an error or specific response
    // in the middleware implementation.
    S: RpcServiceT<MethodResponse = MethodResponse, BatchResponse = MethodResponse> + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type BatchResponse = S::BatchResponse;
    type NotificationResponse = S::NotificationResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        let this = self.clone();
        let auth_ok = this.auth_method_call(&req);

        async move {
            // If the authorization header is missing, it's recommended to
            // to return the response as MethodResponse::error instead of
            // returning an error from the service.
            //
            // This way the error is returned as a JSON-RPC error
            if !auth_ok {
                return MethodResponse::error(req.id, auth_reject_error());
            }
            this.inner.call(req).await
        }
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        // Check the authorization header for each entry in the batch.
        let entries: Vec<_> = batch
            .into_iter()
            .filter_map(|entry| match entry {
                Ok(BatchEntry::Call(req)) => {
                    if self.auth_method_call(&req) {
                        Some(Ok(BatchEntry::Call(req)))
                    } else {
                        // If the authorization header is missing, we return
                        // a JSON-RPC error instead of an error from the service.
                        Some(Err(BatchEntryErr::new(req.id, auth_reject_error())))
                    }
                }
                Ok(BatchEntry::Notification(notif)) => {
                    if self.auth_notif(&notif) {
                        Some(Ok(BatchEntry::Notification(notif)))
                    } else {
                        // Just filter out the notification if the auth fails
                        // because notifications are not expected to return a response.
                        None
                    }
                }
                // Errors which could happen such as invalid JSON-RPC call
                // or invalid JSON are just passed through.
                Err(err) => Some(Err(err)),
            })
            .collect();

        self.inner.batch(Batch::from(entries))
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.inner.notification(n)
    }
}

#[rpc(server, client)]
pub trait Rpc {
    #[method(name = "trusted_call")]
    async fn trusted_call(&self) -> Result<String, ErrorObjectOwned>;
}

#[async_trait]
impl RpcServer for () {
    async fn trusted_call(&self) -> Result<String, ErrorObjectOwned> {
        Ok("mysecret".to_string())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    let metrics = Metrics::default();

    let handle = run_server(metrics.clone()).await?;
    tokio::spawn(handle.stopped());

    {
        let client = HttpClient::builder().build("http://127.0.0.1:9944").unwrap();

        // Fails because the authorization header is missing.
        let x = client.trusted_call().await.unwrap_err();
        tracing::info!("response: {x}");
    }

    {
        let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await.unwrap();

        // Fails because the authorization header is missing.
        let x = client.trusted_call().await.unwrap_err();
        tracing::info!("response: {x}");
    }

    {
        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, HeaderValue::from_static("don't care in this example"));

        let client = HttpClient::builder().set_headers(headers).build("http://127.0.0.1:9944").unwrap();

        let x = client.trusted_call().await.unwrap();
        tracing::info!("response: {x}");
    }

    tracing::info!("{:?}", metrics);

    Ok(())
}

async fn run_server(metrics: Metrics) -> anyhow::Result<ServerHandle> {
    let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;

    // This state is cloned for every connection
    // all these types based on Arcs and it should
    // be relatively cheap to clone them.
    //
    // Make sure that nothing expensive is cloned here
    // when doing this or use an `Arc`.
    #[derive(Clone)]
    struct PerConnection<RpcMiddleware, HttpMiddleware> {
        methods: Methods,
        stop_handle: StopHandle,
        metrics: Metrics,
        svc_builder: TowerServiceBuilder<RpcMiddleware, HttpMiddleware>,
    }

    // Each RPC call/connection get its own `stop_handle`
    // to able to determine whether the server has been stopped or not.
    //
    // To keep the server running the `server_handle`
    // must be kept and it can also be used to stop the server.
    let (stop_handle, server_handle) = stop_channel();

    let per_conn = PerConnection {
        methods: ().into_rpc().into(),
        stop_handle: stop_handle.clone(),
        metrics,
        svc_builder: jsonrpsee::server::Server::builder()
            .set_config(ServerConfig::builder().max_connections(33).build())
            .set_http_middleware(
                tower::ServiceBuilder::new()
                    .layer(CorsLayer::permissive())
                    .layer(ProxyGetRequestLayer::new(vec![("trusted_call", "foo")]).unwrap())
                    .layer(HostFilterLayer::new(["example.com"]).unwrap()),
            )
            .to_service_builder(),
    };

    tokio::spawn(async move {
        loop {
            // The `tokio::select!` macro is used to wait for either of the
            // listeners to accept a new connection or for the server to be
            // stopped.
            let sock = tokio::select! {
                res = listener.accept() => {
                    match res {
                        Ok((stream, _remote_addr)) => stream,
                        Err(e) => {
                            tracing::error!("failed to accept v4 connection: {:?}", e);
                            continue;
                        }
                    }
                }
                _ = per_conn.stop_handle.clone().shutdown() => break,
            };
            let per_conn2 = per_conn.clone();

            let svc = tower::service_fn(move |req: hyper::Request<hyper::body::Incoming>| {
                let is_websocket = jsonrpsee::server::ws::is_upgrade_request(&req);
                let transport_label = if is_websocket { "ws" } else { "http" };
                let PerConnection { methods, stop_handle, metrics, svc_builder } = per_conn2.clone();

                // NOTE, the rpc middleware must be initialized here to be able to created once per connection
|
||||||
|
// with data from the connection such as the headers in this example
|
||||||
|
let headers = req.headers().clone();
|
||||||
|
let rpc_middleware = RpcServiceBuilder::new()
|
||||||
|
.rpc_logger(1024)
|
||||||
|
.layer_fn(move |service| AuthorizationMiddleware {
|
||||||
|
inner: service,
|
||||||
|
headers: headers.clone(),
|
||||||
|
transport_label,
|
||||||
|
})
|
||||||
|
.option_layer(Some(IdentityLayer));
|
||||||
|
|
||||||
|
let mut svc = svc_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle);
|
||||||
|
|
||||||
|
if is_websocket {
|
||||||
|
// Utilize the session close future to know when the actual WebSocket
|
||||||
|
// session was closed.
|
||||||
|
let session_close = svc.on_session_closed();
|
||||||
|
|
||||||
|
// A little bit weird API but the response to HTTP request must be returned below
|
||||||
|
// and we spawn a task to register when the session is closed.
|
||||||
|
tokio::spawn(async move {
|
||||||
|
session_close.await;
|
||||||
|
tracing::info!("Closed WebSocket connection");
|
||||||
|
metrics.closed_ws_connections.fetch_add(1, Ordering::Relaxed);
|
||||||
|
});
|
||||||
|
|
||||||
|
async move {
|
||||||
|
tracing::info!("Opened WebSocket connection");
|
||||||
|
metrics.opened_ws_connections.fetch_add(1, Ordering::Relaxed);
|
||||||
|
svc.call(req).await
|
||||||
|
}
|
||||||
|
.boxed()
|
||||||
|
} else {
|
||||||
|
// HTTP.
|
||||||
|
async move {
|
||||||
|
tracing::info!("Opened HTTP connection");
|
||||||
|
metrics.http_calls.fetch_add(1, Ordering::Relaxed);
|
||||||
|
let rp = svc.call(req).await;
|
||||||
|
|
||||||
|
if rp.is_ok() {
|
||||||
|
metrics.success_http_calls.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::info!("Closed HTTP connection");
|
||||||
|
rp
|
||||||
|
}
|
||||||
|
.boxed()
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle.clone().shutdown()));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(server_handle)
|
||||||
|
}
|
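The `auth_method_call`, `auth_notif` and `auth_reject_error()` helpers used above are presumably defined earlier in the same example file, before this excerpt. As a hedged illustration only (an assumption, not the file's actual definition), such a rejection helper can be as small as:

```
use jsonrpsee::types::ErrorObjectOwned;

// Hypothetical sketch of the rejection error used above; the real definition
// lives earlier in the example file and may use a different code/message.
fn auth_reject_error() -> ErrorObjectOwned {
    ErrorObjectOwned::owned(-32999, "Authorization failed", None::<()>)
}
```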
@@ -0,0 +1,222 @@
// Copyright 2024 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! This example shows how to use the low-level server API
//! in jsonrpsee and inject a `mpsc::Sender<()>` into the
//! request extensions to be able to close the connection from
//! an RPC handler (method call or subscription).

use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use futures::FutureExt;
use jsonrpsee::core::middleware::RpcServiceBuilder;
use jsonrpsee::core::{SubscriptionResult, async_trait};
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{
    ConnectionGuard, ConnectionState, HttpRequest, ServerConfig, ServerHandle, StopHandle, http,
    serve_with_graceful_shutdown, stop_channel, ws,
};
use jsonrpsee::types::ErrorObjectOwned;
use jsonrpsee::ws_client::WsClientBuilder;
use jsonrpsee::{Extensions, Methods, PendingSubscriptionSink};
use tokio::net::TcpListener;
use tokio::sync::mpsc;
use tracing_subscriber::util::SubscriberInitExt;

#[rpc(server, client)]
pub trait Rpc {
    #[method(name = "closeConn", with_extensions)]
    async fn close_conn(&self) -> Result<(), ErrorObjectOwned>;

    #[subscription(name = "subscribeCloseConn", item = String, with_extensions)]
    async fn close_conn_from_sub(&self) -> SubscriptionResult;
}

#[async_trait]
impl RpcServer for () {
    async fn close_conn(&self, ext: &Extensions) -> Result<(), ErrorObjectOwned> {
        let tx = ext.get::<mpsc::Sender<()>>().unwrap();
        tx.send(()).await.unwrap();

        Ok(())
    }

    async fn close_conn_from_sub(&self, _pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
        let tx = ext.get::<mpsc::Sender<()>>().unwrap();
        tx.send(()).await.unwrap();

        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    let handle = run_server().await?;

    {
        let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
        let _ = client.close_conn().await;
        client.on_disconnect().await;
        eprintln!("Connection closed from RPC call");
    }

    {
        let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
        let _ = client.close_conn_from_sub().await;
        client.on_disconnect().await;
        eprintln!("Connection closed from RPC subscription");
    }

    let _ = handle.stop();
    handle.stopped().await;

    Ok(())
}

async fn run_server() -> anyhow::Result<ServerHandle> {
    // Construct our SocketAddr to listen on...
    let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;

    // Each RPC call/connection gets its own `stop_handle`
    // to be able to determine whether the server has been stopped or not.
    //
    // To keep the server running, the `server_handle`
    // must be kept, and it can also be used to stop the server.
    let (stop_handle, server_handle) = stop_channel();

    // This state is cloned for every connection;
    // all these types are based on Arcs and should
    // be relatively cheap to clone.
    //
    // Make sure that nothing expensive is cloned here,
    // or wrap it in an `Arc`.
    #[derive(Clone)]
    struct PerConnection {
        methods: Methods,
        stop_handle: StopHandle,
        conn_id: Arc<AtomicU32>,
        conn_guard: ConnectionGuard,
    }

    let per_conn = PerConnection {
        methods: ().into_rpc().into(),
        stop_handle: stop_handle.clone(),
        conn_id: Default::default(),
        conn_guard: ConnectionGuard::new(100),
    };

    tokio::spawn(async move {
        loop {
            // The `tokio::select!` macro is used to wait for either of the
            // listeners to accept a new connection or for the server to be
            // stopped.
            let (sock, _) = tokio::select! {
                res = listener.accept() => {
                    match res {
                        Ok(sock) => sock,
                        Err(e) => {
                            tracing::error!("failed to accept v4 connection: {:?}", e);
                            continue;
                        }
                    }
                }
                _ = per_conn.stop_handle.clone().shutdown() => break,
            };
            let per_conn = per_conn.clone();

            // Create a service handler.
            let stop_handle2 = per_conn.stop_handle.clone();
            let per_conn = per_conn.clone();
            let svc = tower::service_fn(move |mut req: HttpRequest<hyper::body::Incoming>| {
                let PerConnection { methods, stop_handle, conn_guard, conn_id } = per_conn.clone();
                let (tx, mut disconnect) = mpsc::channel::<()>(1);

                // Insert the `tx` into the request extensions to be able to close the connection
                // from method or subscription handlers.
                req.extensions_mut().insert(tx.clone());

                // jsonrpsee expects a `conn permit` for each connection.
                //
                // This may be omitted if you don't want to limit the number of connections
                // to the server.
                let Some(conn_permit) = conn_guard.try_acquire() else {
                    return async { Ok::<_, Infallible>(http::response::too_many_requests()) }.boxed();
                };

                let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);

                if ws::is_upgrade_request(&req) {
                    let rpc_service = RpcServiceBuilder::new();

                    // Establishes the websocket connection.
                    async move {
                        match ws::connect(req, ServerConfig::default(), methods, conn, rpc_service).await {
                            Ok((rp, conn_fut)) => {
                                tokio::spawn(async move {
                                    tokio::select! {
                                        _ = conn_fut => (),
                                        _ = disconnect.recv() => {
                                            eprintln!("Server closed connection");
                                        },
                                    }
                                });
                                Ok(rp)
                            }
                            Err(rp) => Ok(rp),
                        }
                    }
                    .boxed()
                } else if !ws::is_upgrade_request(&req) {
                    // There is another API for making a call with just a service as well.
                    //
                    // See [`jsonrpsee::server::http::call_with_service`]
                    async move {
                        tokio::select! {
                            // RPC call finished successfully.
                            res = http::call_with_service_builder(req, ServerConfig::default(), conn, methods, RpcServiceBuilder::new()) => Ok(res),
                            // The connection was closed by an RPC handler.
                            _ = disconnect.recv() => Ok(http::response::denied()),
                        }
                    }
                    .boxed()
                } else {
                    async { Ok(http::response::denied()) }.boxed()
                }
            });

            // Upgrade the connection to a HTTP service.
            tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle2.shutdown()));
        }
    });

    Ok(server_handle)
}
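The part doing the real work above is the `Extensions` lookup inside the handlers. Below is a minimal, self-contained sketch of just that mechanism, detached from the server plumbing. It assumes `jsonrpsee::Extensions` exposes the usual insert/get-by-type API (as the imports above suggest); it is an illustration, not part of the example file.

```
use jsonrpsee::Extensions;
use tokio::sync::mpsc;

fn main() {
    let (tx, mut rx) = mpsc::channel::<()>(1);

    // The server inserts the sender into the request extensions...
    let mut ext = Extensions::new();
    ext.insert(tx);

    // ...and a handler fetches it back by type to request a disconnect.
    let sender = ext.get::<mpsc::Sender<()>>().expect("sender inserted above");
    sender.try_send(()).unwrap();
    assert!(rx.try_recv().is_ok());
}
```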
@@ -0,0 +1,349 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! This example shows how to use the low-level server API
//! in jsonrpsee.
//!
//! This particular example disconnects peers that
//! make more than ten RPC calls and bans the IP addr.
//!
//! NOTE:
//!
//! Enabling tower middleware in this example doesn't work;
//! to do so, the low-level API in hyper must be used.
//!
//! See <https://docs.rs/hyper/latest/hyper/server/conn/index.html>
//! for further information regarding the "low-level API" in hyper.

use std::collections::HashSet;
use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Mutex};

use futures::FutureExt;
use jsonrpsee::core::async_trait;
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
use jsonrpsee::http_client::HttpClient;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{
    ConnectionGuard, ConnectionState, ServerConfig, ServerHandle, StopHandle, http, serve_with_graceful_shutdown,
    stop_channel, ws,
};
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned, Id, Request};
use jsonrpsee::ws_client::WsClientBuilder;
use jsonrpsee::{MethodResponse, Methods};
use tokio::net::TcpListener;
use tokio::sync::Mutex as AsyncMutex;
use tokio::sync::mpsc;
use tracing_subscriber::util::SubscriberInitExt;

/// This is just a counter to limit
/// the number of calls per connection.
/// Once the limit has been exceeded
/// all future calls are rejected.
#[derive(Clone)]
struct CallLimit<S> {
    service: S,
    count: Arc<AsyncMutex<usize>>,
    state: mpsc::Sender<()>,
}

impl<S> RpcServiceT for CallLimit<S>
where
    S: RpcServiceT<
            MethodResponse = MethodResponse,
            BatchResponse = MethodResponse,
            NotificationResponse = MethodResponse,
        > + Send
        + Sync
        + Clone
        + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        let count = self.count.clone();
        let state = self.state.clone();
        let service = self.service.clone();

        async move {
            let mut lock = count.lock().await;

            if *lock >= 10 {
                let _ = state.try_send(());
                MethodResponse::error(req.id, ErrorObject::borrowed(-32000, "RPC rate limit", None))
            } else {
                let rp = service.call(req).await;
                *lock += 1;
                rp
            }
        }
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        let count = self.count.clone();
        let state = self.state.clone();
        let service = self.service.clone();

        async move {
            let mut lock = count.lock().await;
            let batch_len = batch.len();

            if *lock >= 10 + batch_len {
                let _ = state.try_send(());
                MethodResponse::error(Id::Null, ErrorObject::borrowed(-32000, "RPC rate limit", None))
            } else {
                let rp = service.batch(batch).await;
                *lock += batch_len;
                rp
            }
        }
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        let count = self.count.clone();
        let service = self.service.clone();

        // A notification is not expected to return a response, so the result here doesn't matter
        // other than that later middleware layers may not be invoked once the limit is hit.
        async move { if *count.lock().await >= 10 { MethodResponse::notification() } else { service.notification(n).await } }
    }
}

#[rpc(server, client)]
pub trait Rpc {
    #[method(name = "say_hello")]
    async fn say_hello(&self) -> Result<String, ErrorObjectOwned>;
}

#[async_trait]
impl RpcServer for () {
    async fn say_hello(&self) -> Result<String, ErrorObjectOwned> {
        Ok("lo".to_string())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()?;
    tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

    // Make a bunch of WebSocket calls to be blacklisted by the server.
    {
        let mut i = 0;
        let handle = run_server().await?;

        let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await.unwrap();
        while client.is_connected() {
            let rp: Result<String, _> = client.say_hello().await;
            if rp.is_ok() {
                i += 1;
            }
        }

        // After the server has blacklisted the IP address, the connection is denied.
        assert!(WsClientBuilder::default().build("ws://127.0.0.1:9944").await.is_err());
        tracing::info!("WS client made {i} successful calls before getting blacklisted");

        handle.stop().unwrap();
        handle.stopped().await;
    }

    // Make a bunch of HTTP calls to be blacklisted by the server.
    {
        let mut i = 0;
        let handle = run_server().await?;

        let client = HttpClient::builder().build("http://127.0.0.1:9944").unwrap();
        while client.say_hello().await.is_ok() {
            i += 1;
        }
        tracing::info!("HTTP client made {i} successful calls before getting blacklisted");

        handle.stop().unwrap();
        handle.stopped().await;
    }

    Ok(())
}

async fn run_server() -> anyhow::Result<ServerHandle> {
    // Construct our SocketAddr to listen on...
    let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 9944))).await?;

    // Each RPC call/connection gets its own `stop_handle`
    // to be able to determine whether the server has been stopped or not.
    //
    // To keep the server running, the `server_handle`
    // must be kept, and it can also be used to stop the server.
    let (stop_handle, server_handle) = stop_channel();

    // This state is cloned for every connection;
    // all these types are based on Arcs and should
    // be relatively cheap to clone.
    //
    // Make sure that nothing expensive is cloned here,
    // or wrap it in an `Arc`.
    #[derive(Clone)]
    struct PerConnection {
        methods: Methods,
        stop_handle: StopHandle,
        conn_id: Arc<AtomicU32>,
        conn_guard: ConnectionGuard,
        blacklisted_peers: Arc<Mutex<HashSet<IpAddr>>>,
        // HTTP rate limit that is shared by all connections.
        //
        // This is just a toy example; one should not "limit" HTTP connections
        // like this, because the actual IP addr of each request is not checked,
        // so it's possible to blacklist a peer which has only made one or
        // a few calls.
        global_http_rate_limit: Arc<AsyncMutex<usize>>,
    }

    let per_conn = PerConnection {
        methods: ().into_rpc().into(),
        stop_handle: stop_handle.clone(),
        conn_id: Default::default(),
        conn_guard: ConnectionGuard::new(100),
        blacklisted_peers: Default::default(),
        global_http_rate_limit: Default::default(),
    };

    tokio::spawn(async move {
        loop {
            // The `tokio::select!` macro is used to wait for either of the
            // listeners to accept a new connection or for the server to be
            // stopped.
            let (sock, remote_addr) = tokio::select! {
                res = listener.accept() => {
                    match res {
                        Ok(sock) => sock,
                        Err(e) => {
                            tracing::error!("failed to accept v4 connection: {:?}", e);
                            continue;
                        }
                    }
                }
                _ = per_conn.stop_handle.clone().shutdown() => break,
            };
            let per_conn = per_conn.clone();

            // Create a service handler.
            let stop_handle2 = per_conn.stop_handle.clone();
            let per_conn = per_conn.clone();
            let svc = tower::service_fn(move |req| {
                let PerConnection {
                    methods,
                    stop_handle,
                    conn_guard,
                    conn_id,
                    blacklisted_peers,
                    global_http_rate_limit,
                } = per_conn.clone();

                // jsonrpsee expects a `conn permit` for each connection.
                //
                // This may be omitted if you don't want to limit the number of connections
                // to the server.
                let Some(conn_permit) = conn_guard.try_acquire() else {
                    return async { Ok::<_, Infallible>(http::response::too_many_requests()) }.boxed();
                };

                // The IP addr was blacklisted.
                if blacklisted_peers.lock().unwrap().get(&remote_addr.ip()).is_some() {
                    return async { Ok(http::response::denied()) }.boxed();
                }

                if ws::is_upgrade_request(&req) {
                    let (tx, mut disconnect) = mpsc::channel(1);
                    let rpc_service = RpcServiceBuilder::new().layer_fn(move |service| CallLimit {
                        service,
                        count: Default::default(),
                        state: tx.clone(),
                    });

                    let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);

                    // Establishes the websocket connection,
                    // and if the `CallLimit` middleware triggers the hard limit
                    // then the connection is closed, i.e., the `conn_fut` is dropped.
                    async move {
                        match ws::connect(req, ServerConfig::default(), methods, conn, rpc_service).await {
                            Ok((rp, conn_fut)) => {
                                tokio::spawn(async move {
                                    tokio::select! {
                                        _ = conn_fut => (),
                                        _ = disconnect.recv() => {
                                            blacklisted_peers.lock().unwrap().insert(remote_addr.ip());
                                        },
                                    }
                                });
                                Ok(rp)
                            }
                            Err(rp) => Ok(rp),
                        }
                    }
                    .boxed()
                } else if !ws::is_upgrade_request(&req) {
                    let (tx, mut disconnect) = mpsc::channel(1);

                    let rpc_service = RpcServiceBuilder::new().layer_fn(move |service| CallLimit {
                        service,
                        count: global_http_rate_limit.clone(),
                        state: tx.clone(),
                    });

                    let server_cfg = ServerConfig::default();
                    let conn = ConnectionState::new(stop_handle, conn_id.fetch_add(1, Ordering::Relaxed), conn_permit);

                    // There is another API for making a call with just a service as well.
                    //
                    // See [`jsonrpsee::server::http::call_with_service`]
                    async move {
                        tokio::select! {
                            // RPC call finished successfully.
                            res = http::call_with_service_builder(req, server_cfg, conn, methods, rpc_service) => Ok(res),
                            // Deny the call if the call limit is exceeded.
                            _ = disconnect.recv() => Ok(http::response::denied()),
                        }
                    }
                    .boxed()
                } else {
                    async { Ok(http::response::denied()) }.boxed()
                }
            });

            // Upgrade the connection to a HTTP service.
            tokio::spawn(serve_with_graceful_shutdown(sock, svc, stop_handle2.shutdown()));
        }
    });

    Ok(server_handle)
}
reference_jsonrpsee_crate_examples/proc_macro.rs (123 lines, normal file)
@@ -0,0 +1,123 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::{SubscriptionResult, async_trait, client::Subscription};
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{PendingSubscriptionSink, Server};
use jsonrpsee::types::ErrorObjectOwned;
use jsonrpsee::ws_client::WsClientBuilder;

type ExampleHash = [u8; 32];
type ExampleStorageKey = Vec<u8>;

#[rpc(server, client, namespace = "state")]
pub trait Rpc<Hash, StorageKey>
where
    Hash: std::fmt::Debug,
{
    /// Async method call example.
    #[method(name = "getKeys")]
    async fn storage_keys(
        &self,
        storage_key: StorageKey,
        hash: Option<Hash>,
    ) -> Result<Vec<StorageKey>, ErrorObjectOwned>;

    /// Subscription that takes a `StorageKey` as input and produces a `Vec<Hash>`.
    #[subscription(name = "subscribeStorage" => "override", item = Vec<Hash>)]
    async fn subscribe_storage(&self, keys: Option<Vec<StorageKey>>) -> SubscriptionResult;

    #[subscription(name = "subscribeSync" => "sync", item = Vec<Hash>)]
    fn s(&self, keys: Option<Vec<StorageKey>>);
}

pub struct RpcServerImpl;

#[async_trait]
impl RpcServer<ExampleHash, ExampleStorageKey> for RpcServerImpl {
    async fn storage_keys(
        &self,
        storage_key: ExampleStorageKey,
        _hash: Option<ExampleHash>,
    ) -> Result<Vec<ExampleStorageKey>, ErrorObjectOwned> {
        Ok(vec![storage_key])
    }

    async fn subscribe_storage(
        &self,
        pending: PendingSubscriptionSink,
        _keys: Option<Vec<ExampleStorageKey>>,
    ) -> SubscriptionResult {
        let sink = pending.accept().await?;
        let json = serde_json::value::to_raw_value(&vec![[0; 32]])?;
        sink.send(json).await?;

        Ok(())
    }

    fn s(&self, pending: PendingSubscriptionSink, _keys: Option<Vec<ExampleStorageKey>>) {
        tokio::spawn(async move {
            let sink = pending.accept().await.unwrap();
            let json = serde_json::value::to_raw_value(&vec![[0; 32]]).unwrap();
            sink.send(json).await.unwrap();
        });
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let server_addr = run_server().await?;
    let url = format!("ws://{}", server_addr);

    let client = WsClientBuilder::default().build(&url).await?;
    assert_eq!(client.storage_keys(vec![1, 2, 3, 4], None::<ExampleHash>).await.unwrap(), vec![vec![1, 2, 3, 4]]);

    let mut sub: Subscription<Vec<ExampleHash>> =
        RpcClient::<ExampleHash, ExampleStorageKey>::subscribe_storage(&client, None).await.unwrap();
    assert_eq!(Some(vec![[0; 32]]), sub.next().await.transpose().unwrap());

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0").await?;

    let addr = server.local_addr()?;
    let handle = server.start(RpcServerImpl.into_rpc());

    // In this example we don't care about doing shutdown, so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
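Because the trait is declared with `namespace = "state"`, the wire-level method name is prefixed, e.g. `state_getKeys`. Below is a hedged sketch (not part of the file) of calling the same method through jsonrpsee's untyped client API, assuming the example server above is already running and reachable at `url`:

```
use jsonrpsee::core::client::ClientT;
use jsonrpsee::rpc_params;
use jsonrpsee::ws_client::WsClientBuilder;

// `url` is assumed to point at the server started by the example above.
async fn raw_get_keys(url: &str) -> anyhow::Result<()> {
    let client = WsClientBuilder::default().build(url).await?;
    // The proc macro registers the method under the namespaced name "state_getKeys".
    let keys: Vec<Vec<u8>> = client
        .request("state_getKeys", rpc_params![vec![1u8, 2, 3, 4], Option::<[u8; 32]>::None])
        .await?;
    assert_eq!(keys, vec![vec![1u8, 2, 3, 4]]);
    Ok(())
}
```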
reference_jsonrpsee_crate_examples/proc_macro_bounds.rs (95 lines, normal file)
@@ -0,0 +1,95 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::async_trait;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::Server;
use jsonrpsee::types::ErrorObjectOwned;
use jsonrpsee::ws_client::WsClientBuilder;

type ExampleHash = [u8; 32];

pub trait Config {
    type Hash: Send + Sync + 'static;
}

impl Config for ExampleHash {
    type Hash = Self;
}

/// The RPC macro requires `DeserializeOwned` for output types for the client implementation, while the
/// server implementation requires output types to be bounded by `Serialize`.
///
/// In this example, we don't want the `Conf` to be bounded by default to
/// `Conf: Send + Sync + 'static + jsonrpsee::core::DeserializeOwned` for the client implementation and
/// `Conf: Send + Sync + 'static + jsonrpsee::core::Serialize` for the server implementation.
///
/// Instead, explicitly specify client and server bounds to handle the `Serialize` and `DeserializeOwned` cases
/// just for the `Conf::hash` part.
#[rpc(server, client, namespace = "foo", client_bounds(T::Hash: jsonrpsee::core::DeserializeOwned), server_bounds(T::Hash: jsonrpsee::core::Serialize + Clone))]
pub trait Rpc<T: Config> {
    #[method(name = "bar")]
    fn method(&self) -> Result<T::Hash, ErrorObjectOwned>;
}

pub struct RpcServerImpl;

#[async_trait]
impl RpcServer<ExampleHash> for RpcServerImpl {
    fn method(&self) -> Result<<ExampleHash as Config>::Hash, ErrorObjectOwned> {
        Ok([0u8; 32])
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let server_addr = run_server().await?;
    let url = format!("ws://{}", server_addr);

    let client = WsClientBuilder::default().build(&url).await?;
    assert_eq!(RpcClient::<ExampleHash>::method(&client).await.unwrap(), [0u8; 32]);

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0").await?;

    let addr = server.local_addr()?;
    let handle = server.start(RpcServerImpl.into_rpc());

    // In this example we don't care about doing shutdown, so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
@@ -0,0 +1,84 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::Server;
use jsonrpsee::ws_client::WsClientBuilder;
use jsonrpsee::{ResponsePayload, rpc_params};

#[rpc(client, server, namespace = "state")]
pub trait Rpc {
    /// Async method call example.
    #[method(name = "getKeys")]
    fn storage_keys(&self) -> ResponsePayload<'static, String>;
}

pub struct RpcServerImpl;

impl RpcServer for RpcServerImpl {
    fn storage_keys(&self) -> ResponsePayload<'static, String> {
        let (rp, rp_future) = ResponsePayload::success("ehheeheh".to_string()).notify_on_completion();

        tokio::spawn(async move {
            rp_future.await.unwrap();
            println!("Method response to `state_getKeys` finished");
        });

        rp
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let server_addr = run_server().await?;
    let url = format!("ws://{}", server_addr);

    let client = WsClientBuilder::default().build(&url).await?;
    assert_eq!("ehheeheh".to_string(), client.request::<String, _>("state_getKeys", rpc_params![]).await.unwrap());

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let server = Server::builder().build("127.0.0.1:0").await?;

    let addr = server.local_addr()?;
    let handle = server.start(RpcServerImpl.into_rpc());

    // In this example we don't care about doing shutdown, so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
reference_jsonrpsee_crate_examples/rpc_middleware.rs (265 lines, normal file)
@@ -0,0 +1,265 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! jsonrpsee supports two kinds of middlewares: `http_middleware` and `rpc_middleware`.
//!
//! This example demonstrates how to use the `rpc_middleware`, which applies to each
//! JSON-RPC method call; batch requests may invoke the middleware more than once.
//!
//! A typical use-case for this is to implement rate-limiting based on the actual
//! number of JSON-RPC method calls, and a request could potentially be made
//! over HTTP or WebSocket, which this middleware is agnostic to.
//!
//! By contrast, the HTTP middleware only applies per HTTP request and
//! may be handy in some scenarios such as CORS, but if you want access
//! to the actual JSON-RPC details this is the middleware to use.
//!
//! This example enables the same middleware for both the server and the client, which
//! can be confusing when running it, but it is just to demonstrate the API:
//! the middleware is applied to the server and the client in the same way.

use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};
use jsonrpsee::types::Request;
use jsonrpsee::ws_client::WsClientBuilder;

#[derive(Clone)]
struct IdentityLayer;

impl<S> tower::Layer<S> for IdentityLayer
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type Service = Identity<S>;

    fn layer(&self, inner: S) -> Self::Service {
        Identity(inner)
    }
}

#[derive(Clone)]
struct Identity<S>(S);

impl<S> RpcServiceT for Identity<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        self.0.batch(batch)
    }

    fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        self.0.call(request)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.0.notification(n)
    }
}

// It's possible to access the connection ID
// by using the low-level API.
#[derive(Clone)]
pub struct CallsPerConn<S> {
    service: S,
    count: Arc<AtomicUsize>,
    role: &'static str,
}

impl<S> RpcServiceT for CallsPerConn<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        let count = self.count.clone();
        let service = self.service.clone();
        let role = self.role;

        async move {
            let rp = service.call(req).await;
            count.fetch_add(1, Ordering::SeqCst);
            println!("{role} processed calls={} on the connection", count.load(Ordering::SeqCst));
            rp
        }
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        let len = batch.len();
        self.count.fetch_add(len, Ordering::SeqCst);
        println!("{} processed calls={} on the connection", self.role, self.count.load(Ordering::SeqCst));
        self.service.batch(batch)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.service.notification(n)
    }
}

#[derive(Clone)]
pub struct GlobalCalls<S> {
    service: S,
    count: Arc<AtomicUsize>,
    role: &'static str,
}

impl<S> RpcServiceT for GlobalCalls<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        let count = self.count.clone();
        let service = self.service.clone();
        let role = self.role;

        async move {
            let rp = service.call(req).await;
            count.fetch_add(1, Ordering::SeqCst);
            println!("{role} processed calls={} in total", count.load(Ordering::SeqCst));

            rp
        }
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        let len = batch.len();
        self.count.fetch_add(len, Ordering::SeqCst);
        println!("{}, processed calls={} in total", self.role, self.count.load(Ordering::SeqCst));
        self.service.batch(batch)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.service.notification(n)
    }
}

#[derive(Clone)]
pub struct Logger<S> {
    service: S,
    role: &'static str,
}

impl<S> RpcServiceT for Logger<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        println!("{} logger middleware: method `{}`", self.role, req.method);
        self.service.call(req)
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        println!("{} logger middleware: batch {batch}", self.role);
        self.service.batch(batch)
    }

    fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.service.notification(n)
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    tracing_subscriber::FmtSubscriber::builder()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init()
        .expect("setting default subscriber failed");

    let addr = run_server().await?;
    let url = format!("ws://{}", addr);

    for _ in 0..2 {
        let global_cnt = Arc::new(AtomicUsize::new(0));
        let rpc_middleware = RpcServiceBuilder::new()
            .layer_fn(|service| Logger { service, role: "client" })
            // This state is created per connection.
            .layer_fn(|service| CallsPerConn { service, count: Default::default(), role: "client" })
            // This state is shared by all connections.
            .layer_fn(move |service| GlobalCalls { service, count: global_cnt.clone(), role: "client" });
        let client = WsClientBuilder::new().set_rpc_middleware(rpc_middleware).build(&url).await?;
        let response: String = client.request("say_hello", rpc_params![]).await?;
        println!("response: {:?}", response);
        let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
        let _: String = client.request("say_hello", rpc_params![]).await?;
        let _: String = client.request("thready", rpc_params![4]).await?;

        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }

    Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
    let global_cnt = Arc::new(AtomicUsize::new(0));

    let rpc_middleware = RpcServiceBuilder::new()
        .layer_fn(|service| Logger { service, role: "server" })
        // This state is created per connection.
        .layer_fn(|service| CallsPerConn { service, count: Default::default(), role: "server" })
        // This state is shared by all connections.
        .layer_fn(move |service| GlobalCalls { service, count: global_cnt.clone(), role: "server" })
        // Optional layer that does nothing; just an example that is useful if one has an optional layer.
        .option_layer(Some(IdentityLayer));
    let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
    let mut module = RpcModule::new(());
    module.register_method("say_hello", |_, _, _| "lo")?;
    module.register_method("thready", |params, _, _| {
        let thread_count: usize = params.one().unwrap();
        for _ in 0..thread_count {
            std::thread::spawn(|| std::thread::sleep(std::time::Duration::from_secs(1)));
        }
        ""
    })?;
    let addr = server.local_addr()?;
    let handle = server.start(module);

    // In this example we don't care about doing shutdown, so let it run forever.
    // You may use the `ServerHandle` to shut it down or manage it yourself.
    tokio::spawn(handle.stopped());

    Ok(addr)
}
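The example's `main` only issues single calls, so the `batch` branch of the middleware never fires. Below is a hedged sketch (not part of the file) of driving it with jsonrpsee's batch client API, assuming the server above is running and reachable at `url`:

```
use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::params::BatchRequestBuilder;
use jsonrpsee::rpc_params;
use jsonrpsee::ws_client::WsClientBuilder;

async fn send_batch(url: &str) -> anyhow::Result<()> {
    let client = WsClientBuilder::default().build(url).await?;

    let mut batch = BatchRequestBuilder::new();
    batch.insert("say_hello", rpc_params![])?;
    batch.insert("say_hello", rpc_params![])?;

    // Both `CallsPerConn` and `GlobalCalls` count the whole batch in one go.
    let responses = client.batch_request::<String>(batch).await?;
    for rp in responses {
        println!("batch item: {:?}", rp);
    }
    Ok(())
}
```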
reference_jsonrpsee_crate_examples/rpc_middleware_client.rs (173 lines, normal file)
@@ -0,0 +1,173 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! jsonrpsee supports two kinds of middlewares: `http_middleware` and `rpc_middleware`.
//!
//! This example demonstrates how to use the `rpc_middleware`, which applies to
//! JSON-RPC method calls, notifications and batch requests.
//!
//! It shows how to use the `rpc_middleware` for the client,
//! and you may benefit from specifying the response type as `core::client::MethodResponse`
//! to actually inspect the response instead of using the serialized JSON-RPC response.

use std::net::SocketAddr;
use std::sync::{Arc, Mutex};

use jsonrpsee::core::client::{ClientT, MiddlewareMethodResponse, error::Error};
use jsonrpsee::core::middleware::{Batch, Notification, RpcServiceBuilder, RpcServiceT};
use jsonrpsee::rpc_params;
use jsonrpsee::server::{RpcModule, Server};
use jsonrpsee::types::{ErrorCode, ErrorObject, Request};
use jsonrpsee::ws_client::WsClientBuilder;

#[derive(Default)]
struct InnerMetrics {
    method_calls_success: usize,
    method_calls_failure: usize,
    notifications: usize,
    batch_calls: usize,
}

#[derive(Clone)]
pub struct Metrics<S> {
    service: S,
    metrics: Arc<Mutex<InnerMetrics>>,
}

impl std::fmt::Debug for InnerMetrics {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InnerMetrics")
            .field("method_calls_success", &self.method_calls_success)
            .field("method_calls_failure", &self.method_calls_failure)
            .field("notifications", &self.notifications)
            .field("batch_calls", &self.batch_calls)
            .finish()
    }
}

impl<S> Metrics<S> {
    pub fn new(service: S) -> Self {
        Self { service, metrics: Arc::new(Mutex::new(InnerMetrics::default())) }
    }
}

// NOTE: We are using MethodResponse as the response type here to be able to inspect the response
// and not just the serialized JSON-RPC response. This is not necessary if you only care about
// the serialized JSON-RPC response.
impl<S> RpcServiceT for Metrics<S>
where
    S: RpcServiceT<MethodResponse = Result<MiddlewareMethodResponse, Error>> + Send + Sync + Clone + 'static,
{
    type MethodResponse = Result<MiddlewareMethodResponse, Error>;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        let m = self.metrics.clone();
        let service = self.service.clone();

        async move {
            let rp = service.call(req).await;

            // Access to the inner response via the deref implementation.
            match &rp {
                Ok(rp) => {
                    if rp.is_success() {
                        m.lock().unwrap().method_calls_success += 1;
                    } else {
                        m.lock().unwrap().method_calls_failure += 1;
                    }
                }
                Err(e) => {
                    m.lock().unwrap().method_calls_failure += 1;
                    tracing::error!("Error: {:?}", e);
                }
            }

            rp
        }
    }

    fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        self.metrics.lock().unwrap().batch_calls += 1;
|
||||||
|
self.service.batch(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||||
|
self.metrics.lock().unwrap().notifications += 1;
|
||||||
|
self.service.notification(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", addr);
|
||||||
|
|
||||||
|
let metrics = Arc::new(Mutex::new(InnerMetrics::default()));
|
||||||
|
|
||||||
|
for _ in 0..2 {
|
||||||
|
let metrics = metrics.clone();
|
||||||
|
let rpc_middleware =
|
||||||
|
RpcServiceBuilder::new().layer_fn(move |s| Metrics { service: s, metrics: metrics.clone() });
|
||||||
|
let client = WsClientBuilder::new().set_rpc_middleware(rpc_middleware).build(&url).await?;
|
||||||
|
let _: Result<String, _> = client.request("say_hello", rpc_params![]).await;
|
||||||
|
let _: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||||
|
let _: Result<String, _> = client.request("thready", rpc_params![4]).await;
|
||||||
|
let _: Result<String, _> = client.request("mul", rpc_params![4]).await;
|
||||||
|
let _: Result<String, _> = client.request("err", rpc_params![4]).await;
|
||||||
|
|
||||||
|
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("Metrics: {:?}", metrics.lock().unwrap());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
let server = Server::builder().build("127.0.0.1:0").await?;
|
||||||
|
let mut module = RpcModule::new(());
|
||||||
|
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||||
|
module.register_method("mul", |params, _, _| {
|
||||||
|
let count: usize = params.one().unwrap();
|
||||||
|
count * 2
|
||||||
|
})?;
|
||||||
|
module.register_method("error", |_, _, _| ErrorObject::from(ErrorCode::InternalError))?;
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
let handle = server.start(module);
|
||||||
|
|
||||||
|
// In this example we don't care about doing shutdown so let's it run forever.
|
||||||
|
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
@@ -0,0 +1,139 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
use jsonrpsee::core::client::ClientT;
|
||||||
|
use jsonrpsee::core::middleware::{Batch, BatchEntry, Notification, RpcServiceBuilder, RpcServiceT};
|
||||||
|
use jsonrpsee::server::Server;
|
||||||
|
use jsonrpsee::types::Request;
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use jsonrpsee::{RpcModule, rpc_params};
|
||||||
|
use std::borrow::Cow as StdCow;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
fn modify_method_call(req: &mut Request<'_>) {
|
||||||
|
// Example how to modify the params in the call.
|
||||||
|
if req.method == "say_hello" {
|
||||||
|
// It's a bit awkward to create new params in the request
|
||||||
|
// but this shows how to do it.
|
||||||
|
let raw_value = serde_json::value::to_raw_value("myparams").unwrap();
|
||||||
|
req.params = Some(StdCow::Owned(raw_value));
|
||||||
|
}
|
||||||
|
// Re-direct all calls that isn't `say_hello` to `say_goodbye`
|
||||||
|
else if req.method != "say_hello" {
|
||||||
|
req.method = "say_goodbye".into();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn modify_notif(n: &mut Notification<'_>) {
|
||||||
|
// Example how to modify the params in the notification.
|
||||||
|
if n.method == "say_hello" {
|
||||||
|
// It's a bit awkward to create new params in the request
|
||||||
|
// but this shows how to do it.
|
||||||
|
let raw_value = serde_json::value::to_raw_value("myparams").unwrap();
|
||||||
|
n.params = Some(StdCow::Owned(raw_value));
|
||||||
|
}
|
||||||
|
// Re-direct all notifs that isn't `say_hello` to `say_goodbye`
|
||||||
|
else if n.method != "say_hello" {
|
||||||
|
n.method = "say_goodbye".into();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ModifyRequestIf<S>(S);
|
||||||
|
|
||||||
|
impl<S> RpcServiceT for ModifyRequestIf<S>
|
||||||
|
where
|
||||||
|
S: RpcServiceT + Send + Sync + Clone + 'static,
|
||||||
|
{
|
||||||
|
type MethodResponse = S::MethodResponse;
|
||||||
|
type NotificationResponse = S::NotificationResponse;
|
||||||
|
type BatchResponse = S::BatchResponse;
|
||||||
|
|
||||||
|
fn call<'a>(&self, mut req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||||
|
modify_method_call(&mut req);
|
||||||
|
self.0.call(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn batch<'a>(&self, mut batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||||
|
for call in batch.iter_mut() {
|
||||||
|
match call {
|
||||||
|
Ok(BatchEntry::Call(call)) => {
|
||||||
|
modify_method_call(call);
|
||||||
|
}
|
||||||
|
Ok(BatchEntry::Notification(n)) => {
|
||||||
|
modify_notif(n);
|
||||||
|
}
|
||||||
|
// Invalid request, we don't care about it.
|
||||||
|
Err(_err) => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.0.batch(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn notification<'a>(
|
||||||
|
&self,
|
||||||
|
mut n: Notification<'a>,
|
||||||
|
) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||||
|
modify_notif(&mut n);
|
||||||
|
self.0.notification(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", addr);
|
||||||
|
|
||||||
|
let client = WsClientBuilder::default().build(&url).await?;
|
||||||
|
let _response: String = client.request("say_hello", rpc_params![]).await?;
|
||||||
|
let _response: Result<String, _> = client.request("unknown_method", rpc_params![]).await;
|
||||||
|
let _: String = client.request("say_hello", rpc_params![]).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
let rpc_middleware = RpcServiceBuilder::new().layer_fn(ModifyRequestIf);
|
||||||
|
let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||||
|
let mut module = RpcModule::new(());
|
||||||
|
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||||
|
module.register_method("say_goodbye", |_, _, _| "goodbye")?;
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
|
||||||
|
let handle = server.start(module);
|
||||||
|
|
||||||
|
// In this example we don't care about doing shutdown so let's it run forever.
|
||||||
|
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
@@ -0,0 +1,218 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
//! Example middleware to rate limit based on the number
|
||||||
|
//! JSON-RPC calls.
|
||||||
|
//!
|
||||||
|
//! As demonstrated in this example any state must be
|
||||||
|
//! stored in something to provide interior mutability
|
||||||
|
//! such as `Arc<Mutex>`
|
||||||
|
|
||||||
|
use jsonrpsee::core::client::ClientT;
|
||||||
|
use jsonrpsee::core::middleware::{
|
||||||
|
Batch, BatchEntry, BatchEntryErr, Notification, ResponseFuture, RpcServiceBuilder, RpcServiceT,
|
||||||
|
};
|
||||||
|
use jsonrpsee::server::Server;
|
||||||
|
use jsonrpsee::types::{ErrorObject, Request};
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use jsonrpsee::{MethodResponse, RpcModule, rpc_params};
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
struct Rate {
|
||||||
|
num: u64,
|
||||||
|
period: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
enum State {
|
||||||
|
Deny { until: Instant },
|
||||||
|
Allow { until: Instant, rem: u64 },
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Depending on how the rate limit is instantiated
|
||||||
|
/// it's possible to select whether the rate limit
|
||||||
|
/// is be applied per connection or shared by
|
||||||
|
/// all connections.
|
||||||
|
///
|
||||||
|
/// Have a look at `async fn run_server` below which
|
||||||
|
/// shows how do it.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct RateLimit<S> {
|
||||||
|
service: S,
|
||||||
|
state: Arc<Mutex<State>>,
|
||||||
|
rate: Rate,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> RateLimit<S> {
|
||||||
|
fn new(service: S, rate: Rate) -> Self {
|
||||||
|
let period = rate.period;
|
||||||
|
let num = rate.num;
|
||||||
|
|
||||||
|
Self {
|
||||||
|
service,
|
||||||
|
rate,
|
||||||
|
state: Arc::new(Mutex::new(State::Allow { until: Instant::now() + period, rem: num + 1 })),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rate_limit_deny(&self) -> bool {
|
||||||
|
let now = Instant::now();
|
||||||
|
let mut lock = self.state.lock().unwrap();
|
||||||
|
let next_state = match *lock {
|
||||||
|
State::Deny { until } => {
|
||||||
|
if now > until {
|
||||||
|
State::Allow { until: now + self.rate.period, rem: self.rate.num - 1 }
|
||||||
|
} else {
|
||||||
|
State::Deny { until }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
State::Allow { until, rem } => {
|
||||||
|
if now > until {
|
||||||
|
State::Allow { until: now + self.rate.period, rem: self.rate.num - 1 }
|
||||||
|
} else {
|
||||||
|
let n = rem - 1;
|
||||||
|
if n > 0 { State::Allow { until: now + self.rate.period, rem: n } } else { State::Deny { until } }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
*lock = next_state;
|
||||||
|
matches!(next_state, State::Deny { .. })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> RpcServiceT for RateLimit<S>
|
||||||
|
where
|
||||||
|
S: RpcServiceT<
|
||||||
|
MethodResponse = MethodResponse,
|
||||||
|
BatchResponse = MethodResponse,
|
||||||
|
NotificationResponse = MethodResponse,
|
||||||
|
> + Send
|
||||||
|
+ Sync
|
||||||
|
+ Clone
|
||||||
|
+ 'static,
|
||||||
|
{
|
||||||
|
type MethodResponse = S::MethodResponse;
|
||||||
|
type NotificationResponse = S::NotificationResponse;
|
||||||
|
type BatchResponse = S::BatchResponse;
|
||||||
|
|
||||||
|
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||||
|
if self.rate_limit_deny() {
|
||||||
|
ResponseFuture::ready(MethodResponse::error(req.id, ErrorObject::borrowed(-32000, "RPC rate limit", None)))
|
||||||
|
} else {
|
||||||
|
ResponseFuture::future(self.service.call(req))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn batch<'a>(&self, mut batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||||
|
// If the rate limit is reached then we modify each entry
|
||||||
|
// in the batch to be a request with an error.
|
||||||
|
//
|
||||||
|
// This makes sure that the client will receive an error
|
||||||
|
// for each request in the batch.
|
||||||
|
if self.rate_limit_deny() {
|
||||||
|
for entry in batch.iter_mut() {
|
||||||
|
let id = match entry {
|
||||||
|
Ok(BatchEntry::Call(req)) => req.id.clone(),
|
||||||
|
Ok(BatchEntry::Notification(_)) => continue,
|
||||||
|
Err(_) => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
// This will create a new error response for batch and replace the method call
|
||||||
|
*entry = Err(BatchEntryErr::new(id, ErrorObject::borrowed(-32000, "RPC rate limit", None)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.service.batch(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||||
|
if self.rate_limit_deny() {
|
||||||
|
// Notifications are not expected to return a response so just ignore
|
||||||
|
// if the rate limit is reached.
|
||||||
|
ResponseFuture::ready(MethodResponse::notification())
|
||||||
|
} else {
|
||||||
|
ResponseFuture::future(self.service.notification(n))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", addr);
|
||||||
|
|
||||||
|
let client1 = WsClientBuilder::default().build(&url).await?;
|
||||||
|
let _response: String = client1.request("say_hello", rpc_params![]).await?;
|
||||||
|
|
||||||
|
// The rate limit should trigger an error here.
|
||||||
|
let _response = client1.request::<String, _>("unknown_method", rpc_params![]).await.unwrap_err();
|
||||||
|
|
||||||
|
// Make a new connection and the server will allow it because our `RateLimit`
|
||||||
|
// applies per connection and not globally on the server.
|
||||||
|
let client2 = WsClientBuilder::default().build(&url).await?;
|
||||||
|
let _response: String = client2.request("say_hello", rpc_params![]).await?;
|
||||||
|
|
||||||
|
// The first connection should allow a call now again.
|
||||||
|
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||||
|
let _response: String = client1.request("say_hello", rpc_params![]).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
// This will create a new `RateLimit` per connection.
|
||||||
|
//
|
||||||
|
// In this particular example the server will only
|
||||||
|
// allow one RPC call per second.
|
||||||
|
//
|
||||||
|
// Have a look at the `rpc_middleware example` if you want see an example
|
||||||
|
// how to share state of the "middleware" for all connections on the server.
|
||||||
|
let rpc_middleware = RpcServiceBuilder::new()
|
||||||
|
.layer_fn(|service| RateLimit::new(service, Rate { num: 1, period: Duration::from_secs(1) }));
|
||||||
|
|
||||||
|
let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||||
|
let mut module = RpcModule::new(());
|
||||||
|
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||||
|
module.register_method("say_goodbye", |_, _, _| "goodbye")?;
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
|
||||||
|
let handle = server.start(module);
|
||||||
|
|
||||||
|
// In this example we don't care about doing shutdown so let's it run forever.
|
||||||
|
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
@@ -0,0 +1,156 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
use jsonrpsee::core::middleware::{Batch, Notification, Request, RpcServiceT};
|
||||||
|
use jsonrpsee::core::{SubscriptionResult, async_trait};
|
||||||
|
use jsonrpsee::proc_macros::rpc;
|
||||||
|
use jsonrpsee::server::PendingSubscriptionSink;
|
||||||
|
use jsonrpsee::types::{ErrorObject, ErrorObjectOwned};
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use jsonrpsee::{ConnectionId, Extensions};
|
||||||
|
|
||||||
|
#[rpc(server, client)]
|
||||||
|
pub trait Rpc {
|
||||||
|
/// method with connection ID.
|
||||||
|
#[method(name = "connectionIdMethod", with_extensions)]
|
||||||
|
async fn method(&self) -> Result<usize, ErrorObjectOwned>;
|
||||||
|
|
||||||
|
#[subscription(name = "subscribeConnectionId", item = usize, with_extensions)]
|
||||||
|
async fn sub(&self) -> SubscriptionResult;
|
||||||
|
|
||||||
|
#[subscription(name = "subscribeSyncConnectionId", item = usize, with_extensions)]
|
||||||
|
fn sub2(&self) -> SubscriptionResult;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct LoggingMiddleware<S>(S);
|
||||||
|
|
||||||
|
impl<S> RpcServiceT for LoggingMiddleware<S>
|
||||||
|
where
|
||||||
|
S: RpcServiceT,
|
||||||
|
{
|
||||||
|
type MethodResponse = S::MethodResponse;
|
||||||
|
type NotificationResponse = S::NotificationResponse;
|
||||||
|
type BatchResponse = S::BatchResponse;
|
||||||
|
|
||||||
|
fn call<'a>(&self, request: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
|
||||||
|
tracing::info!("Received request: {:?}", request);
|
||||||
|
assert!(request.extensions().get::<ConnectionId>().is_some());
|
||||||
|
|
||||||
|
self.0.call(request)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn batch<'a>(&self, batch: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
|
||||||
|
tracing::info!("Received batch: {:?}", batch);
|
||||||
|
self.0.batch(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn notification<'a>(&self, n: Notification<'a>) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
|
||||||
|
tracing::info!("Received notif: {:?}", n);
|
||||||
|
self.0.notification(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct RpcServerImpl;
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl RpcServer for RpcServerImpl {
|
||||||
|
async fn method(&self, ext: &Extensions) -> Result<usize, ErrorObjectOwned> {
|
||||||
|
let conn_id = ext
|
||||||
|
.get::<ConnectionId>()
|
||||||
|
.cloned()
|
||||||
|
.ok_or_else(|| ErrorObject::owned(0, "No connection details found", None::<()>))?;
|
||||||
|
|
||||||
|
Ok(conn_id.0)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sub(&self, pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
|
||||||
|
let sink = pending.accept().await?;
|
||||||
|
let conn_id = ext
|
||||||
|
.get::<ConnectionId>()
|
||||||
|
.cloned()
|
||||||
|
.ok_or_else(|| ErrorObject::owned(0, "No connection details found", None::<()>))?;
|
||||||
|
let json = serde_json::value::to_raw_value(&conn_id)
|
||||||
|
.map_err(|e| ErrorObject::owned(0, format!("Failed to serialize connection ID: {e}"), None::<()>))?;
|
||||||
|
sink.send(json).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sub2(&self, pending: PendingSubscriptionSink, ext: &Extensions) -> SubscriptionResult {
|
||||||
|
let conn_id = ext.get::<ConnectionId>().cloned().unwrap();
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let sink = pending.accept().await.unwrap();
|
||||||
|
let json = serde_json::value::to_raw_value(&conn_id).unwrap();
|
||||||
|
sink.send(json).await.unwrap();
|
||||||
|
});
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let server_addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", server_addr);
|
||||||
|
|
||||||
|
let client = WsClientBuilder::default().build(&url).await?;
|
||||||
|
let connection_id_first = client.method().await.unwrap();
|
||||||
|
|
||||||
|
// Second call from the same connection ID.
|
||||||
|
assert_eq!(client.method().await.unwrap(), connection_id_first);
|
||||||
|
|
||||||
|
// Second client will increment the connection ID.
|
||||||
|
let client2 = WsClientBuilder::default().build(&url).await?;
|
||||||
|
let connection_id_second = client2.method().await.unwrap();
|
||||||
|
assert_ne!(connection_id_first, connection_id_second);
|
||||||
|
|
||||||
|
let mut sub = client.sub().await.unwrap();
|
||||||
|
assert_eq!(connection_id_first, sub.next().await.transpose().unwrap().unwrap());
|
||||||
|
|
||||||
|
let mut sub = client2.sub().await.unwrap();
|
||||||
|
assert_eq!(connection_id_second, sub.next().await.transpose().unwrap().unwrap());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
let rpc_middleware = jsonrpsee::server::middleware::rpc::RpcServiceBuilder::new().layer_fn(LoggingMiddleware);
|
||||||
|
|
||||||
|
let server = jsonrpsee::server::Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
|
||||||
|
let handle = server.start(RpcServerImpl.into_rpc());
|
||||||
|
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
69
reference_jsonrpsee_crate_examples/tokio_console.rs
Normal file
@@ -0,0 +1,69 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Example of how to use `tokio-console` to debug async tasks in `jsonrpsee`.
//! For further information see https://docs.rs/console-subscriber.
//!
//! To run it:
//! `$ cargo install --locked tokio-console`
//! `$ RUSTFLAGS="--cfg tokio_unstable" cargo run --example tokio_console`
//! `$ tokio-console`
//!
//! It will start a server on http://127.0.0.1:6669 for `tokio-console` to connect to.

use std::net::SocketAddr;

use jsonrpsee::RpcModule;
use jsonrpsee::server::Server;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
	console_subscriber::init();

	let _ = run_server().await?;

	futures::future::pending().await
}

async fn run_server() -> anyhow::Result<SocketAddr> {
	let server = Server::builder().build("127.0.0.1:9944").await?;
	let mut module = RpcModule::new(());
	module.register_method("say_hello", |_, _, _| "lo")?;
	module.register_method("memory_call", |_, _, _| "A".repeat(1024 * 1024))?;
	module.register_async_method("sleep", |_, _, _| async {
		tokio::time::sleep(std::time::Duration::from_millis(100)).await;
		"lo"
	})?;

	let addr = server.local_addr()?;
	let handle = server.start(module);

	// In this example we don't care about stopping the server, so let it run forever.
	// You may use the `ServerHandle` to shut it down or manage it yourself.
	tokio::spawn(handle.stopped());

	Ok(addr)
}
67
reference_jsonrpsee_crate_examples/ws.rs
Normal file
@@ -0,0 +1,67 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use std::net::SocketAddr;

use jsonrpsee::core::client::ClientT;
use jsonrpsee::core::middleware::RpcServiceBuilder;
use jsonrpsee::server::Server;
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use jsonrpsee::{RpcModule, rpc_params};
use tracing_subscriber::util::SubscriberInitExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
	let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
		.add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?);

	tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;

	let addr = run_server().await?;
	let url = format!("ws://{}", addr);

	let client: WsClient = WsClientBuilder::new().build(&url).await?;
	let response: String = client.request("say_hello", rpc_params![]).await?;
	tracing::info!("response: {:?}", response);

	Ok(())
}

async fn run_server() -> anyhow::Result<SocketAddr> {
	let rpc_middleware = RpcServiceBuilder::new().rpc_logger(1024);
	let server = Server::builder().set_rpc_middleware(rpc_middleware).build("127.0.0.1:0").await?;
	let mut module = RpcModule::new(());
	module.register_method("say_hello", |_, _, _| "lo")?;
	let addr = server.local_addr()?;

	let handle = server.start(module);

	// In this example we don't care about doing shutdown, so let it run forever.
	// You may use the `ServerHandle` to shut it down or manage it yourself.
	tokio::spawn(handle.stopped());

	Ok(addr)
}
122
reference_jsonrpsee_crate_examples/ws_dual_stack.rs
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
use jsonrpsee::core::client::ClientT;
|
||||||
|
use jsonrpsee::server::{ServerHandle, serve_with_graceful_shutdown, stop_channel};
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use jsonrpsee::{RpcModule, rpc_params};
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tracing_subscriber::util::SubscriberInitExt;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let filter = tracing_subscriber::EnvFilter::try_from_default_env()?
|
||||||
|
.add_directive("jsonrpsee[method_call{name = \"say_hello\"}]=trace".parse()?)
|
||||||
|
.add_directive("jsonrpsee-client=trace".parse()?);
|
||||||
|
|
||||||
|
tracing_subscriber::FmtSubscriber::builder().with_env_filter(filter).finish().try_init()?;
|
||||||
|
|
||||||
|
let (_server_hdl, addrs) = run_server().await?;
|
||||||
|
let url_v4 = format!("ws://{}", addrs.v4);
|
||||||
|
let url_v6 = format!("ws://{}", addrs.v6);
|
||||||
|
|
||||||
|
let client_v4 = WsClientBuilder::default().build(&url_v4).await?;
|
||||||
|
let client_v6 = WsClientBuilder::default().build(&url_v6).await?;
|
||||||
|
|
||||||
|
let response_v4: String = client_v4.request("say_hello", rpc_params![]).await?;
|
||||||
|
let response_v6: String = client_v6.request("say_hello", rpc_params![]).await?;
|
||||||
|
|
||||||
|
tracing::info!("response V4: {:?}", response_v4);
|
||||||
|
tracing::info!("response V6: {:?}", response_v6);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<(ServerHandle, Addrs)> {
|
||||||
|
let port = 9944;
|
||||||
|
// V4 address
|
||||||
|
let v4_addr = SocketAddr::from(([127, 0, 0, 1], port));
|
||||||
|
// V6 address
|
||||||
|
let v6_addr = SocketAddr::new("::1".parse().unwrap(), port);
|
||||||
|
|
||||||
|
let mut module = RpcModule::new(());
|
||||||
|
module.register_method("say_hello", |_, _, _| "lo")?;
|
||||||
|
|
||||||
|
// Bind to both IPv4 and IPv6 addresses.
|
||||||
|
let listener_v4 = TcpListener::bind(&v4_addr).await?;
|
||||||
|
let listener_v6 = TcpListener::bind(&v6_addr).await?;
|
||||||
|
|
||||||
|
// Each RPC call/connection get its own `stop_handle`
|
||||||
|
// to able to determine whether the server has been stopped or not.
|
||||||
|
//
|
||||||
|
// To keep the server running the `server_handle`
|
||||||
|
// must be kept and it can also be used to stop the server.
|
||||||
|
let (stop_hdl, server_hdl) = stop_channel();
|
||||||
|
|
||||||
|
// Create and finalize a server configuration from a TowerServiceBuilder
|
||||||
|
// given an RpcModule and the stop handle.
|
||||||
|
let svc = jsonrpsee::server::Server::builder().to_service_builder().build(module, stop_hdl.clone());
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
// The `tokio::select!` macro is used to wait for either of the
|
||||||
|
// listeners to accept a new connection or for the server to be
|
||||||
|
// stopped.
|
||||||
|
let stream = tokio::select! {
|
||||||
|
res = listener_v4.accept() => {
|
||||||
|
match res {
|
||||||
|
Ok((stream, _remote_addr)) => stream,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("failed to accept v4 connection: {:?}", e);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res = listener_v6.accept() => {
|
||||||
|
match res {
|
||||||
|
Ok((stream, _remote_addr)) => stream,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("failed to accept v6 connection: {:?}", e);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = stop_hdl.clone().shutdown() => break,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Spawn a new task to serve each respective (Hyper) connection.
|
||||||
|
tokio::spawn(serve_with_graceful_shutdown(stream, svc.clone(), stop_hdl.clone().shutdown()));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok((server_hdl, Addrs { v4: v4_addr, v6: v6_addr }))
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Addrs {
|
||||||
|
v4: SocketAddr,
|
||||||
|
v6: SocketAddr,
|
||||||
|
}
|
149
reference_jsonrpsee_crate_examples/ws_pubsub_broadcast.rs
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
//! Example that shows how to broadcast to all active subscriptions using `tokio::sync::broadcast`.
|
||||||
|
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
use futures::StreamExt;
|
||||||
|
use futures::future::{self, Either};
|
||||||
|
use jsonrpsee::PendingSubscriptionSink;
|
||||||
|
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
|
||||||
|
use jsonrpsee::core::middleware::RpcServiceBuilder;
|
||||||
|
use jsonrpsee::rpc_params;
|
||||||
|
use jsonrpsee::server::{RpcModule, Server, ServerConfig};
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
use tokio_stream::wrappers::BroadcastStream;
|
||||||
|
|
||||||
|
const NUM_SUBSCRIPTION_RESPONSES: usize = 5;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", addr);
|
||||||
|
|
||||||
|
let client1 =
|
||||||
|
WsClientBuilder::default().set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024)).build(&url).await?;
|
||||||
|
let client2 =
|
||||||
|
WsClientBuilder::default().set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024)).build(&url).await?;
|
||||||
|
let sub1: Subscription<i32> = client1.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;
|
||||||
|
let sub2: Subscription<i32> = client2.subscribe("subscribe_hello", rpc_params![], "unsubscribe_hello").await?;
|
||||||
|
|
||||||
|
let fut1 = sub1.take(NUM_SUBSCRIPTION_RESPONSES).for_each(|r| async move { tracing::info!("sub1 rx: {:?}", r) });
|
||||||
|
let fut2 = sub2.take(NUM_SUBSCRIPTION_RESPONSES).for_each(|r| async move { tracing::info!("sub2 rx: {:?}", r) });
|
||||||
|
|
||||||
|
future::join(fut1, fut2).await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
// let's configure the server only hold 5 messages in memory.
|
||||||
|
let config = ServerConfig::builder().set_message_buffer_capacity(5).build();
|
||||||
|
let server = Server::builder()
|
||||||
|
.set_config(config)
|
||||||
|
.set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024))
|
||||||
|
.build("127.0.0.1:0")
|
||||||
|
.await?;
|
||||||
|
let (tx, _rx) = broadcast::channel::<usize>(16);
|
||||||
|
|
||||||
|
let mut module = RpcModule::new(tx.clone());
|
||||||
|
|
||||||
|
std::thread::spawn(move || produce_items(tx));
|
||||||
|
|
||||||
|
module
|
||||||
|
.register_subscription("subscribe_hello", "s_hello", "unsubscribe_hello", |_, pending, tx, _| async move {
|
||||||
|
let rx = tx.subscribe();
|
||||||
|
let stream = BroadcastStream::new(rx);
|
||||||
|
pipe_from_stream_with_bounded_buffer(pending, stream).await?;
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
let handle = server.start(module);
|
||||||
|
|
||||||
|
// In this example we don't care about doing shutdown so let's it run forever.
|
||||||
|
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn pipe_from_stream_with_bounded_buffer(
|
||||||
|
pending: PendingSubscriptionSink,
|
||||||
|
stream: BroadcastStream<usize>,
|
||||||
|
) -> Result<(), anyhow::Error> {
|
||||||
|
let sink = pending.accept().await?;
|
||||||
|
let closed = sink.closed();
|
||||||
|
|
||||||
|
futures::pin_mut!(closed, stream);
|
||||||
|
|
||||||
|
loop {
|
||||||
|
match future::select(closed, stream.next()).await {
|
||||||
|
// subscription closed.
|
||||||
|
Either::Left((_, _)) => break Ok(()),
|
||||||
|
|
||||||
|
// received new item from the stream.
|
||||||
|
Either::Right((Some(Ok(item)), c)) => {
|
||||||
|
let msg = serde_json::value::to_raw_value(&item)?;
|
||||||
|
|
||||||
|
// NOTE: this will block until there a spot in the queue
|
||||||
|
// and you might want to do something smarter if it's
|
||||||
|
// critical that "the most recent item" must be sent when it is produced.
|
||||||
|
if sink.send(msg).await.is_err() {
|
||||||
|
break Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
closed = c;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send back back the error.
|
||||||
|
Either::Right((Some(Err(e)), _)) => break Err(e.into()),
|
||||||
|
|
||||||
|
// Stream is closed.
|
||||||
|
Either::Right((None, _)) => break Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Naive example that broadcasts the produced values to all active subscribers.
|
||||||
|
fn produce_items(tx: broadcast::Sender<usize>) {
|
||||||
|
for c in 1..=100 {
|
||||||
|
std::thread::sleep(std::time::Duration::from_millis(1));
|
||||||
|
|
||||||
|
// This might fail if no receivers are alive, could occur if no subscriptions are active...
|
||||||
|
// Also be aware that this will succeed when at least one receiver is alive
|
||||||
|
// Thus, clients connecting at different point in time will not receive
|
||||||
|
// the items sent before the subscription got established.
|
||||||
|
let _ = tx.send(c);
|
||||||
|
}
|
||||||
|
}
|
138
reference_jsonrpsee_crate_examples/ws_pubsub_with_params.rs
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any
|
||||||
|
// person obtaining a copy of this software and associated
|
||||||
|
// documentation files (the "Software"), to deal in the
|
||||||
|
// Software without restriction, including without
|
||||||
|
// limitation the rights to use, copy, modify, merge,
|
||||||
|
// publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software
|
||||||
|
// is furnished to do so, subject to the following
|
||||||
|
// conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice
|
||||||
|
// shall be included in all copies or substantial portions
|
||||||
|
// of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||||
|
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||||
|
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||||
|
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||||
|
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||||
|
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use futures::{Stream, StreamExt};
|
||||||
|
use jsonrpsee::core::Serialize;
|
||||||
|
use jsonrpsee::core::client::{Subscription, SubscriptionClientT};
|
||||||
|
use jsonrpsee::server::{RpcModule, Server, ServerConfig, TrySendError};
|
||||||
|
use jsonrpsee::ws_client::WsClientBuilder;
|
||||||
|
use jsonrpsee::{PendingSubscriptionSink, rpc_params};
|
||||||
|
use tokio::time::interval;
|
||||||
|
use tokio_stream::wrappers::IntervalStream;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::FmtSubscriber::builder()
|
||||||
|
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.try_init()
|
||||||
|
.expect("setting default subscriber failed");
|
||||||
|
|
||||||
|
let addr = run_server().await?;
|
||||||
|
let url = format!("ws://{}", addr);
|
||||||
|
|
||||||
|
let client = WsClientBuilder::default().build(&url).await?;
|
||||||
|
|
||||||
|
// Subscription with a single parameter
|
||||||
|
let mut sub_params_one: Subscription<Option<char>> =
|
||||||
|
client.subscribe("sub_one_param", rpc_params![3], "unsub_one_param").await?;
|
||||||
|
tracing::info!("subscription with one param: {:?}", sub_params_one.next().await);
|
||||||
|
|
||||||
|
// Subscription with multiple parameters
|
||||||
|
let mut sub_params_two: Subscription<String> =
|
||||||
|
client.subscribe("sub_params_two", rpc_params![2, 5], "unsub_params_two").await?;
|
||||||
|
tracing::info!("subscription with two params: {:?}", sub_params_two.next().await);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_server() -> anyhow::Result<SocketAddr> {
|
||||||
|
const LETTERS: &str = "abcdefghijklmnopqrstuvxyz";
|
||||||
|
let config = ServerConfig::builder().set_message_buffer_capacity(10).build();
|
||||||
|
let server = Server::builder().set_config(config).build("127.0.0.1:0").await?;
|
||||||
|
let mut module = RpcModule::new(());
|
||||||
|
module
|
||||||
|
.register_subscription(
|
||||||
|
"sub_one_param",
|
||||||
|
"sub_one_param",
|
||||||
|
"unsub_one_param",
|
||||||
|
|params, pending, _, _| async move {
|
||||||
|
// we are doing this verbose way to get a customized reject error on the subscription.
|
||||||
|
let idx = match params.one::<usize>() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => {
|
||||||
|
let _ = pending.reject(e).await;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let item = LETTERS.chars().nth(idx);
|
||||||
|
|
||||||
|
let interval = interval(Duration::from_millis(200));
|
||||||
|
let stream = IntervalStream::new(interval).map(move |_| item);
|
||||||
|
|
||||||
|
pipe_from_stream_and_drop(pending, stream).await.map_err(Into::into)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
module
|
||||||
|
.register_subscription("sub_params_two", "params_two", "unsub_params_two", |params, pending, _, _| async move {
|
||||||
|
let (one, two) = params.parse::<(usize, usize)>()?;
|
||||||
|
|
||||||
|
let item = &LETTERS[one..two];
|
||||||
|
let interval = interval(Duration::from_millis(200));
|
||||||
|
let stream = IntervalStream::new(interval).map(move |_| item);
|
||||||
|
pipe_from_stream_and_drop(pending, stream).await.map_err(Into::into)
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let addr = server.local_addr()?;
|
||||||
|
let handle = server.start(module);
|
||||||
|
|
||||||
|
// In this example we don't care about doing shutdown so let's it run forever.
|
||||||
|
// You may use the `ServerHandle` to shut it down or manage it yourself.
|
||||||
|
tokio::spawn(handle.stopped());
|
||||||
|
|
||||||
|
Ok(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn pipe_from_stream_and_drop<T: Serialize>(
|
||||||
|
pending: PendingSubscriptionSink,
|
||||||
|
mut stream: impl Stream<Item = T> + Unpin,
|
||||||
|
) -> Result<(), anyhow::Error> {
|
||||||
|
let mut sink = pending.accept().await?;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
_ = sink.closed() => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||||
|
maybe_item = stream.next() => {
|
||||||
|
let item = match maybe_item {
|
||||||
|
Some(item) => item,
|
||||||
|
None => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||||
|
};
|
||||||
|
let msg = serde_json::value::to_raw_value(&item)?;
|
||||||
|
match sink.try_send(msg) {
|
||||||
|
Ok(_) => (),
|
||||||
|
Err(TrySendError::Closed(_)) => break Err(anyhow::anyhow!("Subscription was closed")),
|
||||||
|
// channel is full, let's be naive an just drop the message.
|
||||||
|
Err(TrySendError::Full(_)) => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
54
reference_osis_actor/Cargo.toml
Normal file
@@ -0,0 +1,54 @@
[package]
name = "actor_osis"
version = "0.1.0"
edition = "2024"

[lib]
name = "actor_osis" # Can be different from package name, or same
path = "src/lib.rs"

[[bin]]
name = "actor_osis"
path = "cmd/actor_osis.rs"

[[example]]
name = "engine"
path = "examples/engine.rs"

[[example]]
name = "actor"
path = "examples/actor.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
redis = { version = "0.25.0", features = ["tokio-comp"] }
rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
log = "0.4"
env_logger = "0.10"
clap = { version = "4.4", features = ["derive"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # Though task_id is a string, uuid might be useful
chrono = { version = "0.4", features = ["serde"] }
toml = "0.8"
thiserror = "1.0"
async-trait = "0.1"
hero_job = { git = "https://git.ourworld.tf/herocode/baobab.git" }
baobab_actor = { git = "https://git.ourworld.tf/herocode/baobab.git" }
heromodels = { git = "https://git.ourworld.tf/herocode/db.git" }
heromodels_core = { git = "https://git.ourworld.tf/herocode/db.git" }
heromodels-derive = { git = "https://git.ourworld.tf/herocode/db.git" }
rhailib_dsl = { git = "https://git.ourworld.tf/herocode/rhailib.git" }
hero_logger = { git = "https://git.ourworld.tf/herocode/baobab.git", branch = "logger" }
tracing = "0.1.41"

[features]
default = ["calendar", "finance"]
calendar = []
finance = []
flow = []
legal = []
projects = []
biz = []
79
reference_osis_actor/README.md
Normal file
@@ -0,0 +1,79 @@
# Object Storage and Indexing System (OSIS) Actor

The OSIS Actor is responsible for storing and indexing objects in the system. It implements the actor interface to process jobs in a **blocking, synchronized manner**.

## Job Processing Behavior

The OSISActor processes jobs sequentially with the following characteristics:

- **Blocking Processing**: Each job is processed completely before the next job begins
- **Synchronized Execution**: Jobs are executed one at a time in the order they are received
- **No Concurrency**: Unlike async actors, OSIS ensures no parallel job execution
- **Deterministic Order**: Job completion follows the exact order of job submission

This design ensures data consistency and prevents race conditions when performing storage and indexing operations.

## Usage

```rust
use actor_osis::{OSISActor, spawn_osis_actor};

// Create an OSIS actor with the builder pattern
let actor = OSISActor::builder()
    .db_path("/path/to/database")
    .redis_url("redis://localhost:6379")
    .build()
    .expect("Failed to build OSISActor");

// Or spawn directly with the convenience function
let handle = spawn_osis_actor(
    "/path/to/database".to_string(),
    "redis://localhost:6379".to_string(),
    shutdown_rx,
);
```

## Actor Properties

- **Actor ID**: `"osis"` (constant)
- **Actor Type**: `"OSIS"`
- **Processing Model**: Sequential, blocking
- **Script Engine**: Rhai with OSIS-specific DSL extensions

## Canonical Redis queues and verification

The project uses canonical dispatch queues per script type. For OSIS, the work queue is:

- hero:q:work:type:osis

Consumer behavior:

- The in-repo actor derives ScriptType=OSIS from its actor_id containing "osis" and BLPOPs hero:q:work:type:osis.
- This repo’s OSIS actor has been updated so its actor_id is "osis", ensuring it consumes the canonical queue.

Quick verification (redis-cli):

- List work queues:
  - KEYS hero:q:work:type:*
- Check OSIS queue length:
  - LLEN hero:q:work:type:osis
- Inspect a specific job (replace {job_id} with the printed id):
  - HGET hero:job:{job_id} status
  - HGET hero:job:{job_id} output

Run options:

- Option A: Run the example, which spawns the OSIS actor and dispatches jobs to the canonical queue.
  1) Start Redis (if not already running): redis-server
  2) In this repo:
     - cargo run --example actor
  3) Observe the console: job IDs will be printed as they are created and dispatched.
  4) In a separate terminal, verify with redis-cli:
     - LLEN hero:q:work:type:osis (will briefly increment, then return to 0 as the actor consumes)
     - HGET hero:job:{job_id} status (should transition to started, then finished)
     - HGET hero:job:{job_id} output (should contain the script result)

- Option B: Run the standalone actor binary and dispatch from another process that pushes to the canonical type queue.
  1) Start the actor:
     - cargo run --bin actor_osis
  2) From any producer, LPUSH hero:q:work:type:osis {job_id} after persisting the job hash hero:job:{job_id}.
  3) Use the same redis-cli checks above to confirm consumption and completion.

Notes:

- The hash-only result model is the default. The job result is written to hero:job:{job_id}.output and status=finished.
- Reply queues (hero:q:reply:{job_id}) are optional and not required for OSIS to function.
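The Option B flow in this README can also be scripted end to end. The sketch below is a non-authoritative illustration using redis-py (not part of tools/requirements.txt): it persists a job hash, LPUSHes the job id onto the canonical OSIS queue, then polls the hash-only result model. Only the key and queue names come from this README; the hash fields other than status and output are illustrative assumptions rather than the confirmed hero_job schema.

```python
# Hedged sketch: dispatch one job to the canonical OSIS queue and poll the job hash.
# Assumes redis-py; hash field names besides "status"/"output" are illustrative only.
import time
import uuid

import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

job_id = str(uuid.uuid4())
job_key = f"hero:job:{job_id}"

# 1) Persist the job hash first.
r.hset(job_key, mapping={
    "id": job_id,
    "caller_id": "example_caller",      # assumed field name
    "context_id": "example_context",    # assumed field name
    "script": 'print("hello from a dispatched job"); 42',  # assumed field name
    "status": "dispatched",
})

# 2) Dispatch: push the job id onto the canonical OSIS work queue.
r.lpush("hero:q:work:type:osis", job_id)

# 3) Poll the hash-only result model: status should move to started, then finished.
for _ in range(30):
    if r.hget(job_key, "status") in ("finished", "error"):
        break
    time.sleep(1)

print("status:", r.hget(job_key, "status"))
print("output:", r.hget(job_key, "output"))
```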
60
reference_osis_actor/cmd/actor_osis.rs
Normal file
@@ -0,0 +1,60 @@
use actor_osis::OSISActor;
use clap::Parser;
use log::info;
use std::sync::Arc;
use tokio::sync::mpsc;

#[derive(Parser, Debug)]
#[command(name = "actor_osis")]
#[command(about = "OSIS Actor - Synchronous job processing actor")]
struct Args {
    /// Database path
    #[arg(short, long, default_value = "/tmp/osis_db")]
    db_path: String,

    /// Redis URL
    #[arg(short, long, default_value = "redis://localhost:6379")]
    redis_url: String,

    /// Preserve completed tasks in Redis
    #[arg(short, long)]
    preserve_tasks: bool,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    env_logger::init();

    let args = Args::parse();

    info!("Starting OSIS Actor");

    // Create shutdown channel
    let (shutdown_tx, shutdown_rx) = mpsc::channel(1);

    // Setup signal handler for graceful shutdown
    let shutdown_tx_clone = shutdown_tx.clone();
    tokio::spawn(async move {
        tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
        info!("Received Ctrl+C, initiating shutdown...");
        let _ = shutdown_tx_clone.send(()).await;
    });

    // Create and start the actor
    let actor = Arc::new(
        OSISActor::builder()
            .db_path(args.db_path)
            .redis_url(args.redis_url)
            .build()?
    );

    let handle = baobab_actor::spawn_actor(actor, shutdown_rx);

    info!("OSIS Actor started, waiting for jobs...");

    // Wait for the actor to complete
    handle.await??;

    info!("OSIS Actor shutdown complete");
    Ok(())
}
179
reference_osis_actor/src/engine.rs
Normal file
@@ -0,0 +1,179 @@
//! # Rhailib Domain-Specific Language (DSL) Engine
//!
//! This module provides a comprehensive Domain-Specific Language implementation for the Rhai
//! scripting engine, exposing business domain models and operations through a fluent,
//! chainable API.
//!
//! ## Overview
//!
//! The DSL is organized into business domain modules, each providing Rhai-compatible
//! functions for creating, manipulating, and persisting domain entities. All operations
//! include proper authorization checks and type safety.
//!
//! ## Available Domains
//!
//! - **Business Operations** (`biz`): Companies, products, sales, shareholders
//! - **Financial Models** (`finance`): Accounts, assets, marketplace operations
//! - **Content Management** (`library`): Collections, images, PDFs, books, slideshows
//! - **Workflow Management** (`flow`): Flows, steps, signature requirements
//! - **Community Management** (`circle`): Circles, themes, membership
//! - **Contact Management** (`contact`): Contact information and relationships
//! - **Access Control** (`access`): Security and permissions
//! - **Time Management** (`calendar`): Calendar and scheduling
//! - **Core Utilities** (`core`): Comments and fundamental operations
//! - **Generic Objects** (`object`): Generic object manipulation
//!
//! ## Usage Example
//!
//! ```rust
//! use rhai::Engine;
//! use crate::engine::register_dsl_modules;
//!
//! let mut engine = Engine::new();
//! register_dsl_modules(&mut engine);
//!
//! // Now the engine can execute scripts like:
//! // let company = new_company().name("Acme Corp").email("contact@acme.com");
//! // let saved = save_company(company);
//! ```

use rhai::Engine;
use rhailib_dsl;
use std::sync::{Arc, OnceLock};

/// Engine factory for creating and sharing Rhai engines.
pub struct EngineFactory {
    engine: Arc<Engine>,
}

impl EngineFactory {
    /// Create a new engine factory with a configured Rhai engine.
    pub fn new() -> Self {
        let mut engine = Engine::new();
        register_dsl_modules(&mut engine);
        // Logger
        hero_logger::rhai_integration::configure_rhai_logging(&mut engine, "osis_actor");

        Self {
            engine: Arc::new(engine),
        }
    }

    /// Get a shared reference to the engine.
    pub fn get_engine(&self) -> Arc<Engine> {
        Arc::clone(&self.engine)
    }

    /// Get the global singleton engine factory.
    pub fn global() -> &'static EngineFactory {
        static FACTORY: OnceLock<EngineFactory> = OnceLock::new();
        FACTORY.get_or_init(|| EngineFactory::new())
    }
}

/// Register basic object functions directly in the engine.
/// This provides object functionality without relying on the problematic rhailib_dsl object module.
fn register_object_functions(engine: &mut Engine) {
    use heromodels::models::object::Object;

    // Register the Object type
    engine.register_type_with_name::<Object>("Object");

    // Register constructor function
    engine.register_fn("new_object", || Object::new());

    // Register setter functions
    engine.register_fn("object_title", |obj: &mut Object, title: String| {
        obj.title = title;
        obj.clone()
    });

    engine.register_fn(
        "object_description",
        |obj: &mut Object, description: String| {
            obj.description = description;
            obj.clone()
        },
    );

    // Register getter functions
    engine.register_fn("get_object_id", |obj: &mut Object| obj.id() as i64);
    engine.register_fn("get_object_title", |obj: &mut Object| obj.title.clone());
    engine.register_fn("get_object_description", |obj: &mut Object| {
        obj.description.clone()
    });
}

/// Registers all DSL modules with the provided Rhai engine.
///
/// This function is the main entry point for integrating the rhailib DSL with a Rhai engine.
/// It registers all business domain modules, making their functions available to Rhai scripts.
///
/// # Arguments
///
/// * `engine` - A mutable reference to the Rhai engine to register modules with
///
/// # Example
///
/// ```rust
/// use rhai::Engine;
/// use crate::engine::register_dsl_modules;
///
/// let mut engine = Engine::new();
/// register_dsl_modules(&mut engine);
///
/// // Engine now has access to all DSL functions
/// let result = engine.eval::<String>(r#"
///     let company = new_company().name("Test Corp");
///     company.name
/// "#).unwrap();
/// assert_eq!(result, "Test Corp");
/// ```
///
/// # Registered Modules
///
/// This function registers the following domain modules:
/// - Access control functions
/// - Business operation functions (companies, products, sales, shareholders)
/// - Calendar and scheduling functions
/// - Circle and community management functions
/// - Company management functions
/// - Contact management functions
/// - Core utility functions
/// - Financial operation functions (accounts, assets, marketplace)
/// - Workflow management functions (flows, steps, signatures)
/// - Library and content management functions
/// - Generic object manipulation functions (custom implementation)
pub fn register_dsl_modules(engine: &mut Engine) {
    rhailib_dsl::access::register_access_rhai_module(engine);
    rhailib_dsl::biz::register_biz_rhai_module(engine);
    rhailib_dsl::calendar::register_calendar_rhai_module(engine);
    rhailib_dsl::circle::register_circle_rhai_module(engine);
    rhailib_dsl::company::register_company_rhai_module(engine);
    rhailib_dsl::contact::register_contact_rhai_module(engine);
    rhailib_dsl::core::register_core_rhai_module(engine);
    rhailib_dsl::finance::register_finance_rhai_modules(engine);
    // rhailib_dsl::flow::register_flow_rhai_modules(engine);
    rhailib_dsl::library::register_library_rhai_module(engine);
    // Skip problematic object module for now - can be implemented separately if needed
    // rhailib_dsl::object::register_object_fns(engine);
    rhailib_dsl::payment::register_payment_rhai_module(engine);

    // Register basic object functionality directly
    register_object_functions(engine);

    println!("Rhailib Domain Specific Language modules registered successfully.");
}

/// Create a shared heromodels engine using the factory.
pub fn create_osis_engine() -> Arc<Engine> {
    EngineFactory::global().get_engine()
}

/// Evaluate a Rhai script string.
pub fn eval_script(
    engine: &Engine,
    script: &str,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
    engine.eval(script)
}
332
reference_osis_actor/src/lib.rs
Normal file
@@ -0,0 +1,332 @@
mod engine;

use async_trait::async_trait;
use baobab_actor::execute_job_with_engine;
use hero_job::{Job, JobStatus, ScriptType};
use hero_logger::{create_job_logger, create_job_logger_with_guard};
use log::{error, info};
use redis::AsyncCommands;
use rhai::Engine;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tracing::subscriber::with_default;

use baobab_actor::{actor_trait::Actor, spawn_actor};

/// Constant actor ID for OSIS actor
const OSIS: &str = "osis";

/// Builder for OSISActor
#[derive(Debug)]
pub struct OSISActorBuilder {
    engine: Option<Arc<Engine>>,
    db_path: Option<String>,
    redis_url: Option<String>,
}

impl Default for OSISActorBuilder {
    fn default() -> Self {
        Self {
            engine: None,
            db_path: None,
            redis_url: Some("redis://localhost:6379".to_string()),
        }
    }
}

impl OSISActorBuilder {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn engine(mut self, engine: Engine) -> Self {
        self.engine = Some(Arc::new(engine));
        self
    }

    pub fn shared_engine(mut self, engine: Arc<Engine>) -> Self {
        self.engine = Some(engine);
        self
    }

    pub fn db_path<S: Into<String>>(mut self, db_path: S) -> Self {
        self.db_path = Some(db_path.into());
        self
    }

    pub fn redis_url<S: Into<String>>(mut self, redis_url: S) -> Self {
        self.redis_url = Some(redis_url.into());
        self
    }

    pub fn build(self) -> Result<OSISActor, String> {
        let engine = self
            .engine
            .unwrap_or_else(|| crate::engine::create_osis_engine());

        Ok(OSISActor {
            engine,
            db_path: self.db_path.ok_or("db_path is required")?,
            redis_url: self
                .redis_url
                .unwrap_or("redis://localhost:6379".to_string()),
        })
    }
}

/// OSIS actor that processes jobs in a blocking, synchronized manner
#[derive(Debug, Clone)]
pub struct OSISActor {
    pub engine: Arc<Engine>,
    pub db_path: String,
    pub redis_url: String,
}

impl OSISActor {
    /// Create a new OSISActorBuilder
    pub fn builder() -> OSISActorBuilder {
        OSISActorBuilder::new()
    }
}

impl Default for OSISActor {
    fn default() -> Self {
        Self {
            engine: crate::engine::create_osis_engine(),
            db_path: "/tmp".to_string(),
            redis_url: "redis://localhost:6379".to_string(),
        }
    }
}

#[async_trait]
impl Actor for OSISActor {
    async fn process_job(&self, job: Job, redis_conn: &mut redis::aio::MultiplexedConnection) {
        let job_id = &job.id;
        let _db_path = &self.db_path;

        // Debug: Log job details
        info!(
            "OSIS Actor '{}', Job {}: Processing job with context_id: {}, script length: {}",
            OSIS, job_id, job.context_id, job.script.len()
        );

        // Create job-specific logger
        let (job_logger, guard) = match create_job_logger_with_guard("logs", "osis", job_id) {
            Ok((logger, guard)) => {
                info!(
                    "OSIS Actor '{}', Job {}: Job logger created successfully",
                    OSIS, job_id
                );
                (logger, guard)
            },
            Err(e) => {
                error!(
                    "OSIS Actor '{}', Job {}: Failed to create job logger: {}",
                    OSIS, job_id, e
                );
                return;
            }
        };

        info!(
            "OSIS Actor '{}', Job {}: Starting sequential processing",
            OSIS, job_id
        );

        // Update job status to Started
        if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Started).await {
            error!(
                "OSIS Actor '{}', Job {}: Failed to update status to Started: {}",
                OSIS, job_id, e
            );
            return;
        }

        // Execute ALL job processing within logging context
        let job_result = with_default(job_logger, || {
            tracing::info!(target: "osis_actor", "Job {} started", job_id);

            // Move the Rhai script execution inside this scope
            // IMPORTANT: Create a new engine and configure Rhai logging for this job context
            let mut job_engine = Engine::new();
            register_dsl_modules(&mut job_engine);
            // Configure Rhai logging integration for this engine instance
            hero_logger::rhai_integration::configure_rhai_logging(&mut job_engine, "osis_actor");

            // Execute the script within the job logger context
            let script_result = tokio::task::block_in_place(|| {
                tokio::runtime::Handle::current().block_on(async {
                    execute_job_with_engine(&mut job_engine, &job, &self.db_path).await
                })
            });

            tracing::info!(target: "osis_actor", "Job {} completed", job_id);

            script_result // Return the result
        });

        // Handle the result outside the logging context
        match job_result {
            Ok(result) => {
                let result_str = format!("{:?}", result);
                info!(
                    "OSIS Actor '{}', Job {}: Script executed successfully. Result: {}",
                    OSIS, job_id, result_str
                );

                // Update job with success result (stores in job hash output field)
                if let Err(e) = Job::set_result(redis_conn, job_id, &result_str).await {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to set result: {}",
                        OSIS, job_id, e
                    );
                    return;
                }

                // Also push result to result queue for retrieval
                let result_queue_key = format!("hero:job:{}:result", job_id);
                if let Err(e) = redis_conn
                    .lpush::<_, _, ()>(&result_queue_key, &result_str)
                    .await
                {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to push result to queue {}: {}",
                        OSIS, job_id, result_queue_key, e
                    );
                } else {
                    info!(
                        "OSIS Actor '{}', Job {}: Result pushed to queue: {}",
                        OSIS, job_id, result_queue_key
                    );
                }

                if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Finished).await {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to update status to Finished: {}",
                        OSIS, job_id, e
                    );
                }
            }
            Err(e) => {
                let error_msg = format!("Script execution error: {}", e);
                error!("OSIS Actor '{}', Job {}: {}", OSIS, job_id, error_msg);

                // Update job with error (stores in job hash error field)
                if let Err(e) = Job::set_error(redis_conn, job_id, &error_msg).await {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to set error: {}",
                        OSIS, job_id, e
                    );
                }

                // Also push error to error queue for retrieval
                let error_queue_key = format!("hero:job:{}:error", job_id);
                if let Err(e) = redis_conn
                    .lpush::<_, _, ()>(&error_queue_key, &error_msg)
                    .await
                {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to push error to queue {}: {}",
                        OSIS, job_id, error_queue_key, e
                    );
                } else {
                    info!(
                        "OSIS Actor '{}', Job {}: Error pushed to queue: {}",
                        OSIS, job_id, error_queue_key
                    );
                }

                if let Err(e) = Job::update_status(redis_conn, job_id, JobStatus::Error).await {
                    error!(
                        "OSIS Actor '{}', Job {}: Failed to update status to Error: {}",
                        OSIS, job_id, e
                    );
                }
            }
        }

        // Force flush logs before dropping guard
        std::thread::sleep(std::time::Duration::from_millis(100));

        // Keep the guard alive until after processing
        drop(guard);

        info!(
            "OSIS Actor '{}', Job {}: Sequential processing completed",
            OSIS, job_id
        );
    }

    fn actor_type(&self) -> &'static str {
        "OSIS"
    }

    fn actor_id(&self) -> &str {
        // Actor ID contains "osis" so the runtime derives ScriptType=OSIS and consumes the canonical type queue.
        "osis"
    }

    fn redis_url(&self) -> &str {
        &self.redis_url
    }
}

/// Convenience function to spawn an OSIS actor using the trait interface
///
/// This function provides backward compatibility with the original actor API
/// while using the new trait-based implementation.
pub fn spawn_osis_actor(
    db_path: String,
    redis_url: String,
    shutdown_rx: mpsc::Receiver<()>,
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
    let actor = Arc::new(
        OSISActor::builder()
            .db_path(db_path)
            .redis_url(redis_url)
            .build()
            .expect("Failed to build OSISActor"),
    );
    spawn_actor(actor, shutdown_rx)
}

// Re-export engine functions for examples and external use
pub use crate::engine::{create_osis_engine, register_dsl_modules};

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_osis_actor_creation() {
        // db_path is required by the builder, so it must be supplied here.
        let actor = OSISActor::builder().db_path("/tmp").build().unwrap();
        assert_eq!(actor.actor_type(), "OSIS");
    }

    #[tokio::test]
    async fn test_osis_actor_default() {
        let actor = OSISActor::default();
        assert_eq!(actor.actor_type(), "OSIS");
    }

    #[tokio::test]
    async fn test_osis_actor_process_job_interface() {
        let actor = OSISActor::default();

        // Create a simple test job
        let _job = Job::new(
            "test_caller".to_string(),
            "test_context".to_string(),
            r#"print("Hello from sync actor test!"); 42"#.to_string(),
            ScriptType::OSIS,
        );

        // Note: This test doesn't actually connect to Redis; it just tests the interface.
        // In a real test environment, you'd need a Redis instance or mock

        // For now, just verify the actor was created successfully
        assert_eq!(actor.actor_type(), "OSIS");
    }
}
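Besides the hash fields, process_job above also LPUSHes the result to hero:job:{job_id}:result (and errors to hero:job:{job_id}:error). A minimal, hedged redis-py sketch of consuming that optional queue-based path instead of polling the hash:

```python
# Sketch only: block on the optional per-job result/error lists the OSIS actor pushes to.
# Assumes redis-py and a job id obtained from whatever produced the job.
import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)
job_id = "PASTE_JOB_ID"  # placeholder

# Wait up to 30 seconds on either list; returns (key, value) or None on timeout.
item = r.blpop([f"hero:job:{job_id}:result", f"hero:job:{job_id}:error"], timeout=30)
if item is None:
    print("no result within timeout; fall back to HGET hero:job:{id} status/output")
else:
    queue, value = item
    print(f"{queue}: {value}")
```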
109
tools/gen_auth.py
Normal file
@@ -0,0 +1,109 @@
#!/usr/bin/env python3
"""
Generate secp256k1 keypair and sign a nonce in the exact format the server expects.

Install dependencies once:
  python3 -m pip install -r tools/requirements.txt

Usage examples:
  # Generate a new keypair and sign a nonce (prints PRIVATE_HEX, PUBLIC_HEX, SIGNATURE_HEX)
  python tools/gen_auth.py --nonce "PASTE_NONCE_FROM_fetch_nonce"

  # Sign with an existing private key (64 hex chars)
  python tools/gen_auth.py --nonce "PASTE_NONCE" --priv "YOUR_PRIVATE_KEY_HEX"

  # Output JSON instead of key=value lines
  python tools/gen_auth.py --nonce "PASTE_NONCE" --json

Notes:
- Public key is compressed (33 bytes) hex, starting with 02/03 (66 hex chars total).
- Signature is compact ECDSA (r||s) 64 bytes (128 hex chars).
- The nonce should be the exact ASCII string returned by fetch_nonce().
- The message signed is sha256(nonce_ascii) to match client/server behavior:
  - [rust.AuthHelper::sign_message()](interfaces/openrpc/client/src/auth.rs:55)
  - [rust.AuthManager::verify_signature()](interfaces/openrpc/server/src/auth.rs:85)
"""
import argparse
import hashlib
import json
import sys
from typing import Dict, Tuple, Optional

try:
    from ecdsa import SigningKey, VerifyingKey, SECP256k1, util
except Exception as e:
    print("Missing dependency 'ecdsa'. Install with:", file=sys.stderr)
    print(" python3 -m pip install -r tools/requirements.txt", file=sys.stderr)
    raise


def sha256_ascii(s: str) -> bytes:
    return hashlib.sha256(s.encode()).digest()


def to_compact_signature_hex(sk: SigningKey, nonce_ascii: str) -> str:
    digest = sha256_ascii(nonce_ascii)
    sig = sk.sign_digest(digest, sigencode=util.sigencode_string)  # 64 bytes r||s
    return sig.hex()


def compressed_pubkey_hex(vk: VerifyingKey) -> str:
    # Prefer compressed output if library supports it directly (ecdsa>=0.18)
    try:
        return vk.to_string("compressed").hex()
    except TypeError:
        # Manual compression (02/03 + X)
        p = vk.pubkey.point
        x = p.x()
        y = p.y()
        prefix = b"\x02" if (y % 2 == 0) else b"\x03"
        return (prefix + x.to_bytes(32, "big")).hex()


def generate_or_load_sk(priv_hex: Optional[str]) -> Tuple[SigningKey, bool]:
    if priv_hex:
        if len(priv_hex) != 64:
            raise ValueError("Provided --priv must be 64 hex chars (32 bytes).")
        return SigningKey.from_string(bytes.fromhex(priv_hex), curve=SECP256k1), False
    return SigningKey.generate(curve=SECP256k1), True


def run(nonce: str, priv_hex: Optional[str], as_json: bool) -> int:
    sk, generated = generate_or_load_sk(priv_hex)
    vk = sk.get_verifying_key()

    out: Dict[str, str] = {
        "PUBLIC_HEX": compressed_pubkey_hex(vk),
        "NONCE": nonce,
        "SIGNATURE_HEX": to_compact_signature_hex(sk, nonce),
    }
    # Always print the private key for convenience (either generated or provided)
    out["PRIVATE_HEX"] = sk.to_string().hex()

    if as_json:
        print(json.dumps(out, separators=(",", ":")))
    else:
        # key=value form for easy copy/paste
        print(f"PRIVATE_HEX={out['PRIVATE_HEX']}")
        print(f"PUBLIC_HEX={out['PUBLIC_HEX']}")
        print(f"NONCE={out['NONCE']}")
        print(f"SIGNATURE_HEX={out['SIGNATURE_HEX']}")
    return 0


def main() -> int:
    parser = argparse.ArgumentParser(description="Generate secp256k1 auth material and signature for a nonce.")
    parser.add_argument("--nonce", required=True, help="Nonce string returned by fetch_nonce (paste as-is)")
    parser.add_argument("--priv", help="Existing private key hex (64 hex chars). If omitted, a new keypair is generated.")
    parser.add_argument("--json", action="store_true", help="Output JSON instead of key=value lines.")
    args = parser.parse_args()

    try:
        return run(args.nonce, args.priv, args.json)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())
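As a quick local sanity check of the convention gen_auth.py implements (sign sha256 of the ASCII nonce, compact r||s, compressed public key), the same ecdsa library can round-trip a signature entirely offline. This is only a client-side sketch, not the server's verification code:

```python
# Round-trip check of the gen_auth.py signing convention using the ecdsa library.
import hashlib

from ecdsa import SECP256k1, SigningKey, VerifyingKey, util

nonce = "any ASCII nonce string"  # placeholder; paste a real nonce to mimic the flow

sk = SigningKey.generate(curve=SECP256k1)
digest = hashlib.sha256(nonce.encode()).digest()
signature = sk.sign_digest(digest, sigencode=util.sigencode_string)  # 64-byte r||s
public_hex = sk.get_verifying_key().to_string("compressed").hex()    # 66 hex chars

# Re-load the verifying key from the compressed hex, as a verifier would receive it,
# and check the signature against the same digest.
vk = VerifyingKey.from_string(bytes.fromhex(public_hex), curve=SECP256k1)
print("signature valid:", vk.verify_digest(signature, digest, sigdecode=util.sigdecode_string))
```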
124
tools/gen_auth.sh
Executable file
@@ -0,0 +1,124 @@
#!/usr/bin/env bash
set -euo pipefail

usage() {
  cat <<'USAGE'
Usage:
  gen_auth.sh --nonce "<nonce_string>" [--priv <private_key_hex>] [--json]

Options:
  --nonce   The nonce string returned by fetch_nonce (paste as-is).
  --priv    Optional private key hex (64 hex chars). If omitted, a new key is generated.
  --json    Output JSON instead of plain KEY=VALUE lines.

Outputs:
  PRIVATE_HEX    Private key hex (only when generated, or echoed back if provided)
  PUBLIC_HEX     Compressed secp256k1 public key hex (33 bytes, 66 hex chars)
  NONCE          The nonce string you passed in
  SIGNATURE_HEX  Compact ECDSA signature hex (64 bytes, 128 hex chars)

Notes:
- The signature is produced by signing sha256(nonce_ascii) and encoded as compact r||s (64 bytes),
  which matches the server/client behavior ([interfaces/openrpc/client/src/auth.rs](interfaces/openrpc/client/src/auth.rs:55), [interfaces/openrpc/server/src/auth.rs](interfaces/openrpc/server/src/auth.rs:85)).
USAGE
}

NONCE=""
PRIV_HEX=""
OUT_JSON=0

while [[ $# -gt 0 ]]; do
  case "$1" in
    --nonce)
      NONCE="${2:-}"; shift 2 ;;
    --priv)
      PRIV_HEX="${2:-}"; shift 2 ;;
    --json)
      OUT_JSON=1; shift ;;
    -h|--help)
      usage; exit 0 ;;
    *)
      echo "Unknown arg: $1" >&2; usage; exit 1 ;;
  esac
done

if [[ -z "$NONCE" ]]; then
  echo "Error: --nonce is required" >&2
  usage
  exit 1
fi

if ! command -v python3 >/dev/null 2>&1; then
  echo "Error: python3 not found. Install Python 3 (e.g., sudo pacman -S python) and retry." >&2
  exit 1
fi

# Ensure 'ecdsa' module is available; install to user site if missing.
if ! python3 - <<'PY' >/dev/null 2>&1
import importlib; importlib.import_module("ecdsa")
PY
then
  echo "Installing Python 'ecdsa' package in user site..." >&2
  if ! python3 -m pip install --user --quiet ecdsa; then
    echo "Error: failed to install 'ecdsa'. Install manually: python3 -m pip install --user ecdsa" >&2
    exit 1
  fi
fi

# Now run Python to generate/derive keys and sign the nonce (ASCII) with compact ECDSA.
python3 - "$NONCE" "$PRIV_HEX" "$OUT_JSON" <<'PY'
import sys, json, hashlib
from ecdsa import SigningKey, VerifyingKey, SECP256k1, util

NONCE = sys.argv[1]
PRIV_HEX = sys.argv[2]
OUT_JSON = int(sys.argv[3]) == 1

def to_compact_signature(sk: SigningKey, msg_ascii: str) -> bytes:
    digest = hashlib.sha256(msg_ascii.encode()).digest()
    return sk.sign_digest(digest, sigencode=util.sigencode_string)  # 64 bytes r||s

def compressed_pubkey(vk: VerifyingKey) -> bytes:
    try:
        return vk.to_string("compressed")
    except TypeError:
        p = vk.pubkey.point
        x = p.x()
        y = vk.pubkey.point.y()
        prefix = b'\x02' if (y % 2 == 0) else b'\x03'
        return prefix + x.to_bytes(32, "big")

generated = False
if PRIV_HEX:
    if len(PRIV_HEX) != 64:
        print("ERROR: Provided --priv must be 64 hex chars", file=sys.stderr)
        sys.exit(1)
    sk = SigningKey.from_string(bytes.fromhex(PRIV_HEX), curve=SECP256k1)
else:
    sk = SigningKey.generate(curve=SECP256k1)
    generated = True

vk = sk.get_verifying_key()
pub_hex = compressed_pubkey(vk).hex()
sig_hex = to_compact_signature(sk, NONCE).hex()
priv_hex = sk.to_string().hex()

out = {
    "PUBLIC_HEX": pub_hex,
    "NONCE": NONCE,
    "SIGNATURE_HEX": sig_hex,
}
if generated or PRIV_HEX:
    out["PRIVATE_HEX"] = priv_hex

if OUT_JSON:
    print(json.dumps(out, separators=(",", ":")))
else:
    if "PRIVATE_HEX" in out:
        print(f"PRIVATE_HEX={out['PRIVATE_HEX']}")
    print(f"PUBLIC_HEX={out['PUBLIC_HEX']}")
    print(f"NONCE={out['NONCE']}")
    print(f"SIGNATURE_HEX={out['SIGNATURE_HEX']}")
PY

# End
2
tools/requirements.txt
Normal file
@@ -0,0 +1,2 @@
ecdsa==0.18.0
requests==2.32.3
204
tools/rpc_smoke_test.py
Normal file
@@ -0,0 +1,204 @@
#!/usr/bin/env python3
"""
Non-destructive JSON-RPC smoke tests against the OpenRPC server.

Installs:
  python3 -m pip install -r tools/requirements.txt

Usage:
  # Default URL http://127.0.0.1:9944
  python tools/rpc_smoke_test.py

  # Specify a different URL
  python tools/rpc_smoke_test.py --url http://127.0.0.1:9944

  # Provide a specific pubkey for fetch_nonce (compressed 33-byte hex)
  python tools/rpc_smoke_test.py --pubkey 02deadbeef...

  # Lookup details for first N jobs returned by list_jobs
  python tools/rpc_smoke_test.py --limit 5

What it tests (non-destructive):
- fetch_nonce(pubkey) -> returns a nonce string from the server auth manager
- whoami() -> returns a JSON string with basic server info
- list_jobs() -> returns job IDs only (no mutation)
- get_job_status(id) -> reads status (for up to --limit items)
- get_job_output(id) -> reads output (for up to --limit items)
- get_job_logs(id) -> reads logs (for up to --limit items)

Notes:
- If you don't pass --pubkey, this script will generate a random secp256k1 keypair
  and derive a compressed public key (no persistence, just for testing fetch_nonce).
"""

import argparse
import json
import os
import random
import sys
import time
from typing import Any, Dict, List, Optional

try:
    import requests
except Exception:
    print("Missing dependency 'requests'. Install with:\n python3 -m pip install -r tools/requirements.txt", file=sys.stderr)
    raise

try:
    from ecdsa import SigningKey, SECP256k1
except Exception:
    # ecdsa is optional here; only used to generate a test pubkey if --pubkey is absent
    SigningKey = None  # type: ignore


def ensure_http_url(url: str) -> str:
    if url.startswith("http://") or url.startswith("https://"):
        return url
    # Accept ws:// scheme too; convert to http for JSON-RPC over HTTP
    if url.startswith("ws://"):
        return "http://" + url[len("ws://") :]
    if url.startswith("wss://"):
        return "https://" + url[len("wss://") :]
    # Default to http if no scheme
    return "http://" + url


class JsonRpcClient:
    def __init__(self, url: str):
        self.url = ensure_http_url(url)
        self._id = int(time.time() * 1000)

    def call(self, method: str, params: Any) -> Any:
        self._id += 1
        payload = {
            "jsonrpc": "2.0",
            "id": self._id,
            "method": method,
            "params": params,
        }
        resp = requests.post(self.url, json=payload, timeout=30)
        resp.raise_for_status()
        data = resp.json()
        if "error" in data and data["error"] is not None:
            raise RuntimeError(f"RPC error for {method}: {data['error']}")
        return data.get("result")


def random_compressed_pubkey_hex() -> str:
    """
    Generate a random secp256k1 keypair and return compressed public key hex.
    Requires 'ecdsa'. If unavailable, raise an informative error.
    """
    if SigningKey is None:
        raise RuntimeError(
            "ecdsa not installed; either install with:\n"
            " python3 -m pip install -r tools/requirements.txt\n"
            "or pass --pubkey explicitly."
        )
    sk = SigningKey.generate(curve=SECP256k1)
    vk = sk.get_verifying_key()
    try:
        comp = vk.to_string("compressed")
    except TypeError:
        # Manual compression
        p = vk.pubkey.point
        x = p.x()
        y = p.y()
        prefix = b"\x02" if (y % 2 == 0) else b"\x03"
        comp = prefix + x.to_bytes(32, "big")
    return comp.hex()


def main() -> int:
    parser = argparse.ArgumentParser(description="Non-destructive RPC smoke tests")
    parser.add_argument("--url", default=os.environ.get("RPC_URL", "http://127.0.0.1:9944"),
                        help="RPC server URL (http[s]://host:port or ws[s]://host:port)")
    parser.add_argument("--pubkey", help="Compressed secp256k1 public key hex (33 bytes, 66 hex chars)")
    parser.add_argument("--limit", type=int, default=3, help="Number of job IDs to detail from list_jobs()")
    args = parser.parse_args()

    client = JsonRpcClient(args.url)

    print(f"[rpc] URL: {client.url}")

    # 1) fetch_nonce
    pubkey = args.pubkey or random_compressed_pubkey_hex()
    print(f"[rpc] fetch_nonce(pubkey={pubkey[:10]}...):", end=" ")
    try:
        nonce = client.call("fetch_nonce", [pubkey])
        print("OK")
        print(f" nonce: {nonce}")
    except Exception as e:
        print(f"ERROR: {e}")
        return 1

    # 2) whoami
    print("[rpc] whoami():", end=" ")
    try:
        who = client.call("whoami", [])
        print("OK")
        print(f" whoami: {who}")
    except Exception as e:
        print(f"ERROR: {e}")
        return 1

    # 3) list_jobs
    print("[rpc] list_jobs():", end=" ")
    try:
        job_ids: List[str] = client.call("list_jobs", [])
        print("OK")
        print(f" total: {len(job_ids)}")
        for i, jid in enumerate(job_ids[: max(0, args.limit)]):
            print(f" [{i}] {jid}")
    except Exception as e:
        print(f"ERROR: {e}")
        return 1

    # 4) For a few jobs, query status/output/logs
    detail_count = 0
    for jid in job_ids[: max(0, args.limit)] if 'job_ids' in locals() else []:
        print(f"[rpc] get_job_status({jid}):", end=" ")
        try:
            st = client.call("get_job_status", [jid])
            print("OK")
            print(f" status: {st}")
        except Exception as e:
            print(f"ERROR: {e}")

        print(f"[rpc] get_job_output({jid}):", end=" ")
        try:
            out = client.call("get_job_output", [jid])
            print("OK")
            snippet = (out if isinstance(out, str) else json.dumps(out))[:120]
            print(f" output: {snippet}{'...' if len(snippet)==120 else ''}")
        except Exception as e:
            print(f"ERROR: {e}")

        print(f"[rpc] get_job_logs({jid}):", end=" ")
        try:
            logs_obj = client.call("get_job_logs", [jid])  # { logs: String | null }
            print("OK")
            logs = logs_obj.get("logs") if isinstance(logs_obj, dict) else None
            if logs is None:
                print(" logs: (no logs)")
            else:
                snippet = logs[:120]
                print(f" logs: {snippet}{'...' if len(snippet)==120 else ''}")
        except Exception as e:
            print(f"ERROR: {e}")

        detail_count += 1

    print("\nSmoke tests complete.")
    print("Summary:")
    print(f" whoami tested")
    print(f" fetch_nonce tested (pubkey provided/generated)")
    print(f" list_jobs tested (count printed)")
    print(f" detailed queries for up to {detail_count} job(s) (status/output/logs)")

    return 0


if __name__ == "__main__":
    sys.exit(main())
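For a one-off probe without running the whole script, the same wire format that JsonRpcClient.call builds can be sent directly with requests; a minimal sketch (server address assumed to be the default 127.0.0.1:9944):

```python
# Single raw JSON-RPC 2.0 call mirroring what JsonRpcClient.call sends.
import requests

payload = {"jsonrpc": "2.0", "id": 1, "method": "whoami", "params": []}
resp = requests.post("http://127.0.0.1:9944", json=payload, timeout=30)
resp.raise_for_status()
body = resp.json()
print(body["error"] if body.get("error") is not None else body.get("result"))
```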