add benchmarking, more models and examples
This commit is contained in:
72
benches/simple_rhai_bench/README.md
Normal file
72
benches/simple_rhai_bench/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# Minimal Rhailib Benchmark
|
||||
|
||||
A simplified, minimal benchmarking tool for rhailib performance testing.
|
||||
|
||||
## Overview
|
||||
|
||||
This benchmark focuses on simplicity and direct timing measurements:
|
||||
- Creates a configurable batch of tasks using a Lua script (defaults to a single task)
|
||||
- Measures latency using Redis timestamps
|
||||
- Uses existing worker binary
|
||||
- Small, single-file benchmark (~200 lines of code)
|
||||
|
||||
## Usage
|
||||
|
||||
### Prerequisites
|
||||
- Redis running on `127.0.0.1:6379`
|
||||
- Worker binary built: `cd src/worker && cargo build --release`
|
||||
|
||||
### Run Benchmark
|
||||
```bash
|
||||
# From project root
|
||||
cargo bench
|
||||
```
|
||||
|
||||
### Expected Output
|
||||
```
|
||||
🧹 Cleaning up Redis...
|
||||
🚀 Starting worker...
|
||||
📝 Creating single task...
|
||||
⏱️ Waiting for completion...
|
||||
✅ Task completed in 23.45ms
|
||||
🧹 Cleaning up...
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
- `main.rs` - Main benchmark binary
|
||||
- `batch_task.lua` - Minimal Lua script for batch task creation
|
||||
- `Cargo.toml` - Dependencies and binary configuration
|
||||
- `README.md` - This file
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **Cleanup**: Clear Redis queues and task details
|
||||
2. **Start Worker**: Spawn single worker process
|
||||
3. **Create Task**: Use Lua script to create one task with timestamp
|
||||
4. **Wait & Measure**: Poll task until complete, calculate latency
|
||||
5. **Cleanup**: Kill worker and clear Redis
|
||||
|
||||
## Latency Calculation
|
||||
|
||||
```
|
||||
latency_ms = updated_at - created_at
|
||||
```
|
||||
|
||||
Where:
|
||||
- `created_at`: Timestamp when task was created (Lua script)
|
||||
- `updated_at`: Timestamp when worker completed task
|
||||
|
||||
## Future Iterations
|
||||
|
||||
- **Iteration 2**: Small batches (n=5, n=10)
|
||||
- **Iteration 3**: Larger batches and script complexity
|
||||
- **Iteration 4**: Performance optimizations
|
||||
|
||||
## Benefits
|
||||
|
||||
- **Minimal Code**: a few hundred lines vs previous 800+ lines
|
||||
- **Easy to Understand**: Single file, linear flow
|
||||
- **Direct Timing**: Redis timestamps, no complex stats
|
||||
- **Fast to Modify**: No abstractions or frameworks
|
||||
- **Reliable**: Simple Redis operations
|
46
benches/simple_rhai_bench/batch_task.lua
Normal file
46
benches/simple_rhai_bench/batch_task.lua
Normal file
@@ -0,0 +1,46 @@
|
||||
-- Minimal Lua script for batch task creation (defaults to a single task, n=1).
-- KEYS: none (invoked with numkeys = 0; all key names are derived from ARGV)
-- ARGV[1]: circle_name          -- queue suffix; tasks are pushed to 'rhai_tasks:<circle_name>'
-- ARGV[2]: rhai_script_content  -- the Rhai script body stored on each task
-- ARGV[3]: task_count           -- optional, defaults to 1; must be in 1..10000
-- Returns: array of task detail keys ('rhai_task_details:<id>') for timing analysis

if #ARGV < 2 then
    return redis.error_reply("Usage: EVAL script 0 circle_name rhai_script_content [task_count]")
end

local circle_name = ARGV[1]
local rhai_script_content = ARGV[2]
local task_count = tonumber(ARGV[3]) or 1

-- Validate task_count (a non-numeric ARGV[3] already fell back to 1 above)
if task_count <= 0 or task_count > 10000 then
    return redis.error_reply("task_count must be a positive integer between 1 and 10000")
end

local rhai_task_queue = 'rhai_tasks:' .. circle_name
local task_keys = {}
-- Current timestamp in Unix seconds (TIME[1]) to match worker expectations.
-- NOTE(review): every task in the batch shares this one creation timestamp.
local current_time = redis.call('TIME')[1]

-- Create task_count tasks
for i = 1, task_count do
    -- Generate a unique task ID from a global monotonically increasing counter
    local task_id = 'task_' .. redis.call('INCR', 'global_task_counter')
    local task_details_key = 'rhai_task_details:' .. task_id

    -- Create task details hash with creation timestamp;
    -- updatedAt starts equal to createdAt and is overwritten by the worker on completion.
    redis.call('HSET', task_details_key,
        'script', rhai_script_content,
        'status', 'pending',
        'createdAt', current_time,
        'updatedAt', current_time,
        'task_sequence', tostring(i)
    )

    -- Queue the task for workers
    redis.call('LPUSH', rhai_task_queue, task_id)

    -- Add key to return array
    table.insert(task_keys, task_details_key)
end

-- Return array of task keys for timing analysis
return task_keys
|
204
benches/simple_rhai_bench/main.rs
Normal file
204
benches/simple_rhai_bench/main.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use redis::{Client, Commands};
|
||||
use std::process::{Command, Child, Stdio};
|
||||
use std::time::Duration;
|
||||
use std::thread;
|
||||
use std::fs;
|
||||
|
||||
const REDIS_URL: &str = "redis://127.0.0.1:6379";
|
||||
const CIRCLE_NAME: &str = "bench_circle";
|
||||
const SIMPLE_SCRIPT: &str = "new_event()\n .title(\"Weekly Sync\")\n .location(\"Conference Room A\")\n .description(\"Regular team sync meeting\")\n .save_event();";
|
||||
|
||||
fn cleanup_redis() -> Result<(), redis::RedisError> {
|
||||
let client = Client::open(REDIS_URL)?;
|
||||
let mut conn = client.get_connection()?;
|
||||
|
||||
// Clear task queue and any existing task details
|
||||
let _: () = conn.del(format!("rhai_tasks:{}", CIRCLE_NAME))?;
|
||||
let keys: Vec<String> = conn.scan_match("rhai_task_details:*")?.collect();
|
||||
if !keys.is_empty() {
|
||||
let _: () = conn.del(keys)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start_worker() -> Result<Child, std::io::Error> {
|
||||
Command::new("cargo")
|
||||
.args(&["run", "--release", "--bin", "worker", "--",
|
||||
"--circle", CIRCLE_NAME,
|
||||
"--redis-url", REDIS_URL,
|
||||
"--worker-id", "bench_worker",
|
||||
"--preserve-tasks"])
|
||||
.current_dir("src/worker")
|
||||
.stdout(Stdio::null())
|
||||
.stderr(Stdio::null())
|
||||
.spawn()
|
||||
}
|
||||
|
||||
fn create_batch_tasks(task_count: usize) -> Result<Vec<String>, Box<dyn std::error::Error>> {
|
||||
let client = Client::open(REDIS_URL)?;
|
||||
let mut conn = client.get_connection()?;
|
||||
|
||||
// Load and execute Lua script
|
||||
let lua_script = fs::read_to_string("benches/simple_rhai_bench/batch_task.lua")?;
|
||||
let result: redis::Value = redis::cmd("EVAL")
|
||||
.arg(lua_script)
|
||||
.arg(0)
|
||||
.arg(CIRCLE_NAME)
|
||||
.arg(SIMPLE_SCRIPT)
|
||||
.arg(task_count)
|
||||
.query(&mut conn)?;
|
||||
|
||||
// Parse the task keys from the response
|
||||
let task_keys = match result {
|
||||
redis::Value::Bulk(items) => {
|
||||
let mut keys = Vec::new();
|
||||
for item in items {
|
||||
if let redis::Value::Data(key_data) = item {
|
||||
keys.push(String::from_utf8_lossy(&key_data).to_string());
|
||||
}
|
||||
}
|
||||
keys
|
||||
}
|
||||
_ => {
|
||||
return Err(format!("Unexpected Redis response type: {:?}", result).into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(task_keys)
|
||||
}
|
||||
|
||||
fn wait_and_measure(task_key: &str) -> Result<f64, redis::RedisError> {
|
||||
let client = Client::open(REDIS_URL)?;
|
||||
let mut conn = client.get_connection()?;
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout = Duration::from_secs(100);
|
||||
|
||||
// Poll until task is completed or timeout
|
||||
loop {
|
||||
let status: Option<String> = conn.hget(task_key, "status")?;
|
||||
|
||||
match status.as_deref() {
|
||||
Some("completed") | Some("error") => {
|
||||
println!("Task {} completed with status: {}", task_key, status.as_deref().unwrap_or("unknown"));
|
||||
let created_at: u64 = conn.hget(task_key, "createdAt")?;
|
||||
let updated_at: u64 = conn.hget(task_key, "updatedAt")?;
|
||||
return Ok((updated_at - created_at) as f64 * 1000.0); // Convert to milliseconds
|
||||
}
|
||||
Some("pending") | Some("processing") => {
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
}
|
||||
_ => {
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
|
||||
// Check timeout
|
||||
if start_time.elapsed() > timeout {
|
||||
return Err(redis::RedisError::from((
|
||||
redis::ErrorKind::IoError,
|
||||
"Timeout waiting for task completion"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn wait_for_batch_completion(task_keys: &[String]) -> Result<f64, Box<dyn std::error::Error>> {
|
||||
let client = Client::open(REDIS_URL)?;
|
||||
let mut conn = client.get_connection()?;
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout = Duration::from_secs(30);
|
||||
|
||||
// Wait for all tasks to complete
|
||||
loop {
|
||||
let mut completed_count = 0;
|
||||
let mut total_latency = 0u64;
|
||||
|
||||
for task_key in task_keys {
|
||||
let status: Option<String> = conn.hget(task_key, "status")?;
|
||||
|
||||
match status.as_deref() {
|
||||
Some("completed") | Some("error") => {
|
||||
completed_count += 1;
|
||||
|
||||
// Get timing data
|
||||
let created_at: u64 = conn.hget(task_key, "createdAt")?;
|
||||
let updated_at: u64 = conn.hget(task_key, "updatedAt")?;
|
||||
total_latency += updated_at - created_at;
|
||||
}
|
||||
_ => {} // Still pending or processing
|
||||
}
|
||||
}
|
||||
|
||||
if completed_count == task_keys.len() {
|
||||
// All tasks completed, calculate average latency in milliseconds
|
||||
let avg_latency_ms = (total_latency as f64 / task_keys.len() as f64) * 1000.0;
|
||||
return Ok(avg_latency_ms);
|
||||
}
|
||||
|
||||
// Check timeout
|
||||
if start_time.elapsed() > timeout {
|
||||
return Err(format!("Timeout waiting for batch completion. Completed: {}/{}", completed_count, task_keys.len()).into());
|
||||
}
|
||||
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
|
||||
fn cleanup_worker(mut worker: Child) -> Result<(), std::io::Error> {
|
||||
worker.kill()?;
|
||||
worker.wait()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn bench_single_rhai_task(c: &mut Criterion) {
|
||||
// Setup: ensure worker is built
|
||||
let _ = Command::new("cargo")
|
||||
.args(&["build", "--release", "--bin", "worker"])
|
||||
.current_dir("src/worker")
|
||||
.output()
|
||||
.expect("Failed to build worker");
|
||||
|
||||
// Clean up before starting
|
||||
cleanup_redis().expect("Failed to cleanup Redis");
|
||||
|
||||
// Start worker once and reuse it
|
||||
let worker = start_worker().expect("Failed to start worker");
|
||||
thread::sleep(Duration::from_millis(1000)); // Give worker time to start
|
||||
|
||||
let mut group = c.benchmark_group("rhai_task_execution");
|
||||
group.sample_size(10); // Reduce sample size
|
||||
group.measurement_time(Duration::from_secs(10)); // Reduce measurement time
|
||||
|
||||
group.bench_function("batch_task_latency", |b| {
|
||||
b.iter_custom(|iters| {
|
||||
let mut total_latency = Duration::ZERO;
|
||||
|
||||
for _i in 0..iters {
|
||||
// Clean up Redis between iterations
|
||||
cleanup_redis().expect("Failed to cleanup Redis");
|
||||
|
||||
// Create 100 tasks and measure average latency using Redis timestamps
|
||||
let task_keys = create_batch_tasks(5000).expect("Failed to create batch tasks");
|
||||
let avg_latency_ms = wait_for_batch_completion(&task_keys).expect("Failed to measure batch completion");
|
||||
|
||||
// Convert average latency to duration
|
||||
total_latency += Duration::from_millis(avg_latency_ms as u64);
|
||||
}
|
||||
|
||||
total_latency
|
||||
});
|
||||
});
|
||||
|
||||
group.finish();
|
||||
|
||||
// Cleanup worker
|
||||
cleanup_worker(worker).expect("Failed to cleanup worker");
|
||||
cleanup_redis().expect("Failed to cleanup Redis");
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_single_rhai_task);
|
||||
criterion_main!(benches);
|
Reference in New Issue
Block a user