use criterion::{criterion_group, criterion_main, Criterion};
use redis::{Client, Commands};
use std::fs;
use std::process::{Child, Command, Stdio};
use std::thread;
use std::time::Duration;

const REDIS_URL: &str = "redis://127.0.0.1:6379";
const CIRCLE_NAME: &str = "bench_circle";
const SIMPLE_SCRIPT: &str = "new_event()\n    .title(\"Weekly Sync\")\n    .location(\"Conference Room A\")\n    .description(\"Regular team sync meeting\")\n    .save_event();";

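/// Remove the benchmark task queue and any leftover task detail hashes from Redis.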
fn cleanup_redis() -> Result<(), redis::RedisError> {
    let client = Client::open(REDIS_URL)?;
    let mut conn = client.get_connection()?;

    // Clear task queue and any existing task details
    let _: () = conn.del(format!("rhai_tasks:{}", CIRCLE_NAME))?;
    let keys: Vec<String> = conn.scan_match("rhai_task_details:*")?.collect();
    if !keys.is_empty() {
        let _: () = conn.del(keys)?;
    }

    Ok(())
}

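/// Spawn the worker binary (via `cargo run --release`) for the benchmark circle
/// as a child process, with its stdout/stderr silenced.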
fn start_worker() -> Result<Child, std::io::Error> {
    Command::new("cargo")
        .args(&[
            "run",
            "--release",
            "--bin",
            "worker",
            "--",
            "--circle",
            CIRCLE_NAME,
            "--redis-url",
            REDIS_URL,
            "--worker-id",
            "bench_worker",
            "--preserve-tasks",
        ])
        .current_dir("src/worker")
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()
}

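/// Enqueue `task_count` copies of the benchmark Rhai script through the batch Lua
/// script and return the Redis keys of the created task detail hashes.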
fn create_batch_tasks(task_count: usize) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let client = Client::open(REDIS_URL)?;
    let mut conn = client.get_connection()?;

    // Load and execute Lua script
    let lua_script = fs::read_to_string("benches/simple_rhai_bench/batch_task.lua")?;
    let result: redis::Value = redis::cmd("EVAL")
        .arg(lua_script)
        .arg(0)
        .arg(CIRCLE_NAME)
        .arg(SIMPLE_SCRIPT)
        .arg(task_count)
        .query(&mut conn)?;

    // Parse the task keys from the response
    let task_keys = match result {
        redis::Value::Bulk(items) => {
            let mut keys = Vec::new();
            for item in items {
                if let redis::Value::Data(key_data) = item {
                    keys.push(String::from_utf8_lossy(&key_data).to_string());
                }
            }
            keys
        }
        _ => {
            return Err(format!("Unexpected Redis response type: {:?}", result).into());
        }
    };

    Ok(task_keys)
}

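/// Poll the task detail hashes until every task reports `completed` or `error`,
/// then return the average `createdAt` -> `updatedAt` latency in milliseconds.
/// Gives up with an error after a 30-second timeout.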
fn wait_for_batch_completion(task_keys: &[String]) -> Result<f64, Box<dyn std::error::Error>> {
    let client = Client::open(REDIS_URL)?;
    let mut conn = client.get_connection()?;

    let start_time = std::time::Instant::now();
    let timeout = Duration::from_secs(30);

    // Wait for all tasks to complete
    loop {
        let mut completed_count = 0;
        let mut total_latency = 0u64;

        for task_key in task_keys {
            let status: Option<String> = conn.hget(task_key, "status")?;

            match status.as_deref() {
                Some("completed") | Some("error") => {
                    completed_count += 1;

                    // Get timing data
                    let created_at: u64 = conn.hget(task_key, "createdAt")?;
                    let updated_at: u64 = conn.hget(task_key, "updatedAt")?;
                    total_latency += updated_at - created_at;
                }
                _ => {} // Still pending or processing
            }
        }

        if completed_count == task_keys.len() {
            // All tasks completed, calculate average latency in milliseconds
            let avg_latency_ms = (total_latency as f64 / task_keys.len() as f64) * 1000.0;
            return Ok(avg_latency_ms);
        }

        // Check timeout
        if start_time.elapsed() > timeout {
            return Err(format!(
                "Timeout waiting for batch completion. Completed: {}/{}",
                completed_count,
                task_keys.len()
            )
            .into());
        }

        thread::sleep(Duration::from_millis(100));
    }
}

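/// Kill the worker child process and reap it so no zombie is left behind.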
fn cleanup_worker(mut worker: Child) -> Result<(), std::io::Error> {
    worker.kill()?;
    worker.wait()?;
    Ok(())
}

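/// Criterion benchmark: build and start the worker once, then for each iteration
/// enqueue a batch of tasks and accumulate the average per-task latency derived
/// from the Redis timestamps.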
fn bench_single_rhai_task(c: &mut Criterion) {
    // Setup: ensure worker is built
    let _ = Command::new("cargo")
        .args(&["build", "--release", "--bin", "worker"])
        .current_dir("src/worker")
        .output()
        .expect("Failed to build worker");

    // Clean up before starting
    cleanup_redis().expect("Failed to cleanup Redis");

    // Start worker once and reuse it
    let worker = start_worker().expect("Failed to start worker");
    thread::sleep(Duration::from_millis(1000)); // Give worker time to start

    let mut group = c.benchmark_group("rhai_task_execution");
    group.sample_size(10); // Reduce sample size
    group.measurement_time(Duration::from_secs(10)); // Reduce measurement time

    group.bench_function("batch_task_latency", |b| {
        b.iter_custom(|iters| {
            let mut total_latency = Duration::ZERO;

            for _i in 0..iters {
                // Clean up Redis between iterations
                cleanup_redis().expect("Failed to cleanup Redis");

                // Create 5000 tasks and measure average latency using Redis timestamps
                let task_keys = create_batch_tasks(5000).expect("Failed to create batch tasks");
                let avg_latency_ms = wait_for_batch_completion(&task_keys)
                    .expect("Failed to measure batch completion");

                // Convert average latency to duration
                total_latency += Duration::from_millis(avg_latency_ms as u64);
            }

            total_latency
        });
    });

    group.finish();

    // Cleanup worker
    cleanup_worker(worker).expect("Failed to cleanup worker");
    cleanup_redis().expect("Failed to cleanup Redis");
}

criterion_group!(benches, bench_single_rhai_task);
criterion_main!(benches);