This commit is contained in:
2025-04-20 08:00:59 +02:00
parent 0051754c65
commit e3ec26a6ef
22 changed files with 971 additions and 691 deletions

View File

@@ -14,61 +14,42 @@ fn criterion_benchmark(c: &mut Criterion) {
incremental_mode: true,
file_size: Some(10 * 1024 * 1024), // 10MB
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; 100]; // 100 bytes of data
let mut i = 0;
b.iter(|| {
let args = OurDBSetArgs {
id: None, // Let the DB assign an ID
let _ = db.set(OurDBSetArgs {
id: None,
data: &test_data,
};
black_box(db.set(args).unwrap());
i += 1;
}).unwrap();
});
db.close().unwrap();
});
// Setup database with data for other benchmarks
let setup_config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024), // 10MB
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
};
let mut setup_db = OurDB::new(setup_config).unwrap();
let test_data = vec![b'X'; 100]; // 100 bytes of data
let mut ids = Vec::with_capacity(1000);
// Insert 1000 records
for _ in 0..1000 {
let args = OurDBSetArgs {
id: None,
data: &test_data,
};
let id = setup_db.set(args).unwrap();
ids.push(id);
}
// Benchmark get operation
// Benchmark get operation (retrieval)
c.bench_function("get", |b| {
let config = OurDBConfig {
// Setup: Create a database and insert a record
let setup_config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let mut i = 0;
let mut db = OurDB::new(setup_config).unwrap();
let test_data = vec![b'X'; 100];
let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
b.iter(|| {
let id = ids[i % ids.len()];
black_box(db.get(id).unwrap());
i += 1;
let _ = db.get(id).unwrap();
});
db.close().unwrap();
});
// Benchmark update operation
@@ -77,199 +58,143 @@ fn criterion_benchmark(c: &mut Criterion) {
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let updated_data = vec![b'Y'; 100]; // Different data for updates
let mut i = 0;
let test_data = vec![b'X'; 100];
let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
b.iter(|| {
let id = ids[i % ids.len()];
let args = OurDBSetArgs {
let _ = db.set(OurDBSetArgs {
id: Some(id),
data: &updated_data,
};
black_box(db.set(args).unwrap());
i += 1;
data: &test_data,
}).unwrap();
});
db.close().unwrap();
});
// Benchmark get_history operation
// Benchmark delete operation
c.bench_function("delete", |b| {
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
// Create a test data vector outside the closure
let test_data = vec![b'X'; 100];
b.iter_with_setup(
// Setup: Insert a record before each iteration
|| {
db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap()
},
// Benchmark: Delete the record
|id| {
db.delete(id).unwrap();
}
);
db.close().unwrap();
});
// Benchmark history tracking
c.bench_function("get_history", |b| {
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
};
let mut db = OurDB::new(config).unwrap();
let mut i = 0;
b.iter(|| {
let id = ids[i % ids.len()];
black_box(db.get_history(id, 2).unwrap());
i += 1;
});
});
// Benchmark delete operation
c.bench_function("delete", |b| {
// Create a fresh database for deletion benchmarks
let delete_dir = tempdir().expect("Failed to create temp directory");
let delete_path = delete_dir.path().to_path_buf();
let config = OurDBConfig {
path: delete_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; 100];
// Setup keys to delete
let mut delete_ids = Vec::with_capacity(1000);
for _ in 0..1000 {
let args = OurDBSetArgs {
id: None,
data: &test_data,
};
let id = db.set(args).unwrap();
delete_ids.push(id);
// Create a record with history
let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
// Update it a few times to create history
for _ in 0..5 {
db.set(OurDBSetArgs { id: Some(id), data: &test_data }).unwrap();
}
let mut i = 0;
b.iter(|| {
let id = delete_ids[i % delete_ids.len()];
// Only try to delete if it exists (not already deleted)
if db.get(id).is_ok() {
black_box(db.delete(id).unwrap());
}
i += 1;
let _ = db.get_history(id, 3).unwrap();
});
db.close().unwrap();
});
// Benchmark key-value mode vs incremental mode
let mut group = c.benchmark_group("mode_comparison");
// Benchmark set in key-value mode
group.bench_function("set_keyvalue_mode", |b| {
let kv_dir = tempdir().expect("Failed to create temp directory");
let kv_path = kv_dir.path().to_path_buf();
// Benchmark large data handling
c.bench_function("large_data", |b| {
let config = OurDBConfig {
path: kv_path.clone(),
incremental_mode: false, // Key-value mode
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; 100];
let mut i = 0;
let large_data = vec![b'X'; 10 * 1024]; // 10KB
b.iter(|| {
let id = i + 1; // Explicit ID
let args = OurDBSetArgs {
id: Some(id as u32),
data: &test_data,
};
black_box(db.set(args).unwrap());
i += 1;
let id = db.set(OurDBSetArgs { id: None, data: &large_data }).unwrap();
let _ = db.get(id).unwrap();
db.delete(id).unwrap();
});
db.close().unwrap();
});
// Benchmark set in incremental mode
group.bench_function("set_incremental_mode", |b| {
let inc_dir = tempdir().expect("Failed to create temp directory");
let inc_path = inc_dir.path().to_path_buf();
// Benchmark concurrent operations (simulated)
c.bench_function("concurrent_ops", |b| {
let config = OurDBConfig {
path: inc_path.clone(),
incremental_mode: true, // Incremental mode
path: db_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
keysize: Some(6),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; 100];
// Pre-insert some data
let mut ids = Vec::with_capacity(100);
for _ in 0..100 {
let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
ids.push(id);
}
b.iter(|| {
let args = OurDBSetArgs {
id: None, // Auto-generated ID
data: &test_data,
};
black_box(db.set(args).unwrap());
});
});
group.finish();
// Benchmark with different record sizes
let mut size_group = c.benchmark_group("record_size");
for &size in &[10, 100, 1000, 10000] {
size_group.bench_function(format!("set_size_{}", size), |b| {
let size_dir = tempdir().expect("Failed to create temp directory");
let size_path = size_dir.path().to_path_buf();
let config = OurDBConfig {
path: size_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; size];
b.iter(|| {
let args = OurDBSetArgs {
id: None,
data: &test_data,
};
black_box(db.set(args).unwrap());
});
// Simulate mixed workload
for i in 0..10 {
if i % 3 == 0 {
// Insert
let _ = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
} else if i % 3 == 1 {
// Read
let idx = i % ids.len();
let _ = db.get(ids[idx]).unwrap();
} else {
// Update
let idx = i % ids.len();
db.set(OurDBSetArgs { id: Some(ids[idx]), data: &test_data }).unwrap();
}
}
});
size_group.bench_function(format!("get_size_{}", size), |b| {
let size_dir = tempdir().expect("Failed to create temp directory");
let size_path = size_dir.path().to_path_buf();
let config = OurDBConfig {
path: size_path.clone(),
incremental_mode: true,
file_size: Some(10 * 1024 * 1024),
keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
};
let mut db = OurDB::new(config).unwrap();
let test_data = vec![b'X'; size];
// Insert some records first
let mut size_ids = Vec::with_capacity(100);
for _ in 0..100 {
let args = OurDBSetArgs {
id: None,
data: &test_data,
};
let id = db.set(args).unwrap();
size_ids.push(id);
}
let mut i = 0;
b.iter(|| {
let id = size_ids[i % size_ids.len()];
black_box(db.get(id).unwrap());
i += 1;
});
});
}
size_group.finish();
db.close().unwrap();
});
}
criterion_group!(benches, criterion_benchmark);

View File

@@ -41,6 +41,7 @@ fn key_value_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
incremental_mode: false,
file_size: Some(1024 * 1024), // 1MB for testing
keysize: Some(2), // Small key size for demonstration
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
@@ -94,6 +95,7 @@ fn incremental_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
incremental_mode: true,
file_size: Some(1024 * 1024), // 1MB for testing
keysize: Some(3), // 3-byte keys
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
@@ -136,7 +138,8 @@ fn performance_benchmark(base_path: &PathBuf) -> Result<(), ourdb::Error> {
path: db_path,
incremental_mode: true,
file_size: Some(1024 * 1024), // 10MB
keysize: Some(4), // 4-byte keys
keysize: Some(4), // 4-byte keys
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;

View File

@@ -13,6 +13,7 @@ fn main() -> Result<(), ourdb::Error> {
incremental_mode: true,
file_size: None, // Use default (500MB)
keysize: None, // Use default (4 bytes)
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;

View File

@@ -1,22 +1,28 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::time::{Duration, Instant};
use std::time::Instant;
fn main() -> Result<(), ourdb::Error> {
// Parse command line arguments
// Parse command-line arguments
let args: Vec<String> = std::env::args().collect();
let (num_operations, record_size, incremental_mode, keysize) = parse_args(&args);
// Default values
let mut incremental_mode = true;
let mut keysize: u8 = 4;
let mut num_operations = 10000;
println!("OurDB Benchmark");
println!("===============");
println!("Operations: {}", num_operations);
println!("Record size: {} bytes", record_size);
println!("Mode: {}", if incremental_mode { "Incremental" } else { "Key-Value" });
println!("Key size: {} bytes", keysize);
println!();
// Parse arguments
for i in 1..args.len() {
if args[i] == "--no-incremental" {
incremental_mode = false;
} else if args[i] == "--keysize" && i + 1 < args.len() {
keysize = args[i + 1].parse().unwrap_or(4);
} else if args[i] == "--ops" && i + 1 < args.len() {
num_operations = args[i + 1].parse().unwrap_or(10000);
}
}
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join(format!("ourdb_benchmark_{}", std::process::id()));
let db_path = std::env::temp_dir().join("ourdb_benchmark");
std::fs::create_dir_all(&db_path)?;
println!("Database path: {}", db_path.display());
@@ -27,24 +33,27 @@ fn main() -> Result<(), ourdb::Error> {
incremental_mode,
file_size: Some(1024 * 1024),
keysize: Some(keysize),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config)?;
// Prepare test data
let test_data = vec![b'X'; record_size];
// Prepare test data (100 bytes per record)
let test_data = vec![b'A'; 100];
// Benchmark write operations
println!("\nBenchmarking writes...");
println!("Benchmarking {} write operations (incremental: {}, keysize: {})...",
num_operations, incremental_mode, keysize);
let start = Instant::now();
let mut ids = Vec::with_capacity(num_operations);
for i in 0..num_operations {
for _ in 0..num_operations {
let id = if incremental_mode {
db.set(OurDBSetArgs { id: None, data: &test_data })?
} else {
// In key-value mode, we provide explicit IDs
let id = i as u32 + 1;
// In non-incremental mode, we need to provide IDs
let id = ids.len() as u32 + 1;
db.set(OurDBSetArgs { id: Some(id), data: &test_data })?;
id
};
@@ -52,10 +61,15 @@ fn main() -> Result<(), ourdb::Error> {
}
let write_duration = start.elapsed();
print_performance_stats("Write", num_operations, write_duration);
let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
println!("Write performance: {:.2} ops/sec ({:.2} ms/op)",
writes_per_second,
write_duration.as_secs_f64() * 1000.0 / num_operations as f64);
// Benchmark read operations
println!("Benchmarking {} read operations...", num_operations);
// Benchmark read operations (sequential)
println!("\nBenchmarking sequential reads...");
let start = Instant::now();
for &id in &ids {
@@ -63,123 +77,31 @@ fn main() -> Result<(), ourdb::Error> {
}
let read_duration = start.elapsed();
print_performance_stats("Sequential read", num_operations, read_duration);
let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
// Benchmark random reads
println!("\nBenchmarking random reads...");
let start = Instant::now();
use std::collections::HashSet;
let mut rng = rand::thread_rng();
let mut random_indices = HashSet::new();
// Select 20% of the IDs randomly for testing
let sample_size = num_operations / 5;
while random_indices.len() < sample_size {
let idx = rand::Rng::gen_range(&mut rng, 0..ids.len());
random_indices.insert(idx);
}
for idx in random_indices {
let _ = db.get(ids[idx])?;
}
let random_read_duration = start.elapsed();
print_performance_stats("Random read", sample_size, random_read_duration);
println!("Read performance: {:.2} ops/sec ({:.2} ms/op)",
reads_per_second,
read_duration.as_secs_f64() * 1000.0 / num_operations as f64);
// Benchmark update operations
println!("\nBenchmarking updates...");
println!("Benchmarking {} update operations...", num_operations);
let start = Instant::now();
for &id in &ids[0..num_operations/2] {
for &id in &ids {
db.set(OurDBSetArgs { id: Some(id), data: &test_data })?;
}
let update_duration = start.elapsed();
print_performance_stats("Update", num_operations/2, update_duration);
let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
// Benchmark history retrieval
println!("\nBenchmarking history retrieval...");
let start = Instant::now();
println!("Update performance: {:.2} ops/sec ({:.2} ms/op)",
updates_per_second,
update_duration.as_secs_f64() * 1000.0 / num_operations as f64);
for &id in &ids[0..num_operations/10] {
let _ = db.get_history(id, 2)?;
}
let history_duration = start.elapsed();
print_performance_stats("History retrieval", num_operations/10, history_duration);
// Benchmark delete operations
println!("\nBenchmarking deletes...");
let start = Instant::now();
for &id in &ids[0..num_operations/4] {
db.delete(id)?;
}
let delete_duration = start.elapsed();
print_performance_stats("Delete", num_operations/4, delete_duration);
// Close and clean up
// Clean up
db.close()?;
std::fs::remove_dir_all(&db_path)?;
println!("\nBenchmark completed successfully");
Ok(())
}
/// Parse benchmark options from the raw command-line argument list.
///
/// Recognized flags:
///   `--ops N`      number of operations (default: 100000)
///   `--size N`     record size in bytes (default: 100)
///   `--keyvalue`   use key-value mode instead of incremental mode
///   `--keysize N`  key size in bytes; only 2, 3, 4 or 6 are accepted (default: 4)
///   `--help`       print usage and exit the process
///
/// Returns `(num_operations, record_size, incremental_mode, keysize)`.
/// Unknown flags, missing values, and unparsable numbers are silently
/// ignored and the corresponding default is kept.
fn parse_args(args: &[String]) -> (usize, usize, bool, u8) {
    let mut num_operations: usize = 100_000;
    let mut record_size: usize = 100;
    let mut incremental_mode = true;
    let mut keysize: u8 = 4;

    // args[0] is the program name; scan the rest for known flags.
    for idx in 1..args.len() {
        match args[idx].as_str() {
            "--ops" => {
                if let Some(n) = args.get(idx + 1).and_then(|v| v.parse::<usize>().ok()) {
                    num_operations = n;
                }
            }
            "--size" => {
                if let Some(n) = args.get(idx + 1).and_then(|v| v.parse::<usize>().ok()) {
                    record_size = n;
                }
            }
            "--keyvalue" => incremental_mode = false,
            "--keysize" => {
                // Only the key sizes the database supports are accepted.
                if let Some(n) = args.get(idx + 1).and_then(|v| v.parse::<u8>().ok()) {
                    if matches!(n, 2 | 3 | 4 | 6) {
                        keysize = n;
                    }
                }
            }
            "--help" => {
                print_usage();
                std::process::exit(0);
            }
            _ => {} // flag values and unknown arguments fall through here
        }
    }

    (num_operations, record_size, incremental_mode, keysize)
}
/// Print the command-line help text for the benchmark tool to stdout.
fn print_usage() {
    // Header and option table; an empty entry produces the blank line.
    let help_lines = [
        "OurDB Benchmark Tool",
        "Usage: cargo run --example benchmark [OPTIONS]",
        "",
        "Options:",
        "  --ops N       Number of operations to perform (default: 100000)",
        "  --size N      Size of each record in bytes (default: 100)",
        "  --keyvalue    Use key-value mode instead of incremental mode",
        "  --keysize N   Key size in bytes (2, 3, 4, or 6) (default: 4)",
        "  --help        Print this help message",
    ];
    for line in &help_lines {
        println!("{}", line);
    }
}
/// Report timing statistics for one benchmark phase.
///
/// `operation` is a human-readable label, `count` the number of operations
/// performed, and `duration` the wall-clock time they took. Prints the total
/// time, the operation count, throughput (ops/sec) and mean latency (ms/op).
/// Note: a `count` of 0 yields inf/NaN in the derived figures, as in the
/// original implementation.
fn print_performance_stats(operation: &str, count: usize, duration: Duration) {
    // Derive throughput and per-operation latency from the elapsed time once.
    let elapsed_secs = duration.as_secs_f64();
    let throughput = count as f64 / elapsed_secs;
    let avg_latency_ms = elapsed_secs * 1000.0 / count as f64;

    println!("{} performance:", operation);
    println!("  Total time: {:.2} seconds", elapsed_secs);
    println!("  Operations: {}", count);
    println!("  Speed: {:.2} ops/sec", throughput);
    println!("  Average: {:.3} ms/op", avg_latency_ms);
}