...
This commit is contained in:
parent 10831fd260
commit d32a41d579
@@ -1,49 +0,0 @@
-//! Main entry point for the HeroDB server
-//!
-//! This file serves as the entry point for the HeroDB server,
-//! which provides a web API for accessing HeroDB functionality.
-
-use std::path::PathBuf;
-
-// We need tokio for the async runtime
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    // Print a welcome message
-    println!("Starting HeroDB server...");
-
-    // Set up default values
-    let host = "127.0.0.1";
-    let port = 3002;
-
-    // Create a temporary directory for the database
-    let db_path = match create_temp_dir() {
-        Ok(path) => {
-            println!("Using temporary DB path: {:?}", path);
-            path
-        }
-        Err(e) => {
-            eprintln!("Error creating temporary directory: {}", e);
-            return Err(e.into());
-        }
-    };
-
-    println!("Starting server on {}:{}", host, port);
-    println!("API Documentation: http://{}:{}/docs", host, port);
-
-    // Start the server
-    herodb::server::start_server(db_path, host, port).await?;
-
-    Ok(())
-}
-
-/// Creates a simple temporary directory for the database
-fn create_temp_dir() -> std::io::Result<PathBuf> {
-    let temp_dir = std::env::temp_dir();
-    let random_name = format!("herodb-{}", std::time::SystemTime::now()
-        .duration_since(std::time::UNIX_EPOCH)
-        .unwrap()
-        .as_millis());
-    let path = temp_dir.join(random_name);
-    std::fs::create_dir_all(&path)?;
-    Ok(path)
-}
@@ -1,201 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
-use tempfile::tempdir;
-
-fn criterion_benchmark(c: &mut Criterion) {
-    // Create a temporary directory for benchmarks
-    let temp_dir = tempdir().expect("Failed to create temp directory");
-    let db_path = temp_dir.path().to_path_buf();
-
-    // Benchmark set operation (insertion)
-    c.bench_function("set", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024), // 10MB
-            keysize: Some(6), // Use keysize=6 to allow non-zero file_nr
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-        let test_data = vec![b'X'; 100]; // 100 bytes of data
-
-        b.iter(|| {
-            let _ = db.set(OurDBSetArgs {
-                id: None,
-                data: &test_data,
-            }).unwrap();
-        });
-
-        db.close().unwrap();
-    });
-
-    // Benchmark get operation (retrieval)
-    c.bench_function("get", |b| {
-        // Setup: Create a database and insert a record
-        let setup_config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(setup_config).unwrap();
-        let test_data = vec![b'X'; 100];
-        let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
-
-        b.iter(|| {
-            let _ = db.get(id).unwrap();
-        });
-
-        db.close().unwrap();
-    });
-
-    // Benchmark update operation
-    c.bench_function("update", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-        let test_data = vec![b'X'; 100];
-        let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
-
-        b.iter(|| {
-            let _ = db.set(OurDBSetArgs {
-                id: Some(id),
-                data: &test_data,
-            }).unwrap();
-        });
-
-        db.close().unwrap();
-    });
-
-    // Benchmark delete operation
-    c.bench_function("delete", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-
-        // Create a test data vector outside the closure
-        let test_data = vec![b'X'; 100];
-
-        b.iter_with_setup(
-            // Setup: Insert a record before each iteration
-            || {
-                db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap()
-            },
-            // Benchmark: Delete the record
-            |id| {
-                db.delete(id).unwrap();
-            }
-        );
-
-        db.close().unwrap();
-    });
-
-    // Benchmark history tracking
-    c.bench_function("get_history", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-        let test_data = vec![b'X'; 100];
-
-        // Create a record with history
-        let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
-
-        // Update it a few times to create history
-        for _ in 0..5 {
-            db.set(OurDBSetArgs { id: Some(id), data: &test_data }).unwrap();
-        }
-
-        b.iter(|| {
-            let _ = db.get_history(id, 3).unwrap();
-        });
-
-        db.close().unwrap();
-    });
-
-    // Benchmark large data handling
-    c.bench_function("large_data", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-        let large_data = vec![b'X'; 10 * 1024]; // 10KB
-
-        b.iter(|| {
-            let id = db.set(OurDBSetArgs { id: None, data: &large_data }).unwrap();
-            let _ = db.get(id).unwrap();
-            db.delete(id).unwrap();
-        });
-
-        db.close().unwrap();
-    });
-
-    // Benchmark concurrent operations (simulated)
-    c.bench_function("concurrent_ops", |b| {
-        let config = OurDBConfig {
-            path: db_path.clone(),
-            incremental_mode: true,
-            file_size: Some(10 * 1024 * 1024),
-            keysize: Some(6),
-            reset: Some(true), // Reset the database for benchmarking
-        };
-
-        let mut db = OurDB::new(config).unwrap();
-        let test_data = vec![b'X'; 100];
-
-        // Pre-insert some data
-        let mut ids = Vec::with_capacity(100);
-        for _ in 0..100 {
-            let id = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
-            ids.push(id);
-        }
-
-        b.iter(|| {
-            // Simulate mixed workload
-            for i in 0..10 {
-                if i % 3 == 0 {
-                    // Insert
-                    let _ = db.set(OurDBSetArgs { id: None, data: &test_data }).unwrap();
-                } else if i % 3 == 1 {
-                    // Read
-                    let idx = i % ids.len();
-                    let _ = db.get(ids[idx]).unwrap();
-                } else {
-                    // Update
-                    let idx = i % ids.len();
-                    db.set(OurDBSetArgs { id: Some(ids[idx]), data: &test_data }).unwrap();
-                }
-            }
-        });
-
-        db.close().unwrap();
-    });
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
@@ -326,6 +326,7 @@ mod tests {
             incremental_mode: false,
             file_size: None,
             keysize: None,
+            reset: None, // Don't reset existing database
         };

         let mut db = OurDB::new(config).unwrap();
@@ -244,6 +244,7 @@ mod tests {
             incremental_mode: true,
             file_size: None,
             keysize: None,
+            reset: None, // Don't reset existing database
         };

         let mut db = OurDB::new(config).unwrap();
@@ -33,6 +33,7 @@ fn test_basic_operations() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

@@ -77,6 +78,7 @@ fn test_key_value_mode() {
         incremental_mode: false,
         file_size: None,
         keysize: None,
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -106,6 +108,7 @@ fn test_incremental_mode() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

@@ -141,6 +144,7 @@ fn test_persistence() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -162,6 +166,7 @@ fn test_persistence() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -193,6 +198,7 @@ fn test_different_keysizes() {
         incremental_mode: true,
         file_size: None,
         keysize: Some(*keysize),
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -219,6 +225,7 @@ fn test_large_data() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -247,6 +254,7 @@ fn test_exceed_size_limit() {
         incremental_mode: true,
         file_size: None,
         keysize: None,
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -275,6 +283,7 @@ fn test_multiple_files() {
         incremental_mode: true,
         file_size: Some(1024), // Very small file size (1KB)
         keysize: Some(6), // 6-byte keysize for multiple files
+        reset: None
     };

     let mut db = OurDB::new(config).unwrap();
@@ -15,6 +15,7 @@ pub fn new_radix_tree(path: &str, reset: bool) -> Result<RadixTree, Error> {
         incremental_mode: true,
         file_size: Some(1024 * 1024 * 10), // 10MB file size for better performance with large datasets
         keysize: Some(6), // Use keysize=6 to support multiple files (file_nr + position)
+        reset: None, // Don't reset existing database
     };

     let mut db = OurDB::new(config)?;