//! Integration tests for OurDB: basic CRUD operations, key-value and
//! incremental modes, persistence across sessions, configurable keysizes,
//! the per-record size limit, and multi-file storage.
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
|
use std::env::temp_dir;
|
|
use std::fs;
|
|
use std::path::PathBuf;
|
|
use std::time::{SystemTime, UNIX_EPOCH};
|
|
use rand;
|
|
|
|
// Helper function to create a unique temporary directory for tests
|
|
fn get_temp_dir() -> PathBuf {
|
|
let timestamp = SystemTime::now()
|
|
.duration_since(UNIX_EPOCH)
|
|
.unwrap()
|
|
.as_nanos();
|
|
let random_part = rand::random::<u32>();
|
|
let dir = temp_dir().join(format!("ourdb_test_{}_{}", timestamp, random_part));
|
|
|
|
// Ensure the directory exists and is empty
|
|
if dir.exists() {
|
|
std::fs::remove_dir_all(&dir).unwrap();
|
|
}
|
|
std::fs::create_dir_all(&dir).unwrap();
|
|
|
|
dir
|
|
}
|
|
|
|
#[test]
fn test_basic_operations() {
    let temp_dir = get_temp_dir();

    // Open a fresh database in incremental (auto-ID) mode.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // set + get round-trip.
    let original = b"Hello, OurDB!";
    let record_id = db.set(OurDBSetArgs { id: None, data: original }).unwrap();
    assert_eq!(db.get(record_id).unwrap(), original);

    // Writing to the same id replaces the visible value.
    let replacement = b"Updated data";
    db.set(OurDBSetArgs { id: Some(record_id), data: replacement }).unwrap();
    assert_eq!(db.get(record_id).unwrap(), replacement);

    // History is returned newest-first: [replacement, original].
    let history = db.get_history(record_id, 2).unwrap();
    assert_eq!(history.len(), 2);
    assert_eq!(history[0], replacement);
    assert_eq!(history[1], original);

    // Deleting makes subsequent reads fail.
    db.delete(record_id).unwrap();
    assert!(db.get(record_id).is_err());

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|
|
|
|
#[test]
fn test_key_value_mode() {
    let temp_dir = get_temp_dir();

    // Key-value mode: the caller supplies every id explicitly.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: false,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // Store under a caller-chosen id and read it back.
    let payload = b"Key-value data";
    let key = 42;
    db.set(OurDBSetArgs { id: Some(key), data: payload }).unwrap();
    assert_eq!(db.get(key).unwrap(), payload);

    // Auto-increment is unavailable outside incremental mode.
    assert!(db.get_next_id().is_err());

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|
|
|
|
#[test]
fn test_incremental_mode() {
    let temp_dir = get_temp_dir();

    // Open a fresh database in incremental (auto-ID) mode.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // Two inserts without explicit ids...
    let first_id = db.set(OurDBSetArgs { id: None, data: b"First record" }).unwrap();
    let second_id = db.set(OurDBSetArgs { id: None, data: b"Second record" }).unwrap();

    // ...receive consecutive ids,
    assert_eq!(second_id, first_id + 1);

    // and the counter points just past the most recent insert.
    assert_eq!(db.get_next_id().unwrap(), second_id + 1);

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|
|
|
|
#[test]
fn test_persistence() {
    let temp_dir = get_temp_dir();

    // Builds an identical incremental-mode config for both sessions.
    let make_config = || OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };

    // Session 1: write a record, then close cleanly.
    {
        let mut db = OurDB::new(make_config()).unwrap();
        let id = db.set(OurDBSetArgs { id: None, data: b"Persistent data" }).unwrap();

        // Explicitly close the database.
        db.close().unwrap();

        // A brand-new incremental database hands out id 1 first.
        assert_eq!(id, 1);
    }

    // Session 2: reopen the same path and verify everything survived.
    {
        let mut db = OurDB::new(make_config()).unwrap();

        // The stored record is still readable.
        assert_eq!(db.get(1).unwrap(), b"Persistent data");

        // The auto-increment counter also persisted.
        assert_eq!(db.get_next_id().unwrap(), 2);

        // Remove all on-disk state.
        db.destroy().unwrap();
    }
}
|
|
|
|
#[test]
fn test_different_keysizes() {
    // Basic set/get must work at every supported keysize.
    for &keysize in [2, 3, 4, 6].iter() {
        // get_temp_dir() already creates the directory, so the previously
        // redundant create_dir_all call has been removed.
        let temp_dir = get_temp_dir();

        // Create a new database with the specified keysize.
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: Some(keysize),
            reset: None,
        };
        let mut db = OurDB::new(config).unwrap();

        // Round-trip one record.
        let test_data = b"Keysize test data";
        let id = db.set(OurDBSetArgs { id: None, data: test_data }).unwrap();
        assert_eq!(db.get(id).unwrap(), test_data);

        // Remove all on-disk state before the next iteration.
        db.destroy().unwrap();
    }
}
|
|
|
|
#[test]
fn test_large_data() {
    let temp_dir = get_temp_dir();

    // Open a fresh database with default settings.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // 60 KB payload — large, but within the 64KB per-record limit.
    let payload = vec![b'X'; 60 * 1024];

    // Store and retrieve the large record.
    let id = db.set(OurDBSetArgs { id: None, data: &payload }).unwrap();
    let round_trip = db.get(id).unwrap();

    assert_eq!(round_trip.len(), payload.len());
    assert_eq!(round_trip, payload);

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|
|
|
|
#[test]
fn test_exceed_size_limit() {
    let temp_dir = get_temp_dir();

    // Open a fresh database with default settings.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // 70KB exceeds the 64KB per-record limit; the write must be rejected.
    let oversized = vec![b'X'; 70 * 1024];
    let outcome = db.set(OurDBSetArgs { id: None, data: &oversized });
    assert!(outcome.is_err(), "Expected an error when storing data larger than 64KB");

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|
|
|
|
#[test]
fn test_multiple_files() {
    let temp_dir = get_temp_dir();

    // A tiny 1KB file size forces the backend to roll over to additional
    // data files; the 6-byte keysize is what addresses multiple files.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: Some(1024), // Very small file size (1KB)
        keysize: Some(6),      // 6-byte keysize for multiple files
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // Ten 500-byte records comfortably overflow a single 1KB file.
    let data_size = 500; // bytes per record
    let record = vec![b'A'; data_size];
    let ids: Vec<_> = (0..10)
        .map(|_| db.set(OurDBSetArgs { id: None, data: &record }).unwrap())
        .collect();

    // Every record must still be readable.
    for &id in &ids {
        assert_eq!(db.get(id).unwrap().len(), data_size);
    }

    // Count the .db files actually written to disk.
    let db_file_count = fs::read_dir(&temp_dir)
        .unwrap()
        .filter_map(Result::ok)
        .filter(|entry| {
            let path = entry.path();
            path.is_file() && path.extension().map_or(false, |ext| ext == "db")
        })
        .count();

    assert!(db_file_count > 1, "Expected multiple database files, found {}", db_file_count);

    // Remove all on-disk state.
    db.destroy().unwrap();
}
|