Don't use strings for paths

This commit is contained in:
Maxime Van Hees
2025-09-25 16:25:08 +02:00
parent 7f689ae29b
commit 77a53bae86
13 changed files with 54 additions and 41 deletions

View File

@@ -1,4 +1,4 @@
use std::path::PathBuf; use std::path::{Path, PathBuf};
use std::sync::{Arc, OnceLock, Mutex, RwLock}; use std::sync::{Arc, OnceLock, Mutex, RwLock};
use std::collections::HashMap; use std::collections::HashMap;
@@ -35,11 +35,11 @@ static DATA_STORAGES: OnceLock<RwLock<HashMap<u64, Arc<dyn StorageBackend>>>> =
static DATA_INIT_LOCK: Mutex<()> = Mutex::new(()); static DATA_INIT_LOCK: Mutex<()> = Mutex::new(());
fn init_admin_storage( fn init_admin_storage(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
) -> Result<Arc<dyn StorageBackend>, DBError> { ) -> Result<Arc<dyn StorageBackend>, DBError> {
let db_file = PathBuf::from(base_dir).join("0.db"); let db_file = base_dir.join("0.db");
if let Some(parent_dir) = db_file.parent() { if let Some(parent_dir) = db_file.parent() {
std::fs::create_dir_all(parent_dir).map_err(|e| { std::fs::create_dir_all(parent_dir).map_err(|e| {
DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e)) DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e))
@@ -57,24 +57,25 @@ fn init_admin_storage(
// Get or initialize a cached handle to admin DB 0 per base_dir (thread-safe, no double-open race) // Get or initialize a cached handle to admin DB 0 per base_dir (thread-safe, no double-open race)
pub fn open_admin_storage( pub fn open_admin_storage(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
) -> Result<Arc<dyn StorageBackend>, DBError> { ) -> Result<Arc<dyn StorageBackend>, DBError> {
let map = ADMIN_STORAGES.get_or_init(|| RwLock::new(HashMap::new())); let map = ADMIN_STORAGES.get_or_init(|| RwLock::new(HashMap::new()));
let key = base_dir.display().to_string();
// Fast path // Fast path
if let Some(st) = map.read().unwrap().get(base_dir) { if let Some(st) = map.read().unwrap().get(&key) {
return Ok(st.clone()); return Ok(st.clone());
} }
// Slow path with write lock // Slow path with write lock
{ {
let mut w = map.write().unwrap(); let mut w = map.write().unwrap();
if let Some(st) = w.get(base_dir) { if let Some(st) = w.get(&key) {
return Ok(st.clone()); return Ok(st.clone());
} }
// Detect existing 0.db backend by filesystem, if present. // Detect existing 0.db backend by filesystem, if present.
let admin_path = PathBuf::from(base_dir).join("0.db"); let admin_path = base_dir.join("0.db");
let detected = if admin_path.exists() { let detected = if admin_path.exists() {
if admin_path.is_file() { if admin_path.is_file() {
Some(options::BackendType::Redb) Some(options::BackendType::Redb)
@@ -102,14 +103,14 @@ pub fn open_admin_storage(
}; };
let st = init_admin_storage(base_dir, effective_backend, admin_secret)?; let st = init_admin_storage(base_dir, effective_backend, admin_secret)?;
w.insert(base_dir.to_string(), st.clone()); w.insert(key, st.clone());
Ok(st) Ok(st)
} }
} }
// Ensure admin structures exist in encrypted DB 0 // Ensure admin structures exist in encrypted DB 0
pub fn ensure_bootstrap( pub fn ensure_bootstrap(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
) -> Result<(), DBError> { ) -> Result<(), DBError> {
@@ -125,7 +126,7 @@ pub fn ensure_bootstrap(
// Get or initialize a shared handle to a data DB (> 0), avoiding double-open across subsystems // Get or initialize a shared handle to a data DB (> 0), avoiding double-open across subsystems
pub fn open_data_storage( pub fn open_data_storage(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -159,7 +160,7 @@ pub fn open_data_storage(
// 2) If missing, sniff filesystem (file => Redb, dir => Sled), then persist into admin meta // 2) If missing, sniff filesystem (file => Redb, dir => Sled), then persist into admin meta
// 3) Fallback to requested 'backend' (startup default) if nothing else is known // 3) Fallback to requested 'backend' (startup default) if nothing else is known
let meta_backend = get_database_backend(base_dir, backend.clone(), admin_secret, id).ok().flatten(); let meta_backend = get_database_backend(base_dir, backend.clone(), admin_secret, id).ok().flatten();
let db_path = PathBuf::from(base_dir).join(format!("{}.db", id)); let db_path = base_dir.join(format!("{}.db", id));
let sniffed_backend = if db_path.exists() { let sniffed_backend = if db_path.exists() {
if db_path.is_file() { if db_path.is_file() {
Some(options::BackendType::Redb) Some(options::BackendType::Redb)
@@ -214,7 +215,7 @@ pub fn open_data_storage(
// Allocate the next DB id and persist new pointer // Allocate the next DB id and persist new pointer
pub fn allocate_next_id( pub fn allocate_next_id(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
) -> Result<u64, DBError> { ) -> Result<u64, DBError> {
@@ -238,7 +239,7 @@ pub fn allocate_next_id(
// Check existence of a db id in admin:dbs // Check existence of a db id in admin:dbs
pub fn db_exists( pub fn db_exists(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -249,7 +250,7 @@ pub fn db_exists(
// Get per-db encryption key, if any // Get per-db encryption key, if any
pub fn get_enc_key( pub fn get_enc_key(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -260,7 +261,7 @@ pub fn get_enc_key(
// Set per-db encryption key (called during create) // Set per-db encryption key (called during create)
pub fn set_enc_key( pub fn set_enc_key(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -272,7 +273,7 @@ pub fn set_enc_key(
// Set database public flag // Set database public flag
pub fn set_database_public( pub fn set_database_public(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -286,7 +287,7 @@ pub fn set_database_public(
// Persist per-db backend type in admin metadata (module-scope) // Persist per-db backend type in admin metadata (module-scope)
pub fn set_database_backend( pub fn set_database_backend(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -304,7 +305,7 @@ pub fn set_database_backend(
} }
pub fn get_database_backend( pub fn get_database_backend(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -321,7 +322,7 @@ pub fn get_database_backend(
// Set database name // Set database name
pub fn set_database_name( pub fn set_database_name(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -335,7 +336,7 @@ pub fn set_database_name(
// Get database name // Get database name
pub fn get_database_name( pub fn get_database_name(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -359,7 +360,7 @@ fn load_public(
// Add access key for db (value format: "Read:ts" or "ReadWrite:ts") // Add access key for db (value format: "Read:ts" or "ReadWrite:ts")
pub fn add_access_key( pub fn add_access_key(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -378,7 +379,7 @@ pub fn add_access_key(
// Delete access key by hash // Delete access key by hash
pub fn delete_access_key( pub fn delete_access_key(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -391,7 +392,7 @@ pub fn delete_access_key(
// List access keys, returning (hash, perms, created_at_secs) // List access keys, returning (hash, perms, created_at_secs)
pub fn list_access_keys( pub fn list_access_keys(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -411,7 +412,7 @@ pub fn list_access_keys(
// - Ok(Some(Permissions)) when access is allowed // - Ok(Some(Permissions)) when access is allowed
// - Ok(None) when not allowed or db missing (caller can distinguish by calling db_exists) // - Ok(None) when not allowed or db missing (caller can distinguish by calling db_exists)
pub fn verify_access( pub fn verify_access(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
id: u64, id: u64,
@@ -456,7 +457,7 @@ pub fn verify_access(
// Enumerate all db ids // Enumerate all db ids
pub fn list_dbs( pub fn list_dbs(
base_dir: &str, base_dir: &Path,
backend: options::BackendType, backend: options::BackendType,
admin_secret: &str, admin_secret: &str,
) -> Result<Vec<u64>, DBError> { ) -> Result<Vec<u64>, DBError> {

View File

@@ -1427,7 +1427,7 @@ async fn incr_cmd(server: &Server, key: &String) -> Result<Protocol, DBError> {
fn config_get_cmd(name: &String, server: &Server) -> Result<Protocol, DBError> { fn config_get_cmd(name: &String, server: &Server) -> Result<Protocol, DBError> {
let value = match name.as_str() { let value = match name.as_str() {
"dir" => Some(server.option.dir.clone()), "dir" => Some(server.option.dir.display().to_string()),
"dbfilename" => Some(format!("{}.db", server.selected_db)), "dbfilename" => Some(format!("{}.db", server.selected_db)),
"databases" => Some("16".to_string()), // Hardcoded as per original logic "databases" => Some("16".to_string()), // Hardcoded as per original logic
_ => None, _ => None,

View File

@@ -1,5 +1,6 @@
// #![allow(unused_imports)] // #![allow(unused_imports)]
use std::path::PathBuf;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use herodb::server; use herodb::server;
@@ -13,7 +14,7 @@ use clap::Parser;
struct Args { struct Args {
/// The directory of Redis DB file /// The directory of Redis DB file
#[arg(long)] #[arg(long)]
dir: String, dir: PathBuf,
/// The port of the Redis server, default is 6379 if not specified /// The port of the Redis server, default is 6379 if not specified
#[arg(long)] #[arg(long)]

View File

@@ -1,3 +1,5 @@
use std::path::PathBuf;
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum BackendType { pub enum BackendType {
Redb, Redb,
@@ -7,7 +9,7 @@ pub enum BackendType {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct DBOption { pub struct DBOption {
pub dir: String, pub dir: PathBuf,
pub port: u16, pub port: u16,
pub debug: bool, pub debug: bool,
// Deprecated for data DBs; retained for backward-compat on CLI parsing // Deprecated for data DBs; retained for backward-compat on CLI parsing

View File

@@ -1,4 +1,5 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use jsonrpsee::{core::RpcResult, proc_macros::rpc};
@@ -165,7 +166,7 @@ pub trait Rpc {
/// RPC Server implementation /// RPC Server implementation
pub struct RpcServerImpl { pub struct RpcServerImpl {
/// Base directory for database files /// Base directory for database files
base_dir: String, base_dir: PathBuf,
/// Managed database servers /// Managed database servers
servers: Arc<RwLock<HashMap<u64, Arc<Server>>>>, servers: Arc<RwLock<HashMap<u64, Arc<Server>>>>,
/// Default backend type /// Default backend type
@@ -176,7 +177,7 @@ pub struct RpcServerImpl {
impl RpcServerImpl { impl RpcServerImpl {
/// Create a new RPC server instance /// Create a new RPC server instance
pub fn new(base_dir: String, backend: crate::options::BackendType, admin_secret: String) -> Self { pub fn new(base_dir: PathBuf, backend: crate::options::BackendType, admin_secret: String) -> Self {
Self { Self {
base_dir, base_dir,
servers: Arc::new(RwLock::new(HashMap::new())), servers: Arc::new(RwLock::new(HashMap::new())),
@@ -351,7 +352,7 @@ impl RpcServerImpl {
backend, backend,
encrypted, encrypted,
redis_version: Some("7.0".to_string()), redis_version: Some("7.0".to_string()),
storage_path: Some(server.option.dir.clone()), storage_path: Some(server.option.dir.display().to_string()),
size_on_disk, size_on_disk,
key_count, key_count,
created_at, created_at,

View File

@@ -1,11 +1,12 @@
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::PathBuf;
use jsonrpsee::server::{ServerBuilder, ServerHandle}; use jsonrpsee::server::{ServerBuilder, ServerHandle};
use jsonrpsee::RpcModule; use jsonrpsee::RpcModule;
use crate::rpc::{RpcServer, RpcServerImpl}; use crate::rpc::{RpcServer, RpcServerImpl};
/// Start the RPC server on the specified address /// Start the RPC server on the specified address
pub async fn start_rpc_server(addr: SocketAddr, base_dir: String, backend: crate::options::BackendType, admin_secret: String) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> { pub async fn start_rpc_server(addr: SocketAddr, base_dir: PathBuf, backend: crate::options::BackendType, admin_secret: String) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
// Create the RPC server implementation // Create the RPC server implementation
let rpc_impl = RpcServerImpl::new(base_dir, backend, admin_secret); let rpc_impl = RpcServerImpl::new(base_dir, backend, admin_secret);
@@ -34,7 +35,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_rpc_server_startup() { async fn test_rpc_server_startup() {
let addr = "127.0.0.1:0".parse().unwrap(); // Use port 0 for auto-assignment let addr = "127.0.0.1:0".parse().unwrap(); // Use port 0 for auto-assignment
let base_dir = "/tmp/test_rpc".to_string(); let base_dir = PathBuf::from("/tmp/test_rpc");
let backend = crate::options::BackendType::Redb; // Default for test let backend = crate::options::BackendType::Redb; // Default for test
let handle = start_rpc_server(addr, base_dir, backend, "test-admin".to_string()).await.unwrap(); let handle = start_rpc_server(addr, base_dir, backend, "test-admin".to_string()).await.unwrap();

View File

@@ -1,4 +1,5 @@
use herodb::{server::Server, options::DBOption}; use herodb::{server::Server, options::DBOption};
use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream; use tokio::net::TcpStream;
@@ -22,7 +23,7 @@ async fn debug_hset_simple() {
let port = 16500; let port = 16500;
let option = DBOption { let option = DBOption {
dir: test_dir.to_string(), dir: PathBuf::from(test_dir),
port, port,
debug: false, debug: false,
encrypt: false, encrypt: false,

View File

@@ -1,4 +1,5 @@
use herodb::{server::Server, options::DBOption}; use herodb::{server::Server, options::DBOption};
use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream; use tokio::net::TcpStream;
@@ -13,7 +14,7 @@ async fn debug_hset_return_value() {
std::fs::create_dir_all(&test_dir).unwrap(); std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption { let option = DBOption {
dir: test_dir.to_string(), dir: PathBuf::from(test_dir),
port: 16390, port: 16390,
debug: false, debug: false,
encrypt: false, encrypt: false,

View File

@@ -1,4 +1,5 @@
use herodb::{server::Server, options::DBOption}; use herodb::{server::Server, options::DBOption};
use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream; use tokio::net::TcpStream;
@@ -17,7 +18,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
std::fs::create_dir_all(&test_dir).unwrap(); std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption { let option = DBOption {
dir: test_dir, dir: PathBuf::from(test_dir),
port, port,
debug: true, debug: true,
encrypt: false, encrypt: false,

View File

@@ -1,6 +1,7 @@
use herodb::rpc::{BackendType, DatabaseConfig}; use herodb::rpc::{BackendType, DatabaseConfig};
use herodb::admin_meta; use herodb::admin_meta;
use herodb::options::BackendType as OptionsBackendType; use herodb::options::BackendType as OptionsBackendType;
use std::path::Path;
#[tokio::test] #[tokio::test]
async fn test_rpc_server_basic() { async fn test_rpc_server_basic() {
@@ -70,11 +71,11 @@ async fn test_database_name_persistence() {
let _ = std::fs::remove_dir_all(base_dir); let _ = std::fs::remove_dir_all(base_dir);
// Set the database name // Set the database name
admin_meta::set_database_name(base_dir, backend.clone(), admin_secret, db_id, test_name) admin_meta::set_database_name(Path::new(base_dir), backend.clone(), admin_secret, db_id, test_name)
.expect("Failed to set database name"); .expect("Failed to set database name");
// Retrieve the database name // Retrieve the database name
let retrieved_name = admin_meta::get_database_name(base_dir, backend, admin_secret, db_id) let retrieved_name = admin_meta::get_database_name(Path::new(base_dir), backend, admin_secret, db_id)
.expect("Failed to get database name"); .expect("Failed to get database name");
// Verify the name matches // Verify the name matches

View File

@@ -1,4 +1,5 @@
use herodb::{server::Server, options::DBOption}; use herodb::{server::Server, options::DBOption};
use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio::time::sleep; use tokio::time::sleep;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -19,7 +20,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
std::fs::create_dir_all(&test_dir).unwrap(); std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption { let option = DBOption {
dir: test_dir, dir: PathBuf::from(test_dir),
port, port,
debug: true, debug: true,
encrypt: false, encrypt: false,

View File

@@ -1,4 +1,5 @@
use herodb::{server::Server, options::DBOption}; use herodb::{server::Server, options::DBOption};
use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream; use tokio::net::TcpStream;
@@ -17,7 +18,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
std::fs::create_dir_all(&test_dir).unwrap(); std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption { let option = DBOption {
dir: test_dir, dir: PathBuf::from(test_dir),
port, port,
debug: false, debug: false,
encrypt: false, encrypt: false,

View File

@@ -1,4 +1,5 @@
use herodb::{options::DBOption, server::Server}; use herodb::{options::DBOption, server::Server};
use std::path::PathBuf;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::time::{sleep, Duration}; use tokio::time::{sleep, Duration};
@@ -17,7 +18,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
std::fs::create_dir_all(&test_dir).unwrap(); std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption { let option = DBOption {
dir: test_dir, dir: PathBuf::from(test_dir),
port, port,
debug: false, debug: false,
encrypt: false, encrypt: false,