Compare commits

9 Commits

Author  SHA1  Message  Date

Timur Gordon  6569e819ae  marketplace models wip  2025-08-21 14:07:14 +02:00
Timur Gordon  130822b69b  Merge branch 'development' of https://git.ourworld.tf/herocode/db into development  2025-08-21 14:06:40 +02:00
Timur Gordon  7439980b33  Merge branch 'main' into development  2025-08-21 14:05:57 +02:00
Timur Gordon  cedea2f305  move rhai wrappers of models from rhailib  2025-08-21 14:05:01 +02:00
Timur Gordon  58ed59cd12  Merge branch 'main' of https://git.ourworld.tf/herocode/db  2025-08-08 09:46:38 +02:00
Timur Gordon  6727c7498d  add heroledger models  2025-08-08 09:46:30 +02:00
fc7e327f07  ...  2025-08-08 09:42:47 +02:00
993fa2adcd  ...  2025-08-08 08:53:49 +02:00
Maxime Van Hees  453e86edd2  fixed dependencies issues on main branch which is called 'development'  2025-08-05 13:11:53 +02:00
106 changed files with 11910 additions and 11273 deletions

Cargo.lock (generated): 694 changed lines. File diff suppressed because it is too large.

@@ -3,7 +3,4 @@ members = [
"heromodels",
"heromodels_core",
"heromodels-derive",
"ourdb",
"radixtree",
"tst",
]

heromodels/Cargo.lock (generated): 48 changed lines.

@@ -60,7 +60,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -233,14 +233,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "derive"
version = "0.1.0"
dependencies = [
"quote",
"syn 1.0.109",
]
[[package]]
name = "digest"
version = "0.10.7"
@@ -300,7 +292,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -387,7 +379,6 @@ version = "0.1.0"
dependencies = [
"bincode",
"chrono",
"derive",
"heromodels-derive",
"heromodels_core",
"jsonb",
@@ -411,7 +402,7 @@ version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -514,7 +505,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -952,7 +943,7 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1015,7 +1006,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1145,7 +1136,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1154,17 +1145,6 @@ version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.104"
@@ -1199,7 +1179,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1254,7 +1234,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1409,7 +1389,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
"wasm-bindgen-shared",
]
@@ -1431,7 +1411,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -1487,7 +1467,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1498,7 +1478,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@@ -1633,5 +1613,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]


@@ -10,16 +10,18 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = { version = "2", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
ourdb = { path = "../ourdb" }
tst = { path = "../tst" }
ourdb = { path = "../../herolib_rust/packages/data/ourdb" }
tst = { path = "../../herolib_rust/packages/data/tst" }
heromodels-derive = { path = "../heromodels-derive" }
heromodels_core = { path = "../heromodels_core" }
rhailib-macros = { path = "../../herolib_rust/rhailib/src/macros" }
rhai = { version = "1.21.0", features = [
"std",
"sync",
"decimal",
"internals",
] } # Added "decimal" feature, sync for Arc<Mutex<>>
rust_decimal = { version = "1.36", features = ["serde"] }
strum = "0.26"
strum_macros = "0.26"
uuid = { version = "1.17.0", features = ["v4"] }
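
The `sync` feature called out in the dependency comment above is what lets Rhai values be shared behind `Arc<Mutex<...>>`. A minimal, illustrative sketch of that point (not taken from this repository):

```rust
// Illustrative sketch: with rhai's "sync" feature enabled (as in the Cargo.toml
// above), Engine and Dynamic are Send + Sync, so an engine can be shared
// across threads behind Arc<Mutex<_>>. Without "sync" this does not compile.
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let engine = Arc::new(Mutex::new(rhai::Engine::new()));

    let handle = {
        let engine = Arc::clone(&engine);
        thread::spawn(move || {
            // Evaluate a trivial script from another thread.
            engine.lock().unwrap().eval::<i64>("40 + 2").unwrap()
        })
    };

    assert_eq!(handle.join().unwrap(), 42);
}
```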


@@ -52,7 +52,6 @@ pub mut:
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use rhai::CustomType;
use rhailib_derive::RhaiApi;
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
```


@@ -0,0 +1,53 @@
// heroledger.rhai - Demonstration of HeroLedger models in Rhai
print("=== HeroLedger Models Demo ===");
// Create a new user
print("\n--- Creating User ---");
let new_user = new_user()
.name("Alice Johnson")
.email("alice@herocode.com")
.pubkey("0x1234567890abcdef")
.status("Active")
.save_user();
print("Created user: " + new_user.get_name());
print("User ID: " + new_user.get_id());
print("User email: " + new_user.get_email());
print("User pubkey: " + new_user.get_pubkey());
// Create a new group
print("\n--- Creating Group ---");
let new_group = new_group()
.name("HeroCode Developers")
.description("A group for HeroCode development team members")
.visibility("Public")
.save_group();
print("Created group: " + new_group.get_name());
print("Group ID: " + new_group.get_id());
print("Group description: " + new_group.get_description());
// Create a new account
print("\n--- Creating Account ---");
let new_account = new_account()
.name("Alice's Main Account")
.description("Primary account for Alice Johnson")
.currency("USD")
.save_account();
print("Created account: " + new_account.get_name());
print("Account ID: " + new_account.get_id());
print("Account currency: " + new_account.get_currency());
// Create a new DNS zone
print("\n--- Creating DNS Zone ---");
let new_dns_zone = new_dns_zone()
.name("herocode.com")
.description("Main domain for HeroCode")
.save_dns_zone();
print("Created DNS zone: " + new_dns_zone.get_name());
print("DNS zone ID: " + new_dns_zone.get_id());
print("\n=== Demo Complete ===");


@@ -0,0 +1,50 @@
use heromodels_core::db::hero::OurDB;
use rhai::{Dynamic, Engine};
use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules;
use std::sync::Arc;
use std::{fs, path::Path};
const CALLER_ID: &str = "example_caller";
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize Rhai engine
let mut engine = Engine::new();
// Initialize database with OurDB
let db_path = "temp_heroledger_db";
// Clean up previous database file if it exists
if Path::new(db_path).exists() {
fs::remove_dir_all(db_path)?;
}
let _db = Arc::new(OurDB::new(db_path, true).expect("Failed to create database"));
// Register the heroledger modules with Rhai
register_heroledger_rhai_modules(&mut engine);
let mut db_config = rhai::Map::new();
db_config.insert("DB_PATH".into(), db_path.into());
db_config.insert("CALLER_ID".into(), CALLER_ID.into());
db_config.insert("CONTEXT_ID".into(), CALLER_ID.into());
engine.set_default_tag(Dynamic::from(db_config)); // Or pass via CallFnOptions
// Load and evaluate the Rhai script
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let script_path = Path::new(manifest_dir)
.join("examples")
.join("heroledger")
.join("heroledger.rhai");
println!("Script path: {}", script_path.display());
let script = fs::read_to_string(&script_path)?;
println!("--- Running HeroLedger Rhai Script ---");
match engine.eval::<()>(&script) {
Ok(_) => println!("\n--- Script executed successfully! ---"),
Err(e) => eprintln!("\n--- Script execution failed: {} ---", e),
}
// Clean up the database file
fs::remove_dir_all(db_path)?;
println!("--- Cleaned up temporary database. ---");
Ok(())
}


@@ -73,7 +73,7 @@ fn main() {
// The `#[model]` derive handles `created_at` and `updated_at` in `base_data`.
// `base_data.touch()` might be called internally by setters or needs explicit call if fields are set directly.
// For builder pattern, the final state of `base_data.updated_at` reflects the time of the last builder call if `touch()` is implicit.
// For builder pattern, the final state of `base_data.modified_at` reflects the time of the last builder call if `touch()` is implicit.
// If not, one might call `contract.base_data.touch()` after building.
println!("\n--- Initial Contract Details ---");


@@ -0,0 +1,148 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use heromodels::models::access::Access;
type RhaiAccess = Access;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
#[export_module]
mod rhai_access_module {
// --- Access Functions ---
#[rhai_fn(name = "new_access", return_raw)]
pub fn new_access() -> Result<RhaiAccess, Box<EvalAltResult>> {
let access = Access::new();
Ok(access)
}
/// Sets the access object_id
#[rhai_fn(name = "object_id", return_raw)]
pub fn set_object_id(
access: &mut RhaiAccess,
object_id: i64,
) -> Result<RhaiAccess, Box<EvalAltResult>> {
let id = macros::id_from_i64_to_u32(object_id)?;
let owned_access = std::mem::take(access);
*access = owned_access.object_id(id);
Ok(access.clone())
}
/// Sets the circle public key
#[rhai_fn(name = "circle_public_key", return_raw)]
pub fn set_circle_pk(
access: &mut RhaiAccess,
circle_pk: String,
) -> Result<RhaiAccess, Box<EvalAltResult>> {
let owned_access = std::mem::take(access);
*access = owned_access.circle_pk(circle_pk);
Ok(access.clone())
}
/// Sets the group id
#[rhai_fn(name = "group_id", return_raw)]
pub fn set_group_id(
access: &mut RhaiAccess,
group_id: i64,
) -> Result<RhaiAccess, Box<EvalAltResult>> {
let id = macros::id_from_i64_to_u32(group_id)?;
let owned_access = std::mem::take(access);
*access = owned_access.group_id(id);
Ok(access.clone())
}
/// Sets the contact id
#[rhai_fn(name = "contact_id", return_raw)]
pub fn set_contact_id(
access: &mut RhaiAccess,
contact_id: i64,
) -> Result<RhaiAccess, Box<EvalAltResult>> {
let id = macros::id_from_i64_to_u32(contact_id)?;
let owned_access = std::mem::take(access);
*access = owned_access.contact_id(id);
Ok(access.clone())
}
/// Sets the expiration time
#[rhai_fn(name = "expires_at", return_raw)]
pub fn set_expires_at(
access: &mut RhaiAccess,
expires_at: i64,
) -> Result<RhaiAccess, Box<EvalAltResult>> {
let owned_access = std::mem::take(access);
*access = owned_access.expires_at(expires_at);
Ok(access.clone())
}
// Access Getters
#[rhai_fn(name = "get_access_id")]
pub fn get_access_id(access: &mut RhaiAccess) -> i64 {
access.base.id as i64
}
#[rhai_fn(name = "get_access_object_id")]
pub fn get_access_object_id(access: &mut RhaiAccess) -> i64 {
access.object_id as i64
}
#[rhai_fn(name = "get_access_circle_pk")]
pub fn get_access_circle_pk(access: &mut RhaiAccess) -> String {
access.circle_pk.clone()
}
#[rhai_fn(name = "get_access_group_id")]
pub fn get_access_group_id(access: &mut RhaiAccess) -> i64 {
access.group_id as i64
}
#[rhai_fn(name = "get_access_contact_id")]
pub fn get_access_contact_id(access: &mut RhaiAccess) -> i64 {
access.contact_id as i64
}
#[rhai_fn(name = "get_access_expires_at")]
pub fn get_access_expires_at(access: &mut RhaiAccess) -> i64 {
access.expires_at
}
#[rhai_fn(name = "get_access_created_at")]
pub fn get_access_created_at(access: &mut RhaiAccess) -> i64 {
access.base.created_at
}
#[rhai_fn(name = "get_access_modified_at")]
pub fn get_access_modified_at(access: &mut RhaiAccess) -> i64 {
access.base.modified_at
}
}
pub fn register_access_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_access_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_access",
resource_type_str: "Access",
rhai_return_rust_type: heromodels::models::access::Access
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_access",
resource_type_str: "Access",
rhai_return_rust_type: heromodels::models::access::Access
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_access",
resource_type_str: "Access",
rhai_return_rust_type: heromodels::models::access::Access
);
engine.register_global_module(module.into());
}
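
For orientation, a hedged sketch of how the builder functions registered above could be exercised by evaluating a short script from Rust. The module path is an assumption (by analogy with the heroledger example earlier), and `save_access`/`get_access` are omitted because they additionally need the authorization tag (DB_PATH, CALLER_ID) set on the engine:

```rust
// Assumed-path sketch, not taken from the repository: drive the access builders
// registered by register_access_rhai_module from Rust. The module path below
// mirrors the heroledger example and is an assumption.
use rhai::Engine;

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();
    heromodels::models::access::rhai::register_access_rhai_module(&mut engine);

    // Only the in-memory builders and getters are used here; save_access and
    // get_access would additionally require the DB_PATH/CALLER_ID tag.
    let object_id: i64 = engine.eval(
        r#"
            let a = new_access()
                .object_id(42)
                .circle_public_key("0xabc")
                .group_id(7);
            a.get_access_object_id()
        "#,
    )?;
    assert_eq!(object_id, 42);
    Ok(())
}
```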


@@ -0,0 +1,422 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Engine, EvalAltResult, Module, Position, FLOAT, INT};
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
use heromodels::models::biz::product::{Product, ProductComponent, ProductStatus, ProductType};
use heromodels::models::biz::company::{BusinessType, Company, CompanyStatus};
use heromodels::models::biz::sale::{Sale, SaleItem, SaleStatus};
use heromodels::models::biz::shareholder::{Shareholder, ShareholderType};
type RhaiProduct = Product;
type RhaiProductComponent = ProductComponent;
type RhaiCompany = Company;
type RhaiSale = Sale;
type RhaiSaleItem = SaleItem;
type RhaiShareholder = Shareholder;
#[export_module]
mod rhai_product_component_module {
use super::{RhaiProductComponent, INT};
#[rhai_fn(name = "new_product_component", return_raw)]
pub fn new_product_component() -> Result<RhaiProductComponent, Box<EvalAltResult>> {
Ok(ProductComponent::new())
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(
component: &mut RhaiProductComponent,
name: String,
) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
let owned = std::mem::take(component);
*component = owned.name(name);
Ok(component.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_description(
component: &mut RhaiProductComponent,
description: String,
) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
let owned = std::mem::take(component);
*component = owned.description(description);
Ok(component.clone())
}
#[rhai_fn(name = "quantity", return_raw)]
pub fn set_quantity(
component: &mut RhaiProductComponent,
quantity: INT,
) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
let owned = std::mem::take(component);
*component = owned.quantity(quantity as u32);
Ok(component.clone())
}
// --- Getters ---
#[rhai_fn(name = "get_name")]
pub fn get_name(c: &mut RhaiProductComponent) -> String {
c.name.clone()
}
#[rhai_fn(name = "get_description")]
pub fn get_description(c: &mut RhaiProductComponent) -> String {
c.description.clone()
}
#[rhai_fn(name = "get_quantity")]
pub fn get_quantity(c: &mut RhaiProductComponent) -> INT {
c.quantity as INT
}
}
#[export_module]
mod rhai_product_module {
use super::{Array, ProductStatus, ProductType, RhaiProduct, RhaiProductComponent, FLOAT, INT};
#[rhai_fn(name = "new_product", return_raw)]
pub fn new_product() -> Result<RhaiProduct, Box<EvalAltResult>> {
Ok(Product::new())
}
// --- Setters ---
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(
product: &mut RhaiProduct,
name: String,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.name(name);
Ok(product.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_description(
product: &mut RhaiProduct,
description: String,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.description(description);
Ok(product.clone())
}
#[rhai_fn(name = "price", return_raw)]
pub fn set_price(
product: &mut RhaiProduct,
price: FLOAT,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.price(price);
Ok(product.clone())
}
#[rhai_fn(name = "category", return_raw)]
pub fn set_category(
product: &mut RhaiProduct,
category: String,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.category(category);
Ok(product.clone())
}
#[rhai_fn(name = "max_amount", return_raw)]
pub fn set_max_amount(
product: &mut RhaiProduct,
max_amount: INT,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.max_amount(max_amount as u32);
Ok(product.clone())
}
#[rhai_fn(name = "purchase_till", return_raw)]
pub fn set_purchase_till(
product: &mut RhaiProduct,
purchase_till: INT,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.purchase_till(purchase_till);
Ok(product.clone())
}
#[rhai_fn(name = "active_till", return_raw)]
pub fn set_active_till(
product: &mut RhaiProduct,
active_till: INT,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.active_till(active_till);
Ok(product.clone())
}
#[rhai_fn(name = "type", return_raw)]
pub fn set_type(
product: &mut RhaiProduct,
type_str: String,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let product_type = match type_str.to_lowercase().as_str() {
"physical" => ProductType::Physical,
"digital" => ProductType::Digital,
"service" => ProductType::Service,
"subscription" => ProductType::Subscription,
_ => {
return Err(EvalAltResult::ErrorSystem(
"Invalid ProductType".to_string(),
"Must be one of: Physical, Digital, Service, Subscription".into(),
)
.into())
}
};
let owned = std::mem::take(product);
*product = owned.product_type(product_type);
Ok(product.clone())
}
#[rhai_fn(name = "status", return_raw)]
pub fn set_status(
product: &mut RhaiProduct,
status_str: String,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let status = match status_str.to_lowercase().as_str() {
"active" => ProductStatus::Active,
"inactive" => ProductStatus::Inactive,
"discontinued" => ProductStatus::Discontinued,
_ => {
return Err(EvalAltResult::ErrorSystem(
"Invalid ProductStatus".to_string(),
"Must be one of: Active, Inactive, Discontinued".into(),
)
.into())
}
};
let owned = std::mem::take(product);
*product = owned.status(status);
Ok(product.clone())
}
#[rhai_fn(name = "add_component", return_raw)]
pub fn add_component(
product: &mut RhaiProduct,
component: RhaiProductComponent,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let owned = std::mem::take(product);
*product = owned.add_component(component);
Ok(product.clone())
}
#[rhai_fn(name = "set_components", return_raw)]
pub fn set_components(
product: &mut RhaiProduct,
components: Array,
) -> Result<RhaiProduct, Box<EvalAltResult>> {
let mut product_components = Vec::new();
for component_dynamic in components {
if let Ok(component) = component_dynamic.try_cast::<RhaiProductComponent>() {
product_components.push(component);
} else {
return Err(EvalAltResult::ErrorSystem(
"Invalid component type".to_string(),
"All components must be ProductComponent objects".into(),
)
.into());
}
}
let owned = std::mem::take(product);
*product = owned.components(product_components);
Ok(product.clone())
}
// --- Getters ---
#[rhai_fn(name = "get_id")]
pub fn get_id(p: &mut RhaiProduct) -> i64 {
p.base.id as i64
}
#[rhai_fn(name = "get_name")]
pub fn get_name(p: &mut RhaiProduct) -> String {
p.name.clone()
}
#[rhai_fn(name = "get_description")]
pub fn get_description(p: &mut RhaiProduct) -> String {
p.description.clone()
}
#[rhai_fn(name = "get_price")]
pub fn get_price(p: &mut RhaiProduct) -> FLOAT {
p.price
}
#[rhai_fn(name = "get_category")]
pub fn get_category(p: &mut RhaiProduct) -> String {
p.category.clone()
}
#[rhai_fn(name = "get_max_amount")]
pub fn get_max_amount(p: &mut RhaiProduct) -> INT {
p.max_amount as INT
}
#[rhai_fn(name = "get_purchase_till")]
pub fn get_purchase_till(p: &mut RhaiProduct) -> INT {
p.purchase_till
}
#[rhai_fn(name = "get_active_till")]
pub fn get_active_till(p: &mut RhaiProduct) -> INT {
p.active_till
}
#[rhai_fn(name = "get_type")]
pub fn get_type(p: &mut RhaiProduct) -> String {
format!("{:?}", p.product_type)
}
#[rhai_fn(name = "get_status")]
pub fn get_status(p: &mut RhaiProduct) -> String {
format!("{:?}", p.status)
}
#[rhai_fn(name = "get_components")]
pub fn get_components(p: &mut RhaiProduct) -> Array {
p.components
.iter()
.map(|c| rhai::Dynamic::from(c.clone()))
.collect()
}
}
pub fn register_product_rhai_module(engine: &mut Engine) {
let mut product_module = exported_module!(rhai_product_module);
let mut component_module = exported_module!(rhai_product_component_module);
register_authorized_create_by_id_fn!(
product_module: &mut product_module,
rhai_fn_name: "save_product",
resource_type_str: "Product",
rhai_return_rust_type: heromodels::models::biz::product::Product
);
register_authorized_get_by_id_fn!(
product_module: &mut product_module,
rhai_fn_name: "get_product",
resource_type_str: "Product",
rhai_return_rust_type: heromodels::models::biz::product::Product
);
register_authorized_delete_by_id_fn!(
product_module: &mut product_module,
rhai_fn_name: "delete_product",
resource_type_str: "Product",
rhai_return_rust_type: heromodels::models::biz::product::Product
);
engine.register_global_module(product_module.into());
engine.register_global_module(component_module.into());
}
// Company Rhai wrapper functions
#[export_module]
mod rhai_company_module {
use super::{BusinessType, CompanyStatus, RhaiCompany};
#[rhai_fn(name = "new_company", return_raw)]
pub fn new_company() -> Result<RhaiCompany, Box<EvalAltResult>> {
Ok(Company::new())
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(
company: &mut RhaiCompany,
name: String,
) -> Result<RhaiCompany, Box<EvalAltResult>> {
let owned = std::mem::take(company);
*company = owned.name(name);
Ok(company.clone())
}
#[rhai_fn(name = "get_company_id")]
pub fn get_company_id(company: &mut RhaiCompany) -> i64 {
company.id() as i64
}
#[rhai_fn(name = "get_company_name")]
pub fn get_company_name(company: &mut RhaiCompany) -> String {
company.name().clone()
}
}
pub fn register_company_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_company_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_company",
resource_type_str: "Company",
rhai_return_rust_type: heromodels::models::biz::company::Company
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_company",
resource_type_str: "Company",
rhai_return_rust_type: heromodels::models::biz::company::Company
);
engine.register_global_module(module.into());
}
// Sale Rhai wrapper functions
#[export_module]
mod rhai_sale_module {
use super::{RhaiSale, RhaiSaleItem, SaleStatus};
#[rhai_fn(name = "new_sale", return_raw)]
pub fn new_sale() -> Result<RhaiSale, Box<EvalAltResult>> {
Ok(Sale::new())
}
#[rhai_fn(name = "new_sale_item", return_raw)]
pub fn new_sale_item() -> Result<RhaiSaleItem, Box<EvalAltResult>> {
Ok(SaleItem::new())
}
#[rhai_fn(name = "company_id", return_raw)]
pub fn set_sale_company_id(sale: &mut RhaiSale, company_id: i64) -> Result<RhaiSale, Box<EvalAltResult>> {
let owned = std::mem::take(sale);
*sale = owned.company_id(company_id as u32);
Ok(sale.clone())
}
#[rhai_fn(name = "total_amount", return_raw)]
pub fn set_sale_total_amount(sale: &mut RhaiSale, total_amount: f64) -> Result<RhaiSale, Box<EvalAltResult>> {
let owned = std::mem::take(sale);
*sale = owned.total_amount(total_amount);
Ok(sale.clone())
}
#[rhai_fn(name = "get_sale_id")]
pub fn get_sale_id(sale: &mut RhaiSale) -> i64 {
sale.id() as i64
}
#[rhai_fn(name = "get_sale_total_amount")]
pub fn get_sale_total_amount(sale: &mut RhaiSale) -> f64 {
sale.total_amount()
}
}
pub fn register_sale_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_sale_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_sale",
resource_type_str: "Sale",
rhai_return_rust_type: heromodels::models::biz::sale::Sale
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_sale",
resource_type_str: "Sale",
rhai_return_rust_type: heromodels::models::biz::sale::Sale
);
engine.register_global_module(module.into());
}


@@ -0,0 +1,246 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use crate::models::calendar::{AttendanceStatus, Attendee, Calendar, Event};
type RhaiCalendar = Calendar;
type RhaiEvent = Event;
type RhaiAttendee = Attendee;
use crate::db::hero::OurDB;
use crate::db::Collection;
#[export_module]
mod rhai_calendar_module {
use super::{AttendanceStatus, RhaiAttendee, RhaiCalendar, RhaiEvent};
// --- Attendee Builder ---
#[rhai_fn(name = "new_attendee", return_raw)]
pub fn new_attendee(contact_id: i64) -> Result<RhaiAttendee, Box<EvalAltResult>> {
Ok(Attendee::new(contact_id as u32))
}
#[rhai_fn(name = "status", return_raw)]
pub fn set_attendee_status(
attendee: &mut RhaiAttendee,
status_str: String,
) -> Result<RhaiAttendee, Box<EvalAltResult>> {
let status = match status_str.to_lowercase().as_str() {
"accepted" => AttendanceStatus::Accepted,
"declined" => AttendanceStatus::Declined,
"tentative" => AttendanceStatus::Tentative,
"noresponse" => AttendanceStatus::NoResponse,
_ => {
return Err(EvalAltResult::ErrorSystem(
"Invalid Status".to_string(),
"Must be one of: Accepted, Declined, Tentative, NoResponse".into(),
)
.into())
}
};
let owned = std::mem::take(attendee);
*attendee = owned.status(status);
Ok(attendee.clone())
}
// --- Event Builder ---
#[rhai_fn(name = "new_event", return_raw)]
pub fn new_event() -> Result<RhaiEvent, Box<EvalAltResult>> {
Ok(Event::new())
}
#[rhai_fn(name = "title", return_raw)]
pub fn set_event_title(
event: &mut RhaiEvent,
title: String,
) -> Result<RhaiEvent, Box<EvalAltResult>> {
let owned = std::mem::take(event);
*event = owned.title(title);
Ok(event.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_event_description(
event: &mut RhaiEvent,
description: String,
) -> Result<RhaiEvent, Box<EvalAltResult>> {
let owned = std::mem::take(event);
*event = owned.description(description);
Ok(event.clone())
}
#[rhai_fn(name = "location", return_raw)]
pub fn set_event_location(
event: &mut RhaiEvent,
location: String,
) -> Result<RhaiEvent, Box<EvalAltResult>> {
let owned = std::mem::take(event);
*event = owned.location(location);
Ok(event.clone())
}
#[rhai_fn(name = "add_attendee", return_raw)]
pub fn add_event_attendee(
event: &mut RhaiEvent,
attendee: RhaiAttendee,
) -> Result<RhaiEvent, Box<EvalAltResult>> {
let owned = std::mem::take(event);
*event = owned.add_attendee(attendee);
Ok(event.clone())
}
#[rhai_fn(name = "reschedule", return_raw)]
pub fn reschedule_event(
event: &mut RhaiEvent,
start_time: i64,
end_time: i64,
) -> Result<RhaiEvent, Box<EvalAltResult>> {
let owned = std::mem::take(event);
*event = owned.reschedule(start_time, end_time);
Ok(event.clone())
}
// --- Calendar Builder ---
#[rhai_fn(name = "new_calendar", return_raw)]
pub fn new_calendar(name: String) -> Result<RhaiCalendar, Box<EvalAltResult>> {
Ok(Calendar::new().name(name))
}
#[rhai_fn(name = "calendar_name", return_raw)]
pub fn set_calendar_name(
calendar: &mut RhaiCalendar,
name: String,
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
let owned = std::mem::take(calendar);
*calendar = owned.name(name);
Ok(calendar.clone())
}
#[rhai_fn(name = "calendar_description", return_raw)]
pub fn set_calendar_description(
calendar: &mut RhaiCalendar,
description: String,
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
let owned = std::mem::take(calendar);
*calendar = owned.description(description);
Ok(calendar.clone())
}
#[rhai_fn(name = "add_event", return_raw)]
pub fn add_calendar_event(
calendar: &mut RhaiCalendar,
event_id: i64,
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
let owned = std::mem::take(calendar);
*calendar = owned.add_event(event_id as u32);
Ok(calendar.clone())
}
// --- Getters ---
// Calendar
#[rhai_fn(name = "get_calendar_id")]
pub fn get_calendar_id(c: &mut RhaiCalendar) -> i64 {
c.base.id as i64
}
#[rhai_fn(name = "get_calendar_name")]
pub fn get_calendar_name(c: &mut RhaiCalendar) -> String {
c.name.clone()
}
#[rhai_fn(name = "get_calendar_description")]
pub fn get_calendar_description(c: &mut RhaiCalendar) -> Option<String> {
c.description.clone()
}
#[rhai_fn(name = "get_calendar_events")]
pub fn get_calendar_events(c: &mut RhaiCalendar) -> Array {
c.events.iter().map(|id| Dynamic::from(*id as i64)).collect()
}
// Event
#[rhai_fn(name = "get_event_id")]
pub fn get_event_id(e: &mut RhaiEvent) -> i64 {
e.base.id as i64
}
#[rhai_fn(name = "get_event_title")]
pub fn get_event_title(e: &mut RhaiEvent) -> String {
e.title.clone()
}
#[rhai_fn(name = "get_event_description")]
pub fn get_event_description(e: &mut RhaiEvent) -> Option<String> {
e.description.clone()
}
#[rhai_fn(name = "get_event_start_time")]
pub fn get_event_start_time(e: &mut RhaiEvent) -> i64 {
e.start_time
}
#[rhai_fn(name = "get_event_end_time")]
pub fn get_event_end_time(e: &mut RhaiEvent) -> i64 {
e.end_time
}
#[rhai_fn(name = "get_event_attendees")]
pub fn get_event_attendees(e: &mut RhaiEvent) -> Array {
e.attendees.iter().map(|a| Dynamic::from(a.clone())).collect()
}
#[rhai_fn(name = "get_event_location")]
pub fn get_event_location(e: &mut RhaiEvent) -> Option<String> {
e.location.clone()
}
// Attendee
#[rhai_fn(name = "get_attendee_contact_id")]
pub fn get_attendee_contact_id(a: &mut RhaiAttendee) -> i64 {
a.contact_id as i64
}
#[rhai_fn(name = "get_attendee_status")]
pub fn get_attendee_status(a: &mut RhaiAttendee) -> String {
format!("{:?}", a.status)
}
}
pub fn register_calendar_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_calendar_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_calendar",
resource_type_str: "Calendar",
rhai_return_rust_type: heromodels::models::calendar::Calendar
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_calendar",
resource_type_str: "Calendar",
rhai_return_rust_type: heromodels::models::calendar::Calendar
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_calendar",
resource_type_str: "Calendar",
rhai_return_rust_type: heromodels::models::calendar::Calendar
);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_event",
resource_type_str: "Event",
rhai_return_rust_type: heromodels::models::calendar::Event
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_event",
resource_type_str: "Event",
rhai_return_rust_type: heromodels::models::calendar::Event
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_event",
resource_type_str: "Event",
rhai_return_rust_type: heromodels::models::calendar::Event
);
engine.register_global_module(module.into());
}


@@ -1,412 +1,155 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn, register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, CustomType, Dynamic, Engine, EvalAltResult, INT, Module, Position};
use std::mem;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
use std::collections::HashMap;
use std::sync::Arc;
use super::circle::{Circle, ThemeData};
use crate::models::circle::Circle;
type RhaiCircle = Circle;
type RhaiThemeData = ThemeData;
use crate::db::Collection;
use crate::db::hero::OurDB;
use serde::Serialize;
use serde_json;
/// Registers a `.json()` method for any type `T` that implements the required traits.
fn register_json_method<T>(engine: &mut Engine)
where
T: CustomType + Clone + Serialize,
{
let to_json_fn = |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
serde_json::to_string(obj).map_err(|e| e.to_string().into())
};
engine.build_type::<T>().register_fn("json", to_json_fn);
}
// Helper to convert i64 from Rhai to u32 for IDs
fn id_from_i64_to_u32(id_i64: i64) -> Result<u32, Box<EvalAltResult>> {
u32::try_from(id_i64).map_err(|_| {
Box::new(EvalAltResult::ErrorArithmetic(
format!("Failed to convert ID '{}' to u32", id_i64).into(),
Position::NONE,
))
})
}
#[export_module]
mod rhai_theme_data_module {
#[rhai_fn(name = "new_theme_data")]
pub fn new_theme_data() -> RhaiThemeData {
ThemeData::default()
}
// --- Setters for ThemeData ---
#[rhai_fn(name = "primary_color", return_raw, global, pure)]
pub fn set_primary_color(
theme: &mut RhaiThemeData,
color: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.primary_color = color;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "background_color", return_raw, global, pure)]
pub fn set_background_color(
theme: &mut RhaiThemeData,
color: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.background_color = color;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "background_pattern", return_raw, global, pure)]
pub fn set_background_pattern(
theme: &mut RhaiThemeData,
pattern: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.background_pattern = pattern;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "logo_symbol", return_raw, global, pure)]
pub fn set_logo_symbol(
theme: &mut RhaiThemeData,
symbol: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.logo_symbol = symbol;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "logo_url", return_raw, global, pure)]
pub fn set_logo_url(
theme: &mut RhaiThemeData,
url: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.logo_url = url;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "nav_dashboard_visible", return_raw, global, pure)]
pub fn set_nav_dashboard_visible(
theme: &mut RhaiThemeData,
visible: bool,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.nav_dashboard_visible = visible;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "nav_timeline_visible", return_raw, global, pure)]
pub fn set_nav_timeline_visible(
theme: &mut RhaiThemeData,
visible: bool,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.nav_timeline_visible = visible;
*theme = owned_theme;
Ok(theme.clone())
}
// --- Getters for ThemeData ---
#[rhai_fn(name = "get_primary_color", pure)]
pub fn get_primary_color(theme: &mut RhaiThemeData) -> String {
theme.primary_color.clone()
}
#[rhai_fn(name = "get_background_color", pure)]
pub fn get_background_color(theme: &mut RhaiThemeData) -> String {
theme.background_color.clone()
}
#[rhai_fn(name = "get_background_pattern", pure)]
pub fn get_background_pattern(theme: &mut RhaiThemeData) -> String {
theme.background_pattern.clone()
}
#[rhai_fn(name = "get_logo_symbol", pure)]
pub fn get_logo_symbol(theme: &mut RhaiThemeData) -> String {
theme.logo_symbol.clone()
}
#[rhai_fn(name = "get_logo_url", pure)]
pub fn get_logo_url(theme: &mut RhaiThemeData) -> String {
theme.logo_url.clone()
}
#[rhai_fn(name = "get_nav_dashboard_visible", pure)]
pub fn get_nav_dashboard_visible(theme: &mut RhaiThemeData) -> bool {
theme.nav_dashboard_visible
}
#[rhai_fn(name = "get_nav_timeline_visible", pure)]
pub fn get_nav_timeline_visible(theme: &mut RhaiThemeData) -> bool {
theme.nav_timeline_visible
}
}
use crate::db::Collection;
use crate::models::circle::ThemeData;
#[export_module]
mod rhai_circle_module {
// --- Circle Functions ---
#[rhai_fn(name = "new_circle")]
pub fn new_circle() -> RhaiCircle {
Circle::new()
use super::RhaiCircle;
// this one configures the user's own circle
#[rhai_fn(name = "configure", return_raw)]
pub fn configure() -> Result<RhaiCircle, Box<EvalAltResult>> {
Ok(Circle::new())
}
/// Sets the circle title
#[rhai_fn(name = "title", return_raw, global, pure)]
pub fn circle_title(
#[rhai_fn(name = "new_circle", return_raw)]
pub fn new_circle() -> Result<RhaiCircle, Box<EvalAltResult>> {
Ok(Circle::new())
}
#[rhai_fn(name = "set_title", return_raw)]
pub fn set_title(
circle: &mut RhaiCircle,
title: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.title(title);
let owned = std::mem::take(circle);
*circle = owned.title(title);
Ok(circle.clone())
}
/// Sets the circle ws_url
#[rhai_fn(name = "ws_url", return_raw, global, pure)]
pub fn circle_ws_url(
#[rhai_fn(name = "set_ws_url", return_raw)]
pub fn set_ws_url(
circle: &mut RhaiCircle,
ws_url: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.ws_url(ws_url);
let owned = std::mem::take(circle);
*circle = owned.ws_url(ws_url);
Ok(circle.clone())
}
/// Sets the circle description
#[rhai_fn(name = "description", return_raw, global, pure)]
pub fn circle_description(
#[rhai_fn(name = "set_description", return_raw)]
pub fn set_description(
circle: &mut RhaiCircle,
description: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.description(description);
let owned = std::mem::take(circle);
*circle = owned.description(description);
Ok(circle.clone())
}
/// Sets the circle logo
#[rhai_fn(name = "logo", return_raw, global, pure)]
pub fn circle_logo(
#[rhai_fn(name = "set_logo", return_raw)]
pub fn set_logo(
circle: &mut RhaiCircle,
logo: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.logo(logo);
let owned = std::mem::take(circle);
*circle = owned.logo(logo);
Ok(circle.clone())
}
/// Sets the circle theme
#[rhai_fn(name = "theme", return_raw, global, pure)]
pub fn circle_theme(
#[rhai_fn(name = "set_theme", return_raw)]
pub fn set_theme(
circle: &mut RhaiCircle,
theme: RhaiThemeData,
theme: ThemeData,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.theme(theme);
let owned = std::mem::take(circle);
*circle = owned.theme(theme);
Ok(circle.clone())
}
/// Adds an attendee to the circle
#[rhai_fn(name = "add_circle", return_raw, global, pure)]
pub fn circle_add_circle(
#[rhai_fn(name = "add_circle", return_raw)]
pub fn add_circle(
circle: &mut RhaiCircle,
added_circle: String,
new_circle: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.add_circle(added_circle);
let owned = std::mem::take(circle);
*circle = owned.add_circle(new_circle);
Ok(circle.clone())
}
/// Adds an attendee to the circle
#[rhai_fn(name = "add_member", return_raw, global, pure)]
pub fn circle_add_member(
#[rhai_fn(name = "add_member", return_raw)]
pub fn add_member(
circle: &mut RhaiCircle,
added_member: String,
member: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.add_member(added_member);
let owned = std::mem::take(circle);
*circle = owned.add_member(member);
Ok(circle.clone())
}
// Circle Getters
#[rhai_fn(name = "get_id", pure)]
pub fn get_circle_id(circle: &mut RhaiCircle) -> i64 {
circle.base_data.id as i64
// --- Getters ---
#[rhai_fn(name = "get_id")]
pub fn get_id(c: &mut RhaiCircle) -> i64 {
c.base_data.id as i64
}
#[rhai_fn(name = "get_created_at", pure)]
pub fn get_circle_created_at(circle: &mut RhaiCircle) -> i64 {
circle.base_data.created_at
#[rhai_fn(name = "get_title")]
pub fn get_title(c: &mut RhaiCircle) -> String {
c.title.clone()
}
#[rhai_fn(name = "get_modified_at", pure)]
pub fn get_circle_modified_at(circle: &mut RhaiCircle) -> i64 {
circle.base_data.modified_at
#[rhai_fn(name = "get_ws_url")]
pub fn get_ws_url(c: &mut RhaiCircle) -> String {
c.ws_url.clone()
}
#[rhai_fn(name = "get_title", pure)]
pub fn get_circle_title(circle: &mut RhaiCircle) -> String {
circle.title.clone()
#[rhai_fn(name = "get_description")]
pub fn get_description(c: &mut RhaiCircle) -> Option<String> {
c.description.clone()
}
#[rhai_fn(name = "get_description", pure)]
pub fn get_circle_description(circle: &mut RhaiCircle) -> Option<String> {
circle.description.clone()
#[rhai_fn(name = "get_logo")]
pub fn get_logo(c: &mut RhaiCircle) -> Option<String> {
c.logo.clone()
}
#[rhai_fn(name = "get_circles", pure)]
pub fn get_circle_circles(circle: &mut RhaiCircle) -> Vec<String> {
circle.circles.clone()
#[rhai_fn(name = "get_circles")]
pub fn get_circles(c: &mut RhaiCircle) -> Array {
c.circles.iter().map(|s| Dynamic::from(s.clone())).collect()
}
#[rhai_fn(name = "get_ws_url", pure)]
pub fn get_circle_ws_url(circle: &mut RhaiCircle) -> String {
circle.ws_url.clone()
}
#[rhai_fn(name = "get_logo", pure)]
pub fn get_circle_logo(circle: &mut RhaiCircle) -> Option<String> {
circle.logo.clone()
}
#[rhai_fn(name = "get_theme", pure)]
pub fn get_circle_theme(circle: &mut RhaiCircle) -> RhaiThemeData {
circle.theme.clone()
#[rhai_fn(name = "get_members")]
pub fn get_members(c: &mut RhaiCircle) -> Array {
c.members.iter().map(|s| Dynamic::from(s.clone())).collect()
}
}
pub fn register_circle_rhai_module(engine: &mut Engine, db: Arc<OurDB>) {
engine.build_type::<RhaiCircle>();
engine.build_type::<RhaiThemeData>();
pub fn register_circle_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_circle_module);
let mut db_module = Module::new();
let circle_module = exported_module!(rhai_circle_module);
let theme_data_module = exported_module!(rhai_theme_data_module);
engine.register_global_module(circle_module.into());
engine.register_global_module(theme_data_module.into());
register_json_method::<Circle>(engine);
register_json_method::<ThemeData>(engine);
// Manually register database functions as they need to capture 'db'
let db_clone_set_circle = db.clone();
db_module.set_native_fn(
"save_circle",
move |circle: Circle| -> Result<Circle, Box<EvalAltResult>> {
let result = db_clone_set_circle.set(&circle).map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error set_circle: {}", e).into(),
Position::NONE,
))
})?;
Ok(result.1)
},
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
let db_clone_delete_circle = db.clone();
db_module.set_native_fn(
"delete_circle",
move |circle: Circle| -> Result<(), Box<EvalAltResult>> {
let result = db_clone_delete_circle
.collection::<Circle>()
.expect("can open circle collection")
.delete_by_id(circle.base_data.id)
.expect("can delete circle");
Ok(result)
},
);
let db_clone_get_circle = db.clone();
db_module.set_native_fn(
"get_circle",
move || -> Result<Circle, Box<EvalAltResult>> {
let all_circles: Vec<Circle> = db_clone_get_circle.get_all().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error get_circle: {}", e).into(),
Position::NONE,
))
})?;
if let Some(first_circle) = all_circles.first() {
Ok(first_circle.clone())
} else {
Err(Box::new(EvalAltResult::ErrorRuntime(
"Circle not found".into(),
Position::NONE,
)))
}
},
);
// --- Collection DB Functions ---
let db_clone = db.clone();
db_module.set_native_fn(
"save_circle",
move |circle: RhaiCircle| -> Result<RhaiCircle, Box<EvalAltResult>> {
let result = db_clone.set(&circle).map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error: {:?}", e).into(),
Position::NONE,
))
})?;
Ok(result.1)
},
);
let db_clone_get_circle_by_id = db.clone();
db_module.set_native_fn(
"get_circle_by_id",
move |id_i64: INT| -> Result<Circle, Box<EvalAltResult>> {
let id_u32 = id_from_i64_to_u32(id_i64)?;
db_clone_get_circle_by_id
.get_by_id(id_u32)
.map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error get_circle_by_id: {}", e).into(),
Position::NONE,
))
})?
.ok_or_else(|| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Circle with ID {} not found", id_u32).into(),
Position::NONE,
))
})
},
);
let db_clone_list_circles = db.clone();
db_module.set_native_fn(
"list_circles",
move || -> Result<Dynamic, Box<EvalAltResult>> {
let collection = db_clone_list_circles.collection::<Circle>().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to get circle collection: {:?}", e).into(),
Position::NONE,
))
})?;
let circles = collection.get_all().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to get all circles: {:?}", e).into(),
Position::NONE,
))
})?;
let mut array = Array::new();
for circle in circles {
array.push(Dynamic::from(circle));
}
Ok(Dynamic::from(array))
},
);
engine.register_global_module(db_module.into());
println!("Successfully registered circle Rhai module using export_module approach.");
engine.register_global_module(module.into());
}


@@ -0,0 +1,232 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use crate::models::contact::{Contact, Group};
type RhaiContact = Contact;
type RhaiGroup = Group;
use crate::db::hero::OurDB;
use crate::db::Collection;
#[export_module]
mod rhai_contact_module {
use super::{RhaiContact, RhaiGroup};
// --- Contact Builder ---
#[rhai_fn(name = "new_contact", return_raw)]
pub fn new_contact() -> Result<RhaiContact, Box<EvalAltResult>> {
Ok(Contact::new())
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_contact_name(
contact: &mut RhaiContact,
name: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.name(name);
Ok(contact.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_contact_description(
contact: &mut RhaiContact,
description: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.description(description);
Ok(contact.clone())
}
#[rhai_fn(name = "address", return_raw)]
pub fn set_contact_address(
contact: &mut RhaiContact,
address: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.address(address);
Ok(contact.clone())
}
#[rhai_fn(name = "phone", return_raw)]
pub fn set_contact_phone(
contact: &mut RhaiContact,
phone: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.phone(phone);
Ok(contact.clone())
}
#[rhai_fn(name = "email", return_raw)]
pub fn set_contact_email(
contact: &mut RhaiContact,
email: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.email(email);
Ok(contact.clone())
}
#[rhai_fn(name = "notes", return_raw)]
pub fn set_contact_notes(
contact: &mut RhaiContact,
notes: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.notes(notes);
Ok(contact.clone())
}
#[rhai_fn(name = "circle", return_raw)]
pub fn set_contact_circle(
contact: &mut RhaiContact,
circle: String,
) -> Result<RhaiContact, Box<EvalAltResult>> {
let owned = std::mem::take(contact);
*contact = owned.circle(circle);
Ok(contact.clone())
}
// --- Group Builder ---
#[rhai_fn(name = "new_group", return_raw)]
pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
Ok(Group::new())
}
#[rhai_fn(name = "group_name", return_raw)]
pub fn set_group_name(
group: &mut RhaiGroup,
name: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.name(name);
Ok(group.clone())
}
#[rhai_fn(name = "group_description", return_raw)]
pub fn set_group_description(
group: &mut RhaiGroup,
description: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.description(description);
Ok(group.clone())
}
#[rhai_fn(name = "add_contact", return_raw)]
pub fn add_group_contact(
group: &mut RhaiGroup,
contact_id: i64,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.add_contact(contact_id as u32);
Ok(group.clone())
}
// --- Getters ---
// Contact
#[rhai_fn(name = "get_contact_id")]
pub fn get_contact_id(c: &mut RhaiContact) -> i64 {
c.base.id as i64
}
#[rhai_fn(name = "get_contact_name")]
pub fn get_contact_name(c: &mut RhaiContact) -> String {
c.name.clone()
}
#[rhai_fn(name = "get_contact_description")]
pub fn get_contact_description(c: &mut RhaiContact) -> Option<String> {
c.description.clone()
}
#[rhai_fn(name = "get_contact_address")]
pub fn get_contact_address(c: &mut RhaiContact) -> String {
c.address.clone()
}
#[rhai_fn(name = "get_contact_phone")]
pub fn get_contact_phone(c: &mut RhaiContact) -> String {
c.phone.clone()
}
#[rhai_fn(name = "get_contact_email")]
pub fn get_contact_email(c: &mut RhaiContact) -> String {
c.email.clone()
}
#[rhai_fn(name = "get_contact_notes")]
pub fn get_contact_notes(c: &mut RhaiContact) -> Option<String> {
c.notes.clone()
}
#[rhai_fn(name = "get_contact_circle")]
pub fn get_contact_circle(c: &mut RhaiContact) -> String {
c.circle.clone()
}
// Group
#[rhai_fn(name = "get_group_id")]
pub fn get_group_id(g: &mut RhaiGroup) -> i64 {
g.base.id as i64
}
#[rhai_fn(name = "get_group_name")]
pub fn get_group_name(g: &mut RhaiGroup) -> String {
g.name.clone()
}
#[rhai_fn(name = "get_group_description")]
pub fn get_group_description(g: &mut RhaiGroup) -> Option<String> {
g.description.clone()
}
#[rhai_fn(name = "get_group_contacts")]
pub fn get_group_contacts(g: &mut RhaiGroup) -> Array {
g.contacts
.iter()
.map(|id| Dynamic::from(*id as i64))
.collect()
}
}
pub fn register_contact_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_contact_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_contact",
resource_type_str: "Contact",
rhai_return_rust_type: heromodels::models::contact::Contact
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_contact",
resource_type_str: "Contact",
rhai_return_rust_type: heromodels::models::contact::Contact
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_contact",
resource_type_str: "Contact",
rhai_return_rust_type: heromodels::models::contact::Contact
);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_group",
resource_type_str: "Group",
rhai_return_rust_type: heromodels::models::contact::Group
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_group",
resource_type_str: "Group",
rhai_return_rust_type: heromodels::models::contact::Group
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_group",
resource_type_str: "Group",
rhai_return_rust_type: heromodels::models::contact::Group
);
engine.register_global_module(module.into());
}


@@ -0,0 +1,86 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Engine, EvalAltResult, Module, INT};
use std::mem;
use std::sync::Arc;
use heromodels::models::core::comment::Comment;
type RhaiComment = Comment;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
#[export_module]
mod rhai_comment_module {
use super::{RhaiComment, INT};
#[rhai_fn(name = "new_comment", return_raw)]
pub fn new_comment() -> Result<RhaiComment, Box<EvalAltResult>> {
Ok(Comment::new())
}
#[rhai_fn(name = "user_id", return_raw)]
pub fn set_user_id(
comment: &mut RhaiComment,
user_id: i64,
) -> Result<RhaiComment, Box<EvalAltResult>> {
let owned = std::mem::take(comment);
*comment = owned.user_id(user_id as u32);
Ok(comment.clone())
}
#[rhai_fn(name = "content", return_raw)]
pub fn set_content(
comment: &mut RhaiComment,
content: String,
) -> Result<RhaiComment, Box<EvalAltResult>> {
let owned = std::mem::take(comment);
*comment = owned.content(content);
Ok(comment.clone())
}
#[rhai_fn(name = "get_comment_id")]
pub fn get_comment_id(comment: &mut RhaiComment) -> i64 {
comment.id() as i64
}
#[rhai_fn(name = "get_comment_user_id")]
pub fn get_comment_user_id(comment: &mut RhaiComment) -> i64 {
comment.user_id() as i64
}
#[rhai_fn(name = "get_comment_content")]
pub fn get_comment_content(comment: &mut RhaiComment) -> String {
comment.content().clone()
}
}
pub fn register_comment_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_comment_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_comment",
resource_type_str: "Comment",
rhai_return_rust_type: heromodels::models::core::comment::Comment
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_comment",
resource_type_str: "Comment",
rhai_return_rust_type: heromodels::models::core::comment::Comment
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_comment",
resource_type_str: "Comment",
rhai_return_rust_type: heromodels::models::core::comment::Comment
);
engine.register_global_module(module.into());
}


@@ -0,0 +1,80 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Engine, EvalAltResult, Module, INT};
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
use heromodels::models::finance::account::Account;
type RhaiAccount = Account;
#[export_module]
mod rhai_account_module {
use super::{Array, RhaiAccount, INT};
#[rhai_fn(name = "new_account", return_raw)]
pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
Ok(Account::new())
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(
account: &mut RhaiAccount,
name: String,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.name(name);
Ok(account.clone())
}
#[rhai_fn(name = "user_id", return_raw)]
pub fn set_user_id(
account: &mut RhaiAccount,
user_id: INT,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.user_id(user_id as u32);
Ok(account.clone())
}
#[rhai_fn(name = "get_account_id")]
pub fn get_account_id(account: &mut RhaiAccount) -> i64 {
account.id() as i64
}
#[rhai_fn(name = "get_account_name")]
pub fn get_account_name(account: &mut RhaiAccount) -> String {
account.name().clone()
}
#[rhai_fn(name = "get_account_user_id")]
pub fn get_account_user_id(account: &mut RhaiAccount) -> INT {
account.user_id() as INT
}
}
pub fn register_account_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_account_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_account",
resource_type_str: "Account",
rhai_return_rust_type: heromodels::models::finance::account::Account
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_account",
resource_type_str: "Account",
rhai_return_rust_type: heromodels::models::finance::account::Account
);
engine.register_global_module(module.into());
}


@@ -0,0 +1,16 @@
pub mod node;
pub use node::{
Node,
DeviceInfo,
StorageDevice,
MemoryDevice,
CPUDevice,
GPUDevice,
NetworkDevice,
NodeCapacity,
ComputeSlice,
StorageSlice,
PricingPolicy,
SLAPolicy,
};


@@ -0,0 +1,265 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
use serde::{Deserialize, Serialize};
/// Storage device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageDevice {
/// can be used in node
pub id: String,
/// Size of the storage device in gigabytes
pub size_gb: f64,
/// Description of the storage device
pub description: String,
}
/// Memory device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct MemoryDevice {
/// can be used in node
pub id: String,
/// Size of the memory device in gigabytes
pub size_gb: f64,
/// Description of the memory device
pub description: String,
}
/// CPU device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct CPUDevice {
/// can be used in node
pub id: String,
/// Number of CPU cores
pub cores: i32,
/// Passmark score
pub passmark: i32,
/// Description of the CPU
pub description: String,
/// Brand of the CPU
pub cpu_brand: String,
/// Version of the CPU
pub cpu_version: String,
}
/// GPU device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct GPUDevice {
/// can be used in node
pub id: String,
/// Number of GPU cores
pub cores: i32,
/// Size of the GPU memory in gigabytes
pub memory_gb: f64,
/// Description of the GPU
pub description: String,
pub gpu_brand: String,
pub gpu_version: String,
}
/// Network device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NetworkDevice {
/// can be used in node
pub id: String,
/// Network speed in Mbps
pub speed_mbps: i32,
/// Description of the network device
pub description: String,
}
/// Aggregated device info for a node
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct DeviceInfo {
pub vendor: String,
pub storage: Vec<StorageDevice>,
pub memory: Vec<MemoryDevice>,
pub cpu: Vec<CPUDevice>,
pub gpu: Vec<GPUDevice>,
pub network: Vec<NetworkDevice>,
}
/// NodeCapacity represents the hardware capacity details of a node.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeCapacity {
/// Total storage in gigabytes
pub storage_gb: f64,
/// Total memory in gigabytes
pub mem_gb: f64,
/// Total GPU memory in gigabytes
pub mem_gb_gpu: f64,
/// Passmark score for the node
pub passmark: i32,
/// Total virtual cores
pub vcores: i32,
}
/// Pricing policy for slices (minimal version until full spec available)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct PricingPolicy {
/// Human friendly policy name (e.g. "fixed", "market")
pub name: String,
/// Optional free-form details as JSON-encoded string
pub details: Option<String>,
}
/// SLA policy for slices (minimal version until full spec available)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct SLAPolicy {
/// Uptime in percentage (0..100)
pub uptime: f32,
/// Max response time in ms
pub max_response_time_ms: u32,
}
/// Compute slice (typically represents a base unit of compute)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ComputeSlice {
pub base_data: BaseModelData,
/// the node in the grid, there is an object describing the node
#[index]
pub nodeid: u32,
/// the id of the slice in the node
#[index]
pub id: i32,
pub mem_gb: f64,
pub storage_gb: f64,
pub passmark: i32,
pub vcores: i32,
pub cpu_oversubscription: i32,
pub storage_oversubscription: i32,
/// Min/max allowed price range for validation
#[serde(default)]
pub price_range: Vec<f64>,
/// nr of GPU's see node to know what GPU's are
pub gpus: u8,
/// price per slice (even if the grouped one)
pub price_cc: f64,
pub pricing_policy: PricingPolicy,
pub sla_policy: SLAPolicy,
}
impl ComputeSlice {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodeid: 0,
id: 0,
mem_gb: 0.0,
storage_gb: 0.0,
passmark: 0,
vcores: 0,
cpu_oversubscription: 0,
storage_oversubscription: 0,
price_range: vec![0.0, 0.0],
gpus: 0,
price_cc: 0.0,
pricing_policy: PricingPolicy::default(),
sla_policy: SLAPolicy::default(),
}
}
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
pub fn mem_gb(mut self, v: f64) -> Self { self.mem_gb = v; self }
pub fn storage_gb(mut self, v: f64) -> Self { self.storage_gb = v; self }
pub fn passmark(mut self, v: i32) -> Self { self.passmark = v; self }
pub fn vcores(mut self, v: i32) -> Self { self.vcores = v; self }
pub fn cpu_oversubscription(mut self, v: i32) -> Self { self.cpu_oversubscription = v; self }
pub fn storage_oversubscription(mut self, v: i32) -> Self { self.storage_oversubscription = v; self }
pub fn price_range(mut self, min_max: Vec<f64>) -> Self { self.price_range = min_max; self }
pub fn gpus(mut self, v: u8) -> Self { self.gpus = v; self }
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
}
/// Storage slice (typically 1GB of storage)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageSlice {
pub base_data: BaseModelData,
/// the node in the grid
#[index]
pub nodeid: u32,
/// the id of the slice in the node, are tracked in the node itself
#[index]
pub id: i32,
/// price per slice (even if the grouped one)
pub price_cc: f64,
pub pricing_policy: PricingPolicy,
pub sla_policy: SLAPolicy,
}
impl StorageSlice {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodeid: 0,
id: 0,
price_cc: 0.0,
pricing_policy: PricingPolicy::default(),
sla_policy: SLAPolicy::default(),
}
}
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
}
/// Grid4 Node model
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Node {
pub base_data: BaseModelData,
/// Link to node group
#[index]
pub nodegroupid: i32,
/// Uptime percentage 0..100
pub uptime: i32,
pub computeslices: Vec<ComputeSlice>,
pub storageslices: Vec<StorageSlice>,
pub devices: DeviceInfo,
/// 2 letter code
#[index]
pub country: String,
/// Hardware capacity details
pub capacity: NodeCapacity,
/// lets keep it simple and compatible
pub provisiontime: u32,
}
impl Node {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodegroupid: 0,
uptime: 0,
computeslices: Vec::new(),
storageslices: Vec::new(),
devices: DeviceInfo::default(),
country: String::new(),
capacity: NodeCapacity::default(),
provisiontime: 0,
}
}
pub fn nodegroupid(mut self, v: i32) -> Self { self.nodegroupid = v; self }
pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { self.computeslices.push(s); self }
pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { self.storageslices.push(s); self }
pub fn devices(mut self, d: DeviceInfo) -> Self { self.devices = d; self }
pub fn country(mut self, c: impl ToString) -> Self { self.country = c.to_string(); self }
pub fn capacity(mut self, c: NodeCapacity) -> Self { self.capacity = c; self }
pub fn provisiontime(mut self, t: u32) -> Self { self.provisiontime = t; self }
/// Placeholder for capacity recalculation out of the devices on the Node
pub fn recalc_capacity(mut self) -> Self {
// TODO: calculate NodeCapacity out of the devices on the Node
self
}
}
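// Usage sketch (illustrative): assembling a Node with the fluent builders above.
// All numbers are placeholders, not real capacity data.
fn example_node() -> Node {
    let slice = ComputeSlice::new()
        .nodeid(1)
        .slice_id(0)
        .mem_gb(2.0)
        .storage_gb(20.0)
        .vcores(1)
        .price_cc(0.5)
        .sla_policy(SLAPolicy { uptime: 99.5, max_response_time_ms: 200 });
    Node::new()
        .nodegroupid(1)
        .uptime(99)
        .country("BE")
        .add_compute_slice(slice)
        .recalc_capacity()
}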

View File

@@ -7,6 +7,7 @@ pub mod dnsrecord;
pub mod secretbox;
pub mod signature;
pub mod user_kvs;
pub mod rhai;
// Re-export key types for convenience
pub use user::{User, UserStatus, UserProfile, KYCInfo, KYCStatus, SecretBox};

View File

@@ -0,0 +1,317 @@
use ::rhai::plugin::*;
use ::rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
use std::mem;
use crate::models::heroledger::*;
// ============================================================================
// User Module
// ============================================================================
type RhaiUser = User;
#[export_module]
mod rhai_user_module {
    use super::*;
#[rhai_fn(name = "new_user", return_raw)]
pub fn new_user() -> Result<RhaiUser, Box<EvalAltResult>> {
Ok(User::new(0))
}
#[rhai_fn(name = "username", return_raw)]
pub fn set_username(
user: &mut RhaiUser,
username: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let owned = std::mem::take(user);
*user = owned.username(username);
Ok(user.clone())
}
#[rhai_fn(name = "add_email", return_raw)]
pub fn add_email(
user: &mut RhaiUser,
email: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let owned = std::mem::take(user);
*user = owned.add_email(email);
Ok(user.clone())
}
#[rhai_fn(name = "pubkey", return_raw)]
pub fn set_pubkey(
user: &mut RhaiUser,
pubkey: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let owned = std::mem::take(user);
*user = owned.pubkey(pubkey);
Ok(user.clone())
}
#[rhai_fn(name = "status", return_raw)]
pub fn set_status(
user: &mut RhaiUser,
status: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let status_enum = match status.as_str() {
"Active" => UserStatus::Active,
"Inactive" => UserStatus::Inactive,
"Suspended" => UserStatus::Suspended,
"Archived" => UserStatus::Archived,
_ => return Err(format!("Invalid user status: {}", status).into()),
};
let owned = std::mem::take(user);
*user = owned.status(status_enum);
Ok(user.clone())
}
#[rhai_fn(name = "save_user", return_raw)]
pub fn save_user(user: &mut RhaiUser) -> Result<RhaiUser, Box<EvalAltResult>> {
// This would integrate with the database save functionality
// For now, just return the user as-is
Ok(user.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(user: &mut RhaiUser) -> i64 {
user.base_data.id as i64
}
#[rhai_fn(name = "get_username")]
pub fn get_username(user: &mut RhaiUser) -> String {
user.username.clone()
}
#[rhai_fn(name = "get_email")]
pub fn get_email(user: &mut RhaiUser) -> String {
if let Some(first_email) = user.email.first() {
first_email.clone()
} else {
String::new()
}
}
#[rhai_fn(name = "get_pubkey")]
pub fn get_pubkey(user: &mut RhaiUser) -> String {
user.pubkey.clone()
}
}
// ============================================================================
// Group Module
// ============================================================================
type RhaiGroup = Group;
#[export_module]
mod rhai_group_module {
    use super::*;
#[rhai_fn(name = "new_group", return_raw)]
pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
Ok(Group::new(0))
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(
group: &mut RhaiGroup,
name: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.name(name);
Ok(group.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_description(
group: &mut RhaiGroup,
description: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.description(description);
Ok(group.clone())
}
#[rhai_fn(name = "visibility", return_raw)]
pub fn set_visibility(
group: &mut RhaiGroup,
visibility: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let visibility_enum = match visibility.as_str() {
"Public" => Visibility::Public,
"Private" => Visibility::Private,
_ => return Err(format!("Invalid visibility: {}", visibility).into()),
};
let owned = std::mem::take(group);
*group = owned.visibility(visibility_enum);
Ok(group.clone())
}
#[rhai_fn(name = "save_group", return_raw)]
pub fn save_group(group: &mut RhaiGroup) -> Result<RhaiGroup, Box<EvalAltResult>> {
Ok(group.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(group: &mut RhaiGroup) -> i64 {
group.base_data.id as i64
}
#[rhai_fn(name = "get_name")]
pub fn get_name(group: &mut RhaiGroup) -> String {
group.name.clone()
}
#[rhai_fn(name = "get_description")]
pub fn get_description(group: &mut RhaiGroup) -> String {
group.description.clone()
}
}
// ============================================================================
// Account Module (from money.rs)
// ============================================================================
type RhaiAccount = Account;
#[export_module]
mod rhai_account_module {
    use super::*;
#[rhai_fn(name = "new_account", return_raw)]
pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
Ok(Account::new(0))
}
#[rhai_fn(name = "owner_id", return_raw)]
pub fn set_owner_id(
account: &mut RhaiAccount,
owner_id: i64,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.owner_id(owner_id as u32);
Ok(account.clone())
}
#[rhai_fn(name = "address", return_raw)]
pub fn set_address(
account: &mut RhaiAccount,
address: String,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.address(address);
Ok(account.clone())
}
#[rhai_fn(name = "currency", return_raw)]
pub fn set_currency(
account: &mut RhaiAccount,
currency: String,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.currency(currency);
Ok(account.clone())
}
#[rhai_fn(name = "save_account", return_raw)]
pub fn save_account(account: &mut RhaiAccount) -> Result<RhaiAccount, Box<EvalAltResult>> {
Ok(account.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(account: &mut RhaiAccount) -> i64 {
account.base_data.id as i64
}
#[rhai_fn(name = "get_address")]
pub fn get_address(account: &mut RhaiAccount) -> String {
account.address.clone()
}
#[rhai_fn(name = "get_currency")]
pub fn get_currency(account: &mut RhaiAccount) -> String {
account.currency.clone()
}
}
// ============================================================================
// DNS Zone Module
// ============================================================================
type RhaiDNSZone = DNSZone;
#[export_module]
mod rhai_dns_zone_module {
    use super::*;
#[rhai_fn(name = "new_dns_zone", return_raw)]
pub fn new_dns_zone() -> Result<RhaiDNSZone, Box<EvalAltResult>> {
Ok(DNSZone::new(0))
}
#[rhai_fn(name = "domain", return_raw)]
pub fn set_domain(
zone: &mut RhaiDNSZone,
domain: String,
) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
let owned = std::mem::take(zone);
*zone = owned.domain(domain);
Ok(zone.clone())
}
#[rhai_fn(name = "save_dns_zone", return_raw)]
pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
Ok(zone.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(zone: &mut RhaiDNSZone) -> i64 {
zone.base_data.id as i64
}
#[rhai_fn(name = "get_domain")]
pub fn get_domain(zone: &mut RhaiDNSZone) -> String {
zone.domain.clone()
}
}
// ============================================================================
// Registration Functions
// ============================================================================
// Registration functions
pub fn register_user_functions(engine: &mut Engine) {
let module = exported_module!(rhai_user_module);
engine.register_static_module("user", module.into());
}
pub fn register_group_functions(engine: &mut Engine) {
let module = exported_module!(rhai_group_module);
engine.register_static_module("group", module.into());
}
pub fn register_account_functions(engine: &mut Engine) {
let module = exported_module!(rhai_account_module);
engine.register_static_module("account", module.into());
}
pub fn register_dnszone_functions(engine: &mut Engine) {
let module = exported_module!(rhai_dns_zone_module);
engine.register_static_module("dnszone", module.into());
}
/// Register all heroledger Rhai modules with the engine
pub fn register_heroledger_rhai_modules(engine: &mut Engine) {
register_user_functions(engine);
register_group_functions(engine);
register_account_functions(engine);
register_dnszone_functions(engine);
}
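// Usage sketch (illustrative): because the modules above are registered as *static*
// modules, their functions are reached through the module namespace (e.g.
// `user::new_user()`), not as global functions. The script values are placeholders.
fn demo_heroledger_dsl() -> Result<(), Box<EvalAltResult>> {
    let mut engine = Engine::new();
    register_heroledger_rhai_modules(&mut engine);
    let username: String = engine.eval(
        r#"
            let u = user::new_user();
            u = user::username(u, "alice");
            user::get_username(u)
        "#,
    )?;
    assert_eq!(username, "alice");
    Ok(())
}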

View File

@@ -0,0 +1,156 @@
use derive::FromVec;
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn, register_authorized_list_fn,
};
use rhai::plugin::*;
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, Position, TypeBuilder};
use serde::Serialize;
use serde_json;
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection as DbCollectionTrait;
use heromodels::models::library::collection::Collection as RhaiCollection;
use heromodels::models::library::items::{
Book as RhaiBook, Image as RhaiImage, Markdown as RhaiMarkdown, Pdf as RhaiPdf,
Slide as RhaiSlide, Slideshow as RhaiSlideshow, TocEntry as RhaiTocEntry,
};
/// Registers a `.json()` method for any type `T` that implements the required traits.
fn register_json_method<T>(engine: &mut Engine)
where
T: CustomType + Clone + Serialize,
{
let to_json_fn = |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
match serde_json::to_string_pretty(obj) {
Ok(json_str) => Ok(json_str),
Err(e) => Err(format!("Failed to serialize to JSON: {}", e).into()),
}
};
engine.register_fn("json", to_json_fn);
}
// Wrapper types for arrays
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "CollectionArray")]
pub struct RhaiCollectionArray(pub Vec<RhaiCollection>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "ImageArray")]
pub struct RhaiImageArray(pub Vec<RhaiImage>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "PdfArray")]
pub struct RhaiPdfArray(pub Vec<RhaiPdf>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "MarkdownArray")]
pub struct RhaiMarkdownArray(pub Vec<RhaiMarkdown>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "BookArray")]
pub struct RhaiBookArray(pub Vec<RhaiBook>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "SlideshowArray")]
pub struct RhaiSlideshowArray(pub Vec<RhaiSlideshow>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "TocEntryArray")]
pub struct RhaiTocEntryArray(pub Vec<RhaiTocEntry>);
#[export_module]
mod rhai_library_module {
use super::*;
// --- Collection Functions ---
#[rhai_fn(name = "new_collection", return_raw)]
pub fn new_collection() -> Result<RhaiCollection, Box<EvalAltResult>> {
Ok(RhaiCollection::new())
}
#[rhai_fn(name = "collection_title", return_raw)]
pub fn collection_title(
collection: &mut RhaiCollection,
title: String,
) -> Result<RhaiCollection, Box<EvalAltResult>> {
let owned = std::mem::take(collection);
*collection = owned.title(title);
Ok(collection.clone())
}
#[rhai_fn(name = "collection_description", return_raw)]
pub fn collection_description(
collection: &mut RhaiCollection,
description: String,
) -> Result<RhaiCollection, Box<EvalAltResult>> {
let owned = std::mem::take(collection);
*collection = owned.description(description);
Ok(collection.clone())
}
#[rhai_fn(name = "get_collection_id")]
pub fn get_collection_id(collection: &mut RhaiCollection) -> i64 {
collection.id() as i64
}
#[rhai_fn(name = "get_collection_title")]
pub fn get_collection_title(collection: &mut RhaiCollection) -> String {
collection.title().clone()
}
// --- Image Functions ---
#[rhai_fn(name = "new_image", return_raw)]
pub fn new_image() -> Result<RhaiImage, Box<EvalAltResult>> {
Ok(RhaiImage::new())
}
#[rhai_fn(name = "image_title", return_raw)]
pub fn image_title(
image: &mut RhaiImage,
title: String,
) -> Result<RhaiImage, Box<EvalAltResult>> {
let owned = std::mem::take(image);
*image = owned.title(title);
Ok(image.clone())
}
#[rhai_fn(name = "get_image_id")]
pub fn get_image_id(image: &mut RhaiImage) -> i64 {
image.id() as i64
}
// Additional functions would continue here...
}
pub fn register_library_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_library_module);
register_json_method::<RhaiCollection>(engine);
register_json_method::<RhaiImage>(engine);
register_json_method::<RhaiPdf>(engine);
register_json_method::<RhaiMarkdown>(engine);
register_json_method::<RhaiBook>(engine);
register_json_method::<RhaiSlideshow>(engine);
register_json_method::<RhaiTocEntry>(engine);
register_json_method::<RhaiCollectionArray>(engine);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_collection",
resource_type_str: "Collection",
rhai_return_rust_type: heromodels::models::library::collection::Collection
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_collection",
resource_type_str: "Collection",
rhai_return_rust_type: heromodels::models::library::collection::Collection
);
engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Address {
pub street: String,
pub city: String,
pub state: Option<String>,
pub postal_code: String,
pub country: String,
pub company: Option<String>,
}

View File

@@ -0,0 +1,2 @@
// Export location models
pub mod address;

View File

@@ -13,10 +13,13 @@ pub mod governance;
pub mod heroledger;
pub mod legal;
pub mod library;
pub mod location;
pub mod object;
pub mod projects;
pub mod payment;
pub mod identity;
pub mod tfmarketplace;
pub mod grid4;
// Re-export key types for convenience
pub use core::Comment;

View File

@@ -1,5 +1,6 @@
// Export contact module
// Export object module
pub mod object;
pub mod object_rhai_dsl;
// Re-export contact, Group from the inner contact module (contact.rs) within src/models/contact/mod.rs
// Re-export Object from the inner object module (object.rs) within src/models/object/mod.rs
pub use self::object::Object;

View File

@@ -0,0 +1,56 @@
use rhai::plugin::*;
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module};
use super::Object;
type RhaiObject = Object;
#[export_module]
pub mod generated_rhai_module {
use super::*;
/// Create a new Object
#[rhai_fn(name = "new_object")]
pub fn new_object() -> RhaiObject {
Object::new()
}
/// Set the title of an Object
#[rhai_fn(name = "object_title")]
pub fn object_title(
object: &mut RhaiObject,
title: String,
) -> RhaiObject {
let mut result = object.clone();
result.title = title;
result
}
/// Set the description of an Object
#[rhai_fn(name = "object_description")]
pub fn object_description(
object: &mut RhaiObject,
description: String,
) -> RhaiObject {
let mut result = object.clone();
result.description = description;
result
}
/// Get the ID of an Object
#[rhai_fn(name = "get_object_id")]
pub fn get_object_id(object: &mut RhaiObject) -> i64 {
object.id() as i64
}
/// Get the title of an Object
#[rhai_fn(name = "get_object_title")]
pub fn get_object_title(object: &mut RhaiObject) -> String {
object.title.clone()
}
/// Get the description of an Object
#[rhai_fn(name = "get_object_description")]
pub fn get_object_description(object: &mut RhaiObject) -> String {
object.description.clone()
}
}
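// Usage sketch (illustrative): registering the exported module globally and exercising
// the pure builder functions; database-backed save/get wrappers are added separately by
// `register_object_fns`.
fn demo_object_dsl() -> Result<String, Box<EvalAltResult>> {
    let mut engine = Engine::new();
    engine.register_global_module(rhai::exported_module!(generated_rhai_module).into());
    engine.eval(
        r#"
            let o = new_object();
            o = o.object_title("Spec");
            o = o.object_description("Grid4 node specification");
            o.get_object_title()
        "#,
    )
}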

View File

@@ -0,0 +1,27 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::object::Object;
use macros::{register_authorized_create_by_id_fn, register_authorized_get_by_id_fn};
use rhai::{exported_module, Engine, EvalAltResult, FuncRegistration, Module};
use std::sync::Arc;
pub fn register_object_fns(engine: &mut Engine) {
let mut module = Module::new();
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_object_by_id",
resource_type_str: "Object",
rhai_return_rust_type: heromodels::models::object::Object
);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_object",
resource_type_str: "Object",
rhai_return_rust_type: heromodels::models::object::Object
);
engine.register_global_module(module.into());
engine.register_type_with_name::<Object>("Object");
}

View File

@@ -0,0 +1,49 @@
use rhai::plugin::*;
use rhai::{Dynamic, Engine, EvalAltResult, Module};
// Simplified payment module - contains the core Stripe integration
// This is a condensed version of the original payment.rs DSL file
#[export_module]
mod rhai_payment_module {
// Payment configuration and basic functions
#[rhai_fn(name = "configure_stripe", return_raw)]
pub fn configure_stripe(api_key: String) -> Result<String, Box<EvalAltResult>> {
Ok(format!("Stripe configured with key: {}...", &api_key[..8]))
}
// Product functions
#[rhai_fn(name = "new_product", return_raw)]
pub fn new_product() -> Result<Dynamic, Box<EvalAltResult>> {
Ok(Dynamic::from("product_created"))
}
// Price functions
#[rhai_fn(name = "new_price", return_raw)]
pub fn new_price() -> Result<Dynamic, Box<EvalAltResult>> {
Ok(Dynamic::from("price_created"))
}
// Subscription functions
#[rhai_fn(name = "new_subscription", return_raw)]
pub fn new_subscription() -> Result<Dynamic, Box<EvalAltResult>> {
Ok(Dynamic::from("subscription_created"))
}
// Payment intent functions
#[rhai_fn(name = "new_payment_intent", return_raw)]
pub fn new_payment_intent() -> Result<Dynamic, Box<EvalAltResult>> {
Ok(Dynamic::from("payment_intent_created"))
}
// Coupon functions
#[rhai_fn(name = "new_coupon", return_raw)]
pub fn new_coupon() -> Result<Dynamic, Box<EvalAltResult>> {
Ok(Dynamic::from("coupon_created"))
}
}
pub fn register_payment_rhai_module(engine: &mut Engine) {
let module = exported_module!(rhai_payment_module);
engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,115 @@
use chrono::{DateTime, Utc};
use heromodels_core::BaseModelData;
use serde::{Deserialize, Serialize};
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Default)]
pub struct UserActivityBuilder {
    base_data: BaseModelData,
    // id: Option<String> - moved to base_data
activity_type: Option<crate::models::user::ActivityType>,
description: Option<String>,
timestamp: Option<chrono::DateTime<chrono::Utc>>,
metadata: Option<std::collections::HashMap<String, serde_json::Value>>,
category: Option<String>,
importance: Option<crate::models::user::ActivityImportance>,
}
impl UserActivityBuilder {
pub fn new() -> Self {
Self::default()
}
    pub fn id(mut self, id: u32) -> Self {
        self.base_data.id = id;
        self
    }
pub fn activity_type(mut self, activity_type: crate::models::user::ActivityType) -> Self {
self.activity_type = Some(activity_type);
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn timestamp(mut self, timestamp: chrono::DateTime<chrono::Utc>) -> Self {
self.timestamp = Some(timestamp);
self
}
pub fn metadata(mut self, metadata: std::collections::HashMap<String, serde_json::Value>) -> Self {
self.metadata = Some(metadata);
self
}
pub fn category(mut self, category: impl Into<String>) -> Self {
self.category = Some(category.into());
self
}
pub fn importance(mut self, importance: crate::models::user::ActivityImportance) -> Self {
self.importance = Some(importance);
self
}
pub fn build(self) -> Result<crate::models::user::UserActivity, String> {
Ok(crate::models::user::UserActivity {
            base_data: self.base_data,
// id: self.base_data.id.unwrap_or_else(|| uuid::Uuid::new_v4().to_string()) - moved to base_data,
activity_type: self.activity_type.ok_or("activity_type is required")?,
description: self.description.unwrap_or_else(|| "No description".to_string()),
timestamp: self.timestamp.unwrap_or_else(|| chrono::Utc::now()),
metadata: self.metadata.unwrap_or_default(),
category: self.category.unwrap_or_else(|| "General".to_string()),
importance: self.importance.unwrap_or(crate::models::user::ActivityImportance::Medium),
})
}
}
/// User Activity Tracking
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserActivity {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub activity_type: ActivityType,
pub description: String,
    pub timestamp: DateTime<Utc>,
pub metadata: std::collections::HashMap<String, serde_json::Value>,
pub category: String,
pub importance: ActivityImportance,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ActivityType {
Login,
Purchase,
Deployment,
ServiceCreated,
AppPublished,
NodeAdded,
NodeUpdated,
WalletTransaction,
ProfileUpdate,
SettingsChange,
MarketplaceView,
SliceCreated,
SliceAllocated,
SliceReleased,
SliceRentalStarted,
SliceRentalStopped,
SliceRentalRestarted,
SliceRentalCancelled,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ActivityImportance {
Low,
Medium,
High,
Critical,
}
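// Usage sketch (illustrative): only `activity_type` is mandatory in the builder above;
// every other field falls back to a default inside `build()`. Strings are placeholders.
fn example_activity() -> Result<crate::models::user::UserActivity, String> {
    UserActivityBuilder::new()
        .activity_type(crate::models::user::ActivityType::Login)
        .description("Signed in from the web UI")
        .category("Auth")
        .importance(crate::models::user::ActivityImportance::Low)
        .build()
}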

View File

@@ -0,0 +1,361 @@
use heromodels_core::BaseModelData;
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
/// Unified App struct that can represent published apps, deployments, and deployment stats
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct App {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
// Core app information
pub name: String,
pub category: Option<String>,
pub version: Option<String>,
pub status: String,
// Deployment information
pub customer_name: Option<String>,
pub customer_email: Option<String>,
pub deployed_date: Option<String>,
pub health_score: Option<f32>,
pub region: Option<String>,
pub instances: Option<i32>,
pub resource_usage: Option<ResourceUtilization>,
// Business metrics
pub deployments: Option<i32>,
pub rating: Option<f32>,
pub monthly_revenue_usd: Option<i32>,
pub cost_per_month: Option<Decimal>,
// Metadata
pub last_updated: Option<String>,
pub auto_healing: Option<bool>,
pub provider: Option<String>,
pub deployed_at: Option<DateTime<Utc>>,
}
impl App {
/// Convenience method to get the app ID
pub fn id(&self) -> &u32 {
&self.base_data.id
}
/// Get category with default
pub fn category_or_default(&self) -> String {
self.category.clone().unwrap_or_else(|| "Application".to_string())
}
/// Get version with default
pub fn version_or_default(&self) -> String {
self.version.clone().unwrap_or_else(|| "1.0.0".to_string())
}
/// Get deployments count with default
pub fn deployments_or_default(&self) -> i32 {
self.deployments.unwrap_or(0)
}
/// Get rating with default
pub fn rating_or_default(&self) -> f32 {
self.rating.unwrap_or(4.0)
}
/// Get monthly revenue with default
pub fn monthly_revenue_usd_or_default(&self) -> i32 {
self.monthly_revenue_usd.unwrap_or(0)
}
/// Get last updated with default
pub fn last_updated_or_default(&self) -> String {
self.last_updated.clone().unwrap_or_else(|| "Unknown".to_string())
}
/// Get auto healing with default
pub fn auto_healing_or_default(&self) -> bool {
self.auto_healing.unwrap_or(false)
}
}
pub struct Deployment {
pub base_data: BaseModelData,
pub app_id: String,
pub instance_id: String,
pub status: String,
pub region: String,
pub health_score: Option<f32>,
pub resource_usage: Option<ResourceUtilization>,
pub deployed_at: Option<DateTime<Utc>>,
}
/// Resource utilization information
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResourceUtilization {
pub cpu: i32,
pub memory: i32,
pub storage: i32,
pub network: i32,
}
/// Deployment status enumeration
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub enum DeploymentStatus {
#[default]
Running,
Stopped,
Failed,
Pending,
Maintenance,
}
/// Unified App builder
#[derive(Default)]
pub struct AppBuilder {
base_data: BaseModelData,
name: Option<String>,
category: Option<String>,
version: Option<String>,
status: Option<String>,
customer_name: Option<String>,
customer_email: Option<String>,
deployed_date: Option<String>,
health_score: Option<f32>,
region: Option<String>,
instances: Option<i32>,
resource_usage: Option<ResourceUtilization>,
deployments: Option<i32>,
rating: Option<f32>,
monthly_revenue_usd: Option<i32>,
cost_per_month: Option<Decimal>,
last_updated: Option<String>,
auto_healing: Option<bool>,
provider: Option<String>,
deployed_at: Option<DateTime<Utc>>,
}
impl AppBuilder {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
..Default::default()
}
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn category(mut self, category: impl Into<String>) -> Self {
self.category = Some(category.into());
self
}
pub fn version(mut self, version: impl Into<String>) -> Self {
self.version = Some(version.into());
self
}
pub fn status(mut self, status: impl Into<String>) -> Self {
self.status = Some(status.into());
self
}
pub fn customer_name(mut self, name: impl Into<String>) -> Self {
self.customer_name = Some(name.into());
self
}
pub fn customer_email(mut self, email: impl Into<String>) -> Self {
self.customer_email = Some(email.into());
self
}
pub fn deployed_date(mut self, date: impl Into<String>) -> Self {
self.deployed_date = Some(date.into());
self
}
pub fn health_score(mut self, score: f32) -> Self {
self.health_score = Some(score);
self
}
pub fn region(mut self, region: impl Into<String>) -> Self {
self.region = Some(region.into());
self
}
pub fn instances(mut self, instances: i32) -> Self {
self.instances = Some(instances);
self
}
pub fn resource_usage(mut self, usage: ResourceUtilization) -> Self {
self.resource_usage = Some(usage);
self
}
pub fn deployments(mut self, deployments: i32) -> Self {
self.deployments = Some(deployments);
self
}
pub fn rating(mut self, rating: f32) -> Self {
self.rating = Some(rating);
self
}
pub fn monthly_revenue_usd(mut self, revenue: i32) -> Self {
self.monthly_revenue_usd = Some(revenue);
self
}
pub fn cost_per_month(mut self, cost: Decimal) -> Self {
self.cost_per_month = Some(cost);
self
}
pub fn last_updated(mut self, updated: impl Into<String>) -> Self {
self.last_updated = Some(updated.into());
self
}
pub fn auto_healing(mut self, enabled: bool) -> Self {
self.auto_healing = Some(enabled);
self
}
pub fn provider(mut self, provider: impl Into<String>) -> Self {
self.provider = Some(provider.into());
self
}
pub fn deployed_at(mut self, date: DateTime<Utc>) -> Self {
self.deployed_at = Some(date);
self
}
pub fn build(self) -> Result<App, String> {
Ok(App {
base_data: self.base_data,
name: self.name.ok_or("name is required")?,
category: self.category,
version: self.version,
status: self.status.unwrap_or_else(|| "Active".to_string()),
customer_name: self.customer_name,
customer_email: self.customer_email,
deployed_date: self.deployed_date,
health_score: self.health_score,
region: self.region,
instances: self.instances,
resource_usage: self.resource_usage,
deployments: self.deployments,
rating: self.rating,
monthly_revenue_usd: self.monthly_revenue_usd,
cost_per_month: self.cost_per_month,
last_updated: self.last_updated,
auto_healing: self.auto_healing,
provider: self.provider,
deployed_at: self.deployed_at,
})
}
}
impl App {
pub fn builder() -> AppBuilder {
AppBuilder::new()
}
// Template methods for common app types
pub fn analytics_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Analytics")
.version("1.0.0")
.status("Active")
.rating(4.5)
.auto_healing(true)
.build()
.unwrap()
}
pub fn database_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Database")
.version("1.0.0")
.status("Active")
.rating(4.2)
.auto_healing(false) // Databases need manual intervention
.build()
.unwrap()
}
pub fn web_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Web")
.version("1.0.0")
.status("Active")
.rating(4.0)
.auto_healing(true)
.build()
.unwrap()
}
// Fluent methods for chaining
pub fn with_stats(mut self, deployments: i32, rating: f32, monthly_revenue_usd: i32) -> Self {
self.deployments = Some(deployments);
self.rating = Some(rating);
self.monthly_revenue_usd = Some(monthly_revenue_usd);
self
}
pub fn with_auto_healing(mut self, enabled: bool) -> Self {
self.auto_healing = Some(enabled);
self
}
pub fn with_version(mut self, version: impl Into<String>) -> Self {
self.version = Some(version.into());
self
}
pub fn with_last_updated(mut self, updated: impl Into<String>) -> Self {
self.last_updated = Some(updated.into());
self
}
pub fn with_deployment_info(mut self, customer_name: &str, customer_email: &str, region: &str) -> Self {
self.customer_name = Some(customer_name.to_string());
self.customer_email = Some(customer_email.to_string());
self.region = Some(region.to_string());
self.deployed_at = Some(Utc::now());
self
}
pub fn with_resource_usage(mut self, cpu: i32, memory: i32, storage: i32, network: i32) -> Self {
self.resource_usage = Some(ResourceUtilization {
cpu,
memory,
storage,
network,
});
self
}
}
// Type aliases for backward compatibility
pub type PublishedApp = App;
pub type AppDeployment = App;
pub type DeploymentStat = App;
pub type UserDeployment = App;
pub type PublishedAppBuilder = AppBuilder;
pub type AppDeploymentBuilder = AppBuilder;
pub type DeploymentStatBuilder = AppBuilder;
pub type UserDeploymentBuilder = AppBuilder;

View File

@@ -0,0 +1,351 @@
//! Builder patterns for all marketplace models
//! This module provides a centralized, maintainable way to construct complex structs
//! with sensible defaults and validation.
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use serde_json::Value;
use std::collections::HashMap;
use super::{
user::{PublishedApp, DeploymentStat, ResourceUtilization, User, UserRole, MockUserData, ServiceBooking},
product::{Product, ProductAttribute, ProductAvailability, ProductMetadata},
order::{Order, OrderItem, OrderStatus, PaymentDetails, Address, PurchaseType},
};
use crate::services::user_persistence::AppDeployment;
use heromodels_core::BaseModelData;
// =============================================================================
// USER MODEL BUILDERS
// =============================================================================
#[derive(Default)]
pub struct MockDataBuilder {
user_type: Option<String>,
include_farmer_data: Option<bool>,
include_service_data: Option<bool>,
include_app_data: Option<bool>,
}
impl MockDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn user_type(mut self, user_type: impl Into<String>) -> Self {
self.user_type = Some(user_type.into());
self
}
pub fn include_farmer_data(mut self, include: bool) -> Self {
self.include_farmer_data = Some(include);
self
}
pub fn include_service_data(mut self, include: bool) -> Self {
self.include_service_data = Some(include);
self
}
pub fn include_app_data(mut self, include: bool) -> Self {
self.include_app_data = Some(include);
self
}
pub fn build(self) -> crate::models::user::MockUserData {
// This would create appropriate mock data based on configuration
// For now, return a default instance
crate::models::user::MockUserData::new_user()
}
}
// =============================================================================
// FARMER DATA BUILDER
// =============================================================================
#[derive(Default)]
pub struct FarmerDataBuilder {
total_nodes: Option<i32>,
online_nodes: Option<i32>,
total_capacity: Option<crate::models::user::NodeCapacity>,
used_capacity: Option<crate::models::user::NodeCapacity>,
monthly_earnings: Option<i32>,
total_earnings: Option<i32>,
uptime_percentage: Option<f32>,
nodes: Option<Vec<crate::models::user::FarmNode>>,
earnings_history: Option<Vec<crate::models::user::EarningsRecord>>,
active_slices: Option<i32>,
}
impl FarmerDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn total_nodes(mut self, total_nodes: i32) -> Self {
self.total_nodes = Some(total_nodes);
self
}
pub fn online_nodes(mut self, online_nodes: i32) -> Self {
self.online_nodes = Some(online_nodes);
self
}
pub fn total_capacity(mut self, capacity: crate::models::user::NodeCapacity) -> Self {
self.total_capacity = Some(capacity);
self
}
pub fn used_capacity(mut self, capacity: crate::models::user::NodeCapacity) -> Self {
self.used_capacity = Some(capacity);
self
}
pub fn monthly_earnings_usd(mut self, earnings: i32) -> Self {
self.monthly_earnings = Some(earnings);
self
}
pub fn total_earnings_usd(mut self, earnings: i32) -> Self {
self.total_earnings = Some(earnings);
self
}
pub fn uptime_percentage(mut self, uptime: f32) -> Self {
self.uptime_percentage = Some(uptime);
self
}
pub fn nodes(mut self, nodes: Vec<crate::models::user::FarmNode>) -> Self {
self.nodes = Some(nodes);
self
}
pub fn earnings_history(mut self, history: Vec<crate::models::user::EarningsRecord>) -> Self {
self.earnings_history = Some(history);
self
}
pub fn earnings(mut self, earnings: Vec<crate::models::user::EarningsRecord>) -> Self {
self.earnings_history = Some(earnings);
self
}
pub fn active_slices(mut self, active_slices: i32) -> Self {
self.active_slices = Some(active_slices);
self
}
pub fn calculate_totals(mut self) -> Self {
// Calculate totals from existing data
if let Some(ref nodes) = self.nodes {
self.total_nodes = Some(nodes.len() as i32);
self.online_nodes = Some(nodes.iter().filter(|n| matches!(n.status, crate::models::user::NodeStatus::Online)).count() as i32);
// Calculate total and used capacity from all nodes
let mut total_capacity = crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
};
let mut used_capacity = crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
};
for node in nodes {
total_capacity.cpu_cores += node.capacity.cpu_cores;
total_capacity.memory_gb += node.capacity.memory_gb;
total_capacity.storage_gb += node.capacity.storage_gb;
total_capacity.bandwidth_mbps += node.capacity.bandwidth_mbps;
total_capacity.ssd_storage_gb += node.capacity.ssd_storage_gb;
total_capacity.hdd_storage_gb += node.capacity.hdd_storage_gb;
used_capacity.cpu_cores += node.used_capacity.cpu_cores;
used_capacity.memory_gb += node.used_capacity.memory_gb;
used_capacity.storage_gb += node.used_capacity.storage_gb;
used_capacity.bandwidth_mbps += node.used_capacity.bandwidth_mbps;
used_capacity.ssd_storage_gb += node.used_capacity.ssd_storage_gb;
used_capacity.hdd_storage_gb += node.used_capacity.hdd_storage_gb;
}
self.total_capacity = Some(total_capacity);
self.used_capacity = Some(used_capacity);
// Calculate uptime percentage
if !nodes.is_empty() {
let avg_uptime = nodes.iter().map(|n| n.uptime_percentage).sum::<f32>() / nodes.len() as f32;
self.uptime_percentage = Some(avg_uptime);
}
}
if let Some(ref earnings) = self.earnings_history {
let total: i32 = earnings.iter().map(|e| e.amount.to_string().parse::<i32>().unwrap_or(0)).sum();
self.total_earnings = Some(total);
self.monthly_earnings = Some(total); // Set monthly earnings as well
}
self
}
pub fn build(self) -> Result<crate::models::user::FarmerData, String> {
Ok(crate::models::user::FarmerData {
total_nodes: self.total_nodes.unwrap_or(0),
online_nodes: self.online_nodes.unwrap_or(0),
total_capacity: self.total_capacity.unwrap_or(crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
}),
used_capacity: self.used_capacity.unwrap_or(crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
}),
monthly_earnings_usd: self.monthly_earnings.unwrap_or(0),
total_earnings_usd: self.total_earnings.unwrap_or(0),
uptime_percentage: self.uptime_percentage.unwrap_or(0.0),
nodes: self.nodes.unwrap_or_default(),
earnings_history: self.earnings_history.unwrap_or_default(),
slice_templates: Vec::default(), // Will be populated separately
active_slices: self.active_slices.unwrap_or(0),
})
}
}
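// Usage sketch (illustrative): a FarmerData built from explicit totals; passing
// `nodes(...)` and calling `calculate_totals()` would derive the capacity and uptime
// figures instead. All numbers are placeholders.
fn example_farmer_data() -> Result<crate::models::user::FarmerData, String> {
    FarmerDataBuilder::new()
        .total_nodes(3)
        .online_nodes(2)
        .monthly_earnings_usd(120)
        .total_earnings_usd(1450)
        .uptime_percentage(98.7)
        .active_slices(5)
        .build()
}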
// =============================================================================
// SERVICE BOOKING BUILDER
// =============================================================================
#[derive(Default)]
pub struct SpendingRecordBuilder {
date: Option<String>,
amount: Option<i32>,
service_name: Option<String>,
provider_name: Option<String>,
}
impl SpendingRecordBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn date(mut self, date: &str) -> Self {
self.date = Some(date.to_string());
self
}
pub fn amount(mut self, amount: i32) -> Self {
self.amount = Some(amount);
self
}
pub fn service_name(mut self, name: &str) -> Self {
self.service_name = Some(name.to_string());
self
}
pub fn provider_name(mut self, name: &str) -> Self {
self.provider_name = Some(name.to_string());
self
}
pub fn build(self) -> Result<crate::models::user::SpendingRecord, String> {
Ok(crate::models::user::SpendingRecord {
date: self.date.ok_or("Date is required")?,
amount: self.amount.unwrap_or(0),
service_name: self.service_name.ok_or("Service name is required")?,
provider_name: self.provider_name.ok_or("Provider name is required")?,
})
}
}
impl crate::models::user::SpendingRecord {
pub fn builder() -> SpendingRecordBuilder {
SpendingRecordBuilder::new()
}
}
// =============================================================================
// AUTO TOP-UP BUILDERS
// =============================================================================
#[derive(Default)]
pub struct AutoTopUpSettingsBuilder {
enabled: Option<bool>,
threshold_amount: Option<Decimal>,
topup_amount: Option<Decimal>,
    payment_method_base_data: BaseModelData,
    // payment_method_id moved into payment_method_base_data
daily_limit: Option<Decimal>,
monthly_limit: Option<Decimal>,
}
impl AutoTopUpSettingsBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn enabled(mut self, enabled: bool) -> Self {
self.enabled = Some(enabled);
self
}
pub fn threshold_amount(mut self, amount: Decimal) -> Self {
self.threshold_amount = Some(amount);
self
}
pub fn topup_amount(mut self, amount: Decimal) -> Self {
self.topup_amount = Some(amount);
self
}
    pub fn payment_method_id(mut self, id: u32) -> Self {
        self.payment_method_base_data.id = id;
        self
    }
pub fn daily_limit(mut self, limit: Decimal) -> Self {
self.daily_limit = Some(limit);
self
}
pub fn monthly_limit(mut self, limit: Decimal) -> Self {
self.monthly_limit = Some(limit);
self
}
pub fn build(self) -> Result<crate::services::user_persistence::AutoTopUpSettings, String> {
Ok(crate::services::user_persistence::AutoTopUpSettings {
enabled: self.enabled.unwrap_or(false),
threshold_amount_usd: self.threshold_amount.unwrap_or(dec!(10.0)),
topup_amount_usd: self.topup_amount.unwrap_or(dec!(25.0)),
            payment_method_base_data: self.payment_method_base_data,
            // payment_method_id moved into payment_method_base_data
daily_limit_usd: self.daily_limit,
monthly_limit_usd: self.monthly_limit,
// created_at: chrono::Utc::now() - moved to base_data,
// updated_at: chrono::Utc::now() - moved to base_data,
})
}
}

View File

@@ -0,0 +1,105 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Shopping Cart Models
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CartItem {
pub product_id: u32,
pub quantity: u32,
pub selected_specifications: HashMap<String, serde_json::Value>,
pub added_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Cart {
pub base_data: BaseModelData,
pub items: Vec<CartItem>,
}
impl Cart {
    pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
items: Vec::default(),
}
}
pub fn add_item(&mut self, item: CartItem) {
// Check if item already exists and update quantity
if let Some(existing_item) = self.items.iter_mut()
.find(|i| i.product_id == item.product_id && i.selected_specifications == item.selected_specifications) {
existing_item.quantity += item.quantity;
} else {
self.items.push(item);
}
}
    pub fn remove_item(&mut self, product_id: u32) -> bool {
let initial_len = self.items.len();
self.items.retain(|item| item.product_id != product_id);
if self.items.len() != initial_len {
self.base_data.updated_at = Utc::now();
true
} else {
false
}
}
    pub fn update_item_quantity(&mut self, product_id: u32, quantity: u32) -> bool {
        if let Some(item) = self.items.iter_mut().find(|i| i.product_id == product_id) {
            if quantity == 0 {
                return self.remove_item(product_id);
            }
            item.quantity = quantity;
self.base_data.updated_at = Utc::now();
true
} else {
false
}
}
pub fn clear(&mut self) {
self.items.clear();
self.base_data.updated_at = Utc::now();
}
pub fn get_total_items(&self) -> u32 {
self.items.iter().map(|item| item.quantity).sum()
}
pub fn is_empty(&self) -> bool {
self.items.is_empty()
}
}
impl CartItem {
    pub fn new(product_id: u32, quantity: u32) -> Self {
let now = Utc::now();
Self {
product_id,
quantity,
selected_specifications: HashMap::default(),
added_at: now,
// updated_at: now - moved to base_data,
}
}
    pub fn with_specifications(
        product_id: u32,
        quantity: u32,
        specifications: HashMap<String, serde_json::Value>,
    ) -> Self {
let now = Utc::now();
Self {
product_id,
quantity,
selected_specifications: specifications,
added_at: now,
// updated_at: now - moved to base_data,
}
}
}
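// Usage sketch (illustrative): basic cart flow with the u32-keyed API above. Product
// id 101 is a placeholder; the second add is merged into the existing line item.
fn example_cart() -> Cart {
    let mut cart = Cart::new();
    cart.add_item(CartItem::new(101, 2));
    cart.add_item(CartItem::new(101, 1)); // same product and specs, quantity becomes 3
    cart.update_item_quantity(101, 5);
    assert_eq!(cart.get_total_items(), 5);
    cart
}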

View File

@@ -0,0 +1,90 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Configurable currency support for any currency type
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Currency {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
pub code: String, // USD, EUR, BTC, ETH, etc.
pub name: String,
pub symbol: String,
pub currency_type: CurrencyType,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CurrencyType {
Fiat,
Cryptocurrency,
Token,
Points, // For loyalty/reward systems
Custom(String), // For marketplace-specific currencies
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Price {
pub base_amount: Decimal, // Amount in marketplace base currency
pub base_currency: String,
pub display_currency: String,
pub display_amount: Decimal,
pub formatted_display: String,
pub conversion_rate: Decimal,
pub conversion_timestamp: DateTime<Utc>,
}
impl Currency {
pub fn new(
code: String,
name: String,
symbol: String,
currency_type: CurrencyType,
) -> Self {
Self {
base_data: BaseModelData::new(),
code,
name,
symbol,
currency_type,
}
}
}
impl Price {
pub fn new(
base_amount: Decimal,
base_currency: String,
display_currency: String,
conversion_rate: Decimal,
) -> Self {
let display_amount = base_amount * conversion_rate;
// Use proper currency symbol formatting - this will be updated by the currency service
Self {
base_amount,
base_currency: base_currency.clone(),
display_currency: display_currency.clone(),
display_amount,
formatted_display: format!("{} {}", display_amount.round_dp(2), display_currency),
conversion_rate,
conversion_timestamp: Utc::now(),
}
}
pub fn format_with_symbol(&self, symbol: &str) -> String {
format!("{} {}",
self.display_amount.round_dp(2),
symbol
)
}
pub fn update_formatted_display(&mut self, formatted: String) {
self.formatted_display = formatted;
}
}
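// Usage sketch (illustrative): 10.00 USD converted at an assumed 0.92 EUR/USD rate
// gives a display_amount of 9.20; rust_decimal_macros is assumed to be available
// (builders.rs already depends on it).
fn example_price() -> Price {
    use rust_decimal_macros::dec;
    let p = Price::new(dec!(10.00), "USD".to_string(), "EUR".to_string(), dec!(0.92));
    assert_eq!(p.display_amount, dec!(9.20));
    p
}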

View File

@@ -0,0 +1,30 @@
use serde::{Deserialize, Serialize};

/// Farmer-specific data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FarmerData {
pub total_nodes: i32,
pub online_nodes: i32,
pub total_capacity: NodeCapacity,
pub used_capacity: NodeCapacity,
pub monthly_earnings_usd: i32,
pub total_earnings_usd: i32,
pub uptime_percentage: f32,
pub nodes: Vec<FarmNode>,
pub earnings_history: Vec<EarningsRecord>,
pub slice_templates: Vec<crate::models::product::Product>,
pub active_slices: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FarmerSettings {
#[serde(default)]
pub auto_accept_deployments: bool,
#[serde(default = "default_maintenance_window")]
pub maintenance_window: String,
#[serde(default)]
pub notification_preferences: NotificationSettings,
pub minimum_deployment_duration: i32, // hours
pub preferred_regions: Vec<String>,
#[serde(default)]
pub default_slice_customizations: Option<std::collections::HashMap<String, serde_json::Value>>, // Placeholder for DefaultSliceFormat
}

View File

@@ -0,0 +1,17 @@
// Export models - starting with basic models first
// pub mod user;
// pub mod product;
// pub mod currency;
// pub mod order;
// pub mod pool;
// pub mod builders; // Re-enabled with essential builders only
// pub mod cart;
// pub mod payment;
// pub mod service;
// pub mod slice;
// pub mod node;
pub mod app;
// Re-export commonly used types for easier access
pub use app::{App, PublishedApp, PublishedAppBuilder, ResourceUtilization, AppBuilder, DeploymentStatus};
// pub mod node; // Temporarily disabled - has many service dependencies

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,8 @@
# Notes
all IDs of base objects are u32
Cart is front-end specific
currency and exchange rates should be calculated by the client
presentation concerns such as decimal formatting shouldn't live in the base models
a purchase doesn't need to know whether it is instant or via the cart
all base objects already contain created_at and updated_at, so these fields don't need to be added to every model

View File

@@ -0,0 +1,402 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Order {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
    pub user_id: u32,
pub items: Vec<OrderItem>,
pub subtotal_base: Decimal, // In base currency
pub total_base: Decimal, // In base currency
pub base_currency: String,
pub currency_used: String, // Currency user paid in
pub currency_total: Decimal, // Amount in user's currency
pub conversion_rate: Decimal, // Rate used for conversion
pub status: OrderStatus,
pub payment_method: String,
pub payment_details: Option<PaymentDetails>,
pub billing_address: Option<Address>,
pub shipping_address: Option<Address>,
pub notes: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct OrderItem {
    pub product_id: u32,
pub product_name: String,
pub product_category: String,
pub quantity: u32,
pub unit_price_base: Decimal, // In base currency
pub total_price_base: Decimal, // In base currency
pub specifications: HashMap<String, serde_json::Value>,
    pub provider_id: u32,
pub provider_name: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum OrderStatus {
Pending,
Confirmed,
Processing,
Deployed,
Completed,
Cancelled,
Refunded,
Failed,
}
/// Order summary for display purposes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrderSummary {
pub subtotal: Decimal,
pub tax: Decimal,
pub shipping: Decimal,
pub discount: Decimal,
pub total: Decimal,
pub currency: String,
pub item_count: u32,
}
impl Order {
pub fn new(
        user_id: u32,
base_currency: String,
currency_used: String,
conversion_rate: Decimal,
) -> Self {
Self {
base_data: BaseModelData::new(),
user_id,
items: Vec::default(),
subtotal_base: Decimal::from(0),
total_base: Decimal::from(0),
base_currency,
currency_used,
currency_total: Decimal::from(0),
conversion_rate,
status: OrderStatus::Pending,
payment_method: String::new(),
payment_details: None,
billing_address: None,
shipping_address: None,
notes: None,
}
}
pub fn add_item(&mut self, item: OrderItem) {
self.items.push(item);
self.calculate_totals();
}
pub fn calculate_totals(&mut self) {
self.subtotal_base = self.items.iter()
.map(|item| item.total_price_base)
.sum();
self.total_base = self.subtotal_base; // Add taxes, fees, etc. here
self.currency_total = self.total_base * self.conversion_rate;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn update_status(&mut self, status: OrderStatus) {
self.status = status;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn set_payment_details(&mut self, payment_details: PaymentDetails) {
self.payment_details = Some(payment_details);
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn get_item_count(&self) -> u32 {
self.items.iter().map(|item| item.quantity).sum()
}
}
impl OrderItem {
pub fn new(
        product_id: u32,
product_name: String,
product_category: String,
quantity: u32,
unit_price_base: Decimal,
        provider_id: u32,
provider_name: String,
) -> Self {
Self {
product_id,
product_name,
product_category,
quantity,
unit_price_base,
total_price_base: unit_price_base * Decimal::from(quantity),
specifications: HashMap::default(),
provider_id,
provider_name,
}
}
pub fn add_specification(&mut self, key: String, value: serde_json::Value) {
self.specifications.insert(key, value);
}
pub fn update_quantity(&mut self, quantity: u32) {
self.quantity = quantity;
self.total_price_base = self.unit_price_base * Decimal::from(quantity);
}
}
#[derive(Default)]
pub struct OrderBuilder {
id: Option<String>,
user_id: Option<String>,
items: Vec<OrderItem>,
subtotal_base: Option<Decimal>,
total_base: Option<Decimal>,
base_currency: Option<String>,
currency_used: Option<String>,
currency_total: Option<Decimal>,
conversion_rate: Option<Decimal>,
status: Option<OrderStatus>,
payment_method: Option<String>,
payment_details: Option<PaymentDetails>,
billing_address: Option<Address>,
shipping_address: Option<Address>,
notes: Option<String>,
purchase_type: Option<PurchaseType>,
// created_at: Option<DateTime<Utc>> - moved to base_data,
// updated_at: Option<DateTime<Utc>> - moved to base_data,
}
impl OrderBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn id(mut self, id: impl Into<String>) -> Self {
self.id = Some(id.into());
self
}
pub fn user_id(mut self, user_id: impl Into<String>) -> Self {
self.user_id = Some(user_id.into());
self
}
pub fn add_item(mut self, item: OrderItem) -> Self {
self.items.push(item);
self
}
pub fn items(mut self, items: Vec<OrderItem>) -> Self {
self.items = items;
self
}
pub fn subtotal_base(mut self, subtotal: Decimal) -> Self {
self.subtotal_base = Some(subtotal);
self
}
pub fn total_base(mut self, total: Decimal) -> Self {
self.total_base = Some(total);
self
}
pub fn base_currency(mut self, currency: impl Into<String>) -> Self {
self.base_currency = Some(currency.into());
self
}
pub fn currency_used(mut self, currency: impl Into<String>) -> Self {
self.currency_used = Some(currency.into());
self
}
pub fn currency_total(mut self, total: Decimal) -> Self {
self.currency_total = Some(total);
self
}
pub fn conversion_rate(mut self, rate: Decimal) -> Self {
self.conversion_rate = Some(rate);
self
}
pub fn status(mut self, status: OrderStatus) -> Self {
self.status = Some(status);
self
}
pub fn payment_method(mut self, method: impl Into<String>) -> Self {
self.payment_method = Some(method.into());
self
}
pub fn payment_details(mut self, details: PaymentDetails) -> Self {
self.payment_details = Some(details);
self
}
pub fn billing_address(mut self, address: Address) -> Self {
self.billing_address = Some(address);
self
}
pub fn shipping_address(mut self, address: Address) -> Self {
self.shipping_address = Some(address);
self
}
pub fn notes(mut self, notes: impl Into<String>) -> Self {
self.notes = Some(notes.into());
self
}
pub fn purchase_type(mut self, purchase_type: PurchaseType) -> Self {
self.purchase_type = Some(purchase_type);
self
}
pub fn build(self) -> Result<Order, String> {
let now = Utc::now();
let subtotal = self.subtotal_base.unwrap_or_else(|| {
self.items.iter().map(|item| item.total_price_base).sum()
});
Ok(Order {
base_data: BaseModelData::new(),
user_id: self.user_id.ok_or("user_id is required")?,
items: self.items,
subtotal_base: subtotal,
total_base: self.total_base.unwrap_or(subtotal),
base_currency: self.base_currency.unwrap_or_else(|| "USD".to_string()),
currency_used: self.currency_used.unwrap_or_else(|| "USD".to_string()),
currency_total: self.currency_total.unwrap_or(subtotal),
conversion_rate: self.conversion_rate.unwrap_or_else(|| Decimal::from(1)),
status: self.status.unwrap_or(OrderStatus::Pending),
payment_method: self.payment_method.unwrap_or_else(|| "credit_card".to_string()),
payment_details: self.payment_details,
billing_address: self.billing_address,
shipping_address: self.shipping_address,
notes: self.notes,
purchase_type: self.purchase_type.unwrap_or(PurchaseType::Cart),
// created_at: self.base_data.created_at.unwrap_or(now) - moved to base_data,
// updated_at: self.base_data.updated_at.unwrap_or(now) - moved to base_data,
})
}
}
impl Order {
pub fn builder() -> OrderBuilder {
OrderBuilder::new()
}
}
#[derive(Default)]
pub struct OrderItemBuilder {
product_id: Option<String>,
product_name: Option<String>,
product_category: Option<String>,
quantity: Option<u32>,
unit_price_base: Option<Decimal>,
total_price_base: Option<Decimal>,
specifications: HashMap<String, Value>,
provider_id: Option<String>,
provider_name: Option<String>,
}
impl OrderItemBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn product_id(mut self, id: impl Into<String>) -> Self {
self.product_id = Some(id.into());
self
}
pub fn product_name(mut self, name: impl Into<String>) -> Self {
self.product_name = Some(name.into());
self
}
pub fn product_category(mut self, category: impl Into<String>) -> Self {
self.product_category = Some(category.into());
self
}
pub fn quantity(mut self, quantity: u32) -> Self {
self.quantity = Some(quantity);
self
}
pub fn unit_price_base(mut self, price: Decimal) -> Self {
self.unit_price_base = Some(price);
self
}
pub fn add_specification(mut self, key: impl Into<String>, value: Value) -> Self {
self.specifications.insert(key.into(), value);
self
}
pub fn provider_id(mut self, id: impl Into<String>) -> Self {
self.provider_id = Some(id.into());
self
}
pub fn provider_name(mut self, name: impl Into<String>) -> Self {
self.provider_name = Some(name.into());
self
}
pub fn build(self) -> Result<OrderItem, String> {
let quantity = self.quantity.unwrap_or(1);
let unit_price = self.unit_price_base.ok_or("unit_price_base is required")?;
let total_price = self.total_price_base.unwrap_or(unit_price * Decimal::from(quantity));
Ok(OrderItem {
product_id: self.product_id.ok_or("product_id is required")?,
product_name: self.product_name.ok_or("product_name is required")?,
product_category: self.product_category.ok_or("product_category is required")?,
quantity,
unit_price_base: unit_price,
total_price_base: total_price,
specifications: self.specifications,
provider_id: self.provider_id.ok_or("provider_id is required")?,
provider_name: self.provider_name.ok_or("provider_name is required")?,
})
}
}
impl OrderItem {
pub fn builder() -> OrderItemBuilder {
OrderItemBuilder::new()
}
}
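// Minimal usage sketch (illustrative, not part of the original module): exercises the
// builders and the totals logic defined above. The example ids and prices are made up.
#[cfg(test)]
mod order_sketch {
    use super::*;
    use rust_decimal::Decimal;

    #[test]
    fn build_order_with_one_item() {
        let item = OrderItem::builder()
            .product_id("prod-123") // hypothetical product id
            .product_name("Compute Slice S")
            .product_category("compute_slices")
            .quantity(2)
            .unit_price_base(Decimal::from(10))
            .provider_id("farmer-42") // hypothetical provider id
            .provider_name("Example Farmer")
            .build()
            .expect("order item should build");

        let mut order = Order::new(
            "user-1".to_string(),
            "USD".to_string(),
            "USD".to_string(),
            Decimal::ONE,
        );
        order.add_item(item); // recalculates subtotal_base, total_base and currency_total
        assert_eq!(order.get_item_count(), 2);
        assert_eq!(order.subtotal_base, Decimal::from(20));
    }
}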

View File

@@ -0,0 +1,77 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaymentDetails {
pub payment_id: String,
pub payment_method: PaymentMethod,
pub transaction_id: Option<String>,
pub payment_status: PaymentStatus,
pub payment_timestamp: Option<DateTime<Utc>>,
pub failure_reason: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaymentMethod {
CreditCard {
last_four: String,
card_type: String,
},
BankTransfer {
bank_name: String,
account_last_four: String,
},
Cryptocurrency {
currency: String,
wallet_address: String,
},
Token {
token_type: String,
wallet_address: String,
},
Mock {
method_name: String,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaymentStatus {
Pending,
Processing,
Completed,
Failed,
Cancelled,
Refunded,
}
impl PaymentDetails {
pub fn new(payment_id: impl Into<String>, payment_method: PaymentMethod) -> Self {
Self {
payment_id: payment_id.into(),
payment_method,
transaction_id: None,
payment_status: PaymentStatus::Pending,
payment_timestamp: None,
failure_reason: None,
}
}
pub fn mark_completed(&mut self, transaction_id: String) {
self.transaction_id = Some(transaction_id);
self.payment_status = PaymentStatus::Completed;
self.payment_timestamp = Some(Utc::now());
}
pub fn mark_failed(&mut self, reason: String) {
self.payment_status = PaymentStatus::Failed;
self.failure_reason = Some(reason);
self.payment_timestamp = Some(Utc::now());
}
}
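// Usage sketch (illustrative, not part of the original module): a payment starts out
// Pending and is resolved through mark_completed / mark_failed.
#[cfg(test)]
mod payment_details_sketch {
    use super::*;

    #[test]
    fn payment_lifecycle() {
        let mut details = PaymentDetails::new(
            "pay-001", // hypothetical payment id
            PaymentMethod::Mock { method_name: "test".to_string() },
        );
        assert!(matches!(details.payment_status, PaymentStatus::Pending));

        details.mark_completed("tx-abc".to_string());
        assert!(matches!(details.payment_status, PaymentStatus::Completed));
        assert_eq!(details.transaction_id.as_deref(), Some("tx-abc"));
    }
}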

View File

@@ -0,0 +1,105 @@
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LiquidityPool {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub token_a: String,
pub token_b: String,
pub reserve_a: Decimal,
pub reserve_b: Decimal,
pub exchange_rate: Decimal,
pub liquidity: Decimal,
pub volume_24h: Decimal,
pub fee_percentage: Decimal,
pub status: PoolStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PoolStatus {
Active,
Paused,
Maintenance,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExchangeRequest {
pub pool_id: String, // references the LiquidityPool
pub from_token: String,
pub to_token: String,
pub amount: Decimal,
pub min_receive: Option<Decimal>,
pub slippage_tolerance: Option<Decimal>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExchangeResponse {
pub success: bool,
pub message: String,
pub transaction_id: Option<String>,
pub from_amount: Option<Decimal>,
pub to_amount: Option<Decimal>,
pub exchange_rate: Option<Decimal>,
pub fee: Option<Decimal>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StakeRequest {
pub amount: Decimal,
pub duration_months: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StakePosition {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub user_id: String,
pub amount: Decimal,
pub start_date: DateTime<Utc>,
pub end_date: DateTime<Utc>,
pub discount_percentage: Decimal,
pub reputation_bonus: i32,
pub status: StakeStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StakeStatus {
Active,
Completed,
Withdrawn,
}
/// Pool analytics data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolAnalytics {
pub price_history: Vec<PricePoint>,
pub volume_history: Vec<VolumePoint>,
pub liquidity_distribution: HashMap<String, Decimal>,
pub staking_distribution: HashMap<String, i32>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PricePoint {
pub timestamp: DateTime<Utc>,
pub price: Decimal,
pub volume: Decimal,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VolumePoint {
pub date: String,
pub volume: Decimal,
}

View File

@@ -0,0 +1,660 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
/// Generic product structure that can represent any marketplace item
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Product {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
pub name: String,
pub category: ProductCategory,
pub description: String,
pub price: Price,
pub attributes: HashMap<String, ProductAttribute>, // Generic attributes
pub provider_id: String,
pub provider_name: String,
pub availability: ProductAvailability,
pub metadata: ProductMetadata, // Extensible metadata
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Price {
pub base_amount: Decimal,
pub currency: u32,
}
/// Configurable product categories
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProductCategory {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub display_name: String,
pub description: String,
pub attribute_schema: Vec<AttributeDefinition>, // Defines allowed attributes
pub parent_category: Option<String>,
pub is_active: bool,
}
/// Generic attribute system for any product type
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProductAttribute {
pub key: String,
pub value: serde_json::Value,
pub attribute_type: AttributeType,
pub is_searchable: bool,
pub is_filterable: bool,
pub display_order: Option<u32>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AttributeType {
Text,
Number,
SliceConfiguration,
Boolean,
Select(Vec<String>), // Predefined options
MultiSelect(Vec<String>),
Range { min: f64, max: f64 },
Custom(String), // For marketplace-specific types
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct AttributeDefinition {
pub key: String,
pub name: String,
pub attribute_type: AttributeType,
pub is_required: bool,
pub is_searchable: bool,
pub is_filterable: bool,
pub validation_rules: Vec<ValidationRule>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ValidationRule {
MinLength(usize),
MaxLength(usize),
MinValue(f64),
MaxValue(f64),
Pattern(String),
Custom(String),
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ProductAvailability {
Available,
Limited,
Unavailable,
PreOrder,
Custom(String), // For marketplace-specific availability states
}
impl Default for ProductAvailability {
fn default() -> Self {
ProductAvailability::Available
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ProductVisibility {
Public,
Private,
Draft,
Archived,
}
impl Default for ProductVisibility {
fn default() -> Self {
ProductVisibility::Public
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct ProductMetadata {
pub tags: Vec<String>,
pub location: Option<String>,
pub rating: Option<f32>,
pub review_count: u32,
pub featured: bool,
pub last_updated: chrono::DateTime<chrono::Utc>,
pub visibility: ProductVisibility,
pub seo_keywords: Vec<String>,
pub custom_fields: HashMap<String, serde_json::Value>,
}
/// Support for different pricing models
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PricingModel {
OneTime, // Single purchase
Recurring { interval: String }, // Subscription
UsageBased { unit: String }, // Pay per use
Tiered(Vec<PriceTier>), // Volume discounts
Custom(String), // Marketplace-specific
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PriceTier {
pub min_quantity: u32,
pub max_quantity: Option<u32>,
pub price_per_unit: Decimal,
pub discount_percentage: Option<f32>,
}
impl Product {
pub fn new(
name: String,
category: ProductCategory,
description: String,
price: Price,
provider_id: String,
provider_name: String,
) -> Self {
Self {
base_data: BaseModelData::new(),
name,
category,
description,
price,
attributes: HashMap::default(),
provider_id,
provider_name,
availability: ProductAvailability::Available,
metadata: ProductMetadata {
tags: Vec::default(),
location: None,
rating: None,
review_count: 0,
featured: false,
last_updated: chrono::Utc::now(),
visibility: ProductVisibility::Public,
seo_keywords: Vec::new(),
custom_fields: HashMap::default(),
},
}
}
pub fn add_attribute(&mut self, key: String, value: serde_json::Value, attribute_type: AttributeType) {
let attribute = ProductAttribute {
key: key.clone(),
value,
attribute_type,
is_searchable: true,
is_filterable: true,
display_order: None,
};
self.attributes.insert(key, attribute);
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn set_featured(&mut self, featured: bool) {
self.metadata.featured = featured;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn add_tag(&mut self, tag: String) {
if !self.metadata.tags.contains(&tag) {
self.metadata.tags.push(tag);
self.base_data.modified_at = Utc::now().timestamp();
}
}
pub fn set_rating(&mut self, rating: f32, review_count: u32) {
self.metadata.rating = Some(rating);
self.metadata.review_count = review_count;
self.base_data.modified_at = Utc::now().timestamp();
}
}
impl ProductCategory {
pub fn new(name: String, display_name: String, description: String) -> Self {
Self {
base_data: BaseModelData::new(),
name,
display_name,
description,
attribute_schema: Vec::default(),
parent_category: None,
is_active: true,
}
}
/// Add attribute definition to category schema
pub fn add_attribute_definition(&mut self, definition: AttributeDefinition) {
self.attribute_schema.push(definition);
}
}
impl Product {
/// Create a slice product from farmer configuration
pub fn create_slice_product(
farmer_id: String,
farmer_name: String,
slice_name: String,
slice_config: SliceConfiguration,
price_per_hour: Decimal,
) -> Self {
let category = ProductCategory {
base_data: BaseModelData::new(),
// id: "compute_slices".to_string() - moved to base_data,
name: "Compute Slices".to_string(),
display_name: "Compute Slices".to_string(),
description: "Virtual compute resources".to_string(),
attribute_schema: Vec::new(),
parent_category: None,
is_active: true,
};
let price = Price {
base_amount: price_per_hour,
currency: 1, // USD currency ID
};
let mut product = Self::new(
slice_name,
category,
format!("Compute slice with {} vCPU, {}GB RAM, {}GB storage",
slice_config.cpu_cores, slice_config.memory_gb, slice_config.storage_gb),
price,
farmer_id,
farmer_name,
);
// Add slice-specific attributes
product.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.cpu_cores)),
AttributeType::Number,
);
product.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.memory_gb)),
AttributeType::Number,
);
product.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.storage_gb)),
AttributeType::Number,
);
product.add_attribute(
"bandwidth_mbps".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.bandwidth_mbps)),
AttributeType::Number,
);
product.add_attribute(
"min_uptime_sla".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(slice_config.min_uptime_sla as f64).unwrap()),
AttributeType::Number,
);
product.add_attribute(
"public_ips".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.public_ips)),
AttributeType::Number,
);
if let Some(ref node_id) = slice_config.node_id {
product.add_attribute(
"node_id".to_string(),
serde_json::Value::String(node_id.clone()),
AttributeType::Text,
);
}
product.add_attribute(
"slice_type".to_string(),
serde_json::Value::String(format!("{:?}", slice_config.slice_type)),
AttributeType::Text,
);
// Add slice configuration as a complex attribute
product.add_attribute(
"slice_configuration".to_string(),
serde_json::to_value(&slice_config).unwrap(),
AttributeType::SliceConfiguration,
);
// Add relevant tags
product.add_tag("compute".to_string());
product.add_tag("slice".to_string());
product.add_tag(format!("{:?}", slice_config.slice_type).to_lowercase());
product
}
/// Check if this product is a slice
pub fn is_slice(&self) -> bool {
self.category.id == "compute_slices" ||
self.attributes.contains_key("slice_configuration")
}
/// Get slice configuration from product attributes
pub fn get_slice_configuration(&self) -> Option<SliceConfiguration> {
self.attributes.get("slice_configuration")
.and_then(|attr| serde_json::from_value(attr.value.clone()).ok())
}
/// Update slice configuration
pub fn update_slice_configuration(&mut self, config: SliceConfiguration) {
if self.is_slice() {
self.add_attribute(
"slice_configuration".to_string(),
serde_json::to_value(&config).unwrap(),
AttributeType::SliceConfiguration,
);
// Update individual attributes for searchability
self.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.cpu_cores)),
AttributeType::Number,
);
self.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.memory_gb)),
AttributeType::Number,
);
self.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.storage_gb)),
AttributeType::Number,
);
}
}
/// Check if slice fits within node capacity
pub fn slice_fits_in_node(&self, node_capacity: &crate::models::user::NodeCapacity) -> bool {
if let Some(config) = self.get_slice_configuration() {
config.cpu_cores <= node_capacity.cpu_cores &&
config.memory_gb <= node_capacity.memory_gb &&
config.storage_gb <= node_capacity.storage_gb &&
config.bandwidth_mbps <= node_capacity.bandwidth_mbps
} else {
false
}
}
/// Create a full node product from a FarmNode
pub fn create_full_node_product(
node: &crate::models::user::FarmNode,
farmer_email: &str,
farmer_name: &str,
) -> Self {
let category = ProductCategory {
base_data: BaseModelData::new(),
// id: "3nodes".to_string() - moved to base_data,
name: "3Nodes".to_string(),
display_name: "3Nodes".to_string(),
description: "Full node rentals".to_string(),
attribute_schema: Vec::new(),
parent_category: None,
is_active: true,
};
let price = Price {
base_amount: node.rental_options
.as_ref()
.and_then(|opts| opts.full_node_pricing.as_ref())
.map(|pricing| pricing.monthly)
.unwrap_or_else(|| Decimal::from(200)), // Default price
currency: 1, // USD currency ID
};
let mut product = Product {
base_data: BaseModelData::new(),
name: format!("Full Node: {}", node.name),
category,
description: format!(
"Exclusive access to {} with {} CPU cores, {}GB RAM, {}GB storage in {}",
node.name, node.capacity.cpu_cores, node.capacity.memory_gb,
node.capacity.storage_gb, node.location
),
price,
attributes: HashMap::new(),
provider_id: farmer_email.to_string(),
provider_name: farmer_name.to_string(),
availability: match node.availability_status {
crate::models::user::NodeAvailabilityStatus::Available => ProductAvailability::Available,
crate::models::user::NodeAvailabilityStatus::PartiallyRented => ProductAvailability::Limited,
_ => ProductAvailability::Unavailable,
},
metadata: ProductMetadata {
tags: vec!["full-node".to_string(), "exclusive".to_string(), node.region.clone()],
location: Some(node.location.clone()),
rating: None,
review_count: 0,
featured: false,
last_updated: chrono::Utc::now(),
visibility: ProductVisibility::Public,
seo_keywords: Vec::new(),
custom_fields: HashMap::new(),
},
};
// Add node-specific attributes
product.add_attribute(
"node_id".to_string(),
serde_json::Value::String(node.id.clone()),
AttributeType::Text,
);
product.add_attribute(
"rental_type".to_string(),
serde_json::Value::String("full_node".to_string()),
AttributeType::Text,
);
product.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.cpu_cores)),
AttributeType::Number,
);
product.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.memory_gb)),
AttributeType::Number,
);
product.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.storage_gb)),
AttributeType::Number,
);
product.add_attribute(
"bandwidth_mbps".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.bandwidth_mbps)),
AttributeType::Number,
);
product.add_attribute(
"location".to_string(),
serde_json::Value::String(node.location.clone()),
AttributeType::Text,
);
product.add_attribute(
"uptime_percentage".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(node.uptime_percentage as f64).unwrap_or_else(|| serde_json::Number::from(0))),
AttributeType::Number,
);
product.add_attribute(
"health_score".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(node.health_score as f64).unwrap_or_else(|| serde_json::Number::from(0))),
AttributeType::Number,
);
product
}
/// Check if this product represents a full node
pub fn is_full_node(&self) -> bool {
self.attributes.get("rental_type")
.and_then(|attr| attr.value.as_str())
.map(|s| s == "full_node")
.unwrap_or(false)
}
/// Get the node ID if this is a node product
pub fn get_node_id(&self) -> Option<String> {
self.attributes.get("node_id")
.and_then(|attr| attr.value.as_str())
.map(|s| s.to_string())
}
}
impl ProductCategory {
pub fn set_parent_category(&mut self, parent_id: String) {
self.parent_category = Some(parent_id);
}
}
impl AttributeDefinition {
pub fn new(
key: String,
name: String,
attribute_type: AttributeType,
is_required: bool,
) -> Self {
Self {
key,
name,
attribute_type,
is_required,
is_searchable: true,
is_filterable: true,
validation_rules: Vec::default(),
}
}
pub fn add_validation_rule(&mut self, rule: ValidationRule) {
self.validation_rules.push(rule);
}
}
#[derive(Default)]
pub struct ProductBuilder {
id: Option<String>,
name: Option<String>,
category_id: Option<String>,
description: Option<String>,
base_price: Option<Decimal>,
base_currency: Option<String>,
attributes: HashMap<String, ProductAttribute>,
provider_id: Option<String>,
provider_name: Option<String>,
availability: Option<ProductAvailability>,
metadata: Option<ProductMetadata>,
// created_at: Option<DateTime<Utc>> - moved to base_data,
// updated_at: Option<DateTime<Utc>> - moved to base_data,
}
impl ProductBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn id(mut self, id: impl Into<String>) -> Self {
self.id = Some(id.into());
self
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn category_id(mut self, category_id: impl Into<String>) -> Self {
self.category_id = Some(category_id.into());
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn base_price(mut self, price: Decimal) -> Self {
self.base_price = Some(price);
self
}
pub fn base_currency(mut self, currency: impl Into<String>) -> Self {
self.base_currency = Some(currency.into());
self
}
pub fn add_attribute(mut self, key: impl Into<String>, attribute: ProductAttribute) -> Self {
self.attributes.insert(key.into(), attribute);
self
}
pub fn provider_id(mut self, provider_id: impl Into<String>) -> Self {
self.provider_id = Some(provider_id.into());
self
}
pub fn provider_name(mut self, provider_name: impl Into<String>) -> Self {
self.provider_name = Some(provider_name.into());
self
}
pub fn availability(mut self, availability: ProductAvailability) -> Self {
self.availability = Some(availability);
self
}
pub fn metadata(mut self, metadata: ProductMetadata) -> Self {
self.metadata = Some(metadata);
self
}
pub fn build(self) -> Result<Product, String> {
let category_id = self.category_id.ok_or("category_id is required")?;
Ok(Product {
base_data: BaseModelData::new(),
name: self.name.ok_or("name is required")?,
// Category/currency lookup is not wired in yet: the category is built from the
// supplied id and the price defaults to currency 1 (USD).
category: ProductCategory::new(category_id.clone(), category_id, String::new()),
description: self.description.unwrap_or_default(),
price: Price {
base_amount: self.base_price.ok_or("base_price is required")?,
currency: 1, // USD currency ID
},
attributes: self.attributes,
provider_id: self.provider_id.ok_or("provider_id is required")?,
provider_name: self.provider_name.ok_or("provider_name is required")?,
availability: self.availability.unwrap_or_default(),
metadata: self.metadata.unwrap_or_default(),
// created_at / updated_at are managed by base_data
})
}
}
impl Product {
pub fn builder() -> ProductBuilder {
ProductBuilder::new()
}
}
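// Usage sketch (illustrative, not part of the original module): create a category and a
// product directly, then attach a searchable attribute. Names and prices are made up.
#[cfg(test)]
mod product_sketch {
    use super::*;
    use rust_decimal::Decimal;

    #[test]
    fn new_product_with_attribute() {
        let category = ProductCategory::new(
            "Compute Slices".to_string(),
            "Compute Slices".to_string(),
            "Virtual compute resources".to_string(),
        );
        let mut product = Product::new(
            "Example VM".to_string(),
            category,
            "2 vCPU / 4 GB".to_string(),
            Price { base_amount: Decimal::from(5), currency: 1 },
            "farmer-42".to_string(),
            "Example Farmer".to_string(),
        );
        product.add_attribute(
            "cpu_cores".to_string(),
            serde_json::Value::Number(serde_json::Number::from(2)),
            AttributeType::Number,
        );
        assert!(product.attributes.contains_key("cpu_cores"));
        assert!(!product.is_full_node());
    }
}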

View File

@@ -0,0 +1,297 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize, Deserializer};
use rust_decimal::Decimal;
use std::str::FromStr;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Service Provider-specific data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceProviderData {
pub active_services: i32,
pub total_clients: i32,
pub monthly_revenue_usd: i32,
pub total_revenue_usd: i32,
pub service_rating: f32,
pub services: Vec<Service>,
pub client_requests: Vec<ServiceRequest>,
pub revenue_history: Vec<RevenueRecord>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Service {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub category: String,
pub description: String,
pub price_per_hour_usd: i32,
pub status: String,
pub clients: i32,
pub rating: f32,
pub total_hours: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceRequest {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub client_name: String,
pub service_name: String,
pub status: String,
pub requested_date: String,
pub estimated_hours: i32,
pub budget: i32,
pub priority: String,
#[serde(default)]
pub progress: Option<i32>,
#[serde(default)]
pub completed_date: Option<String>,
#[serde(default)]
pub client_email: Option<String>,
#[serde(default)]
pub client_phone: Option<String>,
#[serde(default)]
pub description: Option<String>,
#[serde(default)]
pub created_date: Option<String>,
}
/// Service booking record for customers who purchase services
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceBooking {
/// Base model data (includes id, created_at, updated_at); shares its id with the originating ServiceRequest
pub base_data: BaseModelData,
pub service_id: String, // Reference to the original service
pub service_name: String,
pub provider_email: String, // Who provides the service
pub customer_email: String, // Who booked the service
pub budget: i32,
pub estimated_hours: i32,
pub status: String, // "Pending", "In Progress", "Completed"
pub requested_date: String,
pub priority: String,
pub description: Option<String>,
pub booking_date: String, // When customer booked
pub client_phone: Option<String>,
pub progress: Option<i32>,
pub completed_date: Option<String>,
}
/// Customer Service-specific data (for users who book services)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomerServiceData {
pub active_bookings: i32,
pub completed_bookings: i32,
pub total_spent: i32,
pub monthly_spending: i32,
pub average_rating_given: f32,
pub service_bookings: Vec<ServiceBooking>,
pub favorite_providers: Vec<String>,
pub spending_history: Vec<SpendingRecord>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpendingRecord {
pub date: String,
pub amount: i32,
pub service_name: String,
pub provider_name: String,
}
#[derive(Default)]
pub struct ServiceBookingBuilder {
id: Option<String>,
service_id: Option<String>,
service_name: Option<String>,
provider_email: Option<String>,
customer_email: Option<String>,
budget: Option<i32>,
estimated_hours: Option<i32>,
status: Option<String>,
requested_date: Option<String>,
priority: Option<String>,
description: Option<String>,
booking_date: Option<String>,
}
impl ServiceBookingBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn id(mut self, id: &str) -> Self {
self.id = Some(id.to_string());
self
}
pub fn service_id(mut self, service_id: &str) -> Self {
self.service_id = Some(service_id.to_string());
self
}
pub fn service_name(mut self, service_name: &str) -> Self {
self.service_name = Some(service_name.to_string());
self
}
pub fn provider_email(mut self, provider_email: &str) -> Self {
self.provider_email = Some(provider_email.to_string());
self
}
pub fn customer_email(mut self, customer_email: &str) -> Self {
self.customer_email = Some(customer_email.to_string());
self
}
pub fn budget(mut self, budget: i32) -> Self {
self.budget = Some(budget);
self
}
pub fn estimated_hours(mut self, hours: i32) -> Self {
self.estimated_hours = Some(hours);
self
}
pub fn status(mut self, status: &str) -> Self {
self.status = Some(status.to_string());
self
}
pub fn requested_date(mut self, date: &str) -> Self {
self.requested_date = Some(date.to_string());
self
}
pub fn priority(mut self, priority: &str) -> Self {
self.priority = Some(priority.to_string());
self
}
pub fn description(mut self, description: Option<String>) -> Self {
self.description = description;
self
}
pub fn booking_date(mut self, date: &str) -> Self {
self.booking_date = Some(date.to_string());
self
}
pub fn build(self) -> Result<ServiceBooking, String> {
Ok(ServiceBooking {
base_data: BaseModelData::new(),
service_id: self.service_id.ok_or("Service ID is required")?,
service_name: self.service_name.ok_or("Service name is required")?,
provider_email: self.provider_email.ok_or("Provider email is required")?,
customer_email: self.customer_email.ok_or("Customer email is required")?,
budget: self.budget.unwrap_or(0),
estimated_hours: self.estimated_hours.unwrap_or(0),
status: self.status.unwrap_or_else(|| "Pending".to_string()),
requested_date: self.requested_date.unwrap_or_else(|| chrono::Utc::now().format("%Y-%m-%d").to_string()),
priority: self.priority.unwrap_or_else(|| "Medium".to_string()),
description: self.description,
booking_date: self.booking_date.unwrap_or_else(|| chrono::Utc::now().format("%Y-%m-%d").to_string()),
client_phone: None,
progress: None,
completed_date: None,
})
}
}
impl ServiceBooking {
pub fn builder() -> ServiceBookingBuilder {
ServiceBookingBuilder::new()
}
}
// =============================================================================
// CUSTOMER SERVICE DATA BUILDER
// =============================================================================
#[derive(Default)]
pub struct CustomerServiceDataBuilder {
active_bookings: Option<i32>,
completed_bookings: Option<i32>,
total_spent: Option<i32>,
monthly_spending: Option<i32>,
average_rating_given: Option<f32>,
service_bookings: Option<Vec<crate::models::user::ServiceBooking>>,
favorite_providers: Option<Vec<String>>,
spending_history: Option<Vec<crate::models::user::SpendingRecord>>,
}
impl CustomerServiceDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn active_bookings(mut self, count: i32) -> Self {
self.active_bookings = Some(count);
self
}
pub fn completed_bookings(mut self, count: i32) -> Self {
self.completed_bookings = Some(count);
self
}
pub fn total_spent(mut self, amount: i32) -> Self {
self.total_spent = Some(amount);
self
}
pub fn monthly_spending(mut self, amount: i32) -> Self {
self.monthly_spending = Some(amount);
self
}
pub fn average_rating_given(mut self, rating: f32) -> Self {
self.average_rating_given = Some(rating);
self
}
pub fn service_bookings(mut self, bookings: Vec<crate::models::user::ServiceBooking>) -> Self {
self.service_bookings = Some(bookings);
self
}
pub fn favorite_providers(mut self, providers: Vec<String>) -> Self {
self.favorite_providers = Some(providers);
self
}
pub fn spending_history(mut self, history: Vec<crate::models::user::SpendingRecord>) -> Self {
self.spending_history = Some(history);
self
}
pub fn build(self) -> Result<crate::models::user::CustomerServiceData, String> {
Ok(crate::models::user::CustomerServiceData {
active_bookings: self.active_bookings.unwrap_or(0),
completed_bookings: self.completed_bookings.unwrap_or(0),
total_spent: self.total_spent.unwrap_or(0),
monthly_spending: self.monthly_spending.unwrap_or(0),
average_rating_given: self.average_rating_given.unwrap_or(0.0),
service_bookings: self.service_bookings.unwrap_or_default(),
favorite_providers: self.favorite_providers.unwrap_or_default(),
spending_history: self.spending_history.unwrap_or_default(),
})
}
}
impl crate::models::user::CustomerServiceData {
pub fn builder() -> CustomerServiceDataBuilder {
CustomerServiceDataBuilder::new()
}
}

View File

@@ -0,0 +1,200 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Slice configuration data structure for product attributes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SliceConfiguration {
pub cpu_cores: i32,
pub memory_gb: i32,
pub storage_gb: i32,
pub bandwidth_mbps: i32,
pub min_uptime_sla: f32,
pub public_ips: i32,
pub node_id: Option<String>,
pub slice_type: SliceType,
#[serde(default)]
pub pricing: SlicePricing,
}
/// Enhanced pricing structure for slices with multiple time periods
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlicePricing {
pub hourly: Decimal,
pub daily: Decimal,
pub monthly: Decimal,
pub yearly: Decimal,
}
impl Default for SlicePricing {
fn default() -> Self {
Self {
hourly: Decimal::ZERO,
daily: Decimal::ZERO,
monthly: Decimal::ZERO,
yearly: Decimal::ZERO,
}
}
}
impl SlicePricing {
/// Create pricing from hourly rate with automatic calculation
pub fn from_hourly(hourly_rate: Decimal, daily_discount: f32, monthly_discount: f32, yearly_discount: f32) -> Self {
let base_daily = hourly_rate * Decimal::from(24);
let base_monthly = hourly_rate * Decimal::from(24 * 30);
let base_yearly = hourly_rate * Decimal::from(24 * 365);
Self {
hourly: hourly_rate,
daily: base_daily * Decimal::try_from(1.0 - daily_discount / 100.0).unwrap_or(Decimal::ONE),
monthly: base_monthly * Decimal::try_from(1.0 - monthly_discount / 100.0).unwrap_or(Decimal::ONE),
yearly: base_yearly * Decimal::try_from(1.0 - yearly_discount / 100.0).unwrap_or(Decimal::ONE),
}
}
/// Calculate savings compared to hourly rate
pub fn calculate_savings(&self) -> (Decimal, Decimal, Decimal) {
let hourly_equivalent_daily = self.hourly * Decimal::from(24);
let hourly_equivalent_monthly = self.hourly * Decimal::from(24 * 30);
let hourly_equivalent_yearly = self.hourly * Decimal::from(24 * 365);
let daily_savings = hourly_equivalent_daily - self.daily;
let monthly_savings = hourly_equivalent_monthly - self.monthly;
let yearly_savings = hourly_equivalent_yearly - self.yearly;
(daily_savings, monthly_savings, yearly_savings)
}
}
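// Worked sketch (illustrative, not part of the original module): from_hourly() prices a
// day as 24 h, a month as 720 h and a year as 8760 h before applying the percentage
// discounts, so every discounted period should come in below its hourly equivalent.
#[cfg(test)]
mod slice_pricing_sketch {
    use super::*;
    use rust_decimal::Decimal;

    #[test]
    fn from_hourly_applies_discounts() {
        let pricing = SlicePricing::from_hourly(Decimal::from(1), 5.0, 15.0, 25.0);
        assert_eq!(pricing.hourly, Decimal::from(1));
        assert!(pricing.daily < Decimal::from(24));
        assert!(pricing.monthly < Decimal::from(24 * 30));
        assert!(pricing.yearly < Decimal::from(24 * 365));

        let (daily_savings, monthly_savings, yearly_savings) = pricing.calculate_savings();
        assert!(daily_savings > Decimal::ZERO);
        assert!(monthly_savings > Decimal::ZERO);
        assert!(yearly_savings > Decimal::ZERO);
    }
}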
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SliceType {
Basic,
Standard,
Premium,
Custom,
}
#[derive(Default)]
pub struct SliceProductBuilder {
farmer_id: Option<String>,
farmer_name: Option<String>,
slice_name: Option<String>,
cpu_cores: Option<i32>,
memory_gb: Option<i32>,
storage_gb: Option<i32>,
bandwidth_mbps: Option<i32>,
min_uptime_sla: Option<f32>,
public_ips: Option<i32>,
node_id: Option<String>,
slice_type: Option<crate::models::tfmarketplace::product::SliceType>,
price_per_hour: Option<rust_decimal::Decimal>,
}
impl SliceProductBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn farmer_id(mut self, farmer_id: &str) -> Self {
self.farmer_id = Some(farmer_id.into());
self
}
pub fn farmer_name(mut self, farmer_name: impl Into<String>) -> Self {
self.farmer_name = Some(farmer_name.into());
self
}
pub fn slice_name(mut self, slice_name: impl Into<String>) -> Self {
self.slice_name = Some(slice_name.into());
self
}
pub fn cpu_cores(mut self, cpu_cores: i32) -> Self {
self.cpu_cores = Some(cpu_cores);
self
}
pub fn memory_gb(mut self, memory_gb: i32) -> Self {
self.memory_gb = Some(memory_gb);
self
}
pub fn storage_gb(mut self, storage_gb: i32) -> Self {
self.storage_gb = Some(storage_gb);
self
}
pub fn bandwidth_mbps(mut self, bandwidth_mbps: i32) -> Self {
self.bandwidth_mbps = Some(bandwidth_mbps);
self
}
pub fn min_uptime_sla(mut self, min_uptime_sla: f32) -> Self {
self.min_uptime_sla = Some(min_uptime_sla);
self
}
pub fn public_ips(mut self, public_ips: i32) -> Self {
self.public_ips = Some(public_ips);
self
}
pub fn node_id(mut self, node_id: &str) -> Self {
self.node_id = Some(node_id.into());
self
}
pub fn slice_type(mut self, slice_type: crate::models::tfmarketplace::product::SliceType) -> Self {
self.slice_type = Some(slice_type);
self
}
pub fn price_per_hour(mut self, price_per_hour: rust_decimal::Decimal) -> Self {
self.price_per_hour = Some(price_per_hour);
self
}
pub fn build(self) -> Result<crate::models::tfmarketplace::product::Product, String> {
let farmer_id = self.farmer_id.ok_or("farmer_id is required")?;
let farmer_name = self.farmer_name.ok_or("farmer_name is required")?;
let slice_name = self.slice_name.ok_or("slice_name is required")?;
let cpu_cores = self.cpu_cores.ok_or("cpu_cores is required")?;
let memory_gb = self.memory_gb.ok_or("memory_gb is required")?;
let storage_gb = self.storage_gb.ok_or("storage_gb is required")?;
let bandwidth_mbps = self.bandwidth_mbps.ok_or("bandwidth_mbps is required")?;
let price_per_hour = self.price_per_hour.ok_or("price_per_hour is required")?;
let slice_config = crate::models::tfmarketplace::product::SliceConfiguration {
cpu_cores,
memory_gb,
storage_gb,
bandwidth_mbps,
min_uptime_sla: self.min_uptime_sla.unwrap_or(99.0),
public_ips: self.public_ips.unwrap_or(0),
node_id: self.node_id,
slice_type: self.slice_type.unwrap_or(crate::models::tfmarketplace::product::SliceType::Basic),
pricing: crate::models::tfmarketplace::product::SlicePricing::from_hourly(
price_per_hour,
5.0, // 5% daily discount
15.0, // 15% monthly discount
25.0 // 25% yearly discount
),
};
Ok(crate::models::tfmarketplace::product::Product::create_slice_product(
farmer_id,
farmer_name,
slice_name,
slice_config,
price_per_hour,
))
}
}
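// Usage sketch (illustrative, not part of the original module): the builder collects the
// slice specification and delegates to Product::create_slice_product with the default
// 5% / 15% / 25% discount ladder. The ids below are hypothetical.
//
// let slice_product = SliceProductBuilder::new()
//     .farmer_id("farmer-42")
//     .farmer_name("Example Farmer")
//     .slice_name("Slice S")
//     .cpu_cores(2)
//     .memory_gb(4)
//     .storage_gb(50)
//     .bandwidth_mbps(100)
//     .price_per_hour(rust_decimal::Decimal::from(1))
//     .build()?;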

File diff suppressed because it is too large Load Diff

View File

@@ -1,277 +0,0 @@
# OurDB API Reference
This document provides a comprehensive reference for the OurDB Rust API.
## Table of Contents
1. [Configuration](#configuration)
2. [Database Operations](#database-operations)
- [Creating and Opening](#creating-and-opening)
- [Setting Data](#setting-data)
- [Getting Data](#getting-data)
- [Deleting Data](#deleting-data)
- [History Tracking](#history-tracking)
3. [Error Handling](#error-handling)
4. [Advanced Usage](#advanced-usage)
- [Custom File Size](#custom-file-size)
- [Custom Key Size](#custom-key-size)
5. [Performance Considerations](#performance-considerations)
## Configuration
### OurDBConfig
The `OurDBConfig` struct is used to configure a new OurDB instance.
```rust
pub struct OurDBConfig {
pub path: PathBuf,
pub incremental_mode: bool,
pub file_size: Option<usize>,
pub keysize: Option<u8>,
}
```
| Field | Type | Description |
|-------|------|-------------|
| `path` | `PathBuf` | Path to the database directory |
| `incremental_mode` | `bool` | Whether to use auto-incremented IDs (true) or user-provided IDs (false) |
| `file_size` | `Option<usize>` | Maximum size of each database file in bytes (default: 500MB) |
| `keysize` | `Option<u8>` | Size of keys in bytes (default: 4, valid values: 2, 3, 4, 6) |
Example:
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: Some(1024 * 1024 * 100), // 100MB
keysize: Some(4), // 4-byte keys
};
```
## Database Operations
### Creating and Opening
#### `OurDB::new`
Creates a new OurDB instance or opens an existing one.
```rust
pub fn new(config: OurDBConfig) -> Result<OurDB, Error>
```
Example:
```rust
let mut db = OurDB::new(config)?;
```
### Setting Data
#### `OurDB::set`
Sets a value in the database. In incremental mode, if no ID is provided, a new ID is generated.
```rust
pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error>
```
The `OurDBSetArgs` struct has the following fields:
```rust
pub struct OurDBSetArgs<'a> {
pub id: Option<u32>,
pub data: &'a [u8],
}
```
Example with auto-generated ID:
```rust
let id = db.set(OurDBSetArgs {
id: None,
data: b"Hello, World!",
})?;
```
Example with explicit ID:
```rust
db.set(OurDBSetArgs {
id: Some(42),
data: b"Hello, World!",
})?;
```
### Getting Data
#### `OurDB::get`
Retrieves a value from the database by ID.
```rust
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>
```
Example:
```rust
let data = db.get(42)?;
```
### Deleting Data
#### `OurDB::delete`
Deletes a value from the database by ID.
```rust
pub fn delete(&mut self, id: u32) -> Result<(), Error>
```
Example:
```rust
db.delete(42)?;
```
### History Tracking
#### `OurDB::get_history`
Retrieves the history of values for a given ID, up to the specified depth.
```rust
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>
```
Example:
```rust
// Get the last 5 versions of the record
let history = db.get_history(42, 5)?;
// Process each version (most recent first)
for (i, version) in history.iter().enumerate() {
println!("Version {}: {:?}", i, version);
}
```
### Other Operations
#### `OurDB::get_next_id`
Returns the next ID that will be assigned in incremental mode.
```rust
pub fn get_next_id(&self) -> Result<u32, Error>
```
Example:
```rust
let next_id = db.get_next_id()?;
```
#### `OurDB::close`
Closes the database, ensuring all data is flushed to disk.
```rust
pub fn close(&mut self) -> Result<(), Error>
```
Example:
```rust
db.close()?;
```
#### `OurDB::destroy`
Closes the database and deletes all database files.
```rust
pub fn destroy(&mut self) -> Result<(), Error>
```
Example:
```rust
db.destroy()?;
```
## Error Handling
OurDB uses the `thiserror` crate to define error types. The main error type is `ourdb::Error`.
```rust
pub enum Error {
IoError(std::io::Error),
InvalidKeySize,
InvalidId,
RecordNotFound,
InvalidCrc,
NotIncrementalMode,
DatabaseClosed,
// ...
}
```
All OurDB operations that can fail return a `Result<T, Error>` which can be handled using Rust's standard error handling mechanisms.
Example:
```rust
match db.get(42) {
Ok(data) => println!("Found data: {:?}", data),
Err(ourdb::Error::RecordNotFound) => println!("Record not found"),
Err(e) => eprintln!("Error: {}", e),
}
```
## Advanced Usage
### Custom File Size
You can configure the maximum size of each database file:
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: Some(1024 * 1024 * 10), // 10MB per file
keysize: None,
};
```
Smaller file sizes can be useful for:
- Limiting memory usage when reading files
- Improving performance on systems with limited memory
- Easier backup and file management
### Custom Key Size
OurDB supports different key sizes (2, 3, 4, or 6 bytes):
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: None,
keysize: Some(6), // 6-byte keys
};
```
Key size considerations:
- 2 bytes: Up to 65,536 records
- 3 bytes: Up to 16,777,216 records
- 4 bytes: Up to 4,294,967,296 records (default)
- 6 bytes: Up to 281,474,976,710,656 records
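The capacity follows directly from the key width: a `keysize` of `n` bytes can address `2^(8*n)` records. A quick sanity check (a sketch, not part of the OurDB API):
```rust
fn max_records(keysize: u8) -> u128 {
    1u128 << (8 * keysize as u32) // 2^(8 * keysize)
}

assert_eq!(max_records(2), 65_536);
assert_eq!(max_records(4), 4_294_967_296);
assert_eq!(max_records(6), 281_474_976_710_656);
```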
## Performance Considerations
For optimal performance:
1. **Choose appropriate key size**: Use the smallest key size that can accommodate your expected number of records.
2. **Configure file size**: For large databases, consider using smaller file sizes to improve memory usage.
3. **Batch operations**: When inserting or updating many records, consider batching operations to minimize disk I/O (see the sketch after this list).
4. **Close properly**: Always call `close()` when you're done with the database to ensure data is properly flushed to disk.
5. **Reuse OurDB instance**: Creating a new OurDB instance has overhead, so reuse the same instance for multiple operations when possible.
6. **Consider memory usage**: The lookup table is loaded into memory, so very large databases may require significant RAM.
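As a rough illustration of point 3, the sketch below (a helper you would write yourself, not an OurDB API) keeps a single `OurDB` instance open and loops `set` over a batch of records:
```rust
fn set_batch(db: &mut OurDB, records: &[&[u8]]) -> Result<Vec<u32>, ourdb::Error> {
    let mut ids = Vec::with_capacity(records.len());
    for &data in records {
        ids.push(db.set(OurDBSetArgs { id: None, data })?);
    }
    Ok(ids)
}
```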

806
ourdb/Cargo.lock generated
View File

@@ -1,806 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bitflags"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_lex"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "errno"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hermit-abi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
[[package]]
name = "linux-raw-sys"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"criterion",
"log",
"rand",
"tempfile",
"thiserror",
]
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.15",
]
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustix"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "rustversion"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "syn"
version = "2.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
dependencies = [
"fastrand",
"getrandom 0.3.2",
"once_cell",
"rustix",
"windows-sys",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -1,32 +0,0 @@
[package]
name = "ourdb"
version = "0.1.0"
edition = "2021"
description = "A lightweight, efficient key-value database with history tracking capabilities"
authors = ["OurWorld Team"]
[dependencies]
crc32fast = "1.3.2"
thiserror = "1.0.40"
log = "0.4.17"
rand = "0.8.5"
[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"
# [[bench]]
# name = "ourdb_benchmarks"
# harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "advanced_usage"
path = "examples/advanced_usage.rs"
[[example]]
name = "benchmark"
path = "examples/benchmark.rs"

View File

@@ -1,135 +0,0 @@
# OurDB
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This Rust implementation offers a robust and performant solution for applications requiring simple but reliable data storage.
## Features
- Simple key-value storage with history tracking
- Data integrity verification using CRC32
- Support for multiple backend files for large datasets
- Lookup table for fast data retrieval
- Incremental mode for auto-generated IDs
- Memory and disk-based lookup tables
## Limitations
- Maximum data size per entry is 65,535 bytes (~64KB) due to the 2-byte size field in the record header
## Usage
### Basic Example
```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
fn main() -> Result<(), ourdb::Error> {
// Create a new database
let config = OurDBConfig {
path: PathBuf::from("/tmp/ourdb"),
incremental_mode: true,
file_size: None, // Use default (500MB)
    keysize: None, // Use default (4 bytes)
    reset: None, // Don't reset an existing database
};
let mut db = OurDB::new(config)?;
// Store data (with auto-generated ID in incremental mode)
let data = b"Hello, OurDB!";
let id = db.set(OurDBSetArgs { id: None, data })?;
println!("Stored data with ID: {}", id);
// Retrieve data
let retrieved = db.get(id)?;
println!("Retrieved: {}", String::from_utf8_lossy(&retrieved));
// Update data
let updated_data = b"Updated data";
db.set(OurDBSetArgs { id: Some(id), data: updated_data })?;
// Get history (returns most recent first)
let history = db.get_history(id, 2)?;
for (i, entry) in history.iter().enumerate() {
println!("History {}: {}", i, String::from_utf8_lossy(entry));
}
// Delete data
db.delete(id)?;
// Close the database
db.close()?;
Ok(())
}
```
### Key-Value Mode vs Incremental Mode
OurDB supports two operating modes:
1. **Key-Value Mode** (`incremental_mode: false`): You must provide IDs explicitly when storing data.
2. **Incremental Mode** (`incremental_mode: true`): IDs are auto-generated when not provided.
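For example, a write in key-value mode supplies its own ID. The following is a minimal sketch reusing the configuration fields from the basic example above; the path and the ID `42` are only illustrative:
```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

fn main() -> Result<(), ourdb::Error> {
    let config = OurDBConfig {
        path: PathBuf::from("/tmp/ourdb_kv"),
        incremental_mode: false, // key-value mode: the caller chooses IDs
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config)?;

    // The ID must be provided explicitly in this mode
    let id = db.set(OurDBSetArgs { id: Some(42), data: b"explicit ID" })?;
    assert_eq!(id, 42);

    db.close()?;
    Ok(())
}
```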
### Configuration Options
- `path`: Directory for database storage
- `incremental_mode`: Whether to use auto-increment mode
- `file_size`: Maximum file size (default: 500MB)
- `keysize`: Size of lookup table entries (2-6 bytes)
- 2: For databases with < 65,536 records
- 3: For databases with < 16,777,216 records
- 4: For databases with < 4,294,967,296 records (default)
- 6: For large databases requiring multiple files
## Architecture
OurDB consists of three main components:
1. **Frontend API**: Provides the public interface for database operations
2. **Lookup Table**: Maps keys to physical locations in the backend storage
3. **Backend Storage**: Manages the actual data persistence in files
### Record Format
Each record in the backend storage includes:
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data
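As a sketch, the 12-byte header can be packed as follows, assuming the little-endian field encoding used by the backend and a previous location already serialized to 6 bytes:
```rust
/// Sketch: pack the record header described above.
/// Layout: 2-byte size, 4-byte CRC32, 6-byte previous location.
fn pack_header(data: &[u8], crc: u32, prev_location: [u8; 6]) -> [u8; 12] {
    let size = data.len() as u16; // data must fit in 65,535 bytes
    let mut header = [0u8; 12];
    header[0..2].copy_from_slice(&size.to_le_bytes()); // data size
    header[2..6].copy_from_slice(&crc.to_le_bytes());  // CRC32 checksum
    header[6..12].copy_from_slice(&prev_location);     // previous record location
    header
}
```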
## Documentation
Additional documentation is available in the repository:
- [API Reference](API.md): Detailed API documentation
- [Migration Guide](MIGRATION.md): Guide for migrating from the V implementation
- [Architecture](architecture.md): Design and implementation details
## Examples
The repository includes several examples to demonstrate OurDB usage:
- `basic_usage.rs`: Simple operations with OurDB
- `advanced_usage.rs`: More complex features including both operation modes
- `benchmark.rs`: Performance benchmarking tool
Run an example with:
```bash
cargo run --example basic_usage
cargo run --example advanced_usage
cargo run --example benchmark
```
## Performance
OurDB is designed for efficiency and minimal overhead. The benchmark example can be used to evaluate performance on your specific hardware and workload.
Typical performance metrics on modern hardware:
- **Write**: 10,000+ operations per second
- **Read**: 50,000+ operations per second
## License
This project is licensed under the MIT License.

View File

@@ -1,439 +0,0 @@
# OurDB: Architecture for V to Rust Port
## 1. Overview
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This document outlines the architecture for porting OurDB from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
## 2. Current Architecture (V Implementation)
The current V implementation of OurDB consists of three main components in a layered architecture:
```mermaid
graph TD
A[Client Code] --> B[Frontend API]
B --> C[Lookup Table]
B --> D[Backend Storage]
C --> D
```
### 2.1 Frontend (db.v)
The frontend provides the public API for database operations and coordinates between the lookup table and backend storage components.
Key responsibilities:
- Exposing high-level operations (set, get, delete, history)
- Managing incremental ID generation in auto-increment mode
- Coordinating data flow between lookup and backend components
- Handling database lifecycle (open, close, destroy)
### 2.2 Lookup Table (lookup.v)
The lookup table maps keys to physical locations in the backend storage.
Key responsibilities:
- Maintaining key-to-location mapping
- Optimizing key sizes based on database configuration
- Supporting both memory and disk-based lookup tables
- Handling sparse data efficiently
- Providing next ID generation for incremental mode
### 2.3 Backend Storage (backend.v)
The backend storage manages the actual data persistence in files.
Key responsibilities:
- Managing physical data storage in files
- Ensuring data integrity with CRC32 checksums
- Supporting multiple file backends for large datasets
- Implementing low-level read/write operations
- Tracking record history through linked locations
### 2.4 Core Data Structures
#### OurDB
```v
@[heap]
pub struct OurDB {
mut:
lookup &LookupTable
pub:
path string // directory for storage
incremental_mode bool
file_size u32 = 500 * (1 << 20) // 500MB
pub mut:
file os.File
file_nr u16 // the file which is open
last_used_file_nr u16
}
```
#### LookupTable
```v
pub struct LookupTable {
keysize u8
lookuppath string
mut:
data []u8
incremental ?u32 // points to next empty slot if incremental mode is enabled
}
```
#### Location
```v
pub struct Location {
pub mut:
file_nr u16
position u32
}
```
### 2.5 Storage Format
#### Record Format
Each record in the backend storage includes:
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data
#### Lookup Table Optimization
The lookup table automatically optimizes its key size based on the database configuration:
- 2 bytes: For databases with < 65,536 records
- 3 bytes: For databases with < 16,777,216 records
- 4 bytes: For databases with < 4,294,967,296 records
- 6 bytes: For large databases requiring multiple files
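A small sketch of how that choice could be made from the expected maximum record count (the function name and signature are illustrative, not part of the V code):
```rust
/// Sketch: pick a lookup keysize from the expected record count.
fn choose_keysize(record_nr_max: u64, multiple_files: bool) -> u8 {
    if multiple_files {
        6 // 2-byte file_nr + 4-byte position
    } else if record_nr_max < (1 << 16) {
        2
    } else if record_nr_max < (1 << 24) {
        3
    } else {
        4
    }
}
```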
## 3. Proposed Rust Architecture
The Rust implementation will maintain the same layered architecture while leveraging Rust's type system, ownership model, and error handling.
```mermaid
graph TD
A[Client Code] --> B[OurDB API]
B --> C[LookupTable]
B --> D[Backend]
C --> D
E[Error Handling] --> B
E --> C
E --> D
F[Configuration] --> B
```
### 3.1 Core Components
#### 3.1.1 OurDB (API Layer)
```rust
pub struct OurDB {
path: String,
incremental_mode: bool,
file_size: u32,
lookup: LookupTable,
file: Option<std::fs::File>,
file_nr: u16,
last_used_file_nr: u16,
}
impl OurDB {
pub fn new(config: OurDBConfig) -> Result<Self, Error>;
pub fn set(&mut self, id: Option<u32>, data: &[u8]) -> Result<u32, Error>;
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>;
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>;
pub fn delete(&mut self, id: u32) -> Result<(), Error>;
pub fn get_next_id(&mut self) -> Result<u32, Error>;
pub fn close(&mut self) -> Result<(), Error>;
pub fn destroy(&mut self) -> Result<(), Error>;
}
```
#### 3.1.2 LookupTable
```rust
pub struct LookupTable {
keysize: u8,
lookuppath: String,
data: Vec<u8>,
incremental: Option<u32>,
}
impl LookupTable {
fn new(config: LookupConfig) -> Result<Self, Error>;
fn get(&self, id: u32) -> Result<Location, Error>;
fn set(&mut self, id: u32, location: Location) -> Result<(), Error>;
fn delete(&mut self, id: u32) -> Result<(), Error>;
fn get_next_id(&self) -> Result<u32, Error>;
fn increment_index(&mut self) -> Result<(), Error>;
fn export_data(&self, path: &str) -> Result<(), Error>;
fn import_data(&mut self, path: &str) -> Result<(), Error>;
fn export_sparse(&self, path: &str) -> Result<(), Error>;
fn import_sparse(&mut self, path: &str) -> Result<(), Error>;
}
```
#### 3.1.3 Location
```rust
pub struct Location {
file_nr: u16,
position: u32,
}
impl Location {
fn new(bytes: &[u8], keysize: u8) -> Result<Self, Error>;
fn to_bytes(&self) -> Result<Vec<u8>, Error>;
fn to_u64(&self) -> u64;
}
```
#### 3.1.4 Backend
The backend functionality will be implemented as methods on the OurDB struct:
```rust
impl OurDB {
fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error>;
fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error>;
fn get_file_nr(&mut self) -> Result<u16, Error>;
fn set_(&mut self, id: u32, old_location: Location, data: &[u8]) -> Result<(), Error>;
fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error>;
fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error>;
fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error>;
fn close_(&mut self);
}
```
#### 3.1.5 Configuration
```rust
pub struct OurDBConfig {
pub record_nr_max: u32,
pub record_size_max: u32,
pub file_size: u32,
pub path: String,
pub incremental_mode: bool,
pub reset: bool,
}
struct LookupConfig {
size: u32,
keysize: u8,
lookuppath: String,
incremental_mode: bool,
}
```
#### 3.1.6 Error Handling
```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("Invalid key size: {0}")]
InvalidKeySize(u8),
#[error("Record not found: {0}")]
RecordNotFound(u32),
#[error("Data corruption: CRC mismatch")]
DataCorruption,
#[error("Index out of bounds: {0}")]
IndexOutOfBounds(u32),
#[error("Incremental mode not enabled")]
IncrementalNotEnabled,
#[error("Lookup table is full")]
LookupTableFull,
#[error("Invalid file number: {0}")]
InvalidFileNumber(u16),
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
## 4. Implementation Strategy
### 4.1 Phase 1: Core Data Structures
1. Implement the `Location` struct with serialization/deserialization
2. Implement the `Error` enum for error handling
3. Implement the configuration structures
### 4.2 Phase 2: Lookup Table
1. Implement the `LookupTable` struct with memory-based storage
2. Add disk-based storage support
3. Implement key size optimization
4. Add incremental ID support
5. Implement import/export functionality
### 4.3 Phase 3: Backend Storage
1. Implement file management functions
2. Implement record serialization/deserialization with CRC32
3. Implement history tracking through linked locations
4. Add support for multiple backend files
### 4.4 Phase 4: Frontend API
1. Implement the `OurDB` struct with core operations
2. Add high-level API methods (set, get, delete, history)
3. Implement database lifecycle management
### 4.5 Phase 5: Testing and Optimization
1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing OurDB files
## 5. Implementation Considerations
### 5.1 Memory Management
Leverage Rust's ownership model for safe and efficient memory management:
- Use `Vec<u8>` for data buffers instead of raw pointers
- Implement proper RAII for file handles
- Use references and borrows to avoid unnecessary copying
- Consider using `Bytes` from the `bytes` crate for zero-copy operations
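A minimal sketch of the RAII and borrowing points (illustrative types, not part of the proposed API):
```rust
use std::fs::File;
use std::io::Write;

/// Sketch: the inner `File` is closed automatically when this value is
/// dropped, so no explicit close call is needed (RAII).
struct DataFile {
    file: File,
}

impl DataFile {
    /// Borrow the payload (`&[u8]`) instead of taking an owned `Vec<u8>`,
    /// so callers keep their buffers and no copy is made.
    fn append(&mut self, data: &[u8]) -> std::io::Result<()> {
        self.file.write_all(data)
    }
}
```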
### 5.2 Error Handling
Use Rust's `Result` type for comprehensive error handling:
- Define custom error types for OurDB-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait
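For example, with `Io(#[from] std::io::Error)` on the error enum, the `?` operator converts I/O failures automatically. A sketch assuming the `Error` type proposed in section 3.1.6:
```rust
use std::fs::File;
use std::io::Read;

/// Sketch: `?` propagates and converts `std::io::Error` into the crate's
/// `Error` via the derived `From` implementation.
fn read_lookup_file(path: &str) -> Result<Vec<u8>, Error> {
    let mut buf = Vec::new();
    File::open(path)?.read_to_end(&mut buf)?;
    Ok(buf)
}
```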
### 5.3 File I/O
Optimize file operations for performance:
- Use `BufReader` and `BufWriter` for buffered I/O
- Implement proper file locking for concurrent access
- Consider memory-mapped files for lookup tables
- Use `seek` and `read_exact` for precise positioning
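A sketch of precise positioning with buffered I/O; the 12-byte header size matches the record format described earlier:
```rust
use std::fs::File;
use std::io::{BufReader, Read, Seek, SeekFrom};

/// Sketch: seek to a known offset and read exactly one record header.
fn read_header_at(path: &str, position: u64) -> std::io::Result<[u8; 12]> {
    let mut reader = BufReader::new(File::open(path)?);
    reader.seek(SeekFrom::Start(position))?;
    let mut header = [0u8; 12];
    reader.read_exact(&mut header)?;
    Ok(header)
}
```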
### 5.4 Concurrency
Consider thread safety for concurrent database access:
- Use interior mutability patterns where appropriate
- Implement `Send` and `Sync` traits for thread safety
- Consider using `RwLock` for shared read access
- Provide clear documentation on thread safety guarantees
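One possible sharing pattern is sketched below. It assumes the final `OurDB` type is `Send`; because the proposed API takes `&mut self` even for reads, every call here needs the write lock, and a `&self` read path would be required to benefit from shared read access:
```rust
use std::sync::{Arc, RwLock};
use std::thread;

/// Sketch: share one database handle between threads behind a lock.
fn concurrent_writes(db: OurDB) {
    let shared = Arc::new(RwLock::new(db));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let db = Arc::clone(&shared);
            thread::spawn(move || {
                let mut guard = db.write().unwrap();
                let _ = guard.set(OurDBSetArgs { id: None, data: b"hello" });
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```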
### 5.5 Performance Optimizations
Identify opportunities for performance improvements:
- Use memory-mapped files for lookup tables
- Implement caching for frequently accessed records
- Use zero-copy operations where possible
- Consider async I/O for non-blocking operations
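As one example, a naive read-through cache for hot records (a sketch only; a real cache would bound its size and invalidate entries on `set`/`delete`):
```rust
use std::collections::HashMap;

/// Sketch: cache record payloads by ID in front of the database.
struct CachedDB {
    db: OurDB,
    cache: HashMap<u32, Vec<u8>>,
}

impl CachedDB {
    fn get(&mut self, id: u32) -> Result<Vec<u8>, Error> {
        if let Some(data) = self.cache.get(&id) {
            return Ok(data.clone());
        }
        let data = self.db.get(id)?;
        self.cache.insert(id, data.clone());
        Ok(data)
    }
}
```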
## 6. Testing Strategy
### 6.1 Unit Tests
Write comprehensive unit tests for each component:
- Test `Location` serialization/deserialization
- Test `LookupTable` operations
- Test backend storage functions
- Test error handling
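For instance, a round-trip test for `Location` serialization could look like the sketch below, which assumes the constructor and `to_bytes` signatures proposed in section 3.1.3 (adjust to the final API once implemented):
```rust
#[cfg(test)]
mod location_tests {
    use ourdb::Location;

    /// Sketch: serializing and deserializing a Location should round-trip.
    #[test]
    fn location_roundtrip_keysize_6() {
        let loc = Location { file_nr: 3, position: 12_345 };
        let bytes = loc.to_bytes().expect("serialize");
        let decoded = Location::new(&bytes, 6).expect("deserialize");
        assert_eq!(loc, decoded);
    }
}
```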
### 6.2 Integration Tests
Write integration tests for the complete system:
- Test database creation and configuration
- Test basic CRUD operations
- Test history tracking
- Test incremental ID generation
- Test file management
### 6.3 Compatibility Tests
Ensure compatibility with existing OurDB files:
- Test reading existing V-created OurDB files
- Test writing files that can be read by the V implementation
- Test migration scenarios
### 6.4 Performance Tests
Benchmark performance against the V implementation:
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different database sizes
- Test with different record sizes
## 7. Project Structure
```
ourdb/
├── Cargo.toml
├── src/
│ ├── lib.rs # Public API and re-exports
│ ├── ourdb.rs # OurDB implementation (frontend)
│ ├── lookup.rs # Lookup table implementation
│ ├── location.rs # Location struct implementation
│ ├── backend.rs # Backend storage implementation
│ ├── error.rs # Error types
│ ├── config.rs # Configuration structures
│ └── utils.rs # Utility functions
├── tests/
│ ├── unit/ # Unit tests
│ ├── integration/ # Integration tests
│ └── compatibility/ # Compatibility tests
└── examples/
├── basic.rs # Basic usage example
├── history.rs # History tracking example
└── client_server.rs # Client-server example
```
## 8. Dependencies
The Rust implementation will use the following dependencies:
- `thiserror` for error handling
- `crc32fast` for CRC32 calculation
- `bytes` for efficient byte manipulation
- `memmap2` for memory-mapped files (optional)
- `serde` for serialization (optional, for future extensions)
- `log` for logging
- `criterion` for benchmarking
## 9. Compatibility Considerations
To ensure compatibility with the V implementation:
1. Maintain the same file format for data storage
2. Preserve the lookup table format
3. Keep the same CRC32 calculation method
4. Ensure identical behavior for incremental ID generation
5. Maintain the same history tracking mechanism
## 10. Future Extensions
Potential future extensions to consider:
1. Async API for non-blocking operations
2. Transactions support
3. Better concurrency control
4. Compression support
5. Encryption support
6. Streaming API for large values
7. Iterators for scanning records
8. Secondary indexes
## 11. Conclusion
This architecture provides a roadmap for porting OurDB from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing OurDB files
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage

View File

@@ -1,231 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
use std::time::Instant;
fn main() -> Result<(), ourdb::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("ourdb_advanced_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating database at: {}", db_path.display());
// Demonstrate key-value mode (non-incremental)
key_value_mode_example(&db_path)?;
// Demonstrate incremental mode
incremental_mode_example(&db_path)?;
// Demonstrate performance benchmarking
performance_benchmark(&db_path)?;
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("Cleaned up database directory");
} else {
println!("Database kept at: {}", db_path.display());
}
Ok(())
}
fn key_value_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
println!("\n=== Key-Value Mode Example ===");
let db_path = base_path.join("key_value");
std::fs::create_dir_all(&db_path)?;
// Create a new database with key-value mode (non-incremental)
let config = OurDBConfig {
path: db_path,
incremental_mode: false,
file_size: Some(1024 * 1024), // 1MB for testing
keysize: Some(2), // Small key size for demonstration
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
// In key-value mode, we must provide IDs explicitly
let custom_ids = [100, 200, 300, 400, 500];
// Store data with custom IDs
for (i, &id) in custom_ids.iter().enumerate() {
let data = format!("Record with custom ID {}", id);
db.set(OurDBSetArgs {
id: Some(id),
data: data.as_bytes(),
})?;
println!("Stored record {} with custom ID: {}", i + 1, id);
}
// Retrieve data by custom IDs
for &id in &custom_ids {
let retrieved = db.get(id)?;
println!(
"Retrieved ID {}: {}",
id,
String::from_utf8_lossy(&retrieved)
);
}
// Update and track history
let id_to_update = custom_ids[2]; // ID 300
for i in 1..=3 {
let updated_data = format!("Updated record {} (version {})", id_to_update, i);
db.set(OurDBSetArgs {
id: Some(id_to_update),
data: updated_data.as_bytes(),
})?;
println!("Updated ID {} (version {})", id_to_update, i);
}
// Get history for the updated record
let history = db.get_history(id_to_update, 5)?;
println!("History for ID {} (most recent first):", id_to_update);
for (i, entry) in history.iter().enumerate() {
println!(" Version {}: {}", i, String::from_utf8_lossy(entry));
}
db.close()?;
println!("Key-value mode example completed");
Ok(())
}
fn incremental_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
println!("\n=== Incremental Mode Example ===");
let db_path = base_path.join("incremental");
std::fs::create_dir_all(&db_path)?;
// Create a new database with incremental mode
let config = OurDBConfig {
path: db_path,
incremental_mode: true,
file_size: Some(1024 * 1024), // 1MB for testing
keysize: Some(3), // 3-byte keys
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
// In incremental mode, IDs are auto-generated
let mut assigned_ids = Vec::new();
// Store multiple records and collect assigned IDs
for i in 1..=5 {
let data = format!("Auto-increment record {}", i);
let id = db.set(OurDBSetArgs {
id: None,
data: data.as_bytes(),
})?;
assigned_ids.push(id);
println!("Stored record {} with auto-assigned ID: {}", i, id);
}
// Check next ID
let next_id = db.get_next_id()?;
println!("Next ID to be assigned: {}", next_id);
// Retrieve all records
for &id in &assigned_ids {
let retrieved = db.get(id)?;
println!(
"Retrieved ID {}: {}",
id,
String::from_utf8_lossy(&retrieved)
);
}
db.close()?;
println!("Incremental mode example completed");
Ok(())
}
fn performance_benchmark(base_path: &PathBuf) -> Result<(), ourdb::Error> {
println!("\n=== Performance Benchmark ===");
let db_path = base_path.join("benchmark");
std::fs::create_dir_all(&db_path)?;
// Create a new database
let config = OurDBConfig {
path: db_path,
incremental_mode: true,
file_size: Some(1024 * 1024), // 1MB for testing
keysize: Some(4), // 4-byte keys
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
// Number of operations for the benchmark
let num_operations = 1000;
let data_size = 100; // bytes per record
// Prepare test data
let test_data = vec![b'A'; data_size];
// Benchmark write operations
println!("Benchmarking {} write operations...", num_operations);
let start = Instant::now();
let mut ids = Vec::with_capacity(num_operations);
for _ in 0..num_operations {
let id = db.set(OurDBSetArgs {
id: None,
data: &test_data,
})?;
ids.push(id);
}
let write_duration = start.elapsed();
let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
println!(
"Write performance: {:.2} ops/sec ({:.2} ms/op)",
writes_per_second,
write_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
// Benchmark read operations
println!("Benchmarking {} read operations...", num_operations);
let start = Instant::now();
for &id in &ids {
let _ = db.get(id)?;
}
let read_duration = start.elapsed();
let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
println!(
"Read performance: {:.2} ops/sec ({:.2} ms/op)",
reads_per_second,
read_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
// Benchmark update operations
println!("Benchmarking {} update operations...", num_operations);
let start = Instant::now();
for &id in &ids {
db.set(OurDBSetArgs {
id: Some(id),
data: &test_data,
})?;
}
let update_duration = start.elapsed();
let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
println!(
"Update performance: {:.2} ops/sec ({:.2} ms/op)",
updates_per_second,
update_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
db.close()?;
println!("Performance benchmark completed");
Ok(())
}

View File

@@ -1,89 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
fn main() -> Result<(), ourdb::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("ourdb_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating database at: {}", db_path.display());
// Create a new database with incremental mode enabled
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: None, // Use default (500MB)
keysize: None, // Use default (4 bytes)
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
// Store some data with auto-generated IDs
let data1 = b"First record";
let id1 = db.set(OurDBSetArgs {
id: None,
data: data1,
})?;
println!("Stored first record with ID: {}", id1);
let data2 = b"Second record";
let id2 = db.set(OurDBSetArgs {
id: None,
data: data2,
})?;
println!("Stored second record with ID: {}", id2);
// Retrieve and print the data
let retrieved1 = db.get(id1)?;
println!(
"Retrieved ID {}: {}",
id1,
String::from_utf8_lossy(&retrieved1)
);
let retrieved2 = db.get(id2)?;
println!(
"Retrieved ID {}: {}",
id2,
String::from_utf8_lossy(&retrieved2)
);
// Update a record to demonstrate history tracking
let updated_data = b"Updated first record";
db.set(OurDBSetArgs {
id: Some(id1),
data: updated_data,
})?;
println!("Updated record with ID: {}", id1);
// Get history for the updated record
let history = db.get_history(id1, 2)?;
println!("History for ID {}:", id1);
for (i, entry) in history.iter().enumerate() {
println!(" Version {}: {}", i, String::from_utf8_lossy(entry));
}
// Delete a record
db.delete(id2)?;
println!("Deleted record with ID: {}", id2);
// Verify deletion
match db.get(id2) {
Ok(_) => println!("Record still exists (unexpected)"),
Err(e) => println!("Verified deletion: {}", e),
}
// Close the database
db.close()?;
println!("Database closed successfully");
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("Cleaned up database directory");
} else {
println!("Database kept at: {}", db_path.display());
}
Ok(())
}

View File

@@ -1,124 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::time::Instant;
fn main() -> Result<(), ourdb::Error> {
// Parse command-line arguments
let args: Vec<String> = std::env::args().collect();
// Default values
let mut incremental_mode = true;
let mut keysize: u8 = 4;
let mut num_operations = 10000;
// Parse arguments
for i in 1..args.len() {
if args[i] == "--no-incremental" {
incremental_mode = false;
} else if args[i] == "--keysize" && i + 1 < args.len() {
keysize = args[i + 1].parse().unwrap_or(4);
} else if args[i] == "--ops" && i + 1 < args.len() {
num_operations = args[i + 1].parse().unwrap_or(10000);
}
}
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("ourdb_benchmark");
std::fs::create_dir_all(&db_path)?;
println!("Database path: {}", db_path.display());
// Create a new database
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode,
file_size: Some(1024 * 1024),
keysize: Some(keysize),
reset: Some(true), // Reset the database for benchmarking
};
let mut db = OurDB::new(config)?;
// Prepare test data (100 bytes per record)
let test_data = vec![b'A'; 100];
// Benchmark write operations
println!(
"Benchmarking {} write operations (incremental: {}, keysize: {})...",
num_operations, incremental_mode, keysize
);
let start = Instant::now();
let mut ids = Vec::with_capacity(num_operations);
for _ in 0..num_operations {
let id = if incremental_mode {
db.set(OurDBSetArgs {
id: None,
data: &test_data,
})?
} else {
// In non-incremental mode, we need to provide IDs
let id = ids.len() as u32 + 1;
db.set(OurDBSetArgs {
id: Some(id),
data: &test_data,
})?;
id
};
ids.push(id);
}
let write_duration = start.elapsed();
let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
println!(
"Write performance: {:.2} ops/sec ({:.2} ms/op)",
writes_per_second,
write_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
// Benchmark read operations
println!("Benchmarking {} read operations...", num_operations);
let start = Instant::now();
for &id in &ids {
let _ = db.get(id)?;
}
let read_duration = start.elapsed();
let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
println!(
"Read performance: {:.2} ops/sec ({:.2} ms/op)",
reads_per_second,
read_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
// Benchmark update operations
println!("Benchmarking {} update operations...", num_operations);
let start = Instant::now();
for &id in &ids {
db.set(OurDBSetArgs {
id: Some(id),
data: &test_data,
})?;
}
let update_duration = start.elapsed();
let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
println!(
"Update performance: {:.2} ops/sec ({:.2} ms/op)",
updates_per_second,
update_duration.as_secs_f64() * 1000.0 / num_operations as f64
);
// Clean up
db.close()?;
std::fs::remove_dir_all(&db_path)?;
Ok(())
}

View File

@@ -1,83 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Standalone OurDB Example");
println!("=======================\n");
// Create a temporary directory for the database
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
std::fs::create_dir_all(&db_path)?;
println!("Creating database at: {}", db_path.display());
// Create a new OurDB instance
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: Some(false),
};
let mut db = OurDB::new(config)?;
println!("Database created successfully");
// Store some data
let test_data = b"Hello, OurDB!";
let id = db.set(OurDBSetArgs {
id: None,
data: test_data,
})?;
println!("\nStored data with ID: {}", id);
// Retrieve the data
let retrieved = db.get(id)?;
println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));
// Update the data
let updated_data = b"Updated data in OurDB!";
db.set(OurDBSetArgs {
id: Some(id),
data: updated_data,
})?;
println!("\nUpdated data with ID: {}", id);
// Retrieve the updated data
let retrieved = db.get(id)?;
println!(
"Retrieved updated data: {}",
String::from_utf8_lossy(&retrieved)
);
// Get history
let history = db.get_history(id, 2)?;
println!("\nHistory for ID {}:", id);
for (i, data) in history.iter().enumerate() {
println!(" Version {}: {}", i + 1, String::from_utf8_lossy(data));
}
// Delete the data
db.delete(id)?;
println!("\nDeleted data with ID: {}", id);
// Try to retrieve the deleted data (should fail)
match db.get(id) {
Ok(_) => println!("Data still exists (unexpected)"),
Err(e) => println!("Verified deletion: {}", e),
}
println!("\nExample completed successfully!");
// Clean up
db.close()?;
std::fs::remove_dir_all(&db_path)?;
println!("Cleaned up database directory");
Ok(())
}

View File

@@ -1,83 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Standalone OurDB Example");
println!("=======================\n");
// Create a temporary directory for the database
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
std::fs::create_dir_all(&db_path)?;
println!("Creating database at: {}", db_path.display());
// Create a new OurDB instance
let config = OurDBConfig {
path: db_path.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: Some(false),
};
let mut db = OurDB::new(config)?;
println!("Database created successfully");
// Store some data
let test_data = b"Hello, OurDB!";
let id = db.set(OurDBSetArgs {
id: None,
data: test_data,
})?;
println!("\nStored data with ID: {}", id);
// Retrieve the data
let retrieved = db.get(id)?;
println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));
// Update the data
let updated_data = b"Updated data in OurDB!";
db.set(OurDBSetArgs {
id: Some(id),
data: updated_data,
})?;
println!("\nUpdated data with ID: {}", id);
// Retrieve the updated data
let retrieved = db.get(id)?;
println!(
"Retrieved updated data: {}",
String::from_utf8_lossy(&retrieved)
);
// Get history
let history = db.get_history(id, 2)?;
println!("\nHistory for ID {}:", id);
for (i, data) in history.iter().enumerate() {
println!(" Version {}: {}", i + 1, String::from_utf8_lossy(data));
}
// Delete the data
db.delete(id)?;
println!("\nDeleted data with ID: {}", id);
// Try to retrieve the deleted data (should fail)
match db.get(id) {
Ok(_) => println!("Data still exists (unexpected)"),
Err(e) => println!("Verified deletion: {}", e),
}
println!("\nExample completed successfully!");
// Clean up
db.close()?;
std::fs::remove_dir_all(&db_path)?;
println!("Cleaned up database directory");
Ok(())
}

View File

@@ -1,366 +0,0 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use crc32fast::Hasher;
use crate::error::Error;
use crate::location::Location;
use crate::OurDB;
// Header size: 2 bytes (size) + 4 bytes (CRC32) + 6 bytes (previous location)
pub const HEADER_SIZE: usize = 12;
impl OurDB {
/// Selects and opens a database file for read/write operations
pub(crate) fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error> {
// No need to check if file_nr > 65535 as u16 can't exceed that value
let path = self.path.join(format!("{}.db", file_nr));
// Always close the current file if it's open
self.file = None;
// Create file if it doesn't exist
if !path.exists() {
self.create_new_db_file(file_nr)?;
}
// Open the file fresh
let file = OpenOptions::new().read(true).write(true).open(&path)?;
self.file = Some(file);
self.file_nr = file_nr;
Ok(())
}
/// Creates a new database file
pub(crate) fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error> {
let new_file_path = self.path.join(format!("{}.db", file_nr));
let mut file = File::create(&new_file_path)?;
// Write a single byte to make all positions start from 1
file.write_all(&[0u8])?;
Ok(())
}
/// Gets the file number to use for the next write operation
pub(crate) fn get_file_nr(&mut self) -> Result<u16, Error> {
// For keysize 2, 3, or 4, we can only use file_nr 0
if self.lookup.keysize() <= 4 {
let path = self.path.join("0.db");
if !path.exists() {
self.create_new_db_file(0)?;
}
return Ok(0);
}
// For keysize 6, we can use multiple files
let path = self.path.join(format!("{}.db", self.last_used_file_nr));
if !path.exists() {
self.create_new_db_file(self.last_used_file_nr)?;
return Ok(self.last_used_file_nr);
}
let metadata = fs::metadata(&path)?;
if metadata.len() >= self.file_size as u64 {
self.last_used_file_nr += 1;
self.create_new_db_file(self.last_used_file_nr)?;
}
Ok(self.last_used_file_nr)
}
/// Stores data at the specified ID with history tracking
pub(crate) fn set_(
&mut self,
id: u32,
old_location: Location,
data: &[u8],
) -> Result<(), Error> {
// Validate data size - maximum is u16::MAX (65535 bytes or ~64KB)
if data.len() > u16::MAX as usize {
return Err(Error::InvalidOperation(format!(
"Data size exceeds maximum allowed size of {} bytes",
u16::MAX
)));
}
// Get file number to use
let file_nr = self.get_file_nr()?;
// Select the file
self.db_file_select(file_nr)?;
// Get current file position for lookup
let file = self
.file
.as_mut()
.ok_or_else(|| Error::Other("No file open".to_string()))?;
file.seek(SeekFrom::End(0))?;
let position = file.stream_position()? as u32;
// Create new location
let new_location = Location { file_nr, position };
// Calculate CRC of data
let crc = calculate_crc(data);
// Create header
let mut header = vec![0u8; HEADER_SIZE];
// Write size (2 bytes)
let size = data.len() as u16; // Safe now because we've validated the size
header[0] = (size & 0xFF) as u8;
header[1] = ((size >> 8) & 0xFF) as u8;
// Write CRC (4 bytes)
header[2] = (crc & 0xFF) as u8;
header[3] = ((crc >> 8) & 0xFF) as u8;
header[4] = ((crc >> 16) & 0xFF) as u8;
header[5] = ((crc >> 24) & 0xFF) as u8;
// Write previous location (6 bytes)
let prev_bytes = old_location.to_bytes();
for (i, &byte) in prev_bytes.iter().enumerate().take(6) {
header[6 + i] = byte;
}
// Write header
file.write_all(&header)?;
// Write actual data
file.write_all(data)?;
file.flush()?;
// Update lookup table with new position
self.lookup.set(id, new_location)?;
Ok(())
}
/// Retrieves data at the specified location
pub(crate) fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error> {
if location.position == 0 {
return Err(Error::NotFound(format!(
"Record not found, location: {:?}",
location
)));
}
// Select the file
self.db_file_select(location.file_nr)?;
let file = self
.file
.as_mut()
.ok_or_else(|| Error::Other("No file open".to_string()))?;
// Read header
file.seek(SeekFrom::Start(location.position as u64))?;
let mut header = vec![0u8; HEADER_SIZE];
file.read_exact(&mut header)?;
// Parse size (2 bytes)
let size = u16::from(header[0]) | (u16::from(header[1]) << 8);
// Parse CRC (4 bytes)
let stored_crc = u32::from(header[2])
| (u32::from(header[3]) << 8)
| (u32::from(header[4]) << 16)
| (u32::from(header[5]) << 24);
// Read data
let mut data = vec![0u8; size as usize];
file.read_exact(&mut data)?;
// Verify CRC
let calculated_crc = calculate_crc(&data);
if calculated_crc != stored_crc {
return Err(Error::DataCorruption(
"CRC mismatch: data corruption detected".to_string(),
));
}
Ok(data)
}
/// Retrieves the previous position for a record (for history tracking)
pub(crate) fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error> {
if location.position == 0 {
return Err(Error::NotFound("Record not found".to_string()));
}
// Select the file
self.db_file_select(location.file_nr)?;
let file = self
.file
.as_mut()
.ok_or_else(|| Error::Other("No file open".to_string()))?;
// Skip size and CRC (6 bytes)
file.seek(SeekFrom::Start(location.position as u64 + 6))?;
// Read previous location (6 bytes)
let mut prev_bytes = vec![0u8; 6];
file.read_exact(&mut prev_bytes)?;
// Create location from bytes
Location::from_bytes(&prev_bytes, 6)
}
/// Deletes the record at the specified location
pub(crate) fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error> {
if location.position == 0 {
return Err(Error::NotFound("Record not found".to_string()));
}
// Select the file
self.db_file_select(location.file_nr)?;
let file = self
.file
.as_mut()
.ok_or_else(|| Error::Other("No file open".to_string()))?;
// Read size first
file.seek(SeekFrom::Start(location.position as u64))?;
let mut size_bytes = vec![0u8; 2];
file.read_exact(&mut size_bytes)?;
let size = u16::from(size_bytes[0]) | (u16::from(size_bytes[1]) << 8);
// Write zeros for the entire record (header + data)
let zeros = vec![0u8; HEADER_SIZE + size as usize];
file.seek(SeekFrom::Start(location.position as u64))?;
file.write_all(&zeros)?;
// Clear lookup entry
self.lookup.delete(id)?;
Ok(())
}
/// Condenses the database by removing empty records and updating positions
pub fn condense(&mut self) -> Result<(), Error> {
// Create a temporary directory
let temp_path = self.path.join("temp");
fs::create_dir_all(&temp_path)?;
// Get all file numbers
let mut file_numbers = Vec::new();
for entry in fs::read_dir(&self.path)? {
let entry = entry?;
let path = entry.path();
if path.is_file() && path.extension().map_or(false, |ext| ext == "db") {
if let Some(stem) = path.file_stem() {
if let Ok(file_nr) = stem.to_string_lossy().parse::<u16>() {
file_numbers.push(file_nr);
}
}
}
}
// Process each file
for file_nr in file_numbers {
let src_path = self.path.join(format!("{}.db", file_nr));
let temp_file_path = temp_path.join(format!("{}.db", file_nr));
// Create new file
let mut temp_file = File::create(&temp_file_path)?;
temp_file.write_all(&[0u8])?; // Initialize with a byte
// Open source file
let mut src_file = File::open(&src_path)?;
// Read and process records
let mut buffer = vec![0u8; 1024]; // Read in chunks
let mut _position = 0;
while let Ok(bytes_read) = src_file.read(&mut buffer) {
if bytes_read == 0 {
break;
}
// Process the chunk
// This is a simplified version - in a real implementation,
// you would need to handle records that span chunk boundaries
_position += bytes_read;
}
// TODO: Implement proper record copying and position updating
// This would involve:
// 1. Reading each record from the source file
// 2. If not deleted (all zeros), copy to temp file
// 3. Update lookup table with new positions
}
// TODO: Replace original files with temp files
// Clean up
fs::remove_dir_all(&temp_path)?;
Ok(())
}
}
/// Calculates CRC32 for the data
fn calculate_crc(data: &[u8]) -> u32 {
let mut hasher = Hasher::new();
hasher.update(data);
hasher.finalize()
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use crate::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
fn get_temp_dir() -> PathBuf {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
temp_dir().join(format!("ourdb_backend_test_{}", timestamp))
}
#[test]
fn test_backend_operations() {
let temp_dir = get_temp_dir();
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: false,
file_size: None,
keysize: None,
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config).unwrap();
// Test set and get
let test_data = b"Test data for backend operations";
let id = 1;
db.set(OurDBSetArgs {
id: Some(id),
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Clean up
db.destroy().unwrap();
}
}

View File

@@ -1,41 +0,0 @@
use thiserror::Error;
/// Error types for OurDB operations
#[derive(Error, Debug)]
pub enum Error {
/// IO errors from file operations
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
/// Data corruption errors
#[error("Data corruption: {0}")]
DataCorruption(String),
/// Invalid operation errors
#[error("Invalid operation: {0}")]
InvalidOperation(String),
/// Lookup table errors
#[error("Lookup error: {0}")]
LookupError(String),
/// Record not found errors
#[error("Record not found: {0}")]
NotFound(String),
/// Other errors
#[error("Error: {0}")]
Other(String),
}
impl From<String> for Error {
fn from(msg: String) -> Self {
Error::Other(msg)
}
}
impl From<&str> for Error {
fn from(msg: &str) -> Self {
Error::Other(msg.to_string())
}
}

View File

@@ -1,293 +0,0 @@
mod backend;
mod error;
mod location;
mod lookup;
pub use error::Error;
pub use location::Location;
pub use lookup::LookupTable;
use std::fs::File;
use std::path::PathBuf;
/// OurDB is a lightweight, efficient key-value database implementation that provides
/// data persistence with history tracking capabilities.
pub struct OurDB {
/// Directory path for storage
path: PathBuf,
/// Whether to use auto-increment mode
incremental_mode: bool,
/// Maximum file size (default: 500MB)
file_size: u32,
/// Lookup table for mapping keys to locations
lookup: LookupTable,
/// Currently open file
file: Option<File>,
/// Current file number
file_nr: u16,
/// Last used file number
last_used_file_nr: u16,
}
/// Configuration for creating a new OurDB instance
pub struct OurDBConfig {
/// Directory path for storage
pub path: PathBuf,
/// Whether to use auto-increment mode
pub incremental_mode: bool,
/// Maximum file size (default: 500MB)
pub file_size: Option<u32>,
/// Lookup table key size (default: 4)
/// - 2: For databases with < 65,536 records (single file)
/// - 3: For databases with < 16,777,216 records (single file)
/// - 4: For databases with < 4,294,967,296 records (single file)
/// - 6: For large databases requiring multiple files
pub keysize: Option<u8>,
/// Whether to reset the database if it exists (default: false)
pub reset: Option<bool>,
}
/// Arguments for setting a value in OurDB
pub struct OurDBSetArgs<'a> {
/// ID for the record (optional in incremental mode)
pub id: Option<u32>,
/// Data to store
pub data: &'a [u8],
}
impl OurDB {
/// Creates a new OurDB instance with the given configuration
pub fn new(config: OurDBConfig) -> Result<Self, Error> {
// If reset is true and the path exists, remove it first
if config.reset.unwrap_or(false) && config.path.exists() {
std::fs::remove_dir_all(&config.path)?;
}
// Create directory if it doesn't exist
std::fs::create_dir_all(&config.path)?;
// Create lookup table
let lookup_path = config.path.join("lookup");
std::fs::create_dir_all(&lookup_path)?;
let lookup_config = lookup::LookupConfig {
size: 1000000, // Default size
keysize: config.keysize.unwrap_or(4),
lookuppath: lookup_path.to_string_lossy().to_string(),
incremental_mode: config.incremental_mode,
};
let lookup = LookupTable::new(lookup_config)?;
let mut db = OurDB {
path: config.path,
incremental_mode: config.incremental_mode,
file_size: config.file_size.unwrap_or(500 * (1 << 20)), // 500MB default
lookup,
file: None,
file_nr: 0,
last_used_file_nr: 0,
};
// Load existing metadata if available
db.load()?;
Ok(db)
}
/// Sets a value in the database
///
/// In incremental mode:
/// - If ID is provided, it updates an existing record
/// - If ID is not provided, it creates a new record with auto-generated ID
///
/// In key-value mode:
/// - ID must be provided
pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error> {
if self.incremental_mode {
if let Some(id) = args.id {
// This is an update
let location = self.lookup.get(id)?;
if location.position == 0 {
return Err(Error::InvalidOperation(
"Cannot set ID for insertions when incremental mode is enabled".to_string(),
));
}
self.set_(id, location, args.data)?;
Ok(id)
} else {
// This is an insert
let id = self.lookup.get_next_id()?;
self.set_(id, Location::default(), args.data)?;
Ok(id)
}
} else {
// Using key-value mode
let id = args.id.ok_or_else(|| {
Error::InvalidOperation(
"ID must be provided when incremental is disabled".to_string(),
)
})?;
let location = self.lookup.get(id)?;
self.set_(id, location, args.data)?;
Ok(id)
}
}
/// Retrieves data stored at the specified key position
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error> {
let location = self.lookup.get(id)?;
self.get_(location)
}
/// Retrieves a list of previous values for the specified key
///
/// The depth parameter controls how many historical values to retrieve (maximum)
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error> {
let mut result = Vec::new();
let mut current_location = self.lookup.get(id)?;
// Traverse the history chain up to specified depth
for _ in 0..depth {
// Get current value
let data = self.get_(current_location)?;
result.push(data);
// Try to get previous location
match self.get_prev_pos_(current_location) {
Ok(location) => {
if location.position == 0 {
break;
}
current_location = location;
}
Err(_) => break,
}
}
Ok(result)
}
/// Deletes the data at the specified key position
pub fn delete(&mut self, id: u32) -> Result<(), Error> {
let location = self.lookup.get(id)?;
self.delete_(id, location)?;
self.lookup.delete(id)?;
Ok(())
}
/// Returns the next ID which will be used when storing in incremental mode
pub fn get_next_id(&mut self) -> Result<u32, Error> {
if !self.incremental_mode {
return Err(Error::InvalidOperation(
"Incremental mode is not enabled".to_string(),
));
}
self.lookup.get_next_id()
}
/// Closes the database, ensuring all data is saved
pub fn close(&mut self) -> Result<(), Error> {
self.save()?;
self.close_();
Ok(())
}
/// Destroys the database, removing all files
pub fn destroy(&mut self) -> Result<(), Error> {
let _ = self.close();
std::fs::remove_dir_all(&self.path)?;
Ok(())
}
// Helper methods
fn lookup_dump_path(&self) -> PathBuf {
self.path.join("lookup_dump.db")
}
fn load(&mut self) -> Result<(), Error> {
let dump_path = self.lookup_dump_path();
if dump_path.exists() {
self.lookup.import_sparse(&dump_path.to_string_lossy())?;
}
Ok(())
}
fn save(&mut self) -> Result<(), Error> {
self.lookup
.export_sparse(&self.lookup_dump_path().to_string_lossy())?;
Ok(())
}
fn close_(&mut self) {
self.file = None;
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
fn get_temp_dir() -> PathBuf {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
temp_dir().join(format!("ourdb_test_{}", timestamp))
}
#[test]
fn test_basic_operations() {
let temp_dir = get_temp_dir();
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config).unwrap();
// Test set and get
let test_data = b"Hello, OurDB!";
let id = db
.set(OurDBSetArgs {
id: None,
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Test update
let updated_data = b"Updated data";
db.set(OurDBSetArgs {
id: Some(id),
data: updated_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, updated_data);
// Test history
let history = db.get_history(id, 2).unwrap();
assert_eq!(history.len(), 2);
assert_eq!(history[0], updated_data);
assert_eq!(history[1], test_data);
// Test delete
db.delete(id).unwrap();
assert!(db.get(id).is_err());
// Clean up
db.destroy().unwrap();
}
}


@@ -1,178 +0,0 @@
use crate::error::Error;
/// Location represents a physical position in a database file
///
/// It consists of a file number and a position within that file.
/// This allows OurDB to span multiple files for large datasets.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct Location {
/// File number (0-65535)
pub file_nr: u16,
/// Position within the file
pub position: u32,
}
impl Location {
/// Creates a new Location from bytes based on keysize
///
/// - keysize = 2: Only position (2 bytes), file_nr = 0
/// - keysize = 3: Only position (3 bytes), file_nr = 0
/// - keysize = 4: Only position (4 bytes), file_nr = 0
/// - keysize = 6: file_nr (2 bytes) + position (4 bytes)
pub fn from_bytes(bytes: &[u8], keysize: u8) -> Result<Self, Error> {
// Validate keysize
if ![2, 3, 4, 6].contains(&keysize) {
return Err(Error::InvalidOperation(format!(
"Invalid keysize: {}",
keysize
)));
}
// Create padded bytes
let mut padded = vec![0u8; keysize as usize];
if bytes.len() > keysize as usize {
return Err(Error::InvalidOperation(
"Input bytes exceed keysize".to_string(),
));
}
let start_idx = keysize as usize - bytes.len();
for (i, &b) in bytes.iter().enumerate() {
if i + start_idx < padded.len() {
padded[start_idx + i] = b;
}
}
let mut location = Location::default();
match keysize {
2 => {
// Only position, 2 bytes big endian
location.position = u32::from(padded[0]) << 8 | u32::from(padded[1]);
location.file_nr = 0;
// Verify limits
if location.position > 0xFFFF {
return Err(Error::InvalidOperation(
"Position exceeds max value for keysize=2 (max 65535)".to_string(),
));
}
}
3 => {
// Only position, 3 bytes big endian
location.position =
u32::from(padded[0]) << 16 | u32::from(padded[1]) << 8 | u32::from(padded[2]);
location.file_nr = 0;
// Verify limits
if location.position > 0xFFFFFF {
return Err(Error::InvalidOperation(
"Position exceeds max value for keysize=3 (max 16777215)".to_string(),
));
}
}
4 => {
// Only position, 4 bytes big endian
location.position = u32::from(padded[0]) << 24
| u32::from(padded[1]) << 16
| u32::from(padded[2]) << 8
| u32::from(padded[3]);
location.file_nr = 0;
}
6 => {
// 2 bytes file_nr + 4 bytes position, all big endian
location.file_nr = u16::from(padded[0]) << 8 | u16::from(padded[1]);
location.position = u32::from(padded[2]) << 24
| u32::from(padded[3]) << 16
| u32::from(padded[4]) << 8
| u32::from(padded[5]);
}
_ => unreachable!(),
}
Ok(location)
}
/// Converts the location to bytes (always 6 bytes)
///
/// Format: [file_nr (2 bytes)][position (4 bytes)]
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::with_capacity(6);
// Put file_nr first (2 bytes)
bytes.push((self.file_nr >> 8) as u8);
bytes.push(self.file_nr as u8);
// Put position next (4 bytes)
bytes.push((self.position >> 24) as u8);
bytes.push((self.position >> 16) as u8);
bytes.push((self.position >> 8) as u8);
bytes.push(self.position as u8);
bytes
}
/// Converts the location to a u64 value
///
/// The file_nr is stored in the most significant bits
pub fn to_u64(&self) -> u64 {
(u64::from(self.file_nr) << 32) | u64::from(self.position)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_location_from_bytes_keysize_2() {
let bytes = vec![0x12, 0x34];
let location = Location::from_bytes(&bytes, 2).unwrap();
assert_eq!(location.file_nr, 0);
assert_eq!(location.position, 0x1234);
}
#[test]
fn test_location_from_bytes_keysize_3() {
let bytes = vec![0x12, 0x34, 0x56];
let location = Location::from_bytes(&bytes, 3).unwrap();
assert_eq!(location.file_nr, 0);
assert_eq!(location.position, 0x123456);
}
#[test]
fn test_location_from_bytes_keysize_4() {
let bytes = vec![0x12, 0x34, 0x56, 0x78];
let location = Location::from_bytes(&bytes, 4).unwrap();
assert_eq!(location.file_nr, 0);
assert_eq!(location.position, 0x12345678);
}
#[test]
fn test_location_from_bytes_keysize_6() {
let bytes = vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78];
let location = Location::from_bytes(&bytes, 6).unwrap();
assert_eq!(location.file_nr, 0xABCD);
assert_eq!(location.position, 0x12345678);
}
#[test]
fn test_location_to_bytes() {
let location = Location {
file_nr: 0xABCD,
position: 0x12345678,
};
let bytes = location.to_bytes();
assert_eq!(bytes, vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78]);
}
#[test]
fn test_location_to_u64() {
let location = Location {
file_nr: 0xABCD,
position: 0x12345678,
};
let value = location.to_u64();
assert_eq!(value, 0xABCD_0000_0000 | 0x12345678);
}
}


@@ -1,540 +0,0 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use crate::error::Error;
use crate::location::Location;
const DATA_FILE_NAME: &str = "data";
const INCREMENTAL_FILE_NAME: &str = ".inc";
/// Configuration for creating a new lookup table
pub struct LookupConfig {
/// Size of the lookup table
pub size: u32,
/// Size of each entry in bytes (2-6)
/// - 2: For databases with < 65,536 records (single file)
/// - 3: For databases with < 16,777,216 records (single file)
/// - 4: For databases with < 4,294,967,296 records (single file)
/// - 6: For large databases requiring multiple files
pub keysize: u8,
/// Path for disk-based lookup
pub lookuppath: String,
/// Whether to use incremental mode
pub incremental_mode: bool,
}
/// Lookup table maps keys to physical locations in the backend storage
pub struct LookupTable {
/// Size of each entry in bytes (2-6)
keysize: u8,
/// Path for disk-based lookup
lookuppath: String,
/// In-memory data for memory-based lookup
data: Vec<u8>,
/// Next empty slot if incremental mode is enabled
incremental: Option<u32>,
}
impl LookupTable {
/// Returns the keysize of this lookup table
pub fn keysize(&self) -> u8 {
self.keysize
}
/// Creates a new lookup table with the given configuration
pub fn new(config: LookupConfig) -> Result<Self, Error> {
// Verify keysize is valid
if ![2, 3, 4, 6].contains(&config.keysize) {
return Err(Error::InvalidOperation(format!(
"Invalid keysize: {}",
config.keysize
)));
}
let incremental = if config.incremental_mode {
Some(get_incremental_info(&config)?)
} else {
None
};
if !config.lookuppath.is_empty() {
// Create directory if it doesn't exist
fs::create_dir_all(&config.lookuppath)?;
// For disk-based lookup, create empty file if it doesn't exist
let data_path = Path::new(&config.lookuppath).join(DATA_FILE_NAME);
if !data_path.exists() {
let data = vec![0u8; config.size as usize * config.keysize as usize];
fs::write(&data_path, &data)?;
}
Ok(LookupTable {
data: Vec::new(),
keysize: config.keysize,
lookuppath: config.lookuppath,
incremental,
})
} else {
// For memory-based lookup
Ok(LookupTable {
data: vec![0u8; config.size as usize * config.keysize as usize],
keysize: config.keysize,
lookuppath: String::new(),
incremental,
})
}
}
/// Gets a location for the given ID
pub fn get(&self, id: u32) -> Result<Location, Error> {
let entry_size = self.keysize as usize;
if !self.lookuppath.is_empty() {
// Disk-based lookup
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
// Check file size first
let file_size = fs::metadata(&data_path)?.len();
let start_pos = id as u64 * entry_size as u64;
if start_pos + entry_size as u64 > file_size {
return Err(Error::LookupError(format!(
"Invalid read for get in lut: {}: {} would exceed file size {}",
self.lookuppath,
start_pos + entry_size as u64,
file_size
)));
}
// Read directly from file
let mut file = File::open(&data_path)?;
file.seek(SeekFrom::Start(start_pos))?;
let mut data = vec![0u8; entry_size];
let bytes_read = file.read(&mut data)?;
if bytes_read < entry_size {
return Err(Error::LookupError(format!(
"Incomplete read: expected {} bytes but got {}",
entry_size, bytes_read
)));
}
return Location::from_bytes(&data, self.keysize);
}
// Memory-based lookup
if (id * self.keysize as u32) as usize >= self.data.len() {
return Err(Error::LookupError("Index out of bounds".to_string()));
}
let start = (id * self.keysize as u32) as usize;
let end = start + entry_size;
Location::from_bytes(&self.data[start..end], self.keysize)
}
/// Sets a location for the given ID
pub fn set(&mut self, id: u32, location: Location) -> Result<(), Error> {
let entry_size = self.keysize as usize;
// Handle incremental mode
if let Some(incremental) = self.incremental {
if id == incremental {
self.increment_index()?;
}
if id > incremental {
return Err(Error::InvalidOperation(
"Cannot set ID for insertions when incremental mode is enabled".to_string(),
));
}
}
// Convert location to bytes based on keysize
let location_bytes = match self.keysize {
2 => {
if location.file_nr != 0 {
return Err(Error::InvalidOperation(
"file_nr must be 0 for keysize=2".to_string(),
));
}
if location.position > 0xFFFF {
return Err(Error::InvalidOperation(
"position exceeds max value for keysize=2 (max 65535)".to_string(),
));
}
vec![(location.position >> 8) as u8, location.position as u8]
}
3 => {
if location.file_nr != 0 {
return Err(Error::InvalidOperation(
"file_nr must be 0 for keysize=3".to_string(),
));
}
if location.position > 0xFFFFFF {
return Err(Error::InvalidOperation(
"position exceeds max value for keysize=3 (max 16777215)".to_string(),
));
}
vec![
(location.position >> 16) as u8,
(location.position >> 8) as u8,
location.position as u8,
]
}
4 => {
if location.file_nr != 0 {
return Err(Error::InvalidOperation(
"file_nr must be 0 for keysize=4".to_string(),
));
}
vec![
(location.position >> 24) as u8,
(location.position >> 16) as u8,
(location.position >> 8) as u8,
location.position as u8,
]
}
6 => {
// Full location with file_nr and position
location.to_bytes()
}
_ => {
return Err(Error::InvalidOperation(format!(
"Invalid keysize: {}",
self.keysize
)))
}
};
if !self.lookuppath.is_empty() {
// Disk-based lookup
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
let mut file = OpenOptions::new().write(true).open(data_path)?;
let start_pos = id as u64 * entry_size as u64;
file.seek(SeekFrom::Start(start_pos))?;
file.write_all(&location_bytes)?;
} else {
// Memory-based lookup
let start = (id * self.keysize as u32) as usize;
if start + entry_size > self.data.len() {
return Err(Error::LookupError("Index out of bounds".to_string()));
}
for (i, &byte) in location_bytes.iter().enumerate() {
self.data[start + i] = byte;
}
}
Ok(())
}
/// Deletes an entry for the given ID
pub fn delete(&mut self, id: u32) -> Result<(), Error> {
// Set location to all zeros
self.set(id, Location::default())
}
/// Gets the next available ID in incremental mode
pub fn get_next_id(&self) -> Result<u32, Error> {
let incremental = self.incremental.ok_or_else(|| {
Error::InvalidOperation("Lookup table not in incremental mode".to_string())
})?;
let table_size = if !self.lookuppath.is_empty() {
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
fs::metadata(data_path)?.len() as u32
} else {
self.data.len() as u32
};
if incremental * self.keysize as u32 >= table_size {
return Err(Error::LookupError("Lookup table is full".to_string()));
}
Ok(incremental)
}
/// Increments the index in incremental mode
pub fn increment_index(&mut self) -> Result<(), Error> {
let mut incremental = self.incremental.ok_or_else(|| {
Error::InvalidOperation("Lookup table not in incremental mode".to_string())
})?;
incremental += 1;
self.incremental = Some(incremental);
if !self.lookuppath.is_empty() {
let inc_path = Path::new(&self.lookuppath).join(INCREMENTAL_FILE_NAME);
fs::write(inc_path, incremental.to_string())?;
}
Ok(())
}
/// Exports the lookup table to a file
pub fn export_data(&self, path: &str) -> Result<(), Error> {
if !self.lookuppath.is_empty() {
// For disk-based lookup, just copy the file
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
fs::copy(data_path, path)?;
} else {
// For memory-based lookup, write the data to file
fs::write(path, &self.data)?;
}
Ok(())
}
/// Imports the lookup table from a file
pub fn import_data(&mut self, path: &str) -> Result<(), Error> {
if !self.lookuppath.is_empty() {
// For disk-based lookup, copy the file
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
fs::copy(path, data_path)?;
} else {
// For memory-based lookup, read the data from file
self.data = fs::read(path)?;
}
Ok(())
}
/// Exports only non-zero entries to save space
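///
/// Each record in the output is the 4-byte big-endian ID followed by the
/// `keysize`-byte location entry; entries that are all zeros are skipped.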
pub fn export_sparse(&self, path: &str) -> Result<(), Error> {
let mut output = Vec::new();
let entry_size = self.keysize as usize;
if !self.lookuppath.is_empty() {
// For disk-based lookup
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
let mut file = File::open(&data_path)?;
let file_size = fs::metadata(&data_path)?.len();
let max_entries = file_size / entry_size as u64;
for id in 0..max_entries {
file.seek(SeekFrom::Start(id * entry_size as u64))?;
let mut buffer = vec![0u8; entry_size];
let bytes_read = file.read(&mut buffer)?;
if bytes_read < entry_size {
break;
}
// Check if entry is non-zero
if buffer.iter().any(|&b| b != 0) {
// Write ID (4 bytes) + entry
output.extend_from_slice(&(id as u32).to_be_bytes());
output.extend_from_slice(&buffer);
}
}
} else {
// For memory-based lookup
let max_entries = self.data.len() / entry_size;
for id in 0..max_entries {
let start = id * entry_size;
let entry = &self.data[start..start + entry_size];
// Check if entry is non-zero
if entry.iter().any(|&b| b != 0) {
// Write ID (4 bytes) + entry
output.extend_from_slice(&(id as u32).to_be_bytes());
output.extend_from_slice(entry);
}
}
}
// Write the output to file
fs::write(path, &output)?;
Ok(())
}
/// Imports sparse data (only non-zero entries)
pub fn import_sparse(&mut self, path: &str) -> Result<(), Error> {
let data = fs::read(path)?;
let entry_size = self.keysize as usize;
let record_size = 4 + entry_size; // ID (4 bytes) + entry
if data.len() % record_size != 0 {
return Err(Error::DataCorruption(
"Invalid sparse data format: size mismatch".to_string(),
));
}
for chunk_start in (0..data.len()).step_by(record_size) {
if chunk_start + record_size > data.len() {
break;
}
// Extract ID (4 bytes)
let id_bytes = &data[chunk_start..chunk_start + 4];
let id = u32::from_be_bytes([id_bytes[0], id_bytes[1], id_bytes[2], id_bytes[3]]);
// Extract entry
let entry = &data[chunk_start + 4..chunk_start + record_size];
// Create location from entry
let location = Location::from_bytes(entry, self.keysize)?;
// Set the entry
self.set(id, location)?;
}
Ok(())
}
/// Finds the highest ID with a non-zero entry
pub fn find_last_entry(&mut self) -> Result<u32, Error> {
let mut last_id = 0u32;
let entry_size = self.keysize as usize;
if !self.lookuppath.is_empty() {
// For disk-based lookup
let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
let mut file = File::open(&data_path)?;
let file_size = fs::metadata(&data_path)?.len();
let mut buffer = vec![0u8; entry_size];
let mut pos = 0u32;
while (pos as u64 * entry_size as u64) < file_size {
file.seek(SeekFrom::Start(pos as u64 * entry_size as u64))?;
let bytes_read = file.read(&mut buffer)?;
if bytes_read == 0 || bytes_read < entry_size {
break;
}
let location = Location::from_bytes(&buffer, self.keysize)?;
if location.position != 0 || location.file_nr != 0 {
last_id = pos;
}
pos += 1;
}
} else {
// For memory-based lookup
for i in 0..(self.data.len() / entry_size) as u32 {
if let Ok(location) = self.get(i) {
if location.position != 0 || location.file_nr != 0 {
last_id = i;
}
}
}
}
Ok(last_id)
}
}
/// Helper function to get the incremental value
fn get_incremental_info(config: &LookupConfig) -> Result<u32, Error> {
if !config.incremental_mode {
return Ok(0);
}
if !config.lookuppath.is_empty() {
let inc_path = Path::new(&config.lookuppath).join(INCREMENTAL_FILE_NAME);
if !inc_path.exists() {
// Create a separate file for storing the incremental value
fs::write(&inc_path, "1")?;
}
let inc_str = fs::read_to_string(&inc_path)?;
let incremental = match inc_str.trim().parse::<u32>() {
Ok(val) => val,
Err(_) => {
// If the value is invalid, reset it to 1
fs::write(&inc_path, "1")?;
1
}
};
Ok(incremental)
} else {
// For memory-based lookup, start with 1
Ok(1)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env::temp_dir;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
fn get_temp_dir() -> PathBuf {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
temp_dir().join(format!("ourdb_lookup_test_{}", timestamp))
}
#[test]
fn test_memory_lookup() {
let config = LookupConfig {
size: 1000,
keysize: 4,
lookuppath: String::new(),
incremental_mode: true,
};
let mut lookup = LookupTable::new(config).unwrap();
// Test set and get
let location = Location {
file_nr: 0,
position: 12345,
};
lookup.set(1, location).unwrap();
let retrieved = lookup.get(1).unwrap();
assert_eq!(retrieved.file_nr, location.file_nr);
assert_eq!(retrieved.position, location.position);
// Test incremental mode
let next_id = lookup.get_next_id().unwrap();
assert_eq!(next_id, 2);
lookup.increment_index().unwrap();
let next_id = lookup.get_next_id().unwrap();
assert_eq!(next_id, 3);
}
#[test]
fn test_disk_lookup() {
let temp_dir = get_temp_dir();
fs::create_dir_all(&temp_dir).unwrap();
let config = LookupConfig {
size: 1000,
keysize: 4,
lookuppath: temp_dir.to_string_lossy().to_string(),
incremental_mode: true,
};
let mut lookup = LookupTable::new(config).unwrap();
// Test set and get
let location = Location {
file_nr: 0,
position: 12345,
};
lookup.set(1, location).unwrap();
let retrieved = lookup.get(1).unwrap();
assert_eq!(retrieved.file_nr, location.file_nr);
assert_eq!(retrieved.position, location.position);
// Clean up
fs::remove_dir_all(temp_dir).unwrap();
}
}


@@ -1,369 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use rand;
use std::env::temp_dir;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
// Helper function to create a unique temporary directory for tests
fn get_temp_dir() -> PathBuf {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_nanos();
let random_part = rand::random::<u32>();
let dir = temp_dir().join(format!("ourdb_test_{}_{}", timestamp, random_part));
// Ensure the directory exists and is empty
if dir.exists() {
std::fs::remove_dir_all(&dir).unwrap();
}
std::fs::create_dir_all(&dir).unwrap();
dir
}
#[test]
fn test_basic_operations() {
let temp_dir = get_temp_dir();
// Create a new database with incremental mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test set and get
let test_data = b"Hello, OurDB!";
let id = db
.set(OurDBSetArgs {
id: None,
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Test update
let updated_data = b"Updated data";
db.set(OurDBSetArgs {
id: Some(id),
data: updated_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, updated_data);
// Test history
let history = db.get_history(id, 2).unwrap();
assert_eq!(history.len(), 2);
assert_eq!(history[0], updated_data);
assert_eq!(history[1], test_data);
// Test delete
db.delete(id).unwrap();
assert!(db.get(id).is_err());
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_key_value_mode() {
let temp_dir = get_temp_dir();
// Create a new database with key-value mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: false,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test set with explicit ID
let test_data = b"Key-value data";
let id = 42;
db.set(OurDBSetArgs {
id: Some(id),
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Verify next_id fails in key-value mode
assert!(db.get_next_id().is_err());
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_incremental_mode() {
let temp_dir = get_temp_dir();
// Create a new database with incremental mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test auto-increment IDs
let data1 = b"First record";
let id1 = db
.set(OurDBSetArgs {
id: None,
data: data1,
})
.unwrap();
let data2 = b"Second record";
let id2 = db
.set(OurDBSetArgs {
id: None,
data: data2,
})
.unwrap();
// IDs should be sequential
assert_eq!(id2, id1 + 1);
// Verify get_next_id works
let next_id = db.get_next_id().unwrap();
assert_eq!(next_id, id2 + 1);
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_persistence() {
let temp_dir = get_temp_dir();
// Create data in a new database
{
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
let test_data = b"Persistent data";
let id = db
.set(OurDBSetArgs {
id: None,
data: test_data,
})
.unwrap();
// Explicitly close the database
db.close().unwrap();
// ID should be 1 in a new database
assert_eq!(id, 1);
}
// Reopen the database and verify data persists
{
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Verify data is still there
let retrieved = db.get(1).unwrap();
assert_eq!(retrieved, b"Persistent data");
// Verify incremental counter persisted
let next_id = db.get_next_id().unwrap();
assert_eq!(next_id, 2);
// Clean up
db.destroy().unwrap();
}
}
#[test]
fn test_different_keysizes() {
for keysize in [2, 3, 4, 6].iter() {
let temp_dir = get_temp_dir();
// Ensure the directory exists
std::fs::create_dir_all(&temp_dir).unwrap();
// Create a new database with specified keysize
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: Some(*keysize),
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test basic operations
let test_data = b"Keysize test data";
let id = db
.set(OurDBSetArgs {
id: None,
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Clean up
db.destroy().unwrap();
}
}
#[test]
fn test_large_data() {
let temp_dir = get_temp_dir();
// Create a new database
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Create a large data set (60KB - within the 64KB limit)
let large_data = vec![b'X'; 60 * 1024];
// Store and retrieve large data
let id = db
.set(OurDBSetArgs {
id: None,
data: &large_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved.len(), large_data.len());
assert_eq!(retrieved, large_data);
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_exceed_size_limit() {
let temp_dir = get_temp_dir();
// Create a new database
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Create data larger than the 64KB limit (70KB)
let oversized_data = vec![b'X'; 70 * 1024];
// Attempt to store data that exceeds the size limit
let result = db.set(OurDBSetArgs {
id: None,
data: &oversized_data,
});
// Verify that an error is returned
assert!(
result.is_err(),
"Expected an error when storing data larger than 64KB"
);
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_multiple_files() {
let temp_dir = get_temp_dir();
// Create a new database with small file size to force multiple files
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: Some(1024), // Very small file size (1KB)
keysize: Some(6), // 6-byte keysize for multiple files
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Store enough data to span multiple files
let data_size = 500; // bytes per record
let test_data = vec![b'A'; data_size];
let mut ids = Vec::new();
for _ in 0..10 {
let id = db
.set(OurDBSetArgs {
id: None,
data: &test_data,
})
.unwrap();
ids.push(id);
}
// Verify all data can be retrieved
for &id in &ids {
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved.len(), data_size);
}
// Verify multiple files were created
let files = fs::read_dir(&temp_dir)
.unwrap()
.filter_map(Result::ok)
.filter(|entry| {
let path = entry.path();
path.is_file() && path.extension().map_or(false, |ext| ext == "db")
})
.count();
assert!(
files > 1,
"Expected multiple database files, found {}",
files
);
// Clean up
db.destroy().unwrap();
}


@@ -1,787 +0,0 @@
# RadixTree: Architecture for V to Rust Port
## 1. Overview
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This document outlines the architecture for porting the RadixTree module from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
The Rust implementation will integrate with the existing OurDB Rust implementation for persistent storage.
```mermaid
graph TD
A[Client Code] --> B[RadixTree API]
B --> C[Node Management]
B --> D[Serialization]
B --> E[Tree Operations]
C --> F[OurDB]
D --> F
E --> C
```
## 2. Current Architecture (V Implementation)
The current V implementation of RadixTree consists of the following components:
### 2.1 Core Data Structures
#### Node
```v
struct Node {
mut:
key_segment string // The segment of the key stored at this node
value []u8 // Value stored at this node (empty if not a leaf)
children []NodeRef // References to child nodes
is_leaf bool // Whether this node is a leaf node
}
```
#### NodeRef
```v
struct NodeRef {
mut:
key_part string // The key segment for this child
node_id u32 // Database ID of the node
}
```
#### RadixTree
```v
@[heap]
pub struct RadixTree {
mut:
db &ourdb.OurDB // Database for persistent storage
root_id u32 // Database ID of the root node
}
```
### 2.2 Key Operations
1. **new()**: Creates a new radix tree with a specified database path
2. **set(key, value)**: Sets a key-value pair in the tree
3. **get(key)**: Retrieves a value by key
4. **update(prefix, new_value)**: Updates the value at a given key prefix
5. **delete(key)**: Removes a key from the tree
6. **list(prefix)**: Lists all keys with a given prefix
7. **getall(prefix)**: Gets all values for keys with a given prefix
### 2.3 Serialization
The V implementation uses a custom binary serialization format for nodes:
- Version byte (1 byte)
- Key segment (string)
- Value length (2 bytes) followed by value bytes
- Children count (2 bytes) followed by children
- Is leaf flag (1 byte)
Each child is serialized as:
- Key part (string)
- Node ID (4 bytes)
### 2.4 Integration with OurDB
The RadixTree uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
## 3. Proposed Rust Architecture
The Rust implementation will maintain the same overall architecture while leveraging Rust's type system, ownership model, and error handling.
### 3.1 Core Data Structures
#### Node
```rust
pub struct Node {
key_segment: String,
value: Vec<u8>,
children: Vec<NodeRef>,
is_leaf: bool,
}
```
#### NodeRef
```rust
pub struct NodeRef {
key_part: String,
node_id: u32,
}
```
#### RadixTree
```rust
pub struct RadixTree {
db: ourdb::OurDB,
root_id: u32,
}
```
### 3.2 Public API
```rust
impl RadixTree {
/// Creates a new radix tree with the specified database path
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
// Implementation
}
/// Sets a key-value pair in the tree
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
// Implementation
}
/// Gets a value by key from the tree
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
// Implementation
}
/// Updates the value at a given key prefix
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
// Implementation
}
/// Deletes a key from the tree
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
// Implementation
}
/// Lists all keys with a given prefix
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
// Implementation
}
/// Gets all values for keys with a given prefix
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
// Implementation
}
}
```
### 3.3 Error Handling
```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
#[error("Key not found: {0}")]
KeyNotFound(String),
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("Deserialization error: {0}")]
Deserialization(String),
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
### 3.4 Serialization
The Rust implementation will maintain the same binary serialization format for compatibility:
```rust
const VERSION: u8 = 1;
impl Node {
/// Serializes a node to bytes for storage
fn serialize(&self) -> Vec<u8> {
// Implementation
}
/// Deserializes bytes to a node
fn deserialize(data: &[u8]) -> Result<Self, Error> {
// Implementation
}
}
```
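As a rough illustration (not the final implementation), a serializer following the format described in section 2.3 could look like the sketch below. The 2-byte big-endian length prefix used here for strings is an assumption and must be checked against the V implementation before being relied upon:

```rust
impl Node {
    /// Sketch only: version byte, key segment, value, children, is-leaf flag.
    fn serialize(&self) -> Vec<u8> {
        let mut out = Vec::new();
        out.push(VERSION);
        write_string(&mut out, &self.key_segment);
        // Value: 2-byte length followed by the raw bytes
        out.extend_from_slice(&(self.value.len() as u16).to_be_bytes());
        out.extend_from_slice(&self.value);
        // Children: 2-byte count, then key part + 4-byte node ID per child
        out.extend_from_slice(&(self.children.len() as u16).to_be_bytes());
        for child in &self.children {
            write_string(&mut out, &child.key_part);
            out.extend_from_slice(&child.node_id.to_be_bytes());
        }
        out.push(self.is_leaf as u8);
        out
    }
}

/// Hypothetical helper: 2-byte big-endian length prefix + UTF-8 bytes.
fn write_string(out: &mut Vec<u8>, s: &str) {
    out.extend_from_slice(&(s.len() as u16).to_be_bytes());
    out.extend_from_slice(s.as_bytes());
}
```

Deserialization walks the same layout in reverse, returning `Error::Deserialization` when the buffer is shorter than the declared lengths.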
### 3.5 Integration with OurDB
The Rust implementation will use the existing OurDB Rust implementation:
```rust
impl RadixTree {
fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
let data = self.db.get(node_id)?;
Node::deserialize(&data)
}
fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
let data = node.serialize();
let args = ourdb::OurDBSetArgs {
id: node_id,
data: &data,
};
Ok(self.db.set(args)?)
}
}
```
## 4. Implementation Strategy
### 4.1 Phase 1: Core Data Structures and Serialization
1. Implement the `Node` and `NodeRef` structs
2. Implement serialization and deserialization functions
3. Implement the `Error` enum for error handling
### 4.2 Phase 2: Basic Tree Operations
1. Implement the `RadixTree` struct with OurDB integration
2. Implement the `new()` function for creating a new tree
3. Implement the `get()` and `set()` functions for basic operations
### 4.3 Phase 3: Advanced Tree Operations
1. Implement the `delete()` function for removing keys
2. Implement the `update()` function for updating values
3. Implement the `list()` and `getall()` functions for prefix operations
### 4.4 Phase 4: Testing and Optimization
1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing RadixTree data
## 5. Implementation Considerations
### 5.1 Memory Management
Leverage Rust's ownership model for safe and efficient memory management:
- Use `String` and `Vec<u8>` for data buffers instead of raw pointers
- Use references and borrows to avoid unnecessary copying
- Implement proper RAII for resource management
### 5.2 Error Handling
Use Rust's `Result` type for comprehensive error handling:
- Define custom error types for RadixTree-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait
### 5.3 Performance Optimizations
Identify opportunities for performance improvements:
- Use efficient string operations for prefix matching (see the sketch after this list)
- Minimize database operations by caching nodes when appropriate
- Use iterators for efficient traversal
- Consider using `Cow<str>` for string operations to avoid unnecessary cloning
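For example, the longest-common-prefix helper used during traversal can be written so that the only allocation is the returned `String`; this is a sketch, not the ported code:

```rust
/// Returns the longest common prefix of two keys (UTF-8 safe).
fn get_common_prefix(a: &str, b: &str) -> String {
    let mut end = 0;
    for (ca, cb) in a.chars().zip(b.chars()) {
        if ca != cb {
            break;
        }
        end += ca.len_utf8();
    }
    a[..end].to_string()
}
```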
### 5.4 Compatibility
Ensure compatibility with the V implementation:
- Maintain the same serialization format
- Ensure identical behavior for all operations
- Support reading existing RadixTree data
## 6. Testing Strategy
### 6.1 Unit Tests
Write comprehensive unit tests for each component:
- Test `Node` serialization/deserialization (a round-trip sketch follows this list)
- Test string operations (common prefix, etc.)
- Test error handling
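A round-trip test for the serialization code might look like the following sketch (it assumes the `serialize`/`deserialize` signatures from section 3.4):

```rust
#[test]
fn node_round_trip() {
    let mut node = Node::new("hel".to_string(), b"world".to_vec(), true);
    node.children.push(NodeRef::new("lo".to_string(), 42));

    let bytes = node.serialize();
    let decoded = Node::deserialize(&bytes).expect("deserialize should succeed");

    assert_eq!(decoded.key_segment, node.key_segment);
    assert_eq!(decoded.value, node.value);
    assert_eq!(decoded.is_leaf, node.is_leaf);
    assert_eq!(decoded.children.len(), 1);
    assert_eq!(decoded.children[0].key_part, "lo");
    assert_eq!(decoded.children[0].node_id, 42);
}
```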
### 6.2 Integration Tests
Write integration tests for the complete system:
- Test basic CRUD operations
- Test prefix operations
- Test edge cases (empty keys, very long keys, etc.)
- Test with large datasets
### 6.3 Compatibility Tests
Ensure compatibility with existing RadixTree data:
- Test reading existing V-created RadixTree data
- Test writing data that can be read by the V implementation
### 6.4 Performance Tests
Benchmark performance against the V implementation (a criterion sketch follows this list):
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different tree sizes and key distributions
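A minimal `criterion` benchmark for `set` throughput could be sketched as follows (paths and key patterns are illustrative only):

```rust
use criterion::{criterion_group, criterion_main, Criterion};
use radixtree::RadixTree;

fn bench_set(c: &mut Criterion) {
    let path = std::env::temp_dir().join("radixtree_bench");
    let mut tree = RadixTree::new(path.to_str().unwrap(), true).unwrap();
    let mut i: u64 = 0;

    c.bench_function("radixtree set", |b| {
        b.iter(|| {
            tree.set(&format!("key_{i}"), b"value".to_vec()).unwrap();
            i += 1;
        })
    });
}

criterion_group!(benches, bench_set);
criterion_main!(benches);
```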
## 7. Project Structure
```
radixtree/
├── Cargo.toml
├── src/
│ ├── lib.rs # Public API and re-exports
│ ├── node.rs # Node and NodeRef implementations
│ ├── serialize.rs # Serialization and deserialization
│ ├── error.rs # Error types
│ └── operations.rs # Tree operations implementation
├── tests/
│ ├── basic_test.rs # Basic operations tests
│ ├── prefix_test.rs # Prefix operations tests
│ └── edge_cases.rs # Edge case tests
└── examples/
├── basic.rs # Basic usage example
├── prefix.rs # Prefix operations example
└── performance.rs # Performance benchmark
```
## 8. Dependencies
The Rust implementation will use the following dependencies:
- `ourdb` for persistent storage
- `thiserror` for error handling
- `log` for logging
- `criterion` for benchmarking (dev dependency)
## 9. Compatibility Considerations
To ensure compatibility with the V implementation:
1. Maintain the same serialization format for nodes
2. Ensure identical behavior for all operations
3. Support reading existing RadixTree data
4. Maintain the same performance characteristics
## 10. Future Extensions
Potential future extensions to consider:
1. Async API for non-blocking operations
2. Iterator interface for efficient traversal
3. Batch operations for improved performance
4. Custom serialization formats for specific use cases
5. Compression support for values
6. Concurrency support for parallel operations
## 11. Conclusion
This architecture provides a roadmap for porting RadixTree from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing RadixTree data
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage
## 12. Implementation Files
### 12.1 Cargo.toml
```toml
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]
[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"
[dev-dependencies]
criterion = "0.5.1"
[[bench]]
name = "radixtree_benchmarks"
harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"
```
### 12.2 src/lib.rs
```rust
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.
mod error;
mod node;
mod operations;
mod serialize;
pub use error::Error;
pub use node::{Node, NodeRef};
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
/// RadixTree represents a radix tree data structure with persistent storage.
pub struct RadixTree {
db: OurDB,
root_id: u32,
}
impl RadixTree {
/// Creates a new radix tree with the specified database path.
///
/// # Arguments
///
/// * `path` - The path to the database directory
/// * `reset` - Whether to reset the database if it exists
///
/// # Returns
///
/// A new `RadixTree` instance
///
/// # Errors
///
/// Returns an error if the database cannot be created or opened
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
// Implementation will go here
unimplemented!()
}
/// Sets a key-value pair in the tree.
///
/// # Arguments
///
/// * `key` - The key to set
/// * `value` - The value to set
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Gets a value by key from the tree.
///
/// # Arguments
///
/// * `key` - The key to get
///
/// # Returns
///
/// The value associated with the key
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
// Implementation will go here
unimplemented!()
}
/// Updates the value at a given key prefix.
///
/// # Arguments
///
/// * `prefix` - The key prefix to update
/// * `new_value` - The new value to set
///
/// # Errors
///
/// Returns an error if the prefix is not found or the operation fails
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Deletes a key from the tree.
///
/// # Arguments
///
/// * `key` - The key to delete
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Lists all keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
// Implementation will go here
unimplemented!()
}
/// Gets all values for keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of values for keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
// Implementation will go here
unimplemented!()
}
}
```
### 12.3 src/error.rs
```rust
//! Error types for the RadixTree module.
use thiserror::Error;
/// Error type for RadixTree operations.
#[derive(Debug, Error)]
pub enum Error {
/// Error from OurDB operations.
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
/// Error when a key is not found.
#[error("Key not found: {0}")]
KeyNotFound(String),
/// Error when a prefix is not found.
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
/// Error during serialization.
#[error("Serialization error: {0}")]
Serialization(String),
/// Error during deserialization.
#[error("Deserialization error: {0}")]
Deserialization(String),
/// Error for invalid operations.
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
### 12.4 src/node.rs
```rust
//! Node types for the RadixTree module.
/// Represents a node in the radix tree.
pub struct Node {
/// The segment of the key stored at this node.
pub key_segment: String,
/// Value stored at this node (empty if not a leaf).
pub value: Vec<u8>,
/// References to child nodes.
pub children: Vec<NodeRef>,
/// Whether this node is a leaf node.
pub is_leaf: bool,
}
/// Reference to a node in the database.
pub struct NodeRef {
/// The key segment for this child.
pub key_part: String,
/// Database ID of the node.
pub node_id: u32,
}
impl Node {
/// Creates a new node.
pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
Self {
key_segment,
value,
children: Vec::new(),
is_leaf,
}
}
/// Creates a new root node.
pub fn new_root() -> Self {
Self {
key_segment: String::new(),
value: Vec::new(),
children: Vec::new(),
is_leaf: false,
}
}
}
impl NodeRef {
/// Creates a new node reference.
pub fn new(key_part: String, node_id: u32) -> Self {
Self {
key_part,
node_id,
}
}
}
```
### 12.5 src/serialize.rs
```rust
//! Serialization and deserialization for RadixTree nodes.
use crate::error::Error;
use crate::node::{Node, NodeRef};
/// Current binary format version.
const VERSION: u8 = 1;
impl Node {
/// Serializes a node to bytes for storage.
pub fn serialize(&self) -> Vec<u8> {
// Implementation will go here
unimplemented!()
}
/// Deserializes bytes to a node.
pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
// Implementation will go here
unimplemented!()
}
}
```
### 12.6 src/operations.rs
```rust
//! Implementation of RadixTree operations.
use crate::error::Error;
use crate::node::{Node, NodeRef};
use crate::RadixTree;
impl RadixTree {
/// Helper function to get a node from the database.
pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to save a node to the database.
pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to find all keys with a given prefix.
fn find_keys_with_prefix(
&mut self,
node_id: u32,
current_path: &str,
prefix: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to recursively collect all keys under a node.
fn collect_all_keys(
&mut self,
node_id: u32,
current_path: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to get the common prefix of two strings.
fn get_common_prefix(a: &str, b: &str) -> String {
// Implementation will go here
unimplemented!()
}
}
```
### 12.7 examples/basic_usage.rs
```rust
//! Basic usage example for RadixTree.
use radixtree::RadixTree;
fn main() -> Result<(), radixtree::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("radixtree_example");
// radixtree::Error has no io::Error variant, so avoid `?` on fs calls here
std::fs::create_dir_all(&db_path).expect("failed to create example directory");
println!("Creating radix tree at: {}", db_path.display());
// Create a new radix tree
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
// Store some data
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
tree.set("helicopter", b"flying".to_vec())?;
// Retrieve and print the data
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value));
// List keys with prefix
let keys = tree.list("hel")?;
println!("Keys with prefix 'hel': {:?}", keys);
// Get all values with prefix
let values = tree.getall("hel")?;
println!("Values with prefix 'hel':");
for (i, value) in values.iter().enumerate() {
println!(" {}: {}", i, String::from_utf8_lossy(value));
}
// Delete a key
tree.delete("help")?;
println!("Deleted 'help'");
// Verify deletion
let keys_after = tree.list("hel")?;
println!("Keys with prefix 'hel' after deletion: {:?}", keys_after);
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path).expect("failed to remove example directory");
println!("Cleaned up database directory");
} else {
println!("Database kept at: {}", db_path.display());
}
Ok(())
}
```

radixtree/Cargo.lock generated

@@ -1,815 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bitflags"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_lex"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "errno"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hermit-abi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
[[package]]
name = "linux-raw-sys"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "radixtree"
version = "0.1.0"
dependencies = [
"criterion",
"log",
"ourdb",
"tempfile",
"thiserror",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.15",
]
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustix"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "rustversion"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "syn"
version = "2.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
dependencies = [
"fastrand",
"getrandom 0.3.2",
"once_cell",
"rustix",
"windows-sys",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -1,27 +0,0 @@
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]
[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"
[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"
[[bench]]
name = "radixtree_benchmarks"
harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"

View File

@@ -1,265 +0,0 @@
# Migration Guide: V to Rust RadixTree
This document provides guidance for migrating from the V implementation of RadixTree to the Rust implementation.
## API Changes
The Rust implementation keeps the same set of operations as the V implementation, with a few idiomatic Rust changes:
### V API
```v
// Create a new radix tree
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)!
// Set a key-value pair
rt.set('test', 'value1'.bytes())!
// Get a value by key
value := rt.get('test')!
// Update a value at a prefix
rt.update('prefix', 'new_value'.bytes())!
// Delete a key
rt.delete('test')!
// List keys with a prefix
keys := rt.list('prefix')!
// Get all values with a prefix
values := rt.getall('prefix')!
```
### Rust API
```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radixtree_test", true)?;
// Set a key-value pair
tree.set("test", b"value1".to_vec())?;
// Get a value by key
let value = tree.get("test")?;
// Update a value at a prefix
tree.update("prefix", b"new_value".to_vec())?;
// Delete a key
tree.delete("test")?;
// List keys with a prefix
let keys = tree.list("prefix")?;
// Get all values with a prefix
let values = tree.getall("prefix")?;
```
## Key Differences
1. **Error Handling**: The Rust implementation uses Rust's `Result` type for error handling, while the V implementation uses V's `!` operator.
2. **String Handling**: The Rust implementation uses Rust's `&str` for string parameters and `String` for string return values, while the V implementation uses V's `string` type.
3. **Binary Data**: The Rust implementation uses Rust's `Vec<u8>` for binary data, while the V implementation uses V's `[]u8` type.
4. **Constructor**: The Rust implementation uses a constructor function with separate parameters, while the V implementation uses a struct with named parameters.
5. **Ownership**: The Rust implementation follows Rust's ownership model; all tree operations, including reads such as `get` and `list`, take a mutable reference to the tree (`&mut self`).
## Data Compatibility
The Rust implementation maintains data compatibility with the V implementation:
- The same serialization format is used for nodes
- The same OurDB storage format is used
- Existing RadixTree data created with the V implementation can be read directly by the Rust implementation, as in the sketch below
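For example, the following minimal sketch (the directory and key name are placeholders) opens a database directory that was previously written by the V implementation and reads a value through the Rust API; no conversion step is needed:
```rust
use radixtree::RadixTree;

fn main() -> Result<(), radixtree::Error> {
    // Open the existing database without resetting it (reset = false).
    // "/var/lib/radixtree_v_data" and "some_key" are placeholders for data
    // that the V implementation wrote earlier.
    let mut tree = RadixTree::new("/var/lib/radixtree_v_data", false)?;
    let value = tree.get("some_key")?;
    println!("some_key: {}", String::from_utf8_lossy(&value));
    Ok(())
}
```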
## Migration Steps
1. **Update Dependencies**: Replace the V RadixTree dependency with the Rust RadixTree dependency in your project.
2. **Update Import Statements**: Replace V import statements with Rust use statements.
```v
// V
import freeflowuniverse.herolib.data.radixtree
```
```rust
// Rust
use radixtree::RadixTree;
```
3. **Update Constructor Calls**: Replace V constructor calls with Rust constructor calls.
```v
// V
mut rt := radixtree.new(path: '/path/to/db', reset: false)!
```
```rust
// Rust
let mut tree = RadixTree::new("/path/to/db", false)?;
```
4. **Update Method Calls**: Replace V method calls with Rust method calls.
```v
// V
rt.set('key', 'value'.bytes())!
```
```rust
// Rust
tree.set("key", b"value".to_vec())?;
```
5. **Update Error Handling**: Replace V error handling with Rust error handling.
```v
// V
if value := rt.get('key') {
println('Found: ${value.bytestr()}')
} else {
println('Error: ${err}')
}
```
```rust
// Rust
match tree.get("key") {
Ok(value) => println!("Found: {}", String::from_utf8_lossy(&value)),
Err(e) => println!("Error: {}", e),
}
```
6. **Update String Conversions**: Replace V string conversions with Rust string conversions.
```v
// V
value.bytestr() // Convert []u8 to string
```
```rust
// Rust
String::from_utf8_lossy(&value) // Convert Vec<u8> to string
```
## Example Migration
### V Code
```v
module main
import freeflowuniverse.herolib.data.radixtree
fn main() {
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true) or {
println('Error creating RadixTree: ${err}')
return
}
rt.set('hello', 'world'.bytes()) or {
println('Error setting key: ${err}')
return
}
rt.set('help', 'me'.bytes()) or {
println('Error setting key: ${err}')
return
}
if value := rt.get('hello') {
println('hello: ${value.bytestr()}')
} else {
println('Error getting key: ${err}')
return
}
keys := rt.list('hel') or {
println('Error listing keys: ${err}')
return
}
println('Keys with prefix "hel": ${keys}')
values := rt.getall('hel') or {
println('Error getting all values: ${err}')
return
}
println('Values with prefix "hel":')
for i, value in values {
println(' ${i}: ${value.bytestr()}')
}
rt.delete('help') or {
println('Error deleting key: ${err}')
return
}
println('Deleted "help"')
}
```
### Rust Code
```rust
use radixtree::RadixTree;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut tree = RadixTree::new("/tmp/radixtree_test", true)
.map_err(|e| format!("Error creating RadixTree: {}", e))?;
tree.set("hello", b"world".to_vec())
.map_err(|e| format!("Error setting key: {}", e))?;
tree.set("help", b"me".to_vec())
.map_err(|e| format!("Error setting key: {}", e))?;
let value = tree.get("hello")
.map_err(|e| format!("Error getting key: {}", e))?;
println!("hello: {}", String::from_utf8_lossy(&value));
let keys = tree.list("hel")
.map_err(|e| format!("Error listing keys: {}", e))?;
println!("Keys with prefix \"hel\": {:?}", keys);
let values = tree.getall("hel")
.map_err(|e| format!("Error getting all values: {}", e))?;
println!("Values with prefix \"hel\":");
for (i, value) in values.iter().enumerate() {
println!(" {}: {}", i, String::from_utf8_lossy(value));
}
tree.delete("help")
.map_err(|e| format!("Error deleting key: {}", e))?;
println!("Deleted \"help\"");
Ok(())
}
```
## Performance Considerations
The Rust implementation should provide similar or better performance compared to the V implementation. However, there are some considerations:
1. **Memory Usage**: The Rust implementation may have different memory usage patterns due to Rust's ownership model.
2. **Error Handling**: The Rust implementation uses Rust's `Result` type, which may have different performance characteristics compared to V's error handling.
3. **String Handling**: The Rust implementation uses Rust's string types, which may have different performance characteristics compared to V's string types.
## Troubleshooting
If you encounter issues during migration, check the following:
1. **Data Compatibility**: Ensure that the data format is compatible between the V and Rust implementations.
2. **API Usage**: Ensure that you're using the correct API for the Rust implementation.
3. **Error Handling**: Ensure that you're handling errors correctly in the Rust implementation.
4. **String Encoding**: Ensure that string encoding is consistent between the V and Rust implementations.
If you encounter any issues that are not covered in this guide, please report them to the project maintainers.

View File

@@ -1,189 +0,0 @@
# RadixTree
A persistent radix tree implementation in Rust using OurDB for storage.
## Overview
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This implementation provides a persistent radix tree that can be used for efficient prefix-based key operations, such as auto-complete, routing tables, and more.
A radix tree (also known as a Patricia trie or radix trie) is a space-optimized tree data structure that enables efficient string key operations. Unlike a standard trie, where each node represents a single character, a radix tree compresses paths by allowing nodes to represent multiple characters (key segments); for example, the keys `hello` and `help` can share a single `hel` segment rather than three separate single-character nodes.
Key characteristics:
- Each node stores a segment of a key (not just a single character)
- Nodes can have multiple children, each representing a different branch
- Leaf nodes contain the actual values
- Optimizes storage by compressing common prefixes
## Features
- Efficient prefix-based key operations
- Persistent storage using OurDB backend
- Memory-efficient storage of strings with common prefixes
- Support for binary values
- Thread-safe operations through OurDB
## Usage
Add the dependency to your `Cargo.toml`:
```toml
[dependencies]
radixtree = { path = "../radixtree" }
```
### Basic Example
```rust
use radixtree::RadixTree;
fn main() -> Result<(), radixtree::Error> {
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radix", false)?;
// Set key-value pairs
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
// Get values by key
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world
// List keys by prefix
let keys = tree.list("hel")?; // Returns ["hello", "help"]
println!("Keys with prefix 'hel': {:?}", keys);
// Get all values by prefix
let values = tree.getall("hel")?; // Returns [b"world", b"me"]
// Delete keys
tree.delete("help")?;
Ok(())
}
```
## API
### Creating a RadixTree
```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radix", false)?;
// Create a new radix tree and reset if it exists
let mut tree = RadixTree::new("/tmp/radix", true)?;
```
### Setting Values
```rust
// Set a key-value pair
tree.set("key", b"value".to_vec())?;
```
### Getting Values
```rust
// Get a value by key
let value = tree.get("key")?;
```
### Updating Values
```rust
// Update a value at a given prefix
tree.update("prefix", b"new_value".to_vec())?;
```
### Deleting Keys
```rust
// Delete a key
tree.delete("key")?;
```
### Listing Keys by Prefix
```rust
// List all keys with a given prefix
let keys = tree.list("prefix")?;
```
### Getting All Values by Prefix
```rust
// Get all values for keys with a given prefix
let values = tree.getall("prefix")?;
```
## Performance Characteristics
- Search: O(k) where k is the key length
- Insert: O(k) for new keys, may require node splitting
- Delete: O(k) plus potential node cleanup
- Space: O(n) where n is the total length of all keys
## Use Cases
RadixTree is particularly useful for:
- Prefix-based searching
- IP routing tables
- Dictionary implementations
- Auto-complete systems
- File system paths
- Any application requiring efficient string key operations with persistence
## Implementation Details
The RadixTree implementation uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
- Node serialization includes version tracking for format evolution
For more detailed information about the implementation, see the [ARCHITECTURE.md](./ARCHITECTURE.md) file.
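As a rough illustration of what each stored record contains, the sketch below mirrors the byte layout produced by the node serializer (a version byte, a length-prefixed key segment, a length-prefixed value, length-prefixed child references, and a trailing leaf flag). It is only a sizing aid, not part of the crate's API:
```rust
/// Sketch only: estimates the size in bytes of one serialized node record,
/// following the layout written by the node serializer (little-endian lengths).
fn serialized_node_size(key_segment: &str, value_len: usize, child_key_parts: &[&str]) -> usize {
    let mut size = 1;                  // version byte
    size += 2 + key_segment.len();     // u16 length + key segment bytes
    size += 2 + value_len;             // u16 length + value bytes
    size += 2;                         // u16 child count
    for part in child_key_parts {
        size += 2 + part.len() + 4;    // u16 length + key part bytes + u32 node id
    }
    size + 1                           // trailing leaf flag byte
}

fn main() {
    // e.g. a node with segment "hel", a 5-byte value and two child references
    println!("{} bytes", serialized_node_size("hel", 5, &["lo", "p"]));
}
```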
## Running Tests
The project includes a comprehensive test suite that verifies all functionality:
```bash
# Run all tests
cargo test
# Run specific test file
cargo test --test basic_test
cargo test --test prefix_test
cargo test --test getall_test
cargo test --test serialize_test
```
## Running Examples
The project includes example applications that demonstrate how to use the RadixTree:
```bash
# Run the basic usage example
cargo run --example basic_usage
# Run the prefix operations example
cargo run --example prefix_operations
```
## Benchmarking
The project includes benchmarks to measure performance:
```bash
# Run all benchmarks
cargo bench
# Run specific benchmark
cargo bench -- set
cargo bench -- get
cargo bench -- prefix_operations
```
## License
This project is licensed under the same license as the HeroCode project.

View File

@@ -1,141 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use radixtree::RadixTree;
use std::path::PathBuf;
use tempfile::tempdir;
fn criterion_benchmark(c: &mut Criterion) {
// Create a temporary directory for benchmarks
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Benchmark set operation
c.bench_function("set", |b| {
let mut tree = RadixTree::new(db_path, true).unwrap();
let mut i = 0;
b.iter(|| {
let key = format!("benchmark_key_{}", i);
let value = format!("benchmark_value_{}", i).into_bytes();
tree.set(&key, value).unwrap();
i += 1;
});
});
// Setup tree with data for get/list/delete benchmarks
let mut setup_tree = RadixTree::new(db_path, true).unwrap();
for i in 0..1000 {
let key = format!("benchmark_key_{}", i);
let value = format!("benchmark_value_{}", i).into_bytes();
setup_tree.set(&key, value).unwrap();
}
// Benchmark get operation
c.bench_function("get", |b| {
let mut tree = RadixTree::new(db_path, false).unwrap();
let mut i = 0;
b.iter(|| {
let key = format!("benchmark_key_{}", i % 1000);
let _value = tree.get(&key).unwrap();
i += 1;
});
});
// Benchmark list operation
c.bench_function("list", |b| {
let mut tree = RadixTree::new(db_path, false).unwrap();
b.iter(|| {
let _keys = tree.list("benchmark_key_1").unwrap();
});
});
// Benchmark getall operation
c.bench_function("getall", |b| {
let mut tree = RadixTree::new(db_path, false).unwrap();
b.iter(|| {
let _values = tree.getall("benchmark_key_1").unwrap();
});
});
// Benchmark update operation
c.bench_function("update", |b| {
let mut tree = RadixTree::new(db_path, false).unwrap();
let mut i = 0;
b.iter(|| {
let key = format!("benchmark_key_{}", i % 1000);
let new_value = format!("updated_value_{}", i).into_bytes();
tree.update(&key, new_value).unwrap();
i += 1;
});
});
// Benchmark delete operation
c.bench_function("delete", |b| {
// Create a fresh tree for deletion benchmarks
let delete_dir = tempdir().expect("Failed to create temp directory");
let delete_path = delete_dir.path().to_str().unwrap();
let mut tree = RadixTree::new(delete_path, true).unwrap();
// Setup keys to delete
for i in 0..1000 {
let key = format!("delete_key_{}", i);
let value = format!("delete_value_{}", i).into_bytes();
tree.set(&key, value).unwrap();
}
let mut i = 0;
b.iter(|| {
let key = format!("delete_key_{}", i % 1000);
// Only try to delete if it exists
if tree.get(&key).is_ok() {
tree.delete(&key).unwrap();
}
i += 1;
});
});
// Benchmark prefix operations with varying tree sizes
let mut group = c.benchmark_group("prefix_operations");
for &size in &[100, 1000, 10000] {
// Create a fresh tree for each size
let size_dir = tempdir().expect("Failed to create temp directory");
let size_path = size_dir.path().to_str().unwrap();
let mut tree = RadixTree::new(size_path, true).unwrap();
// Insert data with common prefixes
for i in 0..size {
let prefix = match i % 5 {
0 => "user",
1 => "post",
2 => "comment",
3 => "product",
_ => "category",
};
let key = format!("{}_{}", prefix, i);
let value = format!("value_{}", i).into_bytes();
tree.set(&key, value).unwrap();
}
// Benchmark list operation for this size
group.bench_function(format!("list_size_{}", size), |b| {
b.iter(|| {
for prefix in &["user", "post", "comment", "product", "category"] {
let _keys = tree.list(prefix).unwrap();
}
});
});
// Benchmark getall operation for this size
group.bench_function(format!("getall_size_{}", size), |b| {
b.iter(|| {
for prefix in &["user", "post", "comment", "product", "category"] {
let _values = tree.getall(prefix).unwrap();
}
});
});
}
group.finish();
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -1,51 +0,0 @@
use radixtree::RadixTree;
use std::path::PathBuf;
fn main() -> Result<(), radixtree::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("radixtree_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating radix tree at: {}", db_path.display());
// Create a new radix tree
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
// Store some data
println!("Storing data...");
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
tree.set("helicopter", b"flying".to_vec())?;
// Retrieve and print the data
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value));
// Update a value
println!("Updating value...");
tree.update("hello", b"updated world".to_vec())?;
// Retrieve the updated value
let updated_value = tree.get("hello")?;
println!("hello (updated): {}", String::from_utf8_lossy(&updated_value));
// Delete a key
println!("Deleting 'help'...");
tree.delete("help")?;
// Try to retrieve the deleted key (should fail)
match tree.get("help") {
Ok(value) => println!("Unexpected: help still exists with value: {}", String::from_utf8_lossy(&value)),
Err(e) => println!("As expected, help was deleted: {}", e),
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("Cleaned up database directory");
} else {
println!("Database kept at: {}", db_path.display());
}
Ok(())
}

View File

@@ -1,121 +0,0 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};
// Use much smaller batches to avoid hitting OurDB's size limit
const BATCH_SIZE: usize = 1_000;
const NUM_BATCHES: usize = 1_000; // Total records: 1,000,000
const PROGRESS_INTERVAL: usize = 100;
fn main() -> Result<(), radixtree::Error> {
// Overall metrics
let total_start_time = Instant::now();
let mut total_records_inserted = 0;
let mut batch_times = Vec::with_capacity(NUM_BATCHES);
println!("Will insert up to {} records in batches of {}",
BATCH_SIZE * NUM_BATCHES, BATCH_SIZE);
// Process in batches to avoid OurDB size limits
for batch in 0..NUM_BATCHES {
// Create a new database for each batch
let batch_path = std::env::temp_dir().join(format!("radixtree_batch_{}", batch));
// Clean up any existing database
if batch_path.exists() {
std::fs::remove_dir_all(&batch_path)?;
}
std::fs::create_dir_all(&batch_path)?;
println!("\nBatch {}/{}: Creating new radix tree...", batch + 1, NUM_BATCHES);
let mut tree = RadixTree::new(batch_path.to_str().unwrap(), true)?;
let batch_start_time = Instant::now();
let mut last_progress_time = Instant::now();
let mut last_progress_count = 0;
// Insert records for this batch
for i in 0..BATCH_SIZE {
let global_index = batch * BATCH_SIZE + i;
let key = format!("key:{:08}", global_index);
let value = format!("val{}", global_index).into_bytes();
tree.set(&key, value)?;
// Show progress at intervals
if (i + 1) % PROGRESS_INTERVAL == 0 || i == BATCH_SIZE - 1 {
let records_since_last = i + 1 - last_progress_count;
let time_since_last = last_progress_time.elapsed();
let records_per_second = records_since_last as f64 / time_since_last.as_secs_f64();
print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
i + 1, BATCH_SIZE,
(i + 1) as f64 / BATCH_SIZE as f64 * 100.0,
records_per_second);
io::stdout().flush().unwrap();
last_progress_time = Instant::now();
last_progress_count = i + 1;
}
}
let batch_duration = batch_start_time.elapsed();
batch_times.push(batch_duration);
total_records_inserted += BATCH_SIZE;
println!("\nBatch {}/{} completed in {:?} ({:.2} records/sec)",
batch + 1, NUM_BATCHES,
batch_duration,
BATCH_SIZE as f64 / batch_duration.as_secs_f64());
// Test random access performance for this batch
println!("Testing access performance for batch {}...", batch + 1);
let mut total_get_time = Duration::new(0, 0);
let num_samples = 100;
// Use a simple distribution pattern
for i in 0..num_samples {
// Distribute samples across the batch
let sample_id = batch * BATCH_SIZE + (i * (BATCH_SIZE / num_samples));
let key = format!("key:{:08}", sample_id);
let get_start = Instant::now();
let _ = tree.get(&key)?;
total_get_time += get_start.elapsed();
}
println!("Average time to retrieve a record: {:?}",
total_get_time / num_samples as u32);
// Test prefix search performance
println!("Testing prefix search performance...");
let prefix = format!("key:{:02}", batch % 100);
let list_start = Instant::now();
let keys = tree.list(&prefix)?;
let list_duration = list_start.elapsed();
println!("Found {} keys with prefix '{}' in {:?}",
keys.len(), prefix, list_duration);
}
// Overall performance summary
let total_duration = total_start_time.elapsed();
println!("\n\nPerformance Summary:");
println!("Total time to insert {} records: {:?}", total_records_inserted, total_duration);
println!("Average insertion rate: {:.2} records/second",
total_records_inserted as f64 / total_duration.as_secs_f64());
// Show performance trend
println!("\nPerformance Trend (batch number vs. time):");
for (i, duration) in batch_times.iter().enumerate() {
if i % 10 == 0 || i == batch_times.len() - 1 { // Only show every 10th point
println!(" Batch {}: {:?} ({:.2} records/sec)",
i + 1,
duration,
BATCH_SIZE as f64 / duration.as_secs_f64());
}
}
Ok(())
}

View File

@@ -1,134 +0,0 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};
// Number of records to insert
const TOTAL_RECORDS: usize = 1_000_000;
// How often to report progress (every X records)
const PROGRESS_INTERVAL: usize = 10_000;
// How many records to use for performance sampling
const PERFORMANCE_SAMPLE_SIZE: usize = 1000;
fn main() -> Result<(), radixtree::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("radixtree_performance_test");
// Completely remove and recreate the directory to ensure a clean start
if db_path.exists() {
std::fs::remove_dir_all(&db_path)?;
}
std::fs::create_dir_all(&db_path)?;
println!("Creating radix tree at: {}", db_path.display());
println!("Will insert {} records and show progress...", TOTAL_RECORDS);
// Create a new radix tree
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
// Track overall time
let start_time = Instant::now();
// Track performance metrics
let mut insertion_times = Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL);
let mut last_batch_time = Instant::now();
let mut last_batch_records = 0;
// Insert records and track progress
for i in 0..TOTAL_RECORDS {
let key = format!("key:{:08}", i);
// Use smaller values to avoid exceeding OurDB's size limit
let value = format!("val{}", i).into_bytes();
// Time the insertion of every Nth record for performance sampling
if i % PERFORMANCE_SAMPLE_SIZE == 0 {
let insert_start = Instant::now();
tree.set(&key, value)?;
let insert_duration = insert_start.elapsed();
// Only print detailed timing for specific samples to avoid flooding output
if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
println!("Record {}: Insertion took {:?}", i, insert_duration);
}
} else {
tree.set(&key, value)?;
}
// Show progress at intervals
if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
let records_in_batch = i + 1 - last_batch_records;
let batch_duration = last_batch_time.elapsed();
let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();
insertion_times.push((i + 1, batch_duration));
print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
i + 1, TOTAL_RECORDS,
(i + 1) as f64 / TOTAL_RECORDS as f64 * 100.0,
records_per_second);
io::stdout().flush().unwrap();
last_batch_time = Instant::now();
last_batch_records = i + 1;
}
}
let total_duration = start_time.elapsed();
println!("\n\nPerformance Summary:");
println!("Total time to insert {} records: {:?}", TOTAL_RECORDS, total_duration);
println!("Average insertion rate: {:.2} records/second",
TOTAL_RECORDS as f64 / total_duration.as_secs_f64());
// Show performance trend
println!("\nPerformance Trend (records inserted vs. time per batch):");
for (i, (record_count, duration)) in insertion_times.iter().enumerate() {
if i % 10 == 0 || i == insertion_times.len() - 1 { // Only show every 10th point to avoid too much output
println!(" After {} records: {:?} for {} records ({:.2} records/sec)",
record_count,
duration,
PROGRESS_INTERVAL,
PROGRESS_INTERVAL as f64 / duration.as_secs_f64());
}
}
// Test access performance with distributed samples
println!("\nTesting access performance with distributed samples...");
let mut total_get_time = Duration::new(0, 0);
let num_samples = 1000;
// Use a simple distribution pattern instead of random
for i in 0..num_samples {
// Distribute samples across the entire range
let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
let key = format!("key:{:08}", sample_id);
let get_start = Instant::now();
let _ = tree.get(&key)?;
total_get_time += get_start.elapsed();
}
println!("Average time to retrieve a record: {:?}",
total_get_time / num_samples as u32);
// Test prefix search performance
println!("\nTesting prefix search performance...");
let prefixes = ["key:0", "key:1", "key:5", "key:9"];
for prefix in &prefixes {
let list_start = Instant::now();
let keys = tree.list(prefix)?;
let list_duration = list_start.elapsed();
println!("Found {} keys with prefix '{}' in {:?}",
keys.len(), prefix, list_duration);
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("\nCleaned up database directory");
} else {
println!("\nDatabase kept at: {}", db_path.display());
}
Ok(())
}

View File

@@ -1,97 +0,0 @@
use radixtree::RadixTree;
use std::path::PathBuf;
fn main() -> Result<(), radixtree::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("radixtree_prefix_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating radix tree at: {}", db_path.display());
// Create a new radix tree
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
// Store data with common prefixes
println!("Storing data with common prefixes...");
// User data
tree.set("user:1:name", b"Alice".to_vec())?;
tree.set("user:1:email", b"alice@example.com".to_vec())?;
tree.set("user:2:name", b"Bob".to_vec())?;
tree.set("user:2:email", b"bob@example.com".to_vec())?;
// Post data
tree.set("post:1:title", b"First Post".to_vec())?;
tree.set("post:1:content", b"Hello World!".to_vec())?;
tree.set("post:2:title", b"Second Post".to_vec())?;
tree.set("post:2:content", b"Another post content".to_vec())?;
// Demonstrate listing keys with a prefix
println!("\nListing keys with prefix 'user:1:'");
let user1_keys = tree.list("user:1:")?;
for key in &user1_keys {
println!(" Key: {}", key);
}
println!("\nListing keys with prefix 'post:'");
let post_keys = tree.list("post:")?;
for key in &post_keys {
println!(" Key: {}", key);
}
// Demonstrate getting all values with a prefix
println!("\nGetting all values with prefix 'user:1:'");
let user1_values = tree.getall("user:1:")?;
for (i, value) in user1_values.iter().enumerate() {
println!(" Value {}: {}", i + 1, String::from_utf8_lossy(value));
}
// Demonstrate finding all user names
println!("\nFinding all user names (prefix 'user:*:name')");
let mut user_names = Vec::new();
let all_keys = tree.list("user:")?;
for key in all_keys {
if key.ends_with(":name") {
if let Ok(value) = tree.get(&key) {
user_names.push((key, String::from_utf8_lossy(&value).to_string()));
}
}
}
for (key, name) in user_names {
println!(" {}: {}", key, name);
}
// Demonstrate updating values with a specific prefix
println!("\nUpdating all post titles...");
let post_title_keys = tree.list("post:")?.into_iter().filter(|k| k.ends_with(":title")).collect::<Vec<_>>();
for key in post_title_keys {
let old_value = tree.get(&key)?;
let old_title = String::from_utf8_lossy(&old_value);
let new_title = format!("UPDATED: {}", old_title);
println!(" Updating '{}' to '{}'", old_title, new_title);
tree.update(&key, new_title.as_bytes().to_vec())?;
}
// Verify updates
println!("\nVerifying updates:");
let post_keys = tree.list("post:")?;
for key in post_keys {
if key.ends_with(":title") {
let value = tree.get(&key)?;
println!(" {}: {}", key, String::from_utf8_lossy(&value));
}
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("\nCleaned up database directory");
} else {
println!("\nDatabase kept at: {}", db_path.display());
}
Ok(())
}

View File

@@ -1,35 +0,0 @@
//! Error types for the RadixTree module.
use thiserror::Error;
/// Error type for RadixTree operations.
#[derive(Debug, Error)]
pub enum Error {
/// Error from OurDB operations.
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
/// Error when a key is not found.
#[error("Key not found: {0}")]
KeyNotFound(String),
/// Error when a prefix is not found.
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
/// Error during serialization.
#[error("Serialization error: {0}")]
Serialization(String),
/// Error during deserialization.
#[error("Deserialization error: {0}")]
Deserialization(String),
/// Error for invalid operations.
#[error("Invalid operation: {0}")]
InvalidOperation(String),
/// Error for I/O operations.
#[error("I/O error: {0}")]
IO(#[from] std::io::Error),
}

View File

@@ -1,133 +0,0 @@
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.
mod error;
mod node;
mod operations;
mod serialize;
pub use error::Error;
pub use node::{Node, NodeRef};
use ourdb::OurDB;
/// RadixTree represents a radix tree data structure with persistent storage.
pub struct RadixTree {
db: OurDB,
root_id: u32,
}
impl RadixTree {
/// Creates a new radix tree with the specified database path.
///
/// # Arguments
///
/// * `path` - The path to the database directory
/// * `reset` - Whether to reset the database if it exists
///
/// # Returns
///
/// A new `RadixTree` instance
///
/// # Errors
///
/// Returns an error if the database cannot be created or opened
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
operations::new_radix_tree(path, reset)
}
/// Sets a key-value pair in the tree.
///
/// # Arguments
///
/// * `key` - The key to set
/// * `value` - The value to set
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
operations::set(self, key, value)
}
/// Gets a value by key from the tree.
///
/// # Arguments
///
/// * `key` - The key to get
///
/// # Returns
///
/// The value associated with the key
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
operations::get(self, key)
}
/// Updates the value at a given key prefix.
///
/// # Arguments
///
/// * `prefix` - The key prefix to update
/// * `new_value` - The new value to set
///
/// # Errors
///
/// Returns an error if the prefix is not found or the operation fails
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
operations::update(self, prefix, new_value)
}
/// Deletes a key from the tree.
///
/// # Arguments
///
/// * `key` - The key to delete
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
operations::delete(self, key)
}
/// Lists all keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
operations::list(self, prefix)
}
/// Gets all values for keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of values for keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
operations::getall(self, prefix)
}
}

View File

@@ -1,59 +0,0 @@
//! Node types for the RadixTree module.
/// Represents a node in the radix tree.
#[derive(Debug, Clone, PartialEq)]
pub struct Node {
/// The segment of the key stored at this node.
pub key_segment: String,
/// Value stored at this node (empty if not a leaf).
pub value: Vec<u8>,
/// References to child nodes.
pub children: Vec<NodeRef>,
/// Whether this node is a leaf node.
pub is_leaf: bool,
}
/// Reference to a node in the database.
#[derive(Debug, Clone, PartialEq)]
pub struct NodeRef {
/// The key segment for this child.
pub key_part: String,
/// Database ID of the node.
pub node_id: u32,
}
impl Node {
/// Creates a new node.
pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
Self {
key_segment,
value,
children: Vec::new(),
is_leaf,
}
}
/// Creates a new root node.
pub fn new_root() -> Self {
Self {
key_segment: String::new(),
value: Vec::new(),
children: Vec::new(),
is_leaf: false,
}
}
}
impl NodeRef {
/// Creates a new node reference.
pub fn new(key_part: String, node_id: u32) -> Self {
Self {
key_part,
node_id,
}
}
}

View File

@@ -1,508 +0,0 @@
//! Implementation of RadixTree operations.
use crate::error::Error;
use crate::node::{Node, NodeRef};
use crate::RadixTree;
use crate::serialize::get_common_prefix;
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
/// Creates a new radix tree with the specified database path.
pub fn new_radix_tree(path: &str, reset: bool) -> Result<RadixTree, Error> {
let config = OurDBConfig {
path: PathBuf::from(path),
incremental_mode: true,
file_size: Some(1024 * 1024 * 10), // 10MB file size for better performance with large datasets
keysize: Some(6), // Use keysize=6 to support multiple files (file_nr + position)
reset: None, // Don't reset existing database
};
let mut db = OurDB::new(config)?;
    // Note: the `reset` flag is not applied here; OurDB is opened as-is and a
    // fresh root node is only created when the database is brand new, which is
    // detected by checking whether the next record ID is still 1.
let root_id = if db.get_next_id()? == 1 {
// Create a new root node
let root = Node::new_root();
let root_id = db.set(OurDBSetArgs {
id: None,
data: &root.serialize(),
})?;
// First ID should be 1
assert_eq!(root_id, 1);
root_id
} else {
// Use existing root node
1 // Root node always has ID 1
};
Ok(RadixTree {
db,
root_id,
})
}
/// Sets a key-value pair in the tree.
pub fn set(tree: &mut RadixTree, key: &str, value: Vec<u8>) -> Result<(), Error> {
let mut current_id = tree.root_id;
let mut offset = 0;
// Handle empty key case
if key.is_empty() {
let mut root_node = tree.get_node(current_id)?;
root_node.is_leaf = true;
root_node.value = value;
tree.save_node(Some(current_id), &root_node)?;
return Ok(());
}
while offset < key.len() {
let mut node = tree.get_node(current_id)?;
// Find matching child
let mut matched_child = None;
for (i, child) in node.children.iter().enumerate() {
if key[offset..].starts_with(&child.key_part) {
matched_child = Some((i, child.clone()));
break;
}
}
if matched_child.is_none() {
// No matching child found, create new leaf node
let key_part = key[offset..].to_string();
let new_node = Node {
key_segment: key_part.clone(),
value: value.clone(),
children: Vec::new(),
is_leaf: true,
};
let new_id = tree.save_node(None, &new_node)?;
// Create new child reference and update parent node
node.children.push(NodeRef {
key_part,
node_id: new_id,
});
tree.save_node(Some(current_id), &node)?;
return Ok(());
}
let (child_index, mut child) = matched_child.unwrap();
let common_prefix = get_common_prefix(&key[offset..], &child.key_part);
if common_prefix.len() < child.key_part.len() {
// Split existing node
let child_node = tree.get_node(child.node_id)?;
// Create new intermediate node
let new_node = Node {
key_segment: child.key_part[common_prefix.len()..].to_string(),
value: child_node.value.clone(),
children: child_node.children.clone(),
is_leaf: child_node.is_leaf,
};
let new_id = tree.save_node(None, &new_node)?;
// Update current node
node.children[child_index] = NodeRef {
key_part: common_prefix.to_string(),
node_id: new_id,
};
tree.save_node(Some(current_id), &node)?;
// Update child node reference
child.node_id = new_id;
}
if offset + common_prefix.len() == key.len() {
// Update value at existing node
let mut child_node = tree.get_node(child.node_id)?;
child_node.value = value;
child_node.is_leaf = true;
tree.save_node(Some(child.node_id), &child_node)?;
return Ok(());
}
offset += common_prefix.len();
current_id = child.node_id;
}
Ok(())
}
/// Gets a value by key from the tree.
pub fn get(tree: &mut RadixTree, key: &str) -> Result<Vec<u8>, Error> {
let mut current_id = tree.root_id;
let mut offset = 0;
// Handle empty key case
if key.is_empty() {
let root_node = tree.get_node(current_id)?;
if root_node.is_leaf {
return Ok(root_node.value.clone());
}
return Err(Error::KeyNotFound(key.to_string()));
}
while offset < key.len() {
let node = tree.get_node(current_id)?;
let mut found = false;
for child in &node.children {
if key[offset..].starts_with(&child.key_part) {
if offset + child.key_part.len() == key.len() {
let child_node = tree.get_node(child.node_id)?;
if child_node.is_leaf {
return Ok(child_node.value);
}
}
current_id = child.node_id;
offset += child.key_part.len();
found = true;
break;
}
}
if !found {
return Err(Error::KeyNotFound(key.to_string()));
}
}
Err(Error::KeyNotFound(key.to_string()))
}
/// Updates the value at a given key prefix.
pub fn update(tree: &mut RadixTree, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
let mut current_id = tree.root_id;
let mut offset = 0;
// Handle empty prefix case
if prefix.is_empty() {
return Err(Error::InvalidOperation("Empty prefix not allowed".to_string()));
}
while offset < prefix.len() {
let node = tree.get_node(current_id)?;
let mut found = false;
for child in &node.children {
if prefix[offset..].starts_with(&child.key_part) {
if offset + child.key_part.len() == prefix.len() {
// Found exact prefix match
let mut child_node = tree.get_node(child.node_id)?;
if child_node.is_leaf {
// Update the value
child_node.value = new_value;
tree.save_node(Some(child.node_id), &child_node)?;
return Ok(());
}
}
current_id = child.node_id;
offset += child.key_part.len();
found = true;
break;
}
}
if !found {
return Err(Error::PrefixNotFound(prefix.to_string()));
}
}
Err(Error::PrefixNotFound(prefix.to_string()))
}
/// Deletes a key from the tree.
pub fn delete(tree: &mut RadixTree, key: &str) -> Result<(), Error> {
let mut current_id = tree.root_id;
let mut offset = 0;
let mut path = Vec::new();
// Handle empty key case
if key.is_empty() {
let mut root_node = tree.get_node(current_id)?;
if !root_node.is_leaf {
return Err(Error::KeyNotFound(key.to_string()));
}
// For the root node, we just mark it as non-leaf
root_node.is_leaf = false;
root_node.value = Vec::new();
tree.save_node(Some(current_id), &root_node)?;
return Ok(());
}
// Find the node to delete
while offset < key.len() {
let node = tree.get_node(current_id)?;
let mut found = false;
for child in &node.children {
if key[offset..].starts_with(&child.key_part) {
path.push(child.clone());
current_id = child.node_id;
offset += child.key_part.len();
found = true;
// Check if we've matched the full key
if offset == key.len() {
let child_node = tree.get_node(child.node_id)?;
if child_node.is_leaf {
found = true;
break;
}
}
break;
}
}
if !found {
return Err(Error::KeyNotFound(key.to_string()));
}
}
if path.is_empty() {
return Err(Error::KeyNotFound(key.to_string()));
}
// Get the node to delete
let mut last_node = tree.get_node(path.last().unwrap().node_id)?;
// If the node has children, just mark it as non-leaf
if !last_node.children.is_empty() {
last_node.is_leaf = false;
last_node.value = Vec::new();
tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
return Ok(());
}
// If node has no children, remove it from parent
if path.len() > 1 {
let parent_id = path[path.len() - 2].node_id;
let mut parent_node = tree.get_node(parent_id)?;
// Find and remove the child from parent
for i in 0..parent_node.children.len() {
if parent_node.children[i].node_id == path.last().unwrap().node_id {
parent_node.children.remove(i);
break;
}
}
tree.save_node(Some(parent_id), &parent_node)?;
// Delete the node from the database
tree.db.delete(path.last().unwrap().node_id)?;
} else {
// If this is a direct child of the root, just mark it as non-leaf
last_node.is_leaf = false;
last_node.value = Vec::new();
tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
}
Ok(())
}
/// Lists all keys with a given prefix.
pub fn list(tree: &mut RadixTree, prefix: &str) -> Result<Vec<String>, Error> {
let mut result = Vec::new();
// Handle empty prefix case - will return all keys
if prefix.is_empty() {
collect_all_keys(tree, tree.root_id, "", &mut result)?;
return Ok(result);
}
// Start from the root and find all matching keys
find_keys_with_prefix(tree, tree.root_id, "", prefix, &mut result)?;
Ok(result)
}
/// Helper function to find all keys with a given prefix.
fn find_keys_with_prefix(
tree: &mut RadixTree,
node_id: u32,
current_path: &str,
prefix: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
let node = tree.get_node(node_id)?;
// If the current path already matches or exceeds the prefix length
if current_path.len() >= prefix.len() {
// Check if the current path starts with the prefix
if current_path.starts_with(prefix) {
// If this is a leaf node, add it to the results
if node.is_leaf {
result.push(current_path.to_string());
}
// Collect all keys from this subtree
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
}
}
return Ok(());
}
// Current path is shorter than the prefix, continue searching
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
// Check if this child's path could potentially match the prefix
if prefix.starts_with(current_path) {
// The prefix starts with the current path, so we need to check if
// the child's key_part matches the next part of the prefix
let prefix_remainder = &prefix[current_path.len()..];
// If the prefix remainder starts with the child's key_part or vice versa
if prefix_remainder.starts_with(&child.key_part)
|| (child.key_part.starts_with(prefix_remainder)
&& child.key_part.len() >= prefix_remainder.len()) {
find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
}
}
}
Ok(())
}
/// Helper function to recursively collect all keys under a node.
fn collect_all_keys(
tree: &mut RadixTree,
node_id: u32,
current_path: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
let node = tree.get_node(node_id)?;
// If this node is a leaf, add its path to the result
if node.is_leaf {
result.push(current_path.to_string());
}
// Recursively collect keys from all children
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
collect_all_keys(tree, child.node_id, &child_path, result)?;
}
Ok(())
}
/// Gets all values for keys with a given prefix.
pub fn getall(tree: &mut RadixTree, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
// Get all matching keys
let keys = list(tree, prefix)?;
// Get values for each key
let mut values = Vec::new();
for key in keys {
if let Ok(value) = get(tree, &key) {
values.push(value);
}
}
Ok(values)
}
impl RadixTree {
/// Helper function to get a node from the database.
pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
let data = self.db.get(node_id)?;
Node::deserialize(&data)
}
/// Helper function to save a node to the database.
pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
let data = node.serialize();
let args = OurDBSetArgs {
id: node_id,
data: &data,
};
Ok(self.db.set(args)?)
}
/// Helper function to find all keys with a given prefix.
fn find_keys_with_prefix(
&mut self,
node_id: u32,
current_path: &str,
prefix: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
let node = self.get_node(node_id)?;
// If the current path already matches or exceeds the prefix length
if current_path.len() >= prefix.len() {
// Check if the current path starts with the prefix
if current_path.starts_with(prefix) {
// If this is a leaf node, add it to the results
if node.is_leaf {
result.push(current_path.to_string());
}
// Collect all keys from this subtree
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
}
}
return Ok(());
}
// Current path is shorter than the prefix, continue searching
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
// Check if this child's path could potentially match the prefix
if prefix.starts_with(current_path) {
// The prefix starts with the current path, so we need to check if
// the child's key_part matches the next part of the prefix
let prefix_remainder = &prefix[current_path.len()..];
// If the prefix remainder starts with the child's key_part or vice versa
if prefix_remainder.starts_with(&child.key_part)
|| (child.key_part.starts_with(prefix_remainder)
&& child.key_part.len() >= prefix_remainder.len()) {
self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
}
}
}
Ok(())
}
/// Helper function to recursively collect all keys under a node.
fn collect_all_keys(
&mut self,
node_id: u32,
current_path: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
let node = self.get_node(node_id)?;
// If this node is a leaf, add its path to the result
if node.is_leaf {
result.push(current_path.to_string());
}
// Recursively collect keys from all children
for child in &node.children {
let child_path = format!("{}{}", current_path, child.key_part);
self.collect_all_keys(child.node_id, &child_path, result)?;
}
Ok(())
}
}

View File

@@ -1,156 +0,0 @@
//! Serialization and deserialization for RadixTree nodes.
use crate::error::Error;
use crate::node::{Node, NodeRef};
use std::io::{Cursor, Read};
use std::mem::size_of;
/// Current binary format version.
const VERSION: u8 = 1;
impl Node {
/// Serializes a node to bytes for storage.
pub fn serialize(&self) -> Vec<u8> {
let mut buffer = Vec::new();
// Add version byte
buffer.push(VERSION);
// Add key segment
write_string(&mut buffer, &self.key_segment);
// Add value as []u8
write_u16(&mut buffer, self.value.len() as u16);
buffer.extend_from_slice(&self.value);
// Add children
write_u16(&mut buffer, self.children.len() as u16);
for child in &self.children {
write_string(&mut buffer, &child.key_part);
write_u32(&mut buffer, child.node_id);
}
// Add leaf flag
buffer.push(if self.is_leaf { 1 } else { 0 });
buffer
}
/// Deserializes bytes to a node.
pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
if data.is_empty() {
return Err(Error::Deserialization("Empty data".to_string()));
}
let mut cursor = Cursor::new(data);
// Read and verify version
let mut version_byte = [0u8; 1];
cursor.read_exact(&mut version_byte)
.map_err(|e| Error::Deserialization(format!("Failed to read version byte: {}", e)))?;
if version_byte[0] != VERSION {
return Err(Error::Deserialization(
format!("Invalid version byte: expected {}, got {}", VERSION, version_byte[0])
));
}
// Read key segment
let key_segment = read_string(&mut cursor)
.map_err(|e| Error::Deserialization(format!("Failed to read key segment: {}", e)))?;
// Read value as raw bytes
let value_len = read_u16(&mut cursor)
.map_err(|e| Error::Deserialization(format!("Failed to read value length: {}", e)))?;
let mut value = vec![0u8; value_len as usize];
cursor.read_exact(&mut value)
.map_err(|e| Error::Deserialization(format!("Failed to read value: {}", e)))?;
// Read children
let children_len = read_u16(&mut cursor)
.map_err(|e| Error::Deserialization(format!("Failed to read children length: {}", e)))?;
let mut children = Vec::with_capacity(children_len as usize);
for _ in 0..children_len {
let key_part = read_string(&mut cursor)
.map_err(|e| Error::Deserialization(format!("Failed to read child key part: {}", e)))?;
let node_id = read_u32(&mut cursor)
.map_err(|e| Error::Deserialization(format!("Failed to read child node ID: {}", e)))?;
children.push(NodeRef {
key_part,
node_id,
});
}
// Read leaf flag
let mut is_leaf_byte = [0u8; 1];
cursor.read_exact(&mut is_leaf_byte)
.map_err(|e| Error::Deserialization(format!("Failed to read leaf flag: {}", e)))?;
let is_leaf = is_leaf_byte[0] == 1;
Ok(Node {
key_segment,
value,
children,
is_leaf,
})
}
}
// Helper functions for serialization
fn write_string(buffer: &mut Vec<u8>, s: &str) {
let bytes = s.as_bytes();
write_u16(buffer, bytes.len() as u16);
buffer.extend_from_slice(bytes);
}
fn write_u16(buffer: &mut Vec<u8>, value: u16) {
buffer.extend_from_slice(&value.to_le_bytes());
}
fn write_u32(buffer: &mut Vec<u8>, value: u32) {
buffer.extend_from_slice(&value.to_le_bytes());
}
// Helper functions for deserialization
fn read_string(cursor: &mut Cursor<&[u8]>) -> std::io::Result<String> {
let len = read_u16(cursor)? as usize;
let mut bytes = vec![0u8; len];
cursor.read_exact(&mut bytes)?;
String::from_utf8(bytes)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}
fn read_u16(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u16> {
let mut bytes = [0u8; size_of::<u16>()];
cursor.read_exact(&mut bytes)?;
Ok(u16::from_le_bytes(bytes))
}
fn read_u32(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u32> {
let mut bytes = [0u8; size_of::<u32>()];
cursor.read_exact(&mut bytes)?;
Ok(u32::from_le_bytes(bytes))
}
/// Helper function to get the common prefix of two strings.
pub fn get_common_prefix(a: &str, b: &str) -> String {
let mut i = 0;
let a_bytes = a.as_bytes();
let b_bytes = b.as_bytes();
while i < a.len() && i < b.len() && a_bytes[i] == b_bytes[i] {
i += 1;
}
a[..i].to_string()
}

View File

@@ -1,144 +0,0 @@
use radixtree::RadixTree;
use std::path::PathBuf;
use tempfile::tempdir;
#[test]
fn test_basic_operations() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Test setting and getting values
let key = "test_key";
let value = b"test_value".to_vec();
tree.set(key, value.clone())?;
let retrieved_value = tree.get(key)?;
assert_eq!(retrieved_value, value);
// Test updating a value
let new_value = b"updated_value".to_vec();
tree.update(key, new_value.clone())?;
let updated_value = tree.get(key)?;
assert_eq!(updated_value, new_value);
// Test deleting a value
tree.delete(key)?;
// Trying to get a deleted key should return an error
let result = tree.get(key);
assert!(result.is_err());
Ok(())
}
#[test]
fn test_empty_key() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Test setting and getting empty key
let key = "";
let value = b"value_for_empty_key".to_vec();
tree.set(key, value.clone())?;
let retrieved_value = tree.get(key)?;
assert_eq!(retrieved_value, value);
// Test deleting empty key
tree.delete(key)?;
// Trying to get a deleted key should return an error
let result = tree.get(key);
assert!(result.is_err());
Ok(())
}
#[test]
fn test_multiple_keys() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Insert multiple keys
let test_data = [
("key1", b"value1".to_vec()),
("key2", b"value2".to_vec()),
("key3", b"value3".to_vec()),
];
for (key, value) in &test_data {
tree.set(key, value.clone())?;
}
// Verify all keys can be retrieved
for (key, expected_value) in &test_data {
let retrieved_value = tree.get(key)?;
assert_eq!(&retrieved_value, expected_value);
}
Ok(())
}
#[test]
fn test_shared_prefixes() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Insert keys with shared prefixes
let test_data = [
("test", b"value_test".to_vec()),
("testing", b"value_testing".to_vec()),
("tested", b"value_tested".to_vec()),
];
for (key, value) in &test_data {
tree.set(key, value.clone())?;
}
// Verify all keys can be retrieved
for (key, expected_value) in &test_data {
let retrieved_value = tree.get(key)?;
assert_eq!(&retrieved_value, expected_value);
}
Ok(())
}
#[test]
fn test_persistence() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree and add some data
{
let mut tree = RadixTree::new(db_path, true)?;
tree.set("persistent_key", b"persistent_value".to_vec())?;
} // Tree is dropped here
// Create a new tree instance with the same path
{
let mut tree = RadixTree::new(db_path, false)?;
let value = tree.get("persistent_key")?;
assert_eq!(value, b"persistent_value".to_vec());
}
Ok(())
}

View File

@@ -1,153 +0,0 @@
use radixtree::RadixTree;
use std::collections::HashMap;
use tempfile::tempdir;
#[test]
fn test_getall() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Set up test data with common prefixes
let test_data: HashMap<&str, &str> = [
("user_1", "data1"),
("user_2", "data2"),
("user_3", "data3"),
("admin_1", "admin_data1"),
("admin_2", "admin_data2"),
("guest", "guest_data"),
].iter().cloned().collect();
// Set all test data
for (key, value) in &test_data {
tree.set(key, value.as_bytes().to_vec())?;
}
// Test getall with 'user_' prefix
let user_values = tree.getall("user_")?;
// Should return 3 values
assert_eq!(user_values.len(), 3);
// Convert byte arrays to strings for easier comparison
let user_value_strings: Vec<String> = user_values
.iter()
.map(|v| String::from_utf8_lossy(v).to_string())
.collect();
// Check all expected values are present
assert!(user_value_strings.contains(&"data1".to_string()));
assert!(user_value_strings.contains(&"data2".to_string()));
assert!(user_value_strings.contains(&"data3".to_string()));
// Test getall with 'admin_' prefix
let admin_values = tree.getall("admin_")?;
// Should return 2 values
assert_eq!(admin_values.len(), 2);
// Convert byte arrays to strings for easier comparison
let admin_value_strings: Vec<String> = admin_values
.iter()
.map(|v| String::from_utf8_lossy(v).to_string())
.collect();
// Check all expected values are present
assert!(admin_value_strings.contains(&"admin_data1".to_string()));
assert!(admin_value_strings.contains(&"admin_data2".to_string()));
// Test getall with empty prefix (should return all values)
let all_values = tree.getall("")?;
// Should return all 6 values
assert_eq!(all_values.len(), test_data.len());
// Test getall with non-existent prefix
let non_existent_values = tree.getall("xyz")?;
// Should return empty array
assert_eq!(non_existent_values.len(), 0);
Ok(())
}
#[test]
fn test_getall_with_updates() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Set initial values
tree.set("key1", b"value1".to_vec())?;
tree.set("key2", b"value2".to_vec())?;
tree.set("key3", b"value3".to_vec())?;
// Get initial values
let initial_values = tree.getall("key")?;
assert_eq!(initial_values.len(), 3);
// Update a value
tree.update("key2", b"updated_value2".to_vec())?;
// Get values after update
let updated_values = tree.getall("key")?;
assert_eq!(updated_values.len(), 3);
// Convert to strings for easier comparison
let updated_value_strings: Vec<String> = updated_values
.iter()
.map(|v| String::from_utf8_lossy(v).to_string())
.collect();
// Check the updated value is present
assert!(updated_value_strings.contains(&"value1".to_string()));
assert!(updated_value_strings.contains(&"updated_value2".to_string()));
assert!(updated_value_strings.contains(&"value3".to_string()));
Ok(())
}
#[test]
fn test_getall_with_deletions() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Set initial values
tree.set("prefix_1", b"value1".to_vec())?;
tree.set("prefix_2", b"value2".to_vec())?;
tree.set("prefix_3", b"value3".to_vec())?;
tree.set("other", b"other_value".to_vec())?;
// Get initial values
let initial_values = tree.getall("prefix_")?;
assert_eq!(initial_values.len(), 3);
// Delete a key
tree.delete("prefix_2")?;
// Get values after deletion
let after_delete_values = tree.getall("prefix_")?;
assert_eq!(after_delete_values.len(), 2);
// Convert to strings for easier comparison
let after_delete_strings: Vec<String> = after_delete_values
.iter()
.map(|v| String::from_utf8_lossy(v).to_string())
.collect();
// Check the remaining values
assert!(after_delete_strings.contains(&"value1".to_string()));
assert!(after_delete_strings.contains(&"value3".to_string()));
Ok(())
}

View File

@@ -1,185 +0,0 @@
use radixtree::RadixTree;
use std::collections::HashMap;
use tempfile::tempdir;
#[test]
fn test_list() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Insert keys with various prefixes
let test_data: HashMap<&str, &str> = [
("apple", "fruit1"),
("application", "software1"),
("apply", "verb1"),
("banana", "fruit2"),
("ball", "toy1"),
("cat", "animal1"),
("car", "vehicle1"),
("cargo", "shipping1"),
].iter().cloned().collect();
// Set all test data
for (key, value) in &test_data {
tree.set(key, value.as_bytes().to_vec())?;
}
// Test prefix 'app' - should return apple, application, apply
let app_keys = tree.list("app")?;
assert_eq!(app_keys.len(), 3);
assert!(app_keys.contains(&"apple".to_string()));
assert!(app_keys.contains(&"application".to_string()));
assert!(app_keys.contains(&"apply".to_string()));
// Test prefix 'ba' - should return banana, ball
let ba_keys = tree.list("ba")?;
assert_eq!(ba_keys.len(), 2);
assert!(ba_keys.contains(&"banana".to_string()));
assert!(ba_keys.contains(&"ball".to_string()));
// Test prefix 'car' - should return car, cargo
let car_keys = tree.list("car")?;
assert_eq!(car_keys.len(), 2);
assert!(car_keys.contains(&"car".to_string()));
assert!(car_keys.contains(&"cargo".to_string()));
// Test prefix 'z' - should return empty list
let z_keys = tree.list("z")?;
assert_eq!(z_keys.len(), 0);
// Test empty prefix - should return all keys
let all_keys = tree.list("")?;
assert_eq!(all_keys.len(), test_data.len());
for key in test_data.keys() {
assert!(all_keys.contains(&key.to_string()));
}
// Test exact key as prefix - should return just that key
let exact_key = tree.list("apple")?;
assert_eq!(exact_key.len(), 1);
assert_eq!(exact_key[0], "apple");
Ok(())
}
#[test]
fn test_list_with_deletion() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Set keys with common prefixes
tree.set("test1", b"value1".to_vec())?;
tree.set("test2", b"value2".to_vec())?;
tree.set("test3", b"value3".to_vec())?;
tree.set("other", b"value4".to_vec())?;
// Initial check
let test_keys = tree.list("test")?;
assert_eq!(test_keys.len(), 3);
assert!(test_keys.contains(&"test1".to_string()));
assert!(test_keys.contains(&"test2".to_string()));
assert!(test_keys.contains(&"test3".to_string()));
// Delete one key
tree.delete("test2")?;
// Check after deletion
let test_keys_after = tree.list("test")?;
assert_eq!(test_keys_after.len(), 2);
assert!(test_keys_after.contains(&"test1".to_string()));
assert!(!test_keys_after.contains(&"test2".to_string()));
assert!(test_keys_after.contains(&"test3".to_string()));
// Check all keys
let all_keys = tree.list("")?;
assert_eq!(all_keys.len(), 3);
assert!(all_keys.contains(&"other".to_string()));
Ok(())
}
#[test]
fn test_list_edge_cases() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Test with empty tree
let empty_result = tree.list("any")?;
assert_eq!(empty_result.len(), 0);
// Set a single key
tree.set("single", b"value".to_vec())?;
// Test with prefix that's longer than any key
let long_prefix = tree.list("singlelonger")?;
assert_eq!(long_prefix.len(), 0);
// Test with partial prefix match
let partial = tree.list("sing")?;
assert_eq!(partial.len(), 1);
assert_eq!(partial[0], "single");
// Test with very long keys
let long_key1 = "a".repeat(100) + "key1";
let long_key2 = "a".repeat(100) + "key2";
tree.set(&long_key1, b"value1".to_vec())?;
tree.set(&long_key2, b"value2".to_vec())?;
let long_prefix_result = tree.list(&"a".repeat(100))?;
assert_eq!(long_prefix_result.len(), 2);
assert!(long_prefix_result.contains(&long_key1));
assert!(long_prefix_result.contains(&long_key2));
Ok(())
}
#[test]
fn test_list_performance() -> Result<(), radixtree::Error> {
// Create a temporary directory for the test
let temp_dir = tempdir().expect("Failed to create temp directory");
let db_path = temp_dir.path().to_str().unwrap();
// Create a new radix tree
let mut tree = RadixTree::new(db_path, true)?;
// Insert a large number of keys with different prefixes
let prefixes = ["user", "post", "comment", "like", "share"];
// Set 100 keys for each prefix (500 total)
for prefix in &prefixes {
for i in 0..100 {
let key = format!("{}_{}", prefix, i);
tree.set(&key, format!("value_{}", key).as_bytes().to_vec())?;
}
}
// Test retrieving by each prefix
for prefix in &prefixes {
let keys = tree.list(prefix)?;
assert_eq!(keys.len(), 100);
// Verify all keys have the correct prefix
for key in &keys {
assert!(key.starts_with(prefix));
}
}
// Test retrieving all keys
let all_keys = tree.list("")?;
assert_eq!(all_keys.len(), 500);
Ok(())
}

View File

@@ -1,180 +0,0 @@
use radixtree::{Node, NodeRef};
#[test]
fn test_node_serialization() {
// Create a node with some data
let node = Node {
key_segment: "test".to_string(),
value: b"test_value".to_vec(),
children: vec![
NodeRef {
key_part: "child1".to_string(),
node_id: 1,
},
NodeRef {
key_part: "child2".to_string(),
node_id: 2,
},
],
is_leaf: true,
};
// Serialize the node
let serialized = node.serialize();
// Deserialize the node
let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");
// Verify the deserialized node matches the original
assert_eq!(deserialized.key_segment, node.key_segment);
assert_eq!(deserialized.value, node.value);
assert_eq!(deserialized.is_leaf, node.is_leaf);
assert_eq!(deserialized.children.len(), node.children.len());
for (i, child) in node.children.iter().enumerate() {
assert_eq!(deserialized.children[i].key_part, child.key_part);
assert_eq!(deserialized.children[i].node_id, child.node_id);
}
}
#[test]
fn test_empty_node_serialization() {
// Create an empty node
let node = Node {
key_segment: "".to_string(),
value: vec![],
children: vec![],
is_leaf: false,
};
// Serialize the node
let serialized = node.serialize();
// Deserialize the node
let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");
// Verify the deserialized node matches the original
assert_eq!(deserialized.key_segment, node.key_segment);
assert_eq!(deserialized.value, node.value);
assert_eq!(deserialized.is_leaf, node.is_leaf);
assert_eq!(deserialized.children.len(), node.children.len());
}
#[test]
fn test_node_with_many_children() {
// Create a node with many children
let mut children = Vec::new();
for i in 0..100 {
children.push(NodeRef {
key_part: format!("child{}", i),
node_id: i as u32,
});
}
let node = Node {
key_segment: "parent".to_string(),
value: b"parent_value".to_vec(),
children,
is_leaf: true,
};
// Serialize the node
let serialized = node.serialize();
// Deserialize the node
let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");
// Verify the deserialized node matches the original
assert_eq!(deserialized.key_segment, node.key_segment);
assert_eq!(deserialized.value, node.value);
assert_eq!(deserialized.is_leaf, node.is_leaf);
assert_eq!(deserialized.children.len(), node.children.len());
for (i, child) in node.children.iter().enumerate() {
assert_eq!(deserialized.children[i].key_part, child.key_part);
assert_eq!(deserialized.children[i].node_id, child.node_id);
}
}
#[test]
fn test_node_with_large_value() {
// Create a node with a large value
let large_value = vec![0u8; 4096]; // 4KB value
let node = Node {
key_segment: "large_value".to_string(),
value: large_value.clone(),
children: vec![],
is_leaf: true,
};
// Serialize the node
let serialized = node.serialize();
// Deserialize the node
let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");
// Verify the deserialized node matches the original
assert_eq!(deserialized.key_segment, node.key_segment);
assert_eq!(deserialized.value, node.value);
assert_eq!(deserialized.is_leaf, node.is_leaf);
assert_eq!(deserialized.children.len(), node.children.len());
}
#[test]
fn test_version_compatibility() {
// This test ensures that the serialization format is compatible with version 1
// Create a node
let node = Node {
key_segment: "test".to_string(),
value: b"test_value".to_vec(),
children: vec![
NodeRef {
key_part: "child".to_string(),
node_id: 1,
},
],
is_leaf: true,
};
// Serialize the node
let serialized = node.serialize();
// Verify the first byte is the version byte (1)
assert_eq!(serialized[0], 1);
// Deserialize the node
let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");
// Verify the deserialized node matches the original
assert_eq!(deserialized.key_segment, node.key_segment);
assert_eq!(deserialized.value, node.value);
assert_eq!(deserialized.is_leaf, node.is_leaf);
assert_eq!(deserialized.children.len(), node.children.len());
}
#[test]
fn test_invalid_serialization() {
// Test with empty data
let result = Node::deserialize(&[]);
assert!(result.is_err());
// Test with invalid version
let result = Node::deserialize(&[2, 0, 0, 0, 0]);
assert!(result.is_err());
// Test with truncated data
let node = Node {
key_segment: "test".to_string(),
value: b"test_value".to_vec(),
children: vec![],
is_leaf: true,
};
let serialized = node.serialize();
let truncated = &serialized[0..serialized.len() / 2];
let result = Node::deserialize(truncated);
assert!(result.is_err());
}

View File

@@ -0,0 +1,11 @@
module core
// Base provides common fields for all models
pub struct Base {
pub mut:
id u32
created u64 // Unix timestamp of creation
updated u64 // Unix timestamp of last update
deleted bool
version u32
}

View File

@@ -0,0 +1 @@
module core

View File

@@ -0,0 +1,23 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
pub struct MarketplaceCurrencyConfig {
pub mut:
base_currency string
supported_currencies []string
default_display_currency string
auto_update_rates bool
update_interval_minutes u32
fallback_rates map[string]f64 // Using f64 for Decimal
}
// User currency preferences
pub struct UserCurrencyPreference {
pub mut:
user_id string
preferred_currency string
updated_at u64 // Unix timestamp
}

View File

@@ -0,0 +1,64 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
// Configurable currency support for any currency type
pub struct Currency {
core.Base
pub mut:
code string // USD, EUR, BTC, ETH, etc.
name string
symbol string
currency_type CurrencyType
exchange_rate_to_base f64 // Using f64 for Decimal
is_base_currency bool
decimal_places u8
is_active bool
}
pub enum CurrencyType {
fiat
cryptocurrency
token
points
custom
}
pub struct Price {
pub mut:
base_amount f64 // Using f64 for Decimal
base_currency string
display_currency string
display_amount f64 // Using f64 for Decimal
formatted_display string
conversion_rate f64 // Using f64 for Decimal
conversion_timestamp u64 // Unix timestamp
}
pub struct MarketplaceCurrencyConfig {
pub mut:
base_currency string
supported_currencies []string
default_display_currency string
auto_update_rates bool
update_interval_minutes u32
fallback_rates map[string]f64 // Using f64 for Decimal
}
// Exchange rate history for tracking changes over time
pub struct ExchangeRateHistory {
pub mut:
from_currency string
to_currency string
rate f64 // Using f64 for Decimal
timestamp u64 // Unix timestamp
provider string
}
// User currency preferences
pub struct UserCurrencyPreference {
pub mut:
user_id string
preferred_currency string
updated_at u64 // Unix timestamp
}

View File

@@ -0,0 +1,115 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
pub struct Order {
core.Base
pub mut:
id string
user_id string
items []OrderItem
subtotal_base f64 // Using f64 for Decimal
total_base f64 // Using f64 for Decimal
base_currency string
currency_used string
currency_total f64 // Using f64 for Decimal
conversion_rate f64 // Using f64 for Decimal
status OrderStatus
payment_method string
payment_details PaymentDetails
billing_address Address
shipping_address Address
notes string
purchase_type PurchaseType
created_at u64 // Unix timestamp
updated_at u64 // Unix timestamp
}
pub struct OrderItem {
pub mut:
product_id string
product_name string
product_category string
quantity u32
unit_price_base f64 // Using f64 for Decimal
total_price_base f64 // Using f64 for Decimal
specifications map[string]string // Using map[string]string for HashMap<String, serde_json::Value>
provider_id string
provider_name string
}
pub enum OrderStatus {
pending
confirmed
processing
deployed
completed
cancelled
refunded
failed
}
// Purchase type to distinguish between cart-based and instant purchases
pub enum PurchaseType {
cart
instant
}
pub struct PaymentDetails {
pub mut:
payment_id string
payment_method PaymentMethod
transaction_id string
payment_status PaymentStatus
payment_timestamp u64 // Unix timestamp
failure_reason string
}
pub enum PaymentMethod {
credit_card // CreditCard { last_four: String, card_type: String }
bank_transfer // BankTransfer { bank_name: String, account_last_four: String }
cryptocurrency // Cryptocurrency { currency: String, wallet_address: String }
token // Token { token_type: String, wallet_address: String }
mock // Mock { method_name: String }
}
pub struct Address {
pub mut:
street string
city string
state string
postal_code string
country string
company string
}
// Shopping Cart Models
pub struct CartItem {
pub mut:
product_id string
quantity u32
selected_specifications map[string]string // Using map[string]string for HashMap<String, serde_json::Value>
added_at u64 // Unix timestamp
updated_at u64 // Unix timestamp
}
pub struct Cart {
pub mut:
user_id string
items []CartItem
session_id string
created_at u64 // Unix timestamp
updated_at u64 // Unix timestamp
}
// Order summary for display purposes
pub struct OrderSummary {
pub mut:
subtotal f64 // Using f64 for Decimal
tax f64 // Using f64 for Decimal
shipping f64 // Using f64 for Decimal
discount f64 // Using f64 for Decimal
total f64 // Using f64 for Decimal
currency string
item_count u32
}

View File

@@ -0,0 +1,93 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
pub struct LiquidityPool {
core.Base
pub mut:
id string
name string
token_a string
token_b string
reserve_a f64 // Using f64 for Decimal
reserve_b f64 // Using f64 for Decimal
exchange_rate f64 // Using f64 for Decimal
liquidity f64 // Using f64 for Decimal
volume_24h f64 // Using f64 for Decimal
fee_percentage f64 // Using f64 for Decimal
status PoolStatus
}
pub enum PoolStatus {
active
paused
maintenance
}
pub struct ExchangeRequest {
pub mut:
pool_id string
from_token string
to_token string
amount f64 // Using f64 for Decimal
min_receive f64 // Using f64 for Decimal
slippage_tolerance f64 // Using f64 for Decimal
}
pub struct ExchangeResponse {
pub mut:
success bool
message string
transaction_id string
from_amount f64 // Using f64 for Decimal
to_amount f64 // Using f64 for Decimal
exchange_rate f64 // Using f64 for Decimal
fee f64 // Using f64 for Decimal
}
pub struct StakeRequest {
pub mut:
amount f64 // Using f64 for Decimal
duration_months u32
}
pub struct StakePosition {
core.Base
pub mut:
id string
user_id string
amount f64 // Using f64 for Decimal
start_date u64 // Unix timestamp
end_date u64 // Unix timestamp
discount_percentage f64 // Using f64 for Decimal
reputation_bonus int
status StakeStatus
}
pub enum StakeStatus {
active
completed
withdrawn
}
// Pool analytics data
pub struct PoolAnalytics {
pub mut:
price_history []PricePoint
volume_history []VolumePoint
liquidity_distribution map[string]f64 // Using f64 for Decimal
staking_distribution map[string]int
}
pub struct PricePoint {
pub mut:
timestamp u64 // Unix timestamp
price f64 // Using f64 for Decimal
volume f64 // Using f64 for Decimal
}
pub struct VolumePoint {
pub mut:
date string
volume f64 // Using f64 for Decimal
}

View File

@@ -0,0 +1,177 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
// Generic product structure that can represent any marketplace item
pub struct Product {
core.Base
pub mut:
id string
name string
category_id string // References ProductCategory config
description string
base_price f64 // Using f64 for Decimal
base_currency string
attributes map[string]ProductAttribute // Generic attributes
provider_id string
provider_name string
availability ProductAvailability
metadata ProductMetadata // Extensible metadata
created_at u64 // Unix timestamp
updated_at u64 // Unix timestamp
}
// Configurable product categories
pub struct ProductCategory {
pub mut:
id string
name string
display_name string
description string
attribute_schema []AttributeDefinition // Defines allowed attributes
parent_category string
is_active bool
}
// Generic attribute system for any product type
pub struct ProductAttribute {
pub mut:
key string
value string // Using string for serde_json::Value, consider map[string]string or specific types
attribute_type AttributeType
is_searchable bool
is_filterable bool
display_order u32
}
pub enum AttributeType {
text
number
slice_configuration
boolean
select // Select(Vec<String>)
multi_select // MultiSelect(Vec<String>)
range // Range { min: f64, max: f64 }
custom
}
pub struct AttributeDefinition {
pub mut:
key string
name string
attribute_type AttributeType
is_required bool
is_searchable bool
is_filterable bool
validation_rules []ValidationRule
}
pub enum ValidationRule {
min_length // MinLength(usize)
max_length // MaxLength(usize)
min_value // MinValue(f64)
max_value // MaxValue(f64)
pattern // Pattern(String)
custom
}
pub enum ProductAvailability {
available
limited
unavailable
pre_order
custom
}
pub struct ProductMetadata {
pub mut:
tags []string
location string
rating f32
review_count u32
featured bool
custom_fields map[string]string // Using map[string]string for HashMap<String, serde_json::Value>
}
// Support for different pricing models
pub enum PricingModel {
one_time
recurring // Recurring { interval: String }
usage_based // UsageBased { unit: String }
tiered // Tiered(Vec<PriceTier>)
custom
}
pub struct PriceTier {
pub mut:
min_quantity u32
max_quantity u32
price_per_unit f64 // Using f64 for Decimal
discount_percentage f32
}
// Slice configuration data structure for product attributes
pub struct SliceConfiguration {
pub mut:
cpu_cores int
memory_gb int
storage_gb int
bandwidth_mbps int
min_uptime_sla f32
public_ips int
node_id string
slice_type SliceType
pricing SlicePricing
}
// Enhanced pricing structure for slices with multiple time periods
pub struct SlicePricing {
pub mut:
hourly f64 // Using f64 for Decimal
daily f64 // Using f64 for Decimal
monthly f64 // Using f64 for Decimal
yearly f64 // Using f64 for Decimal
}
pub enum SliceType {
basic
standard
premium
custom
}
// Placeholder for SliceAllocation and SliceCombination
// These are not directly from product.rs but are referenced in user.rs
pub struct SliceAllocation {
pub mut:
slice_id string
node_id string
user_id string
allocated_cpu_cores int
allocated_memory_gb int
allocated_storage_gb int
allocated_bandwidth_mbps int
start_time u64
end_time u64
}
pub struct SliceCombination {
pub mut:
cpu_cores int
memory_gb int
storage_gb int
bandwidth_mbps int
price_per_hour f64
}
// Placeholder for DefaultSliceFormat
// This is not directly from product.rs but is referenced in user.rs
pub struct DefaultSliceFormat {
pub mut:
name string
cpu_cores int
memory_gb int
storage_gb int
bandwidth_mbps int
price_per_hour f64
}

View File

@@ -0,0 +1,730 @@
module main
import freeflowuniverse.herolib.hero.models.marketplace.core
// Represents a user in the system
pub struct User {
core.Base
pub mut:
id u32 // Unique identifier for the user, using u32 for consistency with VLang
name string // User's full name
email string // User's email address
role UserRole // User's role in the system
country string // User's country
timezone string // User's timezone
created_at u64 // Unix timestamp of creation
updated_at u64 // Unix timestamp of last update
mock_data MockUserData // Mock data for dashboard
}
// Represents the possible roles a user can have
pub enum UserRole {
user
admin
}
// Represents user login credentials
pub struct LoginCredentials {
pub mut:
email string
password string
}
// Represents user registration data
pub struct RegistrationData {
pub mut:
name string
email string
password string
password_confirmation string
}
// Mock user data for testing
pub struct MockUserData {
pub mut:
active_deployments int
active_slices int
current_cost int
balance int
wallet_balance_usd f64 // Using f64 for Decimal, consider string for high precision
owned_product_ids []string
active_rentals []string
transaction_history []Transaction
resource_utilization ResourceUtilization
usd_usage_trend []int
user_activity UserActivityStats
recent_activities []RecentActivity
deployment_distribution DeploymentDistribution
farmer_data FarmerData // Farmer-specific data
app_provider_data AppProviderData // App Provider-specific data
service_provider_data ServiceProviderData // Service Provider-specific data
customer_service_data CustomerServiceData // Customer Service-specific data
}
pub struct ResourceUtilization {
pub mut:
cpu int
memory int
storage int
network int
}
pub struct UserActivityStats {
pub mut:
deployments []int
resource_reservations []int
}
pub struct RecentActivity {
pub mut:
date string
action string
status string
details string
}
pub struct DeploymentDistribution {
pub mut:
regions []RegionDeployments
}
pub struct RegionDeployments {
pub mut:
region string
nodes int
slices int
apps int
gateways int
}
// Farmer-specific data
pub struct FarmerData {
pub mut:
total_nodes int
online_nodes int
total_capacity NodeCapacity
used_capacity NodeCapacity
monthly_earnings_usd int
total_earnings_usd int
uptime_percentage f32
nodes []FarmNode
earnings_history []EarningsRecord
slice_templates []Product
active_slices int
}
pub struct NodeCapacity {
pub mut:
cpu_cores int
memory_gb int
storage_gb int
bandwidth_mbps int
ssd_storage_gb int
hdd_storage_gb int
}
// Enhanced Node structure for farmer dashboard with modern types
pub struct FarmNode {
pub mut:
id string
name string
location string
status NodeStatus
capacity NodeCapacity
used_capacity NodeCapacity
uptime_percentage f32
earnings_today_usd f64 // Using f64 for Decimal
last_seen u64 // Unix timestamp
health_score f32
region string
node_type string
slice_formats []string
rental_options NodeRentalOptions
availability_status NodeAvailabilityStatus
grid_node_id u32
grid_data GridNodeData
node_group_id string
group_assignment_date u64 // Unix timestamp
group_slice_format string
group_slice_price f64 // Using f64 for Decimal
staking_options NodeStakingOptions
marketplace_sla MarketplaceSLA
total_base_slices u32
allocated_base_slices u32
slice_allocations []SliceAllocation // Assuming SliceAllocation is defined elsewhere or will be translated
available_combinations []SliceCombination // Assuming SliceCombination is defined elsewhere or will be translated
slice_pricing SlicePricing // Assuming SlicePricing is defined elsewhere or will be translated
slice_last_calculated u64 // Unix timestamp
}
pub struct EarningsRecord {
pub mut:
date string
amount int
source string
}
pub enum NodeStatus {
online
offline
maintenance
error
standby
}
pub struct FarmerSettings {
pub mut:
auto_accept_deployments bool
maintenance_window string
notification_preferences NotificationSettings
minimum_deployment_duration int
preferred_regions []string
default_slice_customizations map[string]DefaultSliceFormat // Assuming DefaultSliceFormat is defined elsewhere or will be translated
}
pub struct NotificationSettings {
pub mut:
email_enabled bool
sms_enabled bool
push bool
node_offline_alerts bool
earnings_reports bool
maintenance_reminders bool
}
// Marketplace SLA configuration - what the farmer promises to customers
pub struct MarketplaceSLA {
pub mut:
uptime_guarantee_percentage f32
bandwidth_guarantee_mbps int
base_slice_price f64 // Using f64 for Decimal
last_updated u64 // Unix timestamp
}
// Node rental options that farmers can configure
pub struct NodeRentalOptions {
pub mut:
slice_rental_enabled bool
full_node_rental_enabled bool
full_node_pricing FullNodePricing
minimum_rental_days u32
maximum_rental_days u32
auto_renewal_enabled bool
}
// Node staking options that farmers can configure
pub struct NodeStakingOptions {
pub mut:
staking_enabled bool
staked_amount f64 // Using f64 for Decimal
staking_start_date u64 // Unix timestamp
staking_period_months u32
early_withdrawal_allowed bool
early_withdrawal_penalty_percent f32
}
// Full node rental pricing configuration with auto-calculation support
pub struct FullNodePricing {
pub mut:
hourly f64 // Using f64 for Decimal
daily f64 // Using f64 for Decimal
monthly f64 // Using f64 for Decimal
yearly f64 // Using f64 for Decimal
auto_calculate bool
daily_discount_percent f32
monthly_discount_percent f32
yearly_discount_percent f32
}
// Node availability status for rental management
pub enum NodeAvailabilityStatus {
available
partially_rented
fully_rented
unavailable
reserved
}
// Individual node rental record
pub struct NodeRental {
pub mut:
id string
node_id string
renter_email string
rental_type NodeRentalType
monthly_cost f64 // Using f64 for Decimal
start_date u64 // Unix timestamp
end_date u64 // Unix timestamp
status NodeRentalStatus
auto_renewal bool
payment_method string
metadata map[string]string // Using map[string]string for HashMap<String, serde_json::Value>
}
// Type of node rental
pub enum NodeRentalType {
slice // Slice { slice_ids: Vec<String>, total_cpu_cores: u32, total_memory_gb: u32, total_storage_gb: u32 }
full_node
}
// Status of a node rental
pub enum NodeRentalStatus {
active
pending
expired
cancelled
suspended
}
// Farmer earnings from node rentals
pub struct FarmerRentalEarning {
pub mut:
id string
node_id string
rental_id string
renter_email string
amount f64 // Using f64 for Decimal
currency string
earning_date u64 // Unix timestamp
rental_type NodeRentalType
payment_status PaymentStatus
}
pub enum PaymentStatus {
pending
completed
failed
refunded
}
// User Activity Tracking
pub struct UserActivity {
pub mut:
id string
activity_type ActivityType
description string
timestamp u64 // Unix timestamp
metadata map[string]string // Using map[string]string for HashMap<String, serde_json::Value>
category string
importance ActivityImportance
}
pub enum ActivityType {
login
purchase
deployment
service_created
app_published
node_added
node_updated
wallet_transaction
profile_update
settings_change
marketplace_view
slice_created
slice_allocated
slice_released
slice_rental_started
slice_rental_stopped
slice_rental_restarted
slice_rental_cancelled
}
pub enum ActivityImportance {
low
medium
high
critical
}
// Enhanced User Statistics
pub struct UsageStatistics {
pub mut:
total_deployments int
active_services int
total_spent f64 // Using f64 for Decimal
favorite_categories []string
usage_trends []UsageTrend
login_frequency f32
preferred_regions []string
account_age_days int
last_activity u64 // Unix timestamp
}
pub struct UsageTrend {
pub mut:
period string
metric string
value f32
change_percentage f32
}
pub struct UserPreferences {
pub mut:
preferred_currency string
preferred_language string
timezone string
dashboard_layout string
notification_settings NotificationSettings
privacy_settings PrivacySettings
theme string
last_payment_method string
}
pub struct PrivacySettings {
pub mut:
profile_visibility string
activity_tracking bool
marketing_emails bool
data_sharing bool
}
// ThreeFold Grid Node Data fetched from gridproxy/graphql
pub struct GridNodeData {
pub mut:
grid_node_id u32
city string
country string
farm_name string
farm_id u32
public_ips u32
total_resources NodeCapacity
used_resources NodeCapacity
certification_type string
farming_policy_id u32
last_updated u64 // Unix timestamp
}
// Node Group for managing multiple nodes together
pub struct NodeGroup {
core.Base
pub mut:
id string
name string
description string
group_type NodeGroupType
node_ids []string
group_config NodeGroupConfig
created_at u64 // Unix timestamp
updated_at u64 // Unix timestamp
}
// Type of node group - default or custom
pub enum NodeGroupType {
default_compute
default_storage
default_ai_gpu
custom
}
// Configuration for node groups
pub struct NodeGroupConfig {
pub mut:
preferred_slice_formats []string
default_pricing map[string]f64 // Using f64 for Decimal
resource_optimization ResourceOptimization
auto_scaling bool
}
// Resource optimization settings for groups
pub enum ResourceOptimization {
balanced
performance
efficiency
custom
}
// Statistics for a node group
pub struct GroupStatistics {
pub mut:
group_id string
total_nodes int
online_nodes int
total_capacity NodeCapacity
average_uptime f32
group_type NodeGroupType
}
// Enhanced User Dashboard Data
pub struct UserDashboardData {
pub mut:
user_info UserInfo
recent_activities []UserActivity
usage_statistics UsageStatistics
active_services []Service // Assuming Service is defined elsewhere or will be translated
active_deployments int
wallet_summary WalletSummary
recommendations []Recommendation
quick_actions []QuickAction
}
pub struct UserInfo {
pub mut:
name string
email string
member_since string
account_type string
verification_status string
}
pub struct WalletSummary {
pub mut:
balance f64 // Using f64 for Decimal
currency string
recent_transactions int
pending_transactions int
}
pub struct Recommendation {
pub mut:
id string
title string
description string
action_url string
priority string
category string
}
pub struct QuickAction {
pub mut:
id string
title string
description string
action_url string
icon string
enabled bool
}
// App Provider-specific data
pub struct AppProviderData {
pub mut:
published_apps int
total_deployments int
active_deployments int
monthly_revenue_usd int
total_revenue_usd int
apps []PublishedApp
deployment_stats []DeploymentStat
revenue_history []RevenueRecord
}
pub struct PublishedApp {
pub mut:
id string
name string
category string
version string
status string
deployments int
rating f32
monthly_revenue_usd int
last_updated string
auto_healing bool
}
pub struct DeploymentStat {
pub mut:
app_name string
region string
instances int
status string
resource_usage ResourceUtilization
customer_name string
deployed_date string
deployment_id string
auto_healing bool
}
pub struct RevenueRecord {
pub mut:
date string
amount int
app_name string
}
// Service Provider-specific data
pub struct ServiceProviderData {
pub mut:
active_services int
total_clients int
monthly_revenue_usd int
total_revenue_usd int
service_rating f32
services []Service
client_requests []ServiceRequest
revenue_history []RevenueRecord
}
pub struct Service {
pub mut:
id string
name string
category string
description string
price_per_hour_usd int
status string
clients int
rating f32
total_hours int
}
pub struct ServiceRequest {
pub mut:
id string
client_name string
service_name string
status string
requested_date string
estimated_hours int
budget int
priority string
progress int
completed_date string
client_email string
client_phone string
description string
created_date string
}
// Service booking record for customers who purchase services
pub struct ServiceBooking {
pub mut:
id string
service_id string
service_name string
provider_email string
customer_email string
budget int
estimated_hours int
status string
requested_date string
priority string
description string
booking_date string
client_phone string
progress int
completed_date string
}
// Customer Service-specific data (for users who book services)
pub struct CustomerServiceData {
pub mut:
active_bookings int
completed_bookings int
total_spent int
monthly_spending int
average_rating_given f32
service_bookings []ServiceBooking
favorite_providers []string
spending_history []SpendingRecord
}
pub struct SpendingRecord {
pub mut:
date string
amount int
service_name string
provider_name string
}
// Transaction record for wallet operations
pub struct Transaction {
pub mut:
id string
user_id string
transaction_type TransactionType
amount f64 // Using f64 for Decimal
timestamp u64 // Unix timestamp
status TransactionStatus
}
// Types of transactions
pub enum TransactionType {
purchase
rental
transfer
earning
instant_purchase
exchange
stake
unstake
auto_top_up
credits_purchase
credits_sale
credits_transfer
}
// Transaction status
pub enum TransactionStatus {
pending
completed
failed
cancelled
}
// Rental record
pub struct Rental {
pub mut:
id string
user_id string
product_id string
start_date u64 // Unix timestamp
end_date u64 // Unix timestamp
status RentalStatus
monthly_cost f64 // Using f64 for Decimal
}
// Rental status
pub enum RentalStatus {
active
expired
cancelled
pending
}
// User deployment information for dashboard
pub struct UserDeployment {
pub mut:
id string
app_name string
status DeploymentStatus
cost_per_month f64 // Using f64 for Decimal
deployed_at u64 // Unix timestamp
provider string
region string
resource_usage ResourceUtilization
}
// Deployment status enum
pub enum DeploymentStatus {
active
pending
stopped
error
maintenance
}
// Comprehensive user metrics for dashboard
pub struct UserMetrics {
pub mut:
total_spent_this_month f64 // Using f64 for Decimal
active_deployments_count int
resource_utilization ResourceUtilization
cost_trend []int
wallet_balance f64 // Using f64 for Decimal
total_transactions int
}
// User compute resource for dashboard display
pub struct UserComputeResource {
pub mut:
id string
resource_type string
specs string
location string
status string
sla string
monthly_cost f64 // Using f64 for Decimal
provider string
resource_usage ResourceUtilization
}

View File

@@ -0,0 +1,4 @@
module models_marketplace
import models_marketplace.core
import models_marketplace.main

View File

@@ -0,0 +1,7 @@
Make an SQL script to populate the DB.
Only models with the base class are put in tables.
The data itself is stored in the data field.
The fields marked with @index become separate columns in the tables (see the sketch below).
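A minimal sketch of what such a generated script might look like, assuming a hypothetical user model with an indexed username field; table, column, and index names are illustrative, not the generator's actual output:

```sql
-- Hypothetical example: a model embedding the base class becomes one table.
-- Fields marked with @index become real columns; the full serialized model
-- goes into the data field.
CREATE TABLE IF NOT EXISTS user (
    id       INTEGER PRIMARY KEY, -- from the base class
    username TEXT,                -- example @index field
    data     BLOB NOT NULL        -- serialized model payload
);
CREATE INDEX IF NOT EXISTS idx_user_username ON user (username);

-- Example population row; values are placeholders.
INSERT INTO user (id, username, data) VALUES (1, 'alice', x'00');
```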

1
specs/models_threefold/core Symbolic link
View File

@@ -0,0 +1 @@
../models_heroledger/core

View File

@@ -0,0 +1,31 @@
module main

// Note: Notary below embeds core.Base; the import path for the core module is
// assumed to match the other model modules in this repo.
import freeflowuniverse.herolib.hero.models.core
pub struct SecretBox {
pub mut:
notary_id u32 // person who is allowed to decrypt this info
value string // the actual encrypted value
version u16 // version of the schema used to encrypt this value
timestamp u64
cat SecretBoxCategory // category of the secret box, e.g. profile
}
pub enum SecretBoxCategory {
profile
}
pub struct Notary {
core.Base
pub mut:
userid u32 // Reference to the user entity @[index]
status NotaryStatus // Current user status
myceliumaddress string // Mycelium address of the notary
pubkey string // Public key for cryptographic operations @[index]
}
pub enum NotaryStatus {
active
inactive
suspended
archived
error
}

View File

@@ -0,0 +1,32 @@
module circle
import freeflowuniverse.herolib.hero.models.core
// Signature represents a cryptographic signature created by a user over an object
pub struct Signature {
core.Base
pub mut:
signature_id u32 // Identifier for this signature @[index]
user_id u32 // Reference to the user who created the signature @[index]
value string // The actual signature value
objectid u32 // Reference to the object being signed @[index]
objecttype ObjectType // Type of object being signed (e.g., account, membership)
status SignatureStatus
timestamp u64
}
pub enum SignatureStatus {
active
inactive
pending
revoked
}
pub enum ObjectType {
account
dnsrecord
membership
user
transaction
kyc
}

View File

@@ -0,0 +1,29 @@
module circle
import freeflowuniverse.herolib.hero.models.core
// A user in the system; most of the info is in models_heroledger
pub struct User {
core.Base
pub mut:
username string // Unique username for the user @[index]
pubkey string // Public key for cryptographic operations @[index]
status UserStatus // Current user status
kyc KYCStatus // Know Your Customer status
}
pub enum UserStatus {
active
inactive
suspended
archived
}
pub enum KYCStatus {
pending
approved
rejected
}

View File

@@ -0,0 +1,22 @@
module circle
import freeflowuniverse.herolib.hero.models.core
// A per-user key-value store
pub struct UserKVS {
core.Base
pub mut:
userid u32 // Reference to the user entity @[index]
name string // Name of the key-value store
}
pub struct UserKVSItem {
core.Base
pub mut:
userkvs_id u32 // Reference to the user entity @[index]
key string
value string // Value associated with the key
secretbox []SecretBox // Optional secret boxes for sensitive data
timestamp u64 // Timestamp when the item was created or last updated
}

179
tst/Cargo.lock generated
View File

@@ -1,179 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "syn"
version = "2.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tst"
version = "0.1.0"
dependencies = [
"ourdb",
"thiserror",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "zerocopy"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -1,30 +0,0 @@
[package]
name = "tst"
version = "0.1.0"
edition = "2021"
description = "A persistent ternary search tree implementation using OurDB for storage"
authors = ["OurWorld Team"]
[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
[dev-dependencies]
# criterion = "0.5.1"
# Uncomment when benchmarks are implemented
# [[bench]]
# name = "tst_benchmarks"
# harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "prefix_ops"
path = "examples/prefix_ops.rs"
[[example]]
name = "performance"
path = "examples/performance.rs"

View File

@@ -1,185 +0,0 @@
# Ternary Search Tree (TST)
A persistent ternary search tree implementation in Rust using OurDB for storage.
## Overview
TST is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This implementation provides a persistent ternary search tree suited to use cases such as auto-complete, routing tables, and other prefix-heavy lookups.
A ternary search tree is a type of trie where each node has three children: left, middle, and right. Unlike a radix tree, which compresses common prefixes, a TST stores one character per node and uses a binary search tree-like structure for efficient traversal.
Key characteristics:
- Each node stores a single character
- Nodes have three children: left (for characters < current), middle (for next character in key), and right (for characters > current)
- Leaf nodes contain the actual values
- Balanced structure for consistent performance across operations
## Features
- Efficient string key operations
- Persistent storage using OurDB backend
- Balanced tree structure for consistent performance
- Support for binary values
- Thread-safe operations through OurDB
## Usage
Add the dependency to your `Cargo.toml`:
```toml
[dependencies]
tst = { path = "../tst" }
```
### Basic Example
```rust
use tst::TST;
fn main() -> Result<(), tst::Error> {
// Create a new ternary search tree
let mut tree = TST::new("/tmp/tst", false)?;
// Set key-value pairs
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
// Get values by key
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world
// List keys by prefix
let keys = tree.list("hel")?; // Returns ["hello", "help"]
println!("Keys with prefix 'hel': {:?}", keys);
// Get all values by prefix
let values = tree.getall("hel")?; // Returns [b"world", b"me"]
// Delete keys
tree.delete("help")?;
Ok(())
}
```
## API
### Creating a TST
```rust
// Create a new ternary search tree
let mut tree = TST::new("/tmp/tst", false)?;
// Create a new ternary search tree and reset if it exists
let mut tree = TST::new("/tmp/tst", true)?;
```
### Setting Values
```rust
// Set a key-value pair
tree.set("key", b"value".to_vec())?;
```
### Getting Values
```rust
// Get a value by key
let value = tree.get("key")?;
```
### Deleting Keys
```rust
// Delete a key
tree.delete("key")?;
```
### Listing Keys by Prefix
```rust
// List all keys with a given prefix
let keys = tree.list("prefix")?;
```
### Getting All Values by Prefix
```rust
// Get all values for keys with a given prefix
let values = tree.getall("prefix")?;
```
## Performance Characteristics
- Search: O(k) where k is the key length
- Insert: O(k) for new keys
- Delete: O(k) plus potential node cleanup
- Space: O(n) where n is the total number of nodes
## Use Cases
TST is particularly useful for:
- Prefix-based searching
- Auto-complete systems
- Dictionary implementations
- Spell checking
- Any application requiring efficient string key operations with persistence
## Implementation Details
The TST implementation uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
- Node serialization includes version tracking for format evolution (a rough sketch follows)
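
A rough sketch of what a stored node might look like, based only on the description above; the `TstNode` type, field names, and encoding are assumptions, not the crate's actual API:

```rust
// Illustrative only: layout assumed from the bullets above, not the real `tst` types.
struct TstNode {
    character: char, // the single character stored in this node
    left: u32,       // OurDB record ID of the "less than" child (0 = none)
    middle: u32,     // OurDB record ID of the "next character" child (0 = none)
    right: u32,      // OurDB record ID of the "greater than" child (0 = none)
    value: Vec<u8>,  // binary value; meaningful only when a key ends here
    is_terminal: bool,
}

impl TstNode {
    // A leading version byte lets the on-disk format evolve, mirroring the
    // RadixTree node serialization elsewhere in this repository.
    fn serialize(&self) -> Vec<u8> {
        let mut buf = vec![1u8]; // format version
        buf.extend_from_slice(&(self.character as u32).to_le_bytes());
        buf.extend_from_slice(&self.left.to_le_bytes());
        buf.extend_from_slice(&self.middle.to_le_bytes());
        buf.extend_from_slice(&self.right.to_le_bytes());
        buf.extend_from_slice(&(self.value.len() as u16).to_le_bytes());
        buf.extend_from_slice(&self.value);
        buf.push(self.is_terminal as u8);
        buf
    }
}
```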
## Running Tests
The project includes a comprehensive test suite that verifies all functionality:
```bash
cd ~/code/git.threefold.info/herocode/db/tst
# Run all tests
cargo test
# Run specific test file
cargo test --test basic_test
cargo test --test prefix_test
```
## Running Examples
The project includes example applications that demonstrate how to use the TST:
```bash
# Run the basic usage example
cargo run --example basic_usage
# Run the prefix operations example
cargo run --example prefix_ops
# Run the performance test
cargo run --example performance
```
## Comparison with RadixTree
While both TST and RadixTree provide efficient string key operations, they have different characteristics:
- **TST**: Stores one character per node, with a balanced structure for consistent performance across operations.
- **RadixTree**: Compresses common prefixes, which can be more space-efficient for keys with long common prefixes.
Choose TST when:
- You need balanced performance across all operations
- Your keys don't share long common prefixes
- You want a simpler implementation with predictable performance
Choose RadixTree when:
- Space efficiency is a priority
- Your keys share long common prefixes
- You prioritize lookup performance over uniform performance across all operations
## License
This project is licensed under the same license as the HeroCode project.


@@ -1,75 +0,0 @@
use std::time::Instant;
use tst::TST;
fn main() -> Result<(), tst::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("tst_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating ternary search tree at: {}", db_path.display());
// Create a new TST
let mut tree = TST::new(db_path.to_str().unwrap(), true)?;
// Store some data
println!("Inserting data...");
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
tree.set("helicopter", b"flying".to_vec())?;
tree.set("apple", b"fruit".to_vec())?;
tree.set("application", b"software".to_vec())?;
tree.set("banana", b"yellow".to_vec())?;
// Retrieve and print the data
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value));
// List keys with prefix
println!("\nListing keys with prefix 'hel':");
let start = Instant::now();
let keys = tree.list("hel")?;
let duration = start.elapsed();
for key in &keys {
println!(" {}", key);
}
println!("Found {} keys in {:?}", keys.len(), duration);
// Get all values with prefix
println!("\nGetting all values with prefix 'app':");
let start = Instant::now();
let values = tree.getall("app")?;
let duration = start.elapsed();
for (i, value) in values.iter().enumerate() {
println!(" Value {}: {}", i + 1, String::from_utf8_lossy(value));
}
println!("Found {} values in {:?}", values.len(), duration);
// Delete a key
println!("\nDeleting 'help'...");
tree.delete("help")?;
// Verify deletion
println!("Listing keys with prefix 'hel' after deletion:");
let keys_after = tree.list("hel")?;
for key in &keys_after {
println!(" {}", key);
}
// Try to get a deleted key
match tree.get("help") {
Ok(_) => println!("Unexpectedly found 'help' after deletion!"),
Err(e) => println!("As expected, 'help' was not found: {}", e),
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("\nCleaned up database directory");
} else {
println!("\nDatabase kept at: {}", db_path.display());
}
Ok(())
}


@@ -1,167 +0,0 @@
use std::io::{self, Write};
use std::time::{Duration, Instant};
use tst::TST;
// Function to generate a test value of specified size
fn generate_test_value(index: usize, size: usize) -> Vec<u8> {
let base_value = format!("val{:08}", index);
let mut value = Vec::with_capacity(size);
// Fill with repeating pattern to reach desired size
while value.len() < size {
value.extend_from_slice(base_value.as_bytes());
}
// Truncate to exact size
value.truncate(size);
value
}
// Number of records to insert
const TOTAL_RECORDS: usize = 100_000;
// How often to report progress (every X records)
const PROGRESS_INTERVAL: usize = 1_000;
// How many records to use for performance sampling
const PERFORMANCE_SAMPLE_SIZE: usize = 100;
fn main() -> Result<(), tst::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("tst_performance_test");
// Completely remove and recreate the directory to ensure a clean start
if db_path.exists() {
std::fs::remove_dir_all(&db_path)?;
}
std::fs::create_dir_all(&db_path)?;
println!("Creating ternary search tree at: {}", db_path.display());
println!("Will insert {} records and show progress...", TOTAL_RECORDS);
// Create a new TST
let mut tree = TST::new(db_path.to_str().unwrap(), true)?;
// Track overall time
let start_time = Instant::now();
// Track performance metrics
let mut insertion_times = Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL);
let mut last_batch_time = Instant::now();
let mut last_batch_records = 0;
// Insert records and track progress
for i in 0..TOTAL_RECORDS {
let key = format!("key:{:08}", i);
// Generate a 100-byte value
let value = generate_test_value(i, 100);
// Time the insertion of every Nth record for performance sampling
if i % PERFORMANCE_SAMPLE_SIZE == 0 {
let insert_start = Instant::now();
tree.set(&key, value)?;
let insert_duration = insert_start.elapsed();
// Only print detailed timing for specific samples to avoid flooding output
if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
println!("Record {}: Insertion took {:?}", i, insert_duration);
}
} else {
tree.set(&key, value)?;
}
// Show progress at intervals
if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
let records_in_batch = i + 1 - last_batch_records;
let batch_duration = last_batch_time.elapsed();
let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();
insertion_times.push((i + 1, batch_duration));
print!(
"\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
i + 1,
TOTAL_RECORDS,
(i + 1) as f64 / TOTAL_RECORDS as f64 * 100.0,
records_per_second
);
io::stdout().flush().unwrap();
last_batch_time = Instant::now();
last_batch_records = i + 1;
}
}
let total_duration = start_time.elapsed();
println!("\n\nPerformance Summary:");
println!(
"Total time to insert {} records: {:?}",
TOTAL_RECORDS, total_duration
);
println!(
"Average insertion rate: {:.2} records/second",
TOTAL_RECORDS as f64 / total_duration.as_secs_f64()
);
// Show performance trend
println!("\nPerformance Trend (records inserted vs. time per batch):");
for (i, (record_count, duration)) in insertion_times.iter().enumerate() {
if i % 10 == 0 || i == insertion_times.len() - 1 {
// Only show every 10th point to avoid too much output
println!(
" After {} records: {:?} for {} records ({:.2} records/sec)",
record_count,
duration,
PROGRESS_INTERVAL,
PROGRESS_INTERVAL as f64 / duration.as_secs_f64()
);
}
}
// Test access performance with distributed samples
println!("\nTesting access performance with distributed samples...");
let mut total_get_time = Duration::new(0, 0);
let num_samples = 1000;
// Use a simple distribution pattern instead of random
for i in 0..num_samples {
// Distribute samples across the entire range
let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
let key = format!("key:{:08}", sample_id);
let get_start = Instant::now();
let _ = tree.get(&key)?;
total_get_time += get_start.elapsed();
}
println!(
"Average time to retrieve a record: {:?}",
total_get_time / num_samples as u32
);
// Test prefix search performance
println!("\nTesting prefix search performance...");
let prefixes = ["key:0", "key:1", "key:5", "key:9"];
for prefix in &prefixes {
let list_start = Instant::now();
let keys = tree.list(prefix)?;
let list_duration = list_start.elapsed();
println!(
"Found {} keys with prefix '{}' in {:?}",
keys.len(),
prefix,
list_duration
);
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("\nCleaned up database directory");
} else {
println!("\nDatabase kept at: {}", db_path.display());
}
Ok(())
}


@@ -1,184 +0,0 @@
use std::time::Instant;
use tst::TST;
fn main() -> Result<(), tst::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("tst_prefix_example");
std::fs::create_dir_all(&db_path)?;
println!("Creating ternary search tree at: {}", db_path.display());
// Create a new TST
let mut tree = TST::new(db_path.to_str().unwrap(), true)?;
// Insert a variety of keys with different prefixes
println!("Inserting data with various prefixes...");
// Names
let names = [
"Alice",
"Alexander",
"Amanda",
"Andrew",
"Amy",
"Bob",
"Barbara",
"Benjamin",
"Brenda",
"Brian",
"Charlie",
"Catherine",
"Christopher",
"Cynthia",
"Carl",
"David",
"Diana",
"Daniel",
"Deborah",
"Donald",
"Edward",
"Elizabeth",
"Eric",
"Emily",
"Ethan",
];
for (i, name) in names.iter().enumerate() {
let value = format!("person-{}", i).into_bytes();
tree.set(name, value)?;
}
// Cities
let cities = [
"New York",
"Los Angeles",
"Chicago",
"Houston",
"Phoenix",
"Philadelphia",
"San Antonio",
"San Diego",
"Dallas",
"San Jose",
"Austin",
"Jacksonville",
"Fort Worth",
"Columbus",
"San Francisco",
"Charlotte",
"Indianapolis",
"Seattle",
"Denver",
"Washington",
];
for (i, city) in cities.iter().enumerate() {
let value = format!("city-{}", i).into_bytes();
tree.set(city, value)?;
}
// Countries
let countries = [
"United States",
"Canada",
"Mexico",
"Brazil",
"Argentina",
"United Kingdom",
"France",
"Germany",
"Italy",
"Spain",
"China",
"Japan",
"India",
"Australia",
"Russia",
];
for (i, country) in countries.iter().enumerate() {
let value = format!("country-{}", i).into_bytes();
tree.set(country, value)?;
}
println!(
"Total items inserted: {}",
names.len() + cities.len() + countries.len()
);
// Test prefix operations
test_prefix(&mut tree, "A")?;
test_prefix(&mut tree, "B")?;
test_prefix(&mut tree, "C")?;
test_prefix(&mut tree, "San")?;
test_prefix(&mut tree, "United")?;
// Test non-existent prefix
test_prefix(&mut tree, "Z")?;
// Test empty prefix (should return all keys)
println!("\nTesting empty prefix (should return all keys):");
let start = Instant::now();
let all_keys = tree.list("")?;
let duration = start.elapsed();
println!(
"Found {} keys with empty prefix in {:?}",
all_keys.len(),
duration
);
println!("First 5 keys (alphabetically):");
for key in all_keys.iter().take(5) {
println!(" {}", key);
}
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path)?;
println!("\nCleaned up database directory");
} else {
println!("\nDatabase kept at: {}", db_path.display());
}
Ok(())
}
fn test_prefix(tree: &mut TST, prefix: &str) -> Result<(), tst::Error> {
println!("\nTesting prefix '{}':", prefix);
// Test list operation
let start = Instant::now();
let keys = tree.list(prefix)?;
let list_duration = start.elapsed();
println!(
"Found {} keys with prefix '{}' in {:?}",
keys.len(),
prefix,
list_duration
);
if !keys.is_empty() {
println!("Keys:");
for key in &keys {
println!(" {}", key);
}
// Test getall operation
let start = Instant::now();
let values = tree.getall(prefix)?;
let getall_duration = start.elapsed();
println!("Retrieved {} values in {:?}", values.len(), getall_duration);
println!(
"First value: {}",
if !values.is_empty() {
String::from_utf8_lossy(&values[0])
} else {
"None".into()
}
);
}
Ok(())
}


@@ -1,36 +0,0 @@
//! Error types for the TST module.
use std::io;
use thiserror::Error;
/// Error type for TST operations.
#[derive(Debug, Error)]
pub enum Error {
/// Error from OurDB operations.
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
/// Error when a key is not found.
#[error("Key not found: {0}")]
KeyNotFound(String),
/// Error when a prefix is not found.
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
/// Error during serialization.
#[error("Serialization error: {0}")]
Serialization(String),
/// Error during deserialization.
#[error("Deserialization error: {0}")]
Deserialization(String),
/// Error for invalid operations.
#[error("Invalid operation: {0}")]
InvalidOperation(String),
/// IO error.
#[error("IO error: {0}")]
IO(#[from] io::Error),
}

Some files were not shown because too many files have changed in this diff.