Compare commits
29 Commits
dfd6931c5b ... network_se
| SHA1 |
|------|
| f8436a726e |
| 182b0edeb7 |
| f5670f20be |
| 0f4ed1d64d |
| f4512b66cf |
| da3da0ae30 |
| 784f87db97 |
| 773db2238d |
| e8a369e3a2 |
| 4b4f3371b0 |
| 1bb731711b |
| af89ef0149 |
| 768e3e176d |
| aa0248ef17 |
| aab2b6f128 |
| d735316b7f |
| d1c80863b8 |
| 169c62da47 |
| 33a5f24981 |
| d7562ce466 |
| ca736d62f3 |
| 078c6f723b |
| 9fdb8d8845 |
| 8203a3b1ff |
| 1770ac561e |
| eed6dbf8dc |
| 4cd4e04028 |
| 8cc828fc0e |
| 56af312aad |
51 Cargo.toml
@@ -19,13 +19,18 @@ members = [
     "packages/core/net",
     "packages/core/text",
     "packages/crypt/vault",
     "packages/data/ourdb",
     "packages/data/radixtree",
     "packages/data/tst",
     "packages/system/git",
     "packages/system/kubernetes",
     "packages/system/os",
     "packages/system/process",
     "packages/system/virt",
     "rhai",
     "rhailib",
     "herodo",
+    "packages/clients/hetznerclient",
 ]
 resolver = "2"

@@ -47,7 +52,7 @@ log = "0.4"
 once_cell = "1.18.0"
 rand = "0.8.5"
 regex = "1.8.1"
-reqwest = { version = "0.12.15", features = ["json"] }
+reqwest = { version = "0.12.15", features = ["json", "blocking"] }
 rhai = { version = "1.12.0", features = ["sync"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"

@@ -98,29 +103,42 @@ postgres-types = "0.2.5"
 r2d2 = "0.8.10"

 # SAL dependencies
 sal-git = { path = "packages/system/git" }
 sal-kubernetes = { path = "packages/system/kubernetes" }
 sal-redisclient = { path = "packages/clients/redisclient" }
 sal-mycelium = { path = "packages/clients/myceliumclient" }
+sal-hetzner = { path = "packages/clients/hetznerclient" }
 sal-text = { path = "packages/core/text" }
 sal-os = { path = "packages/system/os" }
 sal-net = { path = "packages/core/net" }
 sal-zinit-client = { path = "packages/clients/zinitclient" }
 sal-process = { path = "packages/system/process" }
 sal-virt = { path = "packages/system/virt" }
 sal-postgresclient = { path = "packages/clients/postgresclient" }
 sal-vault = { path = "packages/crypt/vault" }
 sal-rhai = { path = "rhai" }
 sal-service-manager = { path = "_archive/service_manager" }

 [dependencies]
 thiserror = { workspace = true }
 tokio = { workspace = true }

 # Optional dependencies - users can choose which modules to include
-sal-git = { path = "packages/system/git", optional = true }
-sal-kubernetes = { path = "packages/system/kubernetes", optional = true }
-sal-redisclient = { path = "packages/clients/redisclient", optional = true }
-sal-mycelium = { path = "packages/clients/myceliumclient", optional = true }
-sal-text = { path = "packages/core/text", optional = true }
-sal-os = { path = "packages/system/os", optional = true }
-sal-net = { path = "packages/core/net", optional = true }
-sal-zinit-client = { path = "packages/clients/zinitclient", optional = true }
-sal-process = { path = "packages/system/process", optional = true }
-sal-virt = { path = "packages/system/virt", optional = true }
-sal-postgresclient = { path = "packages/clients/postgresclient", optional = true }
-sal-vault = { path = "packages/crypt/vault", optional = true }
-sal-rhai = { path = "rhai", optional = true }
+sal-git = { workspace = true, optional = true }
+sal-kubernetes = { workspace = true, optional = true }
+sal-redisclient = { workspace = true, optional = true }
+sal-mycelium = { workspace = true, optional = true }
+sal-hetzner = { workspace = true, optional = true }
+sal-text = { workspace = true, optional = true }
+sal-os = { workspace = true, optional = true }
+sal-net = { workspace = true, optional = true }
+sal-zinit-client = { workspace = true, optional = true }
+sal-process = { workspace = true, optional = true }
+sal-virt = { workspace = true, optional = true }
+sal-postgresclient = { workspace = true, optional = true }
+sal-vault = { workspace = true, optional = true }
+sal-rhai = { workspace = true, optional = true }
+sal-service-manager = { workspace = true, optional = true }

 [features]
 default = []

@@ -130,6 +148,7 @@ git = ["dep:sal-git"]
 kubernetes = ["dep:sal-kubernetes"]
 redisclient = ["dep:sal-redisclient"]
 mycelium = ["dep:sal-mycelium"]
+hetzner = ["dep:sal-hetzner"]
 text = ["dep:sal-text"]
 os = ["dep:sal-os"]
 net = ["dep:sal-net"]

@@ -143,7 +162,7 @@ rhai = ["dep:sal-rhai"]

 # Convenience feature groups
 core = ["os", "process", "text", "net"]
-clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"]
+clients = ["redisclient", "postgresclient", "zinit_client", "mycelium", "hetzner"]
 infrastructure = ["git", "vault", "kubernetes", "virt"]
 scripting = ["rhai"]
 all = [

@@ -151,6 +170,7 @@ all = [
     "kubernetes",
     "redisclient",
     "mycelium",
+    "hetzner",
     "text",
     "os",
     "net",

@@ -177,4 +197,3 @@ required-features = ["kubernetes"]
 name = "generic_cluster"
 path = "examples/kubernetes/clusters/generic.rs"
 required-features = ["kubernetes"]
-
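The optional-dependency plus feature-gate pair above is what lets the `sal` meta-crate expose the new client only when asked for. A minimal sketch of the wiring (assumed, not shown in this diff; the re-export name matches the `sal::hetzner::...` paths used by `herodo` further down):

```rust
// Sketch of src/lib.rs in the `sal` meta-crate (hypothetical).
// `dep:sal-hetzner` is only compiled in when the `hetzner` feature is on,
// e.g. `cargo build --features hetzner` or `cargo add sal --features hetzner`.
#[cfg(feature = "hetzner")]
pub use sal_hetzner as hetzner;
```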
228 README.md
@@ -1,148 +1,136 @@
-# SAL (System Abstraction Layer)
+# Herocode Herolib Rust Repository

-**Version 0.1.0** - A modular Rust library for cross-platform system operations and automation.
+## Overview

-SAL provides a unified interface for system operations with Rhai scripting support through the `herodo` tool.
+This repository contains the **Herocode Herolib** Rust library and a collection of scripts, examples, and utilities for building, testing, and publishing the SAL (System Abstraction Layer) crates. The repository includes:

-## Installation
+- **Rust crates** for various system components (e.g., `os`, `process`, `text`, `git`, `vault`, `kubernetes`, etc.).
+- **Rhai scripts** and test suites for each crate.
+- **Utility scripts** to automate common development tasks.

-### Individual Packages (Recommended)
+## Scripts
+
+The repository provides three primary helper scripts located in the repository root:
+
+| Script | Description | Typical Usage |
+|--------|-------------|---------------|
+| `scripts/publish-all.sh` | Publishes all SAL crates to **crates.io** in the correct dependency order. Handles version bumping, dependency updates, dry‑run mode, and rate‑limiting. | `./scripts/publish-all.sh [--dry-run] [--wait <seconds>] [--version <ver>]` |
+| `build_herodo.sh` | Builds the `herodo` binary from the `herodo` package and optionally runs a specified Rhai script. | `./build_herodo.sh [script_name]` |
+| `run_rhai_tests.sh` | Executes all Rhai test suites across the repository, logging results and providing a summary. | `./run_rhai_tests.sh` |
+
+Below are detailed usage instructions for each script.
+
+---
+
+## 1. `scripts/publish-all.sh`
+
+### Purpose
+
+- Publishes each SAL crate in the correct dependency order.
+- Updates crate versions (if `--version` is supplied).
+- Updates path dependencies to version dependencies before publishing.
+- Supports **dry‑run** mode to preview actions without publishing.
+- Handles rate‑limiting between crate publishes.
+
+### Options
+
+| Option | Description |
+|--------|-------------|
+| `--dry-run` | Shows what would be published without actually publishing. |
+| `--wait <seconds>` | Wait time between publishes (default: 15 s). |
+| `--version <ver>` | Set a new version for all crates (updates `Cargo.toml` files). |
+| `-h, --help` | Show help message. |
+
+### Example Usage

 ```bash
-# Core functionality
-cargo add sal-os sal-process sal-text sal-net
+# Dry run – no crates will be published
+./scripts/publish-all.sh --dry-run

-# Infrastructure
-cargo add sal-git sal-vault sal-kubernetes sal-virt
+# Publish with a custom wait time and version bump
+./scripts/publish-all.sh --wait 30 --version 1.2.3

-# Database clients
-cargo add sal-redisclient sal-postgresclient sal-zinit-client
-
-# Scripting
-cargo add sal-rhai
+# Normal publish (no dry‑run)
+./scripts/publish-all.sh
 ```

-### Meta-package with Features
+### Notes
+
+- Must be run from the repository root (where `Cargo.toml` lives).
+- Requires `cargo` and a logged‑in `cargo` session (`cargo login`).
+- The script automatically updates dependencies in each crate’s `Cargo.toml` to use the new version before publishing.
+
+---
+
+## 2. `build_herodo.sh`
+
+### Purpose
+
+- Builds the `herodo` binary from the `herodo` package.
+- Copies the binary to a system‑wide location (`/usr/local/bin`) if run as root, otherwise to `~/hero/bin`.
+- Optionally runs a specified Rhai script after building.
+
+### Usage

 ```bash
-cargo add sal --features core # os, process, text, net
-cargo add sal --features infrastructure # git, vault, kubernetes, virt
-cargo add sal --features all # everything
+# Build only
+./build_herodo.sh
+
+# Build and run a specific Rhai script (e.g., `example`):
+./build_herodo.sh example
 ```

-### Herodo Script Runner
+### Details
+
+- The script changes to its own directory, builds the `herodo` crate (`cargo build`), and copies the binary.
+- If a script name is provided, it looks for the script in:
+  - `src/rhaiexamples/<name>.rhai`
+  - `src/herodo/scripts/<name>.rhai`
+- If the script is not found, the script exits with an error.
+
+---
+
+## 3. `run_rhai_tests.sh`
+
+### Purpose
+
+- Runs **all** Rhai test suites across the repository.
+- Supports both the legacy `rhai_tests` directory and the newer `*/tests/rhai` layout.
+- Logs output to `run_rhai_tests.log` and prints a summary.
+
+### Usage

 ```bash
-cargo install herodo
-```
-
-## Quick Start
-
-### Rust Library Usage
-
-```rust
-use sal_os::fs;
-use sal_process::run;
-
-fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let files = fs::list_files(".")?;
-    println!("Found {} files", files.len());
-
-    let result = run::command("echo hello")?;
-    println!("Output: {}", result.stdout);
-    Ok(())
-}
-```
-
-### Herodo Scripting
-
-```bash
-# Create script
-cat > example.rhai << 'EOF'
-let files = find_files(".", "*.rs");
-print("Found " + files.len() + " Rust files");
-
-let result = run("echo 'Hello from SAL!'");
-print("Output: " + result.stdout);
-EOF
-
-# Run script
-herodo example.rhai
-```
-
-## Available Packages
-
-| Package | Description |
-|---------|-------------|
-| [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations |
-| [`sal-process`](https://crates.io/crates/sal-process) | Process management |
-| [`sal-text`](https://crates.io/crates/sal-text) | Text processing |
-| [`sal-net`](https://crates.io/crates/sal-net) | Network operations |
-| [`sal-git`](https://crates.io/crates/sal-git) | Git repository management |
-| [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations |
-| [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management |
-| [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools |
-| [`sal-redisclient`](https://crates.io/crates/sal-redisclient) | Redis client |
-| [`sal-postgresclient`](https://crates.io/crates/sal-postgresclient) | PostgreSQL client |
-| [`sal-zinit-client`](https://crates.io/crates/sal-zinit-client) | Zinit process supervisor |
-| [`sal-mycelium`](https://crates.io/crates/sal-mycelium) | Mycelium network client |
-| [`sal-service-manager`](https://crates.io/crates/sal-service-manager) | Service management |
-| [`sal-rhai`](https://crates.io/crates/sal-rhai) | Rhai scripting integration |
-| [`sal`](https://crates.io/crates/sal) | Meta-crate with features |
-| [`herodo`](https://crates.io/crates/herodo) | Script executor binary |
-
-## Building & Testing
-
-```bash
-# Build all packages
-cargo build --workspace
-
-# Run tests
-cargo test --workspace
-
-# Run Rhai integration tests
+# Run all tests
 ./run_rhai_tests.sh
 ```

-## Core Features
+### Output

-- **System Operations**: File/directory management, environment access, OS commands
-- **Process Management**: Create, monitor, and control system processes
-- **Containerization**: Buildah and nerdctl integration
-- **Version Control**: Git repository operations
-- **Database Clients**: Redis and PostgreSQL support
-- **Networking**: HTTP, TCP, SSH connectivity utilities
-- **Cryptography**: Key management, encryption, digital signatures
-- **Text Processing**: String manipulation and templating
-- **Scripting**: Rhai script execution via `herodo`
+- Colored console output for readability.
+- Log file (`run_rhai_tests.log`) contains full output for later review.
+- Summary includes total modules, passed, and failed counts.
+- Exit code `0` if all tests pass, `1` otherwise.

-## Herodo Scripting
+---

-`herodo` executes Rhai scripts with access to all SAL modules:
+## General Development Workflow

-```bash
-herodo script.rhai # Run single script
-herodo script.rhai arg1 arg2 # With arguments
-herodo /path/to/scripts/ # Run all .rhai files in directory
-```
+1. **Build**: Use `build_herodo.sh` to compile the `herodo` binary.
+2. **Test**: Run `run_rhai_tests.sh` to ensure all Rhai scripts pass.
+3. **Publish**: When ready to release, use `scripts/publish-all.sh` (with `--dry-run` first to verify).

-### Example Script
+## Prerequisites

-```rhai
-// File operations
-let files = find_files(".", "*.rs");
-print("Found " + files.len() + " Rust files");
-
-// Process execution
-let result = run("echo 'Hello SAL!'");
-print("Output: " + result.stdout);
-
-// Redis operations
-redis_set("status", "running");
-let status = redis_get("status");
-print("Status: " + status);
-```
+- **Rust toolchain** (`cargo`, `rustc`) installed.
+- **Rhai** interpreter (`herodo`) built and available.
+- **Git** for version control.
+- **Cargo login** for publishing to crates.io.

 ## License

-Licensed under the Apache License 2.0. See [LICENSE](LICENSE) for details.
+See `LICENSE` for details.
+
+---
+
+**Happy coding!**
@@ -1,6 +1,7 @@
 // Example of using the network modules in SAL through Rhai
 // Shows TCP port checking, HTTP URL validation, and SSH command execution
 
+
 // Function to print section header
 fn section(title) {
     print("\n");
@@ -19,14 +20,14 @@ let host = "localhost";
 let port = 22;
 print(`Checking if port ${port} is open on ${host}...`);
 let is_open = tcp.check_port(host, port);
-print(`Port ${port} is ${is_open ? "open" : "closed"}`);
+print(`Port ${port} is ${if is_open { "open" } else { "closed" }}`);
 
 // Check multiple ports
 let ports = [22, 80, 443];
 print(`Checking multiple ports on ${host}...`);
 let port_results = tcp.check_ports(host, ports);
 for result in port_results {
-    print(`Port ${result.port} is ${result.is_open ? "open" : "closed"}`);
+    print(`Port ${result.port} is ${if result.is_open { "open" } else { "closed" }}`);
 }
 
 // HTTP connectivity checks
@@ -39,7 +40,7 @@ let http = net::new_http_connector();
 let url = "https://www.example.com";
 print(`Checking if ${url} is reachable...`);
 let is_reachable = http.check_url(url);
-print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`);
+print(`${url} is ${if is_reachable { "reachable" } else { "unreachable" }}`);
 
 // Check the status code of a URL
 print(`Checking status code of ${url}...`);
@@ -68,7 +69,7 @@ if is_open {
     let ssh = net::new_ssh_builder()
         .host("localhost")
         .port(22)
-        .user(os::get_env("USER") || "root")
+        .user(if os::get_env("USER") != () { os::get_env("USER") } else { "root" })
         .timeout(10)
         .build();
@@ -1,7 +1,7 @@
-print("Running a basic command using run().do()...");
+print("Running a basic command using run().execute()...");
 
 // Execute a simple command
-let result = run("echo Hello from run_basic!").do();
+let result = run("echo Hello from run_basic!").execute();
 
 // Print the command result
 print(`Command: echo Hello from run_basic!`);
@@ -13,6 +13,6 @@ print(`Stderr:\n${result.stderr}`);
 // Example of a command that might fail (if 'nonexistent_command' doesn't exist)
 // This will halt execution by default because ignore_error() is not used.
 // print("Running a command that will fail (and should halt)...");
-// let fail_result = run("nonexistent_command").do(); // This line will cause the script to halt if the command doesn't exist
+// let fail_result = run("nonexistent_command").execute(); // This line will cause the script to halt if the command doesn't exist
 
 print("Basic run() example finished.");
@@ -2,7 +2,7 @@ print("Running a command that will fail, but ignoring the error...");
 
 // Run a command that exits with a non-zero code (will fail)
 // Using .ignore_error() prevents the script from halting
-let result = run("exit 1").ignore_error().do();
+let result = run("exit 1").ignore_error().execute();
 
 print(`Command finished.`);
 print(`Success: ${result.success}`); // This should be false
@@ -22,7 +22,7 @@ print("\nScript continued execution after the potentially failing command.");
 // Example of a command that might fail due to OS error (e.g., command not found)
 // This *might* still halt depending on how the underlying Rust function handles it,
 // as ignore_error() primarily prevents halting on *command* non-zero exit codes.
-// let os_error_result = run("nonexistent_command_123").ignore_error().do();
+// let os_error_result = run("nonexistent_command_123").ignore_error().execute();
 // print(`OS Error Command Success: ${os_error_result.success}`);
 // print(`OS Error Command Exit Code: ${os_error_result.code}`);
@@ -1,4 +1,4 @@
-print("Running a command using run().log().do()...");
+print("Running a command using run().log().execute()...");
 
 // The .log() method will print the command string to the console before execution.
 // This is useful for debugging or tracing which commands are being run.
@@ -1,8 +1,8 @@
-print("Running a command using run().silent().do()...\n");
+print("Running a command using run().silent().execute()...\n");
 
 // This command will print to standard output and standard error
 // However, because .silent() is used, the output will not appear in the console directly
-let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().do();
+let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().execute();
 
 // The output is still captured in the CommandResult
 print(`Command finished.`);
@@ -12,7 +12,7 @@ print(`Captured Stdout:\\n${result.stdout}`);
 print(`Captured Stderr:\\n${result.stderr}`);
 
 // Example of a silent command that fails (but won't halt because we only suppress output)
-// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().do();
+// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().execute();
 // print(`Failed command finished (silent):`);
 // print(`Success: ${fail_result.success}`);
 // print(`Exit Code: ${fail_result.code}`);
@@ -3,7 +3,7 @@
 //! This library loads the Rhai engine, registers all SAL modules,
 //! and executes Rhai scripts from a specified directory in sorted order.
 
-use rhai::Engine;
+use rhai::{Engine, Scope};
 use std::error::Error;
 use std::fs;
 use std::path::{Path, PathBuf};
@@ -29,6 +29,19 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
 
     // Create a new Rhai engine
     let mut engine = Engine::new();
 
+    // TODO: if we create a scope here, we could clean up all the different functions and types registered with the engine.
+    // We should generalize the way we add things to the scope for each module separately.
+    let mut scope = Scope::new();
+    // Conditionally add the Hetzner client only when env config is present
+    if let Ok(cfg) = sal::hetzner::config::Config::from_env() {
+        let hetzner_client = sal::hetzner::api::Client::new(cfg);
+        scope.push("hetzner", hetzner_client);
+    }
+    // This makes it easy to call e.g. `hetzner.get_server()` or `mycelium.get_connected_peers()`
+    // --> without the need to manually create a client for each one first
+    // --> could be conditionally compiled to only include what we need (we only push the things to the scope that the script actually needs)
 
     // Register println function for output
     engine.register_fn("println", |s: &str| println!("{}", s));
@@ -78,19 +91,20 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
         let script = fs::read_to_string(&script_file)?;
 
         // Execute the script
-        match engine.eval::<rhai::Dynamic>(&script) {
-            Ok(result) => {
-                println!("Script executed successfully");
-                if !result.is_unit() {
-                    println!("Result: {}", result);
-                }
-            }
-            Err(err) => {
-                eprintln!("Error executing script: {}", err);
-                // Exit with error code when a script fails
-                process::exit(1);
-            }
-        }
+        // match engine.eval::<rhai::Dynamic>(&script) {
+        //     Ok(result) => {
+        //         println!("Script executed successfully");
+        //         if !result.is_unit() {
+        //             println!("Result: {}", result);
+        //         }
+        //     }
+        //     Err(err) => {
+        //         eprintln!("Error executing script: {}", err);
+        //         // Exit with error code when a script fails
+        //         process::exit(1);
+        //     }
+        // }
+        engine.run_with_scope(&mut scope, &script)?;
     }
 
     println!("\nAll scripts executed successfully!");
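A minimal standalone sketch of the `run_with_scope` pattern used above (only the `rhai` crate is assumed; the Hetzner client is replaced by a plain integer to keep the example self-contained):

```rust
use rhai::{Engine, Scope};

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let engine = Engine::new();

    // Push a pre-built value into the scope, as lib.rs does with the Hetzner client.
    let mut scope = Scope::new();
    scope.push("answer", 42_i64);

    // The script uses `answer` directly, without constructing it first,
    // just like `hetzner.get_server(...)` in a herodo script.
    engine.run_with_scope(&mut scope, r#"print("answer = " + answer);"#)?;
    Ok(())
}
```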
12 packages/clients/hetznerclient/Cargo.toml (Normal file)
@@ -0,0 +1,12 @@
[package]
name = "sal-hetzner"
version = "0.1.0"
edition = "2024"

[dependencies]
prettytable = "0.10.0"
reqwest.workspace = true
rhai = { workspace = true, features = ["serde"] }
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
thiserror.workspace = true
54 packages/clients/hetznerclient/src/api/error.rs (Normal file)
@@ -0,0 +1,54 @@
use std::fmt;

use serde::Deserialize;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum AppError {
    #[error("Request failed: {0}")]
    RequestError(#[from] reqwest::Error),
    #[error("API error: {0}")]
    ApiError(ApiError),
    #[error("Deserialization Error: {0:?}")]
    SerdeJsonError(#[from] serde_json::Error),
}

#[derive(Debug, Deserialize)]
pub struct ApiError {
    pub status: u16,
    pub message: String,
}

impl From<reqwest::blocking::Response> for ApiError {
    fn from(value: reqwest::blocking::Response) -> Self {
        ApiError {
            status: value.status().into(),
            message: value.text().unwrap_or("The API call returned an error.".to_string()),
        }
    }
}

impl fmt::Display for ApiError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        #[derive(Deserialize)]
        struct HetznerApiError {
            code: String,
            message: String,
        }

        #[derive(Deserialize)]
        struct HetznerApiErrorWrapper {
            error: HetznerApiError,
        }

        if let Ok(wrapper) = serde_json::from_str::<HetznerApiErrorWrapper>(&self.message) {
            write!(
                f,
                "Status: {}, Code: {}, Message: {}",
                self.status, wrapper.error.code, wrapper.error.message
            )
        } else {
            write!(f, "Status: {}: {}", self.status, self.message)
        }
    }
}
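A short sketch of how these error types compose at a call site (hypothetical caller; assumes the crate is used under its `sal-hetzner` package name, i.e. `sal_hetzner`):

```rust
use sal_hetzner::api::error::AppError;

// Distinguish transport failures from API-level errors.
fn report(err: AppError) {
    match err {
        AppError::RequestError(e) => eprintln!("transport error: {e}"),
        // ApiError's Display impl first tries to parse Hetzner's JSON error body.
        AppError::ApiError(api) => eprintln!("robot API error: {api}"),
        AppError::SerdeJsonError(e) => eprintln!("bad response body: {e}"),
    }
}
```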
513 packages/clients/hetznerclient/src/api/mod.rs (Normal file)
@@ -0,0 +1,513 @@
pub mod error;
pub mod models;

use self::models::{
    Boot, Rescue, Server, SshKey, ServerAddonProduct, ServerAddonProductWrapper,
    AuctionServerProduct, AuctionServerProductWrapper, AuctionTransaction,
    AuctionTransactionWrapper, BootWrapper, Cancellation, CancellationWrapper,
    OrderServerBuilder, OrderServerProduct, OrderServerProductWrapper, RescueWrapped,
    ServerWrapper, SshKeyWrapper, Transaction, TransactionWrapper,
    ServerAddonTransaction, ServerAddonTransactionWrapper,
    OrderServerAddonBuilder,
};
use crate::api::error::ApiError;
use crate::config::Config;
use error::AppError;
use reqwest::blocking::Client as HttpClient;
use serde_json::json;

#[derive(Clone)]
pub struct Client {
    http_client: HttpClient,
    config: Config,
}

impl Client {
    pub fn new(config: Config) -> Self {
        Self {
            http_client: HttpClient::new(),
            config,
        }
    }

    fn handle_response<T>(&self, response: reqwest::blocking::Response) -> Result<T, AppError>
    where
        T: serde::de::DeserializeOwned,
    {
        let status = response.status();
        let body = response.text()?;

        if status.is_success() {
            serde_json::from_str::<T>(&body).map_err(Into::into)
        } else {
            Err(AppError::ApiError(ApiError {
                status: status.as_u16(),
                message: body,
            }))
        }
    }

    pub fn get_server(&self, server_number: i32) -> Result<Server, AppError> {
        let response = self
            .http_client
            .get(format!("{}/server/{}", self.config.api_url, server_number))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: ServerWrapper = self.handle_response(response)?;
        Ok(wrapped.server)
    }

    pub fn get_servers(&self) -> Result<Vec<Server>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/server", self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<ServerWrapper> = self.handle_response(response)?;
        let servers = wrapped.into_iter().map(|sw| sw.server).collect();
        Ok(servers)
    }

    pub fn update_server_name(&self, server_number: i32, name: &str) -> Result<Server, AppError> {
        let params = [("server_name", name)];
        let response = self
            .http_client
            .post(format!("{}/server/{}", self.config.api_url, server_number))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: ServerWrapper = self.handle_response(response)?;
        Ok(wrapped.server)
    }

    pub fn get_cancellation_data(&self, server_number: i32) -> Result<Cancellation, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/server/{}/cancellation",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: CancellationWrapper = self.handle_response(response)?;
        Ok(wrapped.cancellation)
    }

    pub fn cancel_server(
        &self,
        server_number: i32,
        cancellation_date: &str,
    ) -> Result<Cancellation, AppError> {
        let params = [("cancellation_date", cancellation_date)];
        let response = self
            .http_client
            .post(format!(
                "{}/server/{}/cancellation",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: CancellationWrapper = self.handle_response(response)?;
        Ok(wrapped.cancellation)
    }

    pub fn withdraw_cancellation(&self, server_number: i32) -> Result<(), AppError> {
        self.http_client
            .delete(format!(
                "{}/server/{}/cancellation",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        Ok(())
    }

    pub fn get_ssh_keys(&self) -> Result<Vec<SshKey>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/key", self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<SshKeyWrapper> = self.handle_response(response)?;
        let keys = wrapped.into_iter().map(|sk| sk.key).collect();
        Ok(keys)
    }

    pub fn get_ssh_key(&self, fingerprint: &str) -> Result<SshKey, AppError> {
        let response = self
            .http_client
            .get(format!("{}/key/{}", self.config.api_url, fingerprint))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: SshKeyWrapper = self.handle_response(response)?;
        Ok(wrapped.key)
    }

    pub fn add_ssh_key(&self, name: &str, data: &str) -> Result<SshKey, AppError> {
        let params = [("name", name), ("data", data)];
        let response = self
            .http_client
            .post(format!("{}/key", self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: SshKeyWrapper = self.handle_response(response)?;
        Ok(wrapped.key)
    }

    pub fn update_ssh_key_name(&self, fingerprint: &str, name: &str) -> Result<SshKey, AppError> {
        let params = [("name", name)];
        let response = self
            .http_client
            .post(format!("{}/key/{}", self.config.api_url, fingerprint))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: SshKeyWrapper = self.handle_response(response)?;
        Ok(wrapped.key)
    }

    pub fn delete_ssh_key(&self, fingerprint: &str) -> Result<(), AppError> {
        self.http_client
            .delete(format!("{}/key/{}", self.config.api_url, fingerprint))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        Ok(())
    }

    pub fn get_boot_configuration(&self, server_number: i32) -> Result<Boot, AppError> {
        let response = self
            .http_client
            .get(format!("{}/boot/{}", self.config.api_url, server_number))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: BootWrapper = self.handle_response(response)?;
        Ok(wrapped.boot)
    }

    pub fn get_rescue_boot_configuration(&self, server_number: i32) -> Result<Rescue, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/boot/{}/rescue",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: RescueWrapped = self.handle_response(response)?;
        Ok(wrapped.rescue)
    }

    pub fn enable_rescue_mode(
        &self,
        server_number: i32,
        os: &str,
        authorized_keys: Option<&[String]>,
    ) -> Result<Rescue, AppError> {
        let mut params = vec![("os", os)];
        if let Some(keys) = authorized_keys {
            for key in keys {
                params.push(("authorized_key[]", key));
            }
        }
        let response = self
            .http_client
            .post(format!(
                "{}/boot/{}/rescue",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: RescueWrapped = self.handle_response(response)?;
        Ok(wrapped.rescue)
    }

    pub fn disable_rescue_mode(&self, server_number: i32) -> Result<Rescue, AppError> {
        let response = self
            .http_client
            .delete(format!(
                "{}/boot/{}/rescue",
                self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: RescueWrapped = self.handle_response(response)?;
        Ok(wrapped.rescue)
    }

    pub fn get_server_products(
        &self,
    ) -> Result<Vec<OrderServerProduct>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server/product", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<OrderServerProductWrapper> = self.handle_response(response)?;
        let products = wrapped.into_iter().map(|sop| sop.product).collect();
        Ok(products)
    }

    pub fn get_server_product_by_id(
        &self,
        product_id: &str,
    ) -> Result<OrderServerProduct, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/order/server/product/{}",
                &self.config.api_url, product_id
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: OrderServerProductWrapper = self.handle_response(response)?;
        Ok(wrapped.product)
    }

    pub fn order_server(&self, order: OrderServerBuilder) -> Result<Transaction, AppError> {
        let mut params = json!({
            "product_id": order.product_id,
            "dist": order.dist,
            "location": order.location,
            "authorized_key": order.authorized_keys.unwrap_or_default(),
        });

        if let Some(addons) = order.addons {
            params["addon"] = json!(addons);
        }

        if let Some(test) = order.test {
            if test {
                params["test"] = json!(test);
            }
        }

        let response = self
            .http_client
            .post(format!("{}/order/server/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .json(&params)
            .send()?;

        let wrapped: TransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }

    pub fn get_transaction_by_id(&self, transaction_id: &str) -> Result<Transaction, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/order/server/transaction/{}",
                &self.config.api_url, transaction_id
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: TransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }

    pub fn get_transactions(&self) -> Result<Vec<Transaction>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<TransactionWrapper> = self.handle_response(response)?;
        let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
        Ok(transactions)
    }

    pub fn get_auction_server_products(&self) -> Result<Vec<AuctionServerProduct>, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/order/server_market/product",
                &self.config.api_url
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<AuctionServerProductWrapper> = self.handle_response(response)?;
        let products = wrapped.into_iter().map(|asp| asp.product).collect();
        Ok(products)
    }

    pub fn get_auction_server_product_by_id(&self, product_id: &str) -> Result<AuctionServerProduct, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server_market/product/{}", &self.config.api_url, product_id))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: AuctionServerProductWrapper = self.handle_response(response)?;
        Ok(wrapped.product)
    }

    pub fn get_auction_transactions(&self) -> Result<Vec<AuctionTransaction>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server_market/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<AuctionTransactionWrapper> = self.handle_response(response)?;
        let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
        Ok(transactions)
    }

    pub fn get_auction_transaction_by_id(&self, transaction_id: &str) -> Result<AuctionTransaction, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server_market/transaction/{}", &self.config.api_url, transaction_id))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }

    pub fn get_server_addon_products(
        &self,
        server_number: i64,
    ) -> Result<Vec<ServerAddonProduct>, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/order/server_addon/{}/product",
                &self.config.api_url, server_number
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<ServerAddonProductWrapper> = self.handle_response(response)?;
        let products = wrapped.into_iter().map(|sap| sap.product).collect();
        Ok(products)
    }

    pub fn order_auction_server(
        &self,
        product_id: i64,
        authorized_keys: Vec<String>,
        dist: Option<String>,
        arch: Option<String>,
        lang: Option<String>,
        comment: Option<String>,
        addons: Option<Vec<String>>,
        test: Option<bool>,
    ) -> Result<AuctionTransaction, AppError> {
        let mut params: Vec<(&str, String)> = Vec::new();

        params.push(("product_id", product_id.to_string()));

        for key in &authorized_keys {
            params.push(("authorized_key[]", key.clone()));
        }

        if let Some(dist) = dist {
            params.push(("dist", dist));
        }
        if let Some(arch) = arch {
            params.push(("@deprecated arch", arch));
        }
        if let Some(lang) = lang {
            params.push(("lang", lang));
        }
        if let Some(comment) = comment {
            params.push(("comment", comment));
        }
        if let Some(addons) = addons {
            for addon in addons {
                params.push(("addon[]", addon));
            }
        }
        if let Some(test) = test {
            params.push(("test", test.to_string()));
        }

        let response = self
            .http_client
            .post(format!("{}/order/server_market/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }

    pub fn get_server_addon_transactions(&self) -> Result<Vec<ServerAddonTransaction>, AppError> {
        let response = self
            .http_client
            .get(format!("{}/order/server_addon/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: Vec<ServerAddonTransactionWrapper> = self.handle_response(response)?;
        let transactions = wrapped.into_iter().map(|satw| satw.transaction).collect();
        Ok(transactions)
    }

    pub fn get_server_addon_transaction_by_id(
        &self,
        transaction_id: &str,
    ) -> Result<ServerAddonTransaction, AppError> {
        let response = self
            .http_client
            .get(format!(
                "{}/order/server_addon/transaction/{}",
                &self.config.api_url, transaction_id
            ))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .send()?;

        let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }

    pub fn order_server_addon(
        &self,
        order: OrderServerAddonBuilder,
    ) -> Result<ServerAddonTransaction, AppError> {
        let mut params = json!({
            "server_number": order.server_number,
            "product_id": order.product_id,
        });

        if let Some(reason) = order.reason {
            params["reason"] = json!(reason);
        }
        if let Some(gateway) = order.gateway {
            params["gateway"] = json!(gateway);
        }
        if let Some(test) = order.test {
            if test {
                params["test"] = json!(test);
            }
        }

        let response = self
            .http_client
            .post(format!("{}/order/server_addon/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }
}
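A minimal sketch of driving this client directly from Rust (assumes `HETZNER_USERNAME` / `HETZNER_PASSWORD` are set, per `config.rs` below; `models.rs` is suppressed above, so printing via `Debug` is an assumption about `Server`):

```rust
use sal_hetzner::api::Client;
use sal_hetzner::config::Config;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Credentials come from HETZNER_USERNAME / HETZNER_PASSWORD;
    // HETZNER_API_URL falls back to https://robot-ws.your-server.de.
    let config = Config::from_env()?;
    let client = Client::new(config);

    // List all dedicated servers on the account.
    for server in client.get_servers()? {
        // Assumes Server derives Debug (its definition lives in the suppressed models.rs).
        println!("{server:?}");
    }
    Ok(())
}
```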
1894 packages/clients/hetznerclient/src/api/models.rs (Normal file)
File diff suppressed because it is too large.
25 packages/clients/hetznerclient/src/config.rs (Normal file)
@@ -0,0 +1,25 @@
use std::env;

#[derive(Clone)]
pub struct Config {
    pub username: String,
    pub password: String,
    pub api_url: String,
}

impl Config {
    pub fn from_env() -> Result<Self, String> {
        let username = env::var("HETZNER_USERNAME")
            .map_err(|_| "HETZNER_USERNAME environment variable not set".to_string())?;
        let password = env::var("HETZNER_PASSWORD")
            .map_err(|_| "HETZNER_PASSWORD environment variable not set".to_string())?;
        let api_url = env::var("HETZNER_API_URL")
            .unwrap_or_else(|_| "https://robot-ws.your-server.de".to_string());

        Ok(Config {
            username,
            password,
            api_url,
        })
    }
}
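This is the check that `herodo` relies on to decide whether to push a `hetzner` value into the script scope. A small sketch of the same pattern in isolation:

```rust
use sal_hetzner::config::Config;

fn main() {
    match Config::from_env() {
        Ok(cfg) => println!("using Robot endpoint {}", cfg.api_url),
        // Errors are plain Strings naming the missing variable.
        Err(msg) => eprintln!("hetzner client disabled: {msg}"),
    }
}
```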
3 packages/clients/hetznerclient/src/lib.rs (Normal file)
@@ -0,0 +1,3 @@
pub mod api;
pub mod config;
pub mod rhai;
63 packages/clients/hetznerclient/src/rhai/boot.rs (Normal file)
@@ -0,0 +1,63 @@
use crate::api::{
    models::{Boot, Rescue},
    Client,
};
use rhai::{plugin::*, Engine};

pub fn register(engine: &mut Engine) {
    let boot_module = exported_module!(boot_api);
    engine.register_global_module(boot_module.into());
}

#[export_module]
pub mod boot_api {
    use super::*;
    use rhai::EvalAltResult;

    #[rhai_fn(name = "get_boot_configuration", return_raw)]
    pub fn get_boot_configuration(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Boot, Box<EvalAltResult>> {
        client
            .get_boot_configuration(server_number as i32)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "get_rescue_boot_configuration", return_raw)]
    pub fn get_rescue_boot_configuration(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Rescue, Box<EvalAltResult>> {
        client
            .get_rescue_boot_configuration(server_number as i32)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "enable_rescue_mode", return_raw)]
    pub fn enable_rescue_mode(
        client: &mut Client,
        server_number: i64,
        os: &str,
        authorized_keys: rhai::Array,
    ) -> Result<Rescue, Box<EvalAltResult>> {
        let keys: Vec<String> = authorized_keys
            .into_iter()
            .map(|k| k.into_string().unwrap())
            .collect();

        client
            .enable_rescue_mode(server_number as i32, os, Some(&keys))
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "disable_rescue_mode", return_raw)]
    pub fn disable_rescue_mode(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Rescue, Box<EvalAltResult>> {
        client
            .disable_rescue_mode(server_number as i32)
            .map_err(|e| e.to_string().into())
    }
}
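The file above follows rhai's plugin-module pattern: an `#[export_module]` module of `#[rhai_fn]` functions, turned into a global module via `exported_module!`. A stripped-down, self-contained sketch of that pattern (hypothetical `greet` function, only the `rhai` crate assumed):

```rust
use rhai::{plugin::*, Engine};

// Minimal standalone version of the register()/export_module pattern above.
#[export_module]
mod greet_api {
    #[rhai_fn(name = "greet")]
    pub fn greet(name: &str) -> String {
        format!("hello, {name}")
    }
}

fn main() {
    let mut engine = Engine::new();
    // Make every function in the module callable from scripts.
    engine.register_global_module(exported_module!(greet_api).into());
    engine.run(r#"print(greet("hetzner"));"#).unwrap();
}
```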
54 packages/clients/hetznerclient/src/rhai/mod.rs (Normal file)
@@ -0,0 +1,54 @@
use rhai::{Engine, EvalAltResult};

use crate::api::models::{
    AuctionServerProduct, AuctionTransaction, AuctionTransactionProduct, AuthorizedKey, Boot,
    Cancellation, Cpanel, HostKey, Linux, OrderAuctionServerBuilder, OrderServerAddonBuilder,
    OrderServerBuilder, OrderServerProduct, Plesk, Rescue, Server, ServerAddonProduct,
    ServerAddonResource, ServerAddonTransaction, SshKey, Transaction, TransactionProduct, Vnc,
    Windows,
};

pub mod boot;
pub mod printing;
pub mod server;
pub mod server_ordering;
pub mod ssh_keys;

// Here we just register the hetzner module.
pub fn register_hetzner_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    // TODO: register types
    engine.build_type::<Server>();
    engine.build_type::<SshKey>();
    engine.build_type::<Boot>();
    engine.build_type::<Rescue>();
    engine.build_type::<Linux>();
    engine.build_type::<Vnc>();
    engine.build_type::<Windows>();
    engine.build_type::<Plesk>();
    engine.build_type::<Cpanel>();
    engine.build_type::<Cancellation>();
    engine.build_type::<OrderServerProduct>();
    engine.build_type::<Transaction>();
    engine.build_type::<AuthorizedKey>();
    engine.build_type::<TransactionProduct>();
    engine.build_type::<HostKey>();
    engine.build_type::<AuctionServerProduct>();
    engine.build_type::<AuctionTransaction>();
    engine.build_type::<AuctionTransactionProduct>();
    engine.build_type::<OrderAuctionServerBuilder>();
    engine.build_type::<OrderServerBuilder>();
    engine.build_type::<ServerAddonProduct>();
    engine.build_type::<ServerAddonTransaction>();
    engine.build_type::<ServerAddonResource>();
    engine.build_type::<OrderServerAddonBuilder>();

    server::register(engine);
    ssh_keys::register(engine);
    boot::register(engine);
    server_ordering::register(engine);

    // TODO: push hetzner to scope as value client:
    // scope.push("hetzner", client);

    Ok(())
}
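Wiring this into an engine then looks roughly like the following (a sketch mirroring what `herodo` does; the scope push is still listed as a TODO above, so here it is done by the caller):

```rust
use rhai::{Engine, Scope};
use sal_hetzner::rhai::register_hetzner_module;
use sal_hetzner::{api::Client, config::Config};

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();
    register_hetzner_module(&mut engine)?;

    let mut scope = Scope::new();
    if let Ok(cfg) = Config::from_env() {
        // Client is Clone, so it can be stored in the scope as a value.
        scope.push("hetzner", Client::new(cfg));
    }

    // Scripts can now call methods such as `hetzner.get_ssh_keys()`.
    engine.run_with_scope(&mut scope, r#"print("hetzner module ready");"#)?;
    Ok(())
}
```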
43 packages/clients/hetznerclient/src/rhai/printing/mod.rs (Normal file)
@@ -0,0 +1,43 @@
use rhai::{Array, Engine};
use crate::api::models::{OrderServerProduct, AuctionServerProduct, AuctionTransaction, ServerAddonProduct, ServerAddonTransaction, Server, SshKey};

mod servers_table;
mod ssh_keys_table;
mod server_ordering_table;

// This will be called when we print(...) or pretty_print() an Array (with Dynamic values)
pub fn pretty_print_dispatch(array: Array) {
    if array.is_empty() {
        println!("<empty table>");
        return;
    }

    let first = &array[0];

    if first.is::<Server>() {
        println!("Yeah first is server!");
        servers_table::pretty_print_servers(array);
    } else if first.is::<SshKey>() {
        ssh_keys_table::pretty_print_ssh_keys(array);
    } else if first.is::<OrderServerProduct>() {
        server_ordering_table::pretty_print_server_products(array);
    } else if first.is::<AuctionServerProduct>() {
        server_ordering_table::pretty_print_auction_server_products(array);
    } else if first.is::<AuctionTransaction>() {
        server_ordering_table::pretty_print_auction_transactions(array);
    } else if first.is::<ServerAddonProduct>() {
        server_ordering_table::pretty_print_server_addon_products(array);
    } else if first.is::<ServerAddonTransaction>() {
        server_ordering_table::pretty_print_server_addon_transactions(array);
    } else {
        // Generic fallback for other types
        for item in array {
            println!("{}", item.to_string());
        }
    }
}

pub fn register(engine: &mut Engine) {
    engine.register_fn("pretty_print", pretty_print_dispatch);
}
@@ -0,0 +1,293 @@
|
||||
use prettytable::{row, Table};
|
||||
use crate::api::models::{OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, ServerAddonResource};
|
||||
|
||||
pub fn pretty_print_server_products(products: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"ID",
|
||||
"Name",
|
||||
"Description",
|
||||
"Traffic",
|
||||
"Location",
|
||||
"Price (Net)",
|
||||
"Price (Gross)",
|
||||
]);
|
||||
|
||||
for product_dyn in products {
|
||||
if let Some(product) = product_dyn.try_cast::<OrderServerProduct>() {
|
||||
let mut price_net = "N/A".to_string();
|
||||
let mut price_gross = "N/A".to_string();
|
||||
|
||||
if let Some(first_price) = product.prices.first() {
|
||||
price_net = first_price.price.net.clone();
|
||||
price_gross = first_price.price.gross.clone();
|
||||
}
|
||||
|
||||
table.add_row(row![
|
||||
product.id,
|
||||
product.name,
|
||||
product.description.join(", "),
|
||||
product.traffic,
|
||||
product.location.join(", "),
|
||||
price_net,
|
||||
price_gross,
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn pretty_print_auction_server_products(products: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"ID",
|
||||
"Name",
|
||||
"Description",
|
||||
"Traffic",
|
||||
"Distributions",
|
||||
"Architectures",
|
||||
"Languages",
|
||||
"CPU",
|
||||
"CPU Benchmark",
|
||||
"Memory Size (GB)",
|
||||
"HDD Size (GB)",
|
||||
"HDD Text",
|
||||
"HDD Count",
|
||||
"Datacenter",
|
||||
"Network Speed",
|
||||
"Price (Net)",
|
||||
"Price (Hourly Net)",
|
||||
"Price (Setup Net)",
|
||||
"Price (VAT)",
|
||||
"Price (Hourly VAT)",
|
||||
"Price (Setup VAT)",
|
||||
"Fixed Price",
|
||||
"Next Reduce (seconds)",
|
||||
"Next Reduce Date",
|
||||
"Orderable Addons",
|
||||
]);
|
||||
|
||||
for product_dyn in products {
|
||||
if let Some(product) = product_dyn.try_cast::<crate::api::models::AuctionServerProduct>() {
|
||||
let mut addons_table = Table::new();
|
||||
addons_table.add_row(row![b => "ID", "Name", "Min", "Max", "Prices"]);
|
||||
for addon in &product.orderable_addons {
|
||||
let mut addon_prices_table = Table::new();
|
||||
addon_prices_table.add_row(row![b => "Location", "Net", "Gross", "Hourly Net", "Hourly Gross", "Setup Net", "Setup Gross"]);
|
||||
for price in &addon.prices {
|
||||
addon_prices_table.add_row(row![
|
||||
price.location,
|
||||
price.price.net,
|
||||
price.price.gross,
|
||||
price.price.hourly_net,
|
||||
price.price.hourly_gross,
|
||||
price.price_setup.net,
|
||||
price.price_setup.gross
|
||||
]);
|
||||
}
|
||||
addons_table.add_row(row![
|
||||
addon.id,
|
||||
addon.name,
|
||||
addon.min,
|
||||
addon.max,
|
||||
addon_prices_table
|
||||
]);
|
||||
}
|
||||
|
||||
table.add_row(row![
|
||||
product.id,
|
||||
product.name,
|
||||
product.description.join(", "),
|
||||
product.traffic,
|
||||
product.dist.join(", "),
|
||||
product.arch.as_deref().unwrap_or_default().join(", "),
|
||||
product.lang.join(", "),
|
||||
product.cpu,
|
||||
product.cpu_benchmark,
|
||||
product.memory_size,
|
||||
product.hdd_size,
|
||||
product.hdd_text,
|
||||
product.hdd_count,
|
||||
product.datacenter,
|
||||
product.network_speed,
|
||||
product.price,
|
||||
product.price_hourly.as_deref().unwrap_or("N/A"),
|
||||
product.price_setup,
|
||||
product.price_with_vat,
|
||||
product.price_hourly_with_vat.as_deref().unwrap_or("N/A"),
|
||||
product.price_setup_with_vat,
|
||||
product.fixed_price,
|
||||
product.next_reduce,
|
||||
product.next_reduce_date,
|
||||
addons_table,
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn pretty_print_server_addon_products(products: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"ID",
|
||||
"Name",
|
||||
"Type",
|
||||
"Location",
|
||||
"Price (Net)",
|
||||
"Price (Gross)",
|
||||
"Hourly Net",
|
||||
"Hourly Gross",
|
||||
"Setup Net",
|
||||
"Setup Gross",
|
||||
]);
|
||||
|
||||
for product_dyn in products {
|
||||
if let Some(product) = product_dyn.try_cast::<ServerAddonProduct>() {
|
||||
table.add_row(row![
|
||||
product.id,
|
||||
product.name,
|
||||
product.product_type,
|
||||
product.price.location,
|
||||
product.price.price.net,
|
||||
product.price.price.gross,
|
||||
product.price.price.hourly_net,
|
||||
product.price.price.hourly_gross,
|
||||
product.price.price_setup.net,
|
||||
product.price.price_setup.gross,
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn pretty_print_auction_transactions(transactions: rhai::Array) {
    let mut table = Table::new();
    table.add_row(row![b =>
        "ID",
        "Date",
        "Status",
        "Server Number",
        "Server IP",
        "Comment",
        "Product ID",
        "Product Name",
        "Product Traffic",
        "Product Distributions",
        "Product Architectures",
        "Product Languages",
        "Product CPU",
        "Product CPU Benchmark",
        "Product Memory Size (GB)",
        "Product HDD Size (GB)",
        "Product HDD Text",
        "Product HDD Count",
        "Product Datacenter",
        "Product Network Speed",
        "Product Fixed Price",
        "Product Next Reduce (seconds)",
        "Product Next Reduce Date",
        "Addons",
    ]);

    for transaction_dyn in transactions {
        if let Some(transaction) =
            transaction_dyn.try_cast::<crate::api::models::AuctionTransaction>()
        {
            let _authorized_keys_table = {
                let mut table = Table::new();
                table.add_row(row![b => "Name", "Fingerprint", "Type", "Size"]);
                for key in &transaction.authorized_key {
                    table.add_row(row![
                        key.key.name.as_deref().unwrap_or("N/A"),
                        key.key.fingerprint.as_deref().unwrap_or("N/A"),
                        key.key.key_type.as_deref().unwrap_or("N/A"),
                        key.key.size.map_or("N/A".to_string(), |s| s.to_string())
                    ]);
                }
                table
            };

            let _host_keys_table = {
                let mut table = Table::new();
                table.add_row(row![b => "Fingerprint", "Type", "Size"]);
                for key in &transaction.host_key {
                    table.add_row(row![
                        key.key.fingerprint.as_deref().unwrap_or("N/A"),
                        key.key.key_type.as_deref().unwrap_or("N/A"),
                        key.key.size.map_or("N/A".to_string(), |s| s.to_string())
                    ]);
                }
                table
            };

            table.add_row(row![
                transaction.id,
                transaction.date,
                transaction.status,
                transaction.server_number.map_or("N/A".to_string(), |id| id.to_string()),
                transaction.server_ip.as_deref().unwrap_or("N/A"),
                transaction.comment.as_deref().unwrap_or("N/A"),
                transaction.product.id,
                transaction.product.name,
                transaction.product.traffic,
                transaction.product.dist,
                transaction.product.arch.as_deref().unwrap_or("N/A"),
                transaction.product.lang,
                transaction.product.cpu,
                transaction.product.cpu_benchmark,
                transaction.product.memory_size,
                transaction.product.hdd_size,
                transaction.product.hdd_text,
                transaction.product.hdd_count,
                transaction.product.datacenter,
                transaction.product.network_speed,
                transaction.product.fixed_price.unwrap_or_default().to_string(),
                transaction
                    .product
                    .next_reduce
                    .map_or("N/A".to_string(), |r| r.to_string()),
                transaction
                    .product
                    .next_reduce_date
                    .as_deref()
                    .unwrap_or("N/A"),
                transaction.addons.join(", "),
            ]);
        }
    }
    table.printstd();
}

pub fn pretty_print_server_addon_transactions(transactions: rhai::Array) {
    let mut table = Table::new();
    table.add_row(row![b =>
        "ID",
        "Date",
        "Status",
        "Server Number",
        "Product ID",
        "Product Name",
        "Product Price",
        "Resources",
    ]);

    for transaction_dyn in transactions {
        if let Some(transaction) = transaction_dyn.try_cast::<ServerAddonTransaction>() {
            let mut resources_table = Table::new();
            resources_table.add_row(row![b => "Type", "ID"]);
            for resource in &transaction.resources {
                resources_table.add_row(row![resource.resource_type, resource.id]);
            }

            table.add_row(row![
                transaction.id,
                transaction.date,
                transaction.status,
                transaction.server_number,
                transaction.product.id,
                transaction.product.name,
                transaction.product.price.to_string(),
                resources_table,
            ]);
        }
    }
    table.printstd();
}
@@ -0,0 +1,30 @@
use prettytable::{row, Table};
use rhai::Array;

use super::Server;

pub fn pretty_print_servers(servers: Array) {
    let mut table = Table::new();
    table.add_row(row![b =>
        "Number",
        "Name",
        "IP",
        "Product",
        "DC",
        "Status"
    ]);

    for server_dyn in servers {
        if let Some(server) = server_dyn.try_cast::<Server>() {
            table.add_row(row![
                server.server_number.to_string(),
                server.server_name,
                server.server_ip.unwrap_or("N/A".to_string()),
                server.product,
                server.dc,
                server.status
            ]);
        }
    }
    table.printstd();
}
@@ -0,0 +1,26 @@
use prettytable::{row, Table};
use super::SshKey;

pub fn pretty_print_ssh_keys(keys: rhai::Array) {
    let mut table = Table::new();
    table.add_row(row![b =>
        "Name",
        "Fingerprint",
        "Type",
        "Size",
        "Created At"
    ]);

    for key_dyn in keys {
        if let Some(key) = key_dyn.try_cast::<SshKey>() {
            table.add_row(row![
                key.name,
                key.fingerprint,
                key.key_type,
                key.size.to_string(),
                key.created_at
            ]);
        }
    }
    table.printstd();
}
76
packages/clients/hetznerclient/src/rhai/server.rs
Normal file
@@ -0,0 +1,76 @@
use crate::api::{Client, models::Server};
use rhai::{Array, Dynamic, plugin::*};

pub fn register(engine: &mut Engine) {
    let server_module = exported_module!(server_api);
    engine.register_global_module(server_module.into());
}

#[export_module]
pub mod server_api {
    use crate::api::models::Cancellation;

    use super::*;
    use rhai::EvalAltResult;

    #[rhai_fn(name = "get_server", return_raw)]
    pub fn get_server(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Server, Box<EvalAltResult>> {
        client
            .get_server(server_number as i32)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "get_servers", return_raw)]
    pub fn get_servers(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let servers = client
            .get_servers()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        println!("number of SERVERS we got: {:#?}", servers.len());
        Ok(servers.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "update_server_name", return_raw)]
    pub fn update_server_name(
        client: &mut Client,
        server_number: i64,
        name: &str,
    ) -> Result<Server, Box<EvalAltResult>> {
        client
            .update_server_name(server_number as i32, name)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "get_cancellation_data", return_raw)]
    pub fn get_cancellation_data(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Cancellation, Box<EvalAltResult>> {
        client
            .get_cancellation_data(server_number as i32)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "cancel_server", return_raw)]
    pub fn cancel_server(
        client: &mut Client,
        server_number: i64,
        cancellation_date: &str,
    ) -> Result<Cancellation, Box<EvalAltResult>> {
        client
            .cancel_server(server_number as i32, cancellation_date)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "withdraw_cancellation", return_raw)]
    pub fn withdraw_cancellation(
        client: &mut Client,
        server_number: i64,
    ) -> Result<(), Box<EvalAltResult>> {
        client
            .withdraw_cancellation(server_number as i32)
            .map_err(|e| e.to_string().into())
    }
}
170
packages/clients/hetznerclient/src/rhai/server_ordering.rs
Normal file
@@ -0,0 +1,170 @@
use crate::api::{
    Client,
    models::{
        AuctionServerProduct, AuctionTransaction, OrderAuctionServerBuilder, OrderServerBuilder,
        OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, Transaction,
    },
};
use rhai::{Array, Dynamic, plugin::*};

pub fn register(engine: &mut Engine) {
    let server_order_module = exported_module!(server_order_api);
    engine.register_global_module(server_order_module.into());
}

#[export_module]
pub mod server_order_api {
    use crate::api::models::OrderServerAddonBuilder;

    use super::*;

    #[rhai_fn(name = "get_server_products", return_raw)]
    pub fn get_server_ordering_product_overview(
        client: &mut Client,
    ) -> Result<Array, Box<EvalAltResult>> {
        let overview_servers = client
            .get_server_products()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(overview_servers.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_server_product_by_id", return_raw)]
    pub fn get_server_ordering_product_by_id(
        client: &mut Client,
        product_id: &str,
    ) -> Result<OrderServerProduct, Box<EvalAltResult>> {
        let product = client
            .get_server_product_by_id(product_id)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(product)
    }

    #[rhai_fn(name = "order_server", return_raw)]
    pub fn order_server(
        client: &mut Client,
        order: OrderServerBuilder,
    ) -> Result<Transaction, Box<EvalAltResult>> {
        let transaction = client
            .order_server(order)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }

    #[rhai_fn(name = "get_transaction_by_id", return_raw)]
    pub fn get_transaction_by_id(
        client: &mut Client,
        transaction_id: &str,
    ) -> Result<Transaction, Box<EvalAltResult>> {
        let transaction = client
            .get_transaction_by_id(transaction_id)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }

    #[rhai_fn(name = "get_transactions", return_raw)]
    pub fn get_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let transactions = client
            .get_transactions()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transactions.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_auction_server_products", return_raw)]
    pub fn get_auction_server_products(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let products = client
            .get_auction_server_products()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(products.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_auction_server_product_by_id", return_raw)]
    pub fn get_auction_server_product_by_id(
        client: &mut Client,
        product_id: &str,
    ) -> Result<AuctionServerProduct, Box<EvalAltResult>> {
        let product = client
            .get_auction_server_product_by_id(product_id)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(product)
    }

    #[rhai_fn(name = "get_auction_transactions", return_raw)]
    pub fn get_auction_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let transactions = client
            .get_auction_transactions()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transactions.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_auction_transaction_by_id", return_raw)]
    pub fn get_auction_transaction_by_id(
        client: &mut Client,
        transaction_id: &str,
    ) -> Result<AuctionTransaction, Box<EvalAltResult>> {
        let transaction = client
            .get_auction_transaction_by_id(transaction_id)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }

    #[rhai_fn(name = "get_server_addon_products", return_raw)]
    pub fn get_server_addon_products(
        client: &mut Client,
        server_number: i64,
    ) -> Result<Array, Box<EvalAltResult>> {
        let products = client
            .get_server_addon_products(server_number)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(products.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_server_addon_transactions", return_raw)]
    pub fn get_server_addon_transactions(
        client: &mut Client,
    ) -> Result<Array, Box<EvalAltResult>> {
        let transactions = client
            .get_server_addon_transactions()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transactions.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_server_addon_transaction_by_id", return_raw)]
    pub fn get_server_addon_transaction_by_id(
        client: &mut Client,
        transaction_id: &str,
    ) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
        let transaction = client
            .get_server_addon_transaction_by_id(transaction_id)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }

    #[rhai_fn(name = "order_auction_server", return_raw)]
    pub fn order_auction_server(
        client: &mut Client,
        order: OrderAuctionServerBuilder,
    ) -> Result<AuctionTransaction, Box<EvalAltResult>> {
        println!("Builder struct being used to order server: {:#?}", order);
        let transaction = client
            .order_auction_server(
                order.product_id,
                order.authorized_keys.unwrap_or_default(),
                order.dist,
                None,
                order.lang,
                order.comment,
                order.addon,
                order.test,
            )
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }

    #[rhai_fn(name = "order_server_addon", return_raw)]
    pub fn order_server_addon(
        client: &mut Client,
        order: OrderServerAddonBuilder,
    ) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
        println!(
            "Builder struct being used to order server addon: {:#?}",
            order
        );
        let transaction = client
            .order_server_addon(order)
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(transaction)
    }
}
89
packages/clients/hetznerclient/src/rhai/ssh_keys.rs
Normal file
@@ -0,0 +1,89 @@
use crate::api::{Client, models::SshKey};
use prettytable::{Table, row};
use rhai::{Array, Dynamic, Engine, plugin::*};

pub fn register(engine: &mut Engine) {
    let ssh_keys_module = exported_module!(ssh_keys_api);
    engine.register_global_module(ssh_keys_module.into());
}

#[export_module]
pub mod ssh_keys_api {
    use super::*;
    use rhai::EvalAltResult;

    #[rhai_fn(name = "get_ssh_keys", return_raw)]
    pub fn get_ssh_keys(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let ssh_keys = client
            .get_ssh_keys()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(ssh_keys.into_iter().map(Dynamic::from).collect())
    }

    #[rhai_fn(name = "get_ssh_key", return_raw)]
    pub fn get_ssh_key(
        client: &mut Client,
        fingerprint: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .get_ssh_key(fingerprint)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "add_ssh_key", return_raw)]
    pub fn add_ssh_key(
        client: &mut Client,
        name: &str,
        data: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .add_ssh_key(name, data)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "update_ssh_key_name", return_raw)]
    pub fn update_ssh_key_name(
        client: &mut Client,
        fingerprint: &str,
        name: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .update_ssh_key_name(fingerprint, name)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "delete_ssh_key", return_raw)]
    pub fn delete_ssh_key(
        client: &mut Client,
        fingerprint: &str,
    ) -> Result<(), Box<EvalAltResult>> {
        client
            .delete_ssh_key(fingerprint)
            .map_err(|e| e.to_string().into())
    }

    #[rhai_fn(name = "pretty_print")]
    pub fn pretty_print_ssh_keys(keys: Array) {
        let mut table = Table::new();
        table.add_row(row![b =>
            "Name",
            "Fingerprint",
            "Type",
            "Size",
            "Created At"
        ]);

        for key_dyn in keys {
            if let Some(key) = key_dyn.try_cast::<SshKey>() {
                table.add_row(row![
                    key.name,
                    key.fingerprint,
                    key.key_type,
                    key.size.to_string(),
                    key.created_at
                ]);
            }
        }
        table.printstd();
    }
}
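The `register` functions in these modules all follow the same pattern: export the module and attach it globally, so Rhai scripts can call the API with method syntax on a `Client`. A minimal sketch of driving it from Rust; `make_client()` is a hypothetical stand-in for however the crate constructs an authenticated `Client`, and the module path is assumed:

```rust
use rhai::{Engine, Scope};

fn run_script() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();

    // Attach the exported module; its functions become global in scripts.
    crate::rhai::ssh_keys::register(&mut engine);

    // Hypothetical constructor; not shown in this diff.
    let client = make_client();

    let mut scope = Scope::new();
    scope.push("hetzner", client);

    // Functions taking `&mut Client` first are callable as methods.
    engine.run_with_scope(
        &mut scope,
        r#"
            let keys = hetzner.get_ssh_keys();
            pretty_print(keys);
        "#,
    )
}
```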
@@ -3,6 +3,7 @@

 use k256::{SecretKey, ecdh::diffie_hellman, elliptic_curve::sec1::ToEncodedPoint};
 use sha2::Sha256;
+use getrandom::fill;

 use crate::{error::CryptoError, key::symmetric::SymmetricKey};

@@ -22,7 +23,7 @@ impl AsymmetricKeypair {
     /// Generates a new random keypair
     pub fn new() -> Result<Self, CryptoError> {
         let mut raw_private = [0u8; 32];
-        rand::fill(&mut raw_private);
+        fill(&mut raw_private);
         let sk = SecretKey::from_slice(&raw_private)
             .expect("Key is provided generated with fixed valid size");
         let pk = sk.public_key();

@@ -4,6 +4,7 @@ use k256::ecdsa::{
     Signature, SigningKey, VerifyingKey,
     signature::{Signer, Verifier},
 };
+use getrandom::fill;

 use crate::error::CryptoError;

@@ -19,7 +20,7 @@ impl SigningKeypair {
     /// Generates a new random keypair
     pub fn new() -> Result<Self, CryptoError> {
         let mut raw_private = [0u8; 32];
-        rand::fill(&mut raw_private);
+        fill(&mut raw_private);
         let sk = SigningKey::from_slice(&raw_private)
             .expect("Key is provided generated with fixed valid size");
         let vk = sk.verifying_key().to_owned();

@@ -5,6 +5,7 @@
 //! Keys are 32 bytes in size.

 use chacha20poly1305::{ChaCha20Poly1305, KeyInit, Nonce, aead::Aead};
+use getrandom::fill;

 use crate::error::CryptoError;

@@ -18,7 +19,7 @@ impl SymmetricKey {
     /// Generate a new random SymmetricKey.
     pub fn new() -> Self {
         let mut key = [0u8; 32];
-        rand::fill(&mut key);
+        fill(&mut key);
         Self(key)
     }

@@ -47,7 +48,7 @@ impl SymmetricKey {

         // Generate random nonce
         let mut nonce_bytes = [0u8; NONCE_SIZE];
-        rand::fill(&mut nonce_bytes);
+        fill(&mut nonce_bytes);
         let nonce = Nonce::from_slice(&nonce_bytes);

         // Encrypt message
277
packages/data/ourdb/API.md
Normal file
@@ -0,0 +1,277 @@
# OurDB API Reference

This document provides a comprehensive reference for the OurDB Rust API.

## Table of Contents

1. [Configuration](#configuration)
2. [Database Operations](#database-operations)
   - [Creating and Opening](#creating-and-opening)
   - [Setting Data](#setting-data)
   - [Getting Data](#getting-data)
   - [Deleting Data](#deleting-data)
   - [History Tracking](#history-tracking)
   - [Other Operations](#other-operations)
3. [Error Handling](#error-handling)
4. [Advanced Usage](#advanced-usage)
   - [Custom File Size](#custom-file-size)
   - [Custom Key Size](#custom-key-size)
5. [Performance Considerations](#performance-considerations)

## Configuration

### OurDBConfig

The `OurDBConfig` struct is used to configure a new OurDB instance.

```rust
pub struct OurDBConfig {
    pub path: PathBuf,
    pub incremental_mode: bool,
    pub file_size: Option<usize>,
    pub keysize: Option<u8>,
}
```

| Field | Type | Description |
|-------|------|-------------|
| `path` | `PathBuf` | Path to the database directory |
| `incremental_mode` | `bool` | Whether to use auto-incremented IDs (true) or user-provided IDs (false) |
| `file_size` | `Option<usize>` | Maximum size of each database file in bytes (default: 500MB) |
| `keysize` | `Option<u8>` | Size of keys in bytes (default: 4; valid values: 2, 3, 4, 6) |

Example:
```rust
let config = OurDBConfig {
    path: PathBuf::from("/path/to/db"),
    incremental_mode: true,
    file_size: Some(1024 * 1024 * 100), // 100MB
    keysize: Some(4), // 4-byte keys
};
```

## Database Operations

### Creating and Opening

#### `OurDB::new`

Creates a new OurDB instance or opens an existing one.

```rust
pub fn new(config: OurDBConfig) -> Result<OurDB, Error>
```

Example:
```rust
let mut db = OurDB::new(config)?;
```

### Setting Data

#### `OurDB::set`

Sets a value in the database. In incremental mode, if no ID is provided, a new ID is generated.

```rust
pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error>
```

The `OurDBSetArgs` struct has the following fields:

```rust
pub struct OurDBSetArgs<'a> {
    pub id: Option<u32>,
    pub data: &'a [u8],
}
```

Example with auto-generated ID:
```rust
let id = db.set(OurDBSetArgs {
    id: None,
    data: b"Hello, World!",
})?;
```

Example with explicit ID:
```rust
db.set(OurDBSetArgs {
    id: Some(42),
    data: b"Hello, World!",
})?;
```

### Getting Data

#### `OurDB::get`

Retrieves a value from the database by ID.

```rust
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>
```

Example:
```rust
let data = db.get(42)?;
```

### Deleting Data

#### `OurDB::delete`

Deletes a value from the database by ID.

```rust
pub fn delete(&mut self, id: u32) -> Result<(), Error>
```

Example:
```rust
db.delete(42)?;
```

### History Tracking

#### `OurDB::get_history`

Retrieves the history of values for a given ID, up to the specified depth.

```rust
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>
```

Example:
```rust
// Get the last 5 versions of the record
let history = db.get_history(42, 5)?;

// Process each version (most recent first)
for (i, version) in history.iter().enumerate() {
    println!("Version {}: {:?}", i, version);
}
```

### Other Operations

#### `OurDB::get_next_id`

Returns the next ID that will be assigned in incremental mode.

```rust
pub fn get_next_id(&self) -> Result<u32, Error>
```

Example:
```rust
let next_id = db.get_next_id()?;
```

#### `OurDB::close`

Closes the database, ensuring all data is flushed to disk.

```rust
pub fn close(&mut self) -> Result<(), Error>
```

Example:
```rust
db.close()?;
```

#### `OurDB::destroy`

Closes the database and deletes all database files.

```rust
pub fn destroy(&mut self) -> Result<(), Error>
```

Example:
```rust
db.destroy()?;
```

## Error Handling

OurDB uses the `thiserror` crate to define error types. The main error type is `ourdb::Error`.

```rust
pub enum Error {
    IoError(std::io::Error),
    InvalidKeySize,
    InvalidId,
    RecordNotFound,
    InvalidCrc,
    NotIncrementalMode,
    DatabaseClosed,
    // ...
}
```

All OurDB operations that can fail return a `Result<T, Error>`, which can be handled using Rust's standard error handling mechanisms.

Example:
```rust
match db.get(42) {
    Ok(data) => println!("Found data: {:?}", data),
    Err(ourdb::Error::RecordNotFound) => println!("Record not found"),
    Err(e) => eprintln!("Error: {}", e),
}
```

## Advanced Usage

### Custom File Size

You can configure the maximum size of each database file:

```rust
let config = OurDBConfig {
    path: PathBuf::from("/path/to/db"),
    incremental_mode: true,
    file_size: Some(1024 * 1024 * 10), // 10MB per file
    keysize: None,
};
```

Smaller file sizes can be useful for:
- Limiting memory usage when reading files
- Improving performance on systems with limited memory
- Easier backup and file management

### Custom Key Size

OurDB supports different key sizes (2, 3, 4, or 6 bytes):

```rust
let config = OurDBConfig {
    path: PathBuf::from("/path/to/db"),
    incremental_mode: true,
    file_size: None,
    keysize: Some(6), // 6-byte keys
};
```

Key size considerations:
- 2 bytes: Up to 65,536 records
- 3 bytes: Up to 16,777,216 records
- 4 bytes: Up to 4,294,967,296 records (default)
- 6 bytes: Up to 281,474,976,710,656 records

## Performance Considerations

For optimal performance (a configuration sketch follows the list):

1. **Choose an appropriate key size**: Use the smallest key size that can accommodate your expected number of records.

2. **Configure file size**: For large databases, consider using smaller file sizes to keep memory usage down.

3. **Batch operations**: When inserting or updating many records, batch the operations to minimize disk I/O.

4. **Close properly**: Always call `close()` when you're done with the database to ensure data is properly flushed to disk.

5. **Reuse the OurDB instance**: Creating a new OurDB instance has overhead, so reuse the same instance for multiple operations when possible.

6. **Consider memory usage**: The lookup table is loaded into memory, so very large databases may require significant RAM.
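As a sketch, a configuration that applies these recommendations for a database expected to hold a few million records (the path is illustrative):

```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

fn main() -> Result<(), ourdb::Error> {
    // Recommendation 1: 3-byte keys cover up to ~16.7M records,
    // the smallest size that fits a few million entries.
    // Recommendation 2: smaller 64MB files keep per-file memory modest.
    let config = OurDBConfig {
        path: PathBuf::from("/tmp/ourdb_tuned"),
        incremental_mode: true,
        file_size: Some(64 * 1024 * 1024),
        keysize: Some(3),
    };

    // Recommendation 5: one long-lived instance reused for every write.
    let mut db = OurDB::new(config)?;
    for record in ["alpha", "beta", "gamma"] {
        db.set(OurDBSetArgs { id: None, data: record.as_bytes() })?;
    }

    // Recommendation 4: close to flush everything to disk.
    db.close()?;
    Ok(())
}
```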
32
packages/data/ourdb/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
[package]
name = "ourdb"
version = "0.1.0"
edition = "2021"
description = "A lightweight, efficient key-value database with history tracking capabilities"
authors = ["OurWorld Team"]

[dependencies]
crc32fast = "1.3.2"
thiserror = "1.0.40"
log = "0.4.17"
rand = "0.8.5"

[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"

# [[bench]]
# name = "ourdb_benchmarks"
# harness = false

[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"

[[example]]
name = "advanced_usage"
path = "examples/advanced_usage.rs"

[[example]]
name = "benchmark"
path = "examples/benchmark.rs"
135
packages/data/ourdb/README.md
Normal file
@@ -0,0 +1,135 @@
# OurDB

OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This Rust implementation offers a robust and performant solution for applications requiring simple but reliable data storage.

## Features

- Simple key-value storage with history tracking
- Data integrity verification using CRC32
- Support for multiple backend files for large datasets
- Lookup table for fast data retrieval
- Incremental mode for auto-generated IDs
- Memory and disk-based lookup tables

## Limitations

- Maximum data size per entry is 65,535 bytes (~64KB) due to the 2-byte size field in the record header

## Usage

### Basic Example

```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

fn main() -> Result<(), ourdb::Error> {
    // Create a new database
    let config = OurDBConfig {
        path: PathBuf::from("/tmp/ourdb"),
        incremental_mode: true,
        file_size: None, // Use default (500MB)
        keysize: None,   // Use default (4 bytes)
    };

    let mut db = OurDB::new(config)?;

    // Store data (with auto-generated ID in incremental mode)
    let data = b"Hello, OurDB!";
    let id = db.set(OurDBSetArgs { id: None, data })?;
    println!("Stored data with ID: {}", id);

    // Retrieve data
    let retrieved = db.get(id)?;
    println!("Retrieved: {}", String::from_utf8_lossy(&retrieved));

    // Update data
    let updated_data = b"Updated data";
    db.set(OurDBSetArgs { id: Some(id), data: updated_data })?;

    // Get history (returns most recent first)
    let history = db.get_history(id, 2)?;
    for (i, entry) in history.iter().enumerate() {
        println!("History {}: {}", i, String::from_utf8_lossy(entry));
    }

    // Delete data
    db.delete(id)?;

    // Close the database
    db.close()?;

    Ok(())
}
```

### Key-Value Mode vs Incremental Mode

OurDB supports two operating modes:

1. **Key-Value Mode** (`incremental_mode: false`): You must provide IDs explicitly when storing data, as in the sketch below.
2. **Incremental Mode** (`incremental_mode: true`): IDs are auto-generated when not provided.
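A minimal key-value mode sketch, reusing the imports from the basic example above:

```rust
// Key-value mode: the caller owns the ID space.
let config = OurDBConfig {
    path: PathBuf::from("/tmp/ourdb_kv"),
    incremental_mode: false,
    file_size: None,
    keysize: None,
};
let mut db = OurDB::new(config)?;

// IDs must be supplied explicitly; writing to an existing ID
// stores a new version and links the old one into its history.
db.set(OurDBSetArgs { id: Some(7), data: b"seven" })?;
assert_eq!(db.get(7)?, b"seven");
```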
### Configuration Options

- `path`: Directory for database storage
- `incremental_mode`: Whether to use auto-increment mode
- `file_size`: Maximum file size (default: 500MB)
- `keysize`: Size of lookup table entries (2-6 bytes)
  - 2: For databases with < 65,536 records
  - 3: For databases with < 16,777,216 records
  - 4: For databases with < 4,294,967,296 records (default)
  - 6: For large databases requiring multiple files

## Architecture

OurDB consists of three main components:

1. **Frontend API**: Provides the public interface for database operations
2. **Lookup Table**: Maps keys to physical locations in the backend storage
3. **Backend Storage**: Manages the actual data persistence in files

### Record Format

Each record in the backend storage includes:
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data

## Documentation

Additional documentation is available in the repository:

- [API Reference](API.md): Detailed API documentation
- [Migration Guide](MIGRATION.md): Guide for migrating from the V implementation
- [Architecture](architecture.md): Design and implementation details

## Examples

The repository includes several examples to demonstrate OurDB usage:

- `basic_usage.rs`: Simple operations with OurDB
- `advanced_usage.rs`: More complex features including both operation modes
- `benchmark.rs`: Performance benchmarking tool

Run an example with:

```bash
cargo run --example basic_usage
cargo run --example advanced_usage
cargo run --example benchmark
```

## Performance

OurDB is designed for efficiency and minimal overhead. The benchmark example can be used to evaluate performance on your specific hardware and workload.

Typical performance metrics on modern hardware:

- **Write**: 10,000+ operations per second
- **Read**: 50,000+ operations per second

## License

This project is licensed under the MIT License.
439
packages/data/ourdb/architecture.md
Normal file
@@ -0,0 +1,439 @@
# OurDB: Architecture for V to Rust Port

## 1. Overview

OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This document outlines the architecture for porting OurDB from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.

## 2. Current Architecture (V Implementation)

The current V implementation of OurDB consists of three main components in a layered architecture:

```mermaid
graph TD
    A[Client Code] --> B[Frontend API]
    B --> C[Lookup Table]
    B --> D[Backend Storage]
    C --> D
```

### 2.1 Frontend (db.v)

The frontend provides the public API for database operations and coordinates between the lookup table and backend storage components.

Key responsibilities:
- Exposing high-level operations (set, get, delete, history)
- Managing incremental ID generation in auto-increment mode
- Coordinating data flow between lookup and backend components
- Handling database lifecycle (open, close, destroy)

### 2.2 Lookup Table (lookup.v)

The lookup table maps keys to physical locations in the backend storage.

Key responsibilities:
- Maintaining key-to-location mapping
- Optimizing key sizes based on database configuration
- Supporting both memory and disk-based lookup tables
- Handling sparse data efficiently
- Providing next ID generation for incremental mode

### 2.3 Backend Storage (backend.v)

The backend storage manages the actual data persistence in files.

Key responsibilities:
- Managing physical data storage in files
- Ensuring data integrity with CRC32 checksums
- Supporting multiple file backends for large datasets
- Implementing low-level read/write operations
- Tracking record history through linked locations

### 2.4 Core Data Structures

#### OurDB
```v
@[heap]
pub struct OurDB {
mut:
    lookup &LookupTable
pub:
    path             string // directory for storage
    incremental_mode bool
    file_size        u32 = 500 * (1 << 20) // 500MB
pub mut:
    file              os.File
    file_nr           u16 // the file which is open
    last_used_file_nr u16
}
```

#### LookupTable
```v
pub struct LookupTable {
    keysize    u8
    lookuppath string
mut:
    data        []u8
    incremental ?u32 // points to next empty slot if incremental mode is enabled
}
```

#### Location
```v
pub struct Location {
pub mut:
    file_nr  u16
    position u32
}
```

### 2.5 Storage Format

#### Record Format
Each record in the backend storage includes (parsed in the sketch below):
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data
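To make the byte layout concrete, here is a small parsing sketch for the fixed 12-byte header. The field byte order and the split of the 6-byte location into `file_nr` plus `position` are assumptions for illustration; the V source remains authoritative.

```rust
/// Parsed view of the 12-byte record header described above.
struct RecordHeader {
    size: u16,          // 2 bytes: length of the data that follows
    crc32: u32,         // 4 bytes: checksum of the data bytes
    prev_file_nr: u16,  // first 2 of the 6 previous-location bytes
    prev_position: u32, // remaining 4 of the 6 previous-location bytes
}

fn parse_header(buf: &[u8; 12]) -> RecordHeader {
    // Little-endian is assumed here for illustration only.
    RecordHeader {
        size: u16::from_le_bytes([buf[0], buf[1]]),
        crc32: u32::from_le_bytes([buf[2], buf[3], buf[4], buf[5]]),
        prev_file_nr: u16::from_le_bytes([buf[6], buf[7]]),
        prev_position: u32::from_le_bytes([buf[8], buf[9], buf[10], buf[11]]),
    }
}
```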
#### Lookup Table Optimization
The lookup table automatically optimizes its key size based on the database configuration:
- 2 bytes: For databases with < 65,536 records
- 3 bytes: For databases with < 16,777,216 records
- 4 bytes: For databases with < 4,294,967,296 records
- 6 bytes: For large databases requiring multiple files

## 3. Proposed Rust Architecture

The Rust implementation will maintain the same layered architecture while leveraging Rust's type system, ownership model, and error handling.

```mermaid
graph TD
    A[Client Code] --> B[OurDB API]
    B --> C[LookupTable]
    B --> D[Backend]
    C --> D
    E[Error Handling] --> B
    E --> C
    E --> D
    F[Configuration] --> B
```

### 3.1 Core Components

#### 3.1.1 OurDB (API Layer)

```rust
pub struct OurDB {
    path: String,
    incremental_mode: bool,
    file_size: u32,
    lookup: LookupTable,
    file: Option<std::fs::File>,
    file_nr: u16,
    last_used_file_nr: u16,
}

impl OurDB {
    pub fn new(config: OurDBConfig) -> Result<Self, Error>;
    pub fn set(&mut self, id: Option<u32>, data: &[u8]) -> Result<u32, Error>;
    pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>;
    pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>;
    pub fn delete(&mut self, id: u32) -> Result<(), Error>;
    pub fn get_next_id(&mut self) -> Result<u32, Error>;
    pub fn close(&mut self) -> Result<(), Error>;
    pub fn destroy(&mut self) -> Result<(), Error>;
}
```

#### 3.1.2 LookupTable

```rust
pub struct LookupTable {
    keysize: u8,
    lookuppath: String,
    data: Vec<u8>,
    incremental: Option<u32>,
}

impl LookupTable {
    fn new(config: LookupConfig) -> Result<Self, Error>;
    fn get(&self, id: u32) -> Result<Location, Error>;
    fn set(&mut self, id: u32, location: Location) -> Result<(), Error>;
    fn delete(&mut self, id: u32) -> Result<(), Error>;
    fn get_next_id(&self) -> Result<u32, Error>;
    fn increment_index(&mut self) -> Result<(), Error>;
    fn export_data(&self, path: &str) -> Result<(), Error>;
    fn import_data(&mut self, path: &str) -> Result<(), Error>;
    fn export_sparse(&self, path: &str) -> Result<(), Error>;
    fn import_sparse(&mut self, path: &str) -> Result<(), Error>;
}
```

#### 3.1.3 Location

```rust
pub struct Location {
    file_nr: u16,
    position: u32,
}

impl Location {
    fn new(bytes: &[u8], keysize: u8) -> Result<Self, Error>;
    fn to_bytes(&self) -> Result<Vec<u8>, Error>;
    fn to_u64(&self) -> u64;
}
```
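A sketch of how the keysize-aware serialization behind `to_bytes` could look. The byte order, the bounds rules, and the choice of error variants are assumptions, not the final implementation:

```rust
impl Location {
    fn to_bytes_sketch(&self, keysize: u8) -> Result<Vec<u8>, Error> {
        match keysize {
            // 6-byte keys: file_nr (2 bytes) followed by position (4 bytes).
            6 => {
                let mut out = Vec::with_capacity(6);
                out.extend_from_slice(&self.file_nr.to_be_bytes());
                out.extend_from_slice(&self.position.to_be_bytes());
                Ok(out)
            }
            // Smaller keys only encode a position inside file 0; a real
            // implementation should also range-check `position` itself.
            2 | 3 | 4 => {
                if self.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "multiple files require a 6-byte keysize".into(),
                    ));
                }
                let pos = self.position.to_be_bytes();
                Ok(pos[4 - keysize as usize..].to_vec())
            }
            other => Err(Error::InvalidKeySize(other)),
        }
    }
}
```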
#### 3.1.4 Backend

The backend functionality will be implemented as methods on the OurDB struct:

```rust
impl OurDB {
    fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error>;
    fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error>;
    fn get_file_nr(&mut self) -> Result<u16, Error>;
    fn set_(&mut self, id: u32, old_location: Location, data: &[u8]) -> Result<(), Error>;
    fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error>;
    fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error>;
    fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error>;
    fn close_(&mut self);
}
```

#### 3.1.5 Configuration

```rust
pub struct OurDBConfig {
    pub record_nr_max: u32,
    pub record_size_max: u32,
    pub file_size: u32,
    pub path: String,
    pub incremental_mode: bool,
    pub reset: bool,
}

struct LookupConfig {
    size: u32,
    keysize: u8,
    lookuppath: String,
    incremental_mode: bool,
}
```

#### 3.1.6 Error Handling

```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Invalid key size: {0}")]
    InvalidKeySize(u8),

    #[error("Record not found: {0}")]
    RecordNotFound(u32),

    #[error("Data corruption: CRC mismatch")]
    DataCorruption,

    #[error("Index out of bounds: {0}")]
    IndexOutOfBounds(u32),

    #[error("Incremental mode not enabled")]
    IncrementalNotEnabled,

    #[error("Lookup table is full")]
    LookupTableFull,

    #[error("Invalid file number: {0}")]
    InvalidFileNumber(u16),

    #[error("Invalid operation: {0}")]
    InvalidOperation(String),
}
```

## 4. Implementation Strategy

### 4.1 Phase 1: Core Data Structures

1. Implement the `Location` struct with serialization/deserialization
2. Implement the `Error` enum for error handling
3. Implement the configuration structures

### 4.2 Phase 2: Lookup Table

1. Implement the `LookupTable` struct with memory-based storage
2. Add disk-based storage support
3. Implement key size optimization
4. Add incremental ID support
5. Implement import/export functionality

### 4.3 Phase 3: Backend Storage

1. Implement file management functions
2. Implement record serialization/deserialization with CRC32
3. Implement history tracking through linked locations
4. Add support for multiple backend files

### 4.4 Phase 4: Frontend API

1. Implement the `OurDB` struct with core operations
2. Add high-level API methods (set, get, delete, history)
3. Implement database lifecycle management

### 4.5 Phase 5: Testing and Optimization

1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing OurDB files

## 5. Implementation Considerations

### 5.1 Memory Management

Leverage Rust's ownership model for safe and efficient memory management:
- Use `Vec<u8>` for data buffers instead of raw pointers
- Implement proper RAII for file handles
- Use references and borrows to avoid unnecessary copying
- Consider using `Bytes` from the `bytes` crate for zero-copy operations

### 5.2 Error Handling

Use Rust's `Result` type for comprehensive error handling (see the sketch after this list):
- Define custom error types for OurDB-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait
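A short sketch of how these points combine; `read_lookup_file` is a hypothetical helper, not part of the proposed API:

```rust
use std::fs::File;
use std::io::Read;

// `Io(#[from] std::io::Error)` lets `?` convert I/O failures automatically,
// so a function mixing domain checks and file access stays linear.
fn read_lookup_file(path: &str, keysize: u8) -> Result<Vec<u8>, Error> {
    if !matches!(keysize, 2 | 3 | 4 | 6) {
        return Err(Error::InvalidKeySize(keysize));
    }
    let mut buf = Vec::new();
    File::open(path)?.read_to_end(&mut buf)?; // io::Error -> Error via From
    Ok(buf)
}
```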
### 5.3 File I/O

Optimize file operations for performance:
- Use `BufReader` and `BufWriter` for buffered I/O
- Implement proper file locking for concurrent access
- Consider memory-mapped files for lookup tables
- Use `seek` and `read_exact` for precise positioning (see the sketch below)
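A minimal sketch of such a positioned read, assuming the 12-byte record header from section 2.5 and a little-endian size field:

```rust
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};

/// Seek to the record start, read the fixed header, then read exactly
/// `size` data bytes; `read_exact` fails loudly on a short read.
fn read_record_at(file: &mut File, position: u32) -> std::io::Result<Vec<u8>> {
    file.seek(SeekFrom::Start(position as u64))?;

    let mut header = [0u8; 12];
    file.read_exact(&mut header)?;

    let size = u16::from_le_bytes([header[0], header[1]]) as usize;

    let mut data = vec![0u8; size];
    file.read_exact(&mut data)?;
    Ok(data)
}
```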
### 5.4 Concurrency

Consider thread safety for concurrent database access:
- Use interior mutability patterns where appropriate
- Implement `Send` and `Sync` traits for thread safety
- Consider using `RwLock` for shared read access
- Provide clear documentation on thread safety guarantees

### 5.5 Performance Optimizations

Identify opportunities for performance improvements:
- Use memory-mapped files for lookup tables
- Implement caching for frequently accessed records
- Use zero-copy operations where possible
- Consider async I/O for non-blocking operations

## 6. Testing Strategy

### 6.1 Unit Tests

Write comprehensive unit tests for each component (a sketch follows this list):
- Test `Location` serialization/deserialization
- Test `LookupTable` operations
- Test backend storage functions
- Test error handling
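For instance, a round-trip test for `Location` could look like this sketch; it assumes the `to_bytes`/`new` pair from section 3.1.3 and in-crate access to the private fields:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn location_roundtrip_6_byte_key() {
        let loc = Location { file_nr: 3, position: 42 };
        let bytes = loc.to_bytes().expect("serialization should succeed");
        let back = Location::new(&bytes, 6).expect("deserialization should succeed");
        assert_eq!(back.file_nr, 3);
        assert_eq!(back.position, 42);
    }
}
```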
### 6.2 Integration Tests

Write integration tests for the complete system:
- Test database creation and configuration
- Test basic CRUD operations
- Test history tracking
- Test incremental ID generation
- Test file management

### 6.3 Compatibility Tests

Ensure compatibility with existing OurDB files:
- Test reading existing V-created OurDB files
- Test writing files that can be read by the V implementation
- Test migration scenarios

### 6.4 Performance Tests

Benchmark performance against the V implementation:
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different database sizes
- Test with different record sizes

## 7. Project Structure

```
ourdb/
├── Cargo.toml
├── src/
│   ├── lib.rs          # Public API and re-exports
│   ├── ourdb.rs        # OurDB implementation (frontend)
│   ├── lookup.rs       # Lookup table implementation
│   ├── location.rs     # Location struct implementation
│   ├── backend.rs      # Backend storage implementation
│   ├── error.rs        # Error types
│   ├── config.rs       # Configuration structures
│   └── utils.rs        # Utility functions
├── tests/
│   ├── unit/           # Unit tests
│   ├── integration/    # Integration tests
│   └── compatibility/  # Compatibility tests
└── examples/
    ├── basic.rs        # Basic usage example
    ├── history.rs      # History tracking example
    └── client_server.rs # Client-server example
```

## 8. Dependencies

The Rust implementation will use the following dependencies:

- `thiserror` for error handling
- `crc32fast` for CRC32 calculation
- `bytes` for efficient byte manipulation
- `memmap2` for memory-mapped files (optional)
- `serde` for serialization (optional, for future extensions)
- `log` for logging
- `criterion` for benchmarking

## 9. Compatibility Considerations

To ensure compatibility with the V implementation:

1. Maintain the same file format for data storage
2. Preserve the lookup table format
3. Keep the same CRC32 calculation method
4. Ensure identical behavior for incremental ID generation
5. Maintain the same history tracking mechanism

## 10. Future Extensions

Potential future extensions to consider:

1. Async API for non-blocking operations
2. Transactions support
3. Better concurrency control
4. Compression support
5. Encryption support
6. Streaming API for large values
7. Iterators for scanning records
8. Secondary indexes

## 11. Conclusion

This architecture provides a roadmap for porting OurDB from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.

The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing OurDB files
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage
231
packages/data/ourdb/examples/advanced_usage.rs
Normal file
@@ -0,0 +1,231 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
use std::time::Instant;

fn main() -> Result<(), ourdb::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("ourdb_advanced_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating database at: {}", db_path.display());

    // Demonstrate key-value mode (non-incremental)
    key_value_mode_example(&db_path)?;

    // Demonstrate incremental mode
    incremental_mode_example(&db_path)?;

    // Demonstrate performance benchmarking
    performance_benchmark(&db_path)?;

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("Cleaned up database directory");
    } else {
        println!("Database kept at: {}", db_path.display());
    }

    Ok(())
}

fn key_value_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Key-Value Mode Example ===");

    let db_path = base_path.join("key_value");
    std::fs::create_dir_all(&db_path)?;

    // Create a new database with key-value mode (non-incremental)
    let config = OurDBConfig {
        path: db_path,
        incremental_mode: false,
        file_size: Some(1024 * 1024), // 1MB for testing
        keysize: Some(2),             // Small key size for demonstration
        reset: None,                  // Don't reset existing database
    };

    let mut db = OurDB::new(config)?;

    // In key-value mode, we must provide IDs explicitly
    let custom_ids = [100, 200, 300, 400, 500];

    // Store data with custom IDs
    for (i, &id) in custom_ids.iter().enumerate() {
        let data = format!("Record with custom ID {}", id);
        db.set(OurDBSetArgs {
            id: Some(id),
            data: data.as_bytes(),
        })?;
        println!("Stored record {} with custom ID: {}", i + 1, id);
    }

    // Retrieve data by custom IDs
    for &id in &custom_ids {
        let retrieved = db.get(id)?;
        println!(
            "Retrieved ID {}: {}",
            id,
            String::from_utf8_lossy(&retrieved)
        );
    }

    // Update and track history
    let id_to_update = custom_ids[2]; // ID 300
    for i in 1..=3 {
        let updated_data = format!("Updated record {} (version {})", id_to_update, i);
        db.set(OurDBSetArgs {
            id: Some(id_to_update),
            data: updated_data.as_bytes(),
        })?;
        println!("Updated ID {} (version {})", id_to_update, i);
    }

    // Get history for the updated record
    let history = db.get_history(id_to_update, 5)?;
    println!("History for ID {} (most recent first):", id_to_update);
    for (i, entry) in history.iter().enumerate() {
        println!("  Version {}: {}", i, String::from_utf8_lossy(entry));
    }

    db.close()?;
    println!("Key-value mode example completed");

    Ok(())
}

fn incremental_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Incremental Mode Example ===");

    let db_path = base_path.join("incremental");
    std::fs::create_dir_all(&db_path)?;

    // Create a new database with incremental mode
    let config = OurDBConfig {
        path: db_path,
        incremental_mode: true,
        file_size: Some(1024 * 1024), // 1MB for testing
        keysize: Some(3),             // 3-byte keys
        reset: None,                  // Don't reset existing database
    };

    let mut db = OurDB::new(config)?;

    // In incremental mode, IDs are auto-generated
    let mut assigned_ids = Vec::new();

    // Store multiple records and collect assigned IDs
    for i in 1..=5 {
        let data = format!("Auto-increment record {}", i);
        let id = db.set(OurDBSetArgs {
            id: None,
            data: data.as_bytes(),
        })?;
        assigned_ids.push(id);
        println!("Stored record {} with auto-assigned ID: {}", i, id);
    }

    // Check next ID
    let next_id = db.get_next_id()?;
    println!("Next ID to be assigned: {}", next_id);

    // Retrieve all records
    for &id in &assigned_ids {
        let retrieved = db.get(id)?;
        println!(
            "Retrieved ID {}: {}",
            id,
            String::from_utf8_lossy(&retrieved)
        );
    }

    db.close()?;
    println!("Incremental mode example completed");

    Ok(())
}

fn performance_benchmark(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Performance Benchmark ===");

    let db_path = base_path.join("benchmark");
    std::fs::create_dir_all(&db_path)?;

    // Create a new database
    let config = OurDBConfig {
        path: db_path,
        incremental_mode: true,
        file_size: Some(1024 * 1024), // 1MB
        keysize: Some(4),             // 4-byte keys
        reset: None,                  // Don't reset existing database
    };

    let mut db = OurDB::new(config)?;

    // Number of operations for the benchmark
    let num_operations = 1000;
    let data_size = 100; // bytes per record

    // Prepare test data
    let test_data = vec![b'A'; data_size];

    // Benchmark write operations
    println!("Benchmarking {} write operations...", num_operations);
    let start = Instant::now();

    let mut ids = Vec::with_capacity(num_operations);
    for _ in 0..num_operations {
        let id = db.set(OurDBSetArgs {
            id: None,
            data: &test_data,
        })?;
        ids.push(id);
    }

    let write_duration = start.elapsed();
    let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
    println!(
        "Write performance: {:.2} ops/sec ({:.2} ms/op)",
        writes_per_second,
        write_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Benchmark read operations
    println!("Benchmarking {} read operations...", num_operations);
    let start = Instant::now();

    for &id in &ids {
        let _ = db.get(id)?;
    }

    let read_duration = start.elapsed();
    let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
    println!(
        "Read performance: {:.2} ops/sec ({:.2} ms/op)",
        reads_per_second,
        read_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Benchmark update operations
    println!("Benchmarking {} update operations...", num_operations);
    let start = Instant::now();

    for &id in &ids {
        db.set(OurDBSetArgs {
            id: Some(id),
            data: &test_data,
        })?;
    }

    let update_duration = start.elapsed();
    let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
    println!(
        "Update performance: {:.2} ops/sec ({:.2} ms/op)",
        updates_per_second,
        update_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    db.close()?;
    println!("Performance benchmark completed");

    Ok(())
}
89
packages/data/ourdb/examples/basic_usage.rs
Normal file
@@ -0,0 +1,89 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};

fn main() -> Result<(), ourdb::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("ourdb_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating database at: {}", db_path.display());

    // Create a new database with incremental mode enabled
    let config = OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None, // Use default (500MB)
        keysize: None,   // Use default (4 bytes)
        reset: None,     // Don't reset existing database
    };

    let mut db = OurDB::new(config)?;

    // Store some data with auto-generated IDs
    let data1 = b"First record";
    let id1 = db.set(OurDBSetArgs {
        id: None,
        data: data1,
    })?;
    println!("Stored first record with ID: {}", id1);

    let data2 = b"Second record";
    let id2 = db.set(OurDBSetArgs {
        id: None,
        data: data2,
    })?;
    println!("Stored second record with ID: {}", id2);

    // Retrieve and print the data
    let retrieved1 = db.get(id1)?;
    println!(
        "Retrieved ID {}: {}",
        id1,
        String::from_utf8_lossy(&retrieved1)
    );

    let retrieved2 = db.get(id2)?;
    println!(
        "Retrieved ID {}: {}",
        id2,
        String::from_utf8_lossy(&retrieved2)
    );

    // Update a record to demonstrate history tracking
    let updated_data = b"Updated first record";
    db.set(OurDBSetArgs {
        id: Some(id1),
        data: updated_data,
    })?;
    println!("Updated record with ID: {}", id1);

    // Get history for the updated record
    let history = db.get_history(id1, 2)?;
    println!("History for ID {}:", id1);
    for (i, entry) in history.iter().enumerate() {
        println!("  Version {}: {}", i, String::from_utf8_lossy(entry));
    }

    // Delete a record
    db.delete(id2)?;
    println!("Deleted record with ID: {}", id2);

    // Verify deletion
    match db.get(id2) {
        Ok(_) => println!("Record still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    // Close the database
    db.close()?;
    println!("Database closed successfully");

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("Cleaned up database directory");
    } else {
        println!("Database kept at: {}", db_path.display());
    }

    Ok(())
}
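A usage note: assuming the examples are wired into the `ourdb` package as the paths above suggest, this can be run with `cargo run --example basic_usage` from `packages/data/ourdb`; setting the `KEEP_DB` environment variable skips the final cleanup step, per the check at the end of the file.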
124 packages/data/ourdb/examples/benchmark.rs Normal file
@@ -0,0 +1,124 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::time::Instant;

fn main() -> Result<(), ourdb::Error> {
    // Parse command-line arguments
    let args: Vec<String> = std::env::args().collect();

    // Default values
    let mut incremental_mode = true;
    let mut keysize: u8 = 4;
    let mut num_operations = 10000;

    // Parse arguments
    for i in 1..args.len() {
        if args[i] == "--no-incremental" {
            incremental_mode = false;
        } else if args[i] == "--keysize" && i + 1 < args.len() {
            keysize = args[i + 1].parse().unwrap_or(4);
        } else if args[i] == "--ops" && i + 1 < args.len() {
            num_operations = args[i + 1].parse().unwrap_or(10000);
        }
    }

    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("ourdb_benchmark");
    std::fs::create_dir_all(&db_path)?;

    println!("Database path: {}", db_path.display());

    // Create a new database
    let config = OurDBConfig {
        path: db_path.clone(),
        incremental_mode,
        file_size: Some(1024 * 1024),
        keysize: Some(keysize),
        reset: Some(true), // Reset the database for benchmarking
    };

    let mut db = OurDB::new(config)?;

    // Prepare test data (100 bytes per record)
    let test_data = vec![b'A'; 100];

    // Benchmark write operations
    println!(
        "Benchmarking {} write operations (incremental: {}, keysize: {})...",
        num_operations, incremental_mode, keysize
    );

    let start = Instant::now();

    let mut ids = Vec::with_capacity(num_operations);
    for _ in 0..num_operations {
        let id = if incremental_mode {
            db.set(OurDBSetArgs {
                id: None,
                data: &test_data,
            })?
        } else {
            // In non-incremental mode, we need to provide IDs
            let id = ids.len() as u32 + 1;
            db.set(OurDBSetArgs {
                id: Some(id),
                data: &test_data,
            })?;
            id
        };
        ids.push(id);
    }

    let write_duration = start.elapsed();
    let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();

    println!(
        "Write performance: {:.2} ops/sec ({:.2} ms/op)",
        writes_per_second,
        write_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Benchmark read operations
    println!("Benchmarking {} read operations...", num_operations);

    let start = Instant::now();

    for &id in &ids {
        let _ = db.get(id)?;
    }

    let read_duration = start.elapsed();
    let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();

    println!(
        "Read performance: {:.2} ops/sec ({:.2} ms/op)",
        reads_per_second,
        read_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Benchmark update operations
    println!("Benchmarking {} update operations...", num_operations);

    let start = Instant::now();

    for &id in &ids {
        db.set(OurDBSetArgs {
            id: Some(id),
            data: &test_data,
        })?;
    }

    let update_duration = start.elapsed();
    let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();

    println!(
        "Update performance: {:.2} ops/sec ({:.2} ms/op)",
        updates_per_second,
        update_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Clean up
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;

    Ok(())
}
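The argument parser above recognizes `--no-incremental`, `--keysize <n>`, and `--ops <n>`, so an invocation such as `cargo run --release --example benchmark -- --keysize 6 --ops 50000` (illustrative values) would exercise the multi-file keysize-6 path. Benchmarks should be run with `--release`; debug builds can be an order of magnitude slower.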
83 packages/data/ourdb/examples/main.rs Normal file
@@ -0,0 +1,83 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Standalone OurDB Example");
    println!("=======================\n");

    // Create a temporary directory for the database
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
    std::fs::create_dir_all(&db_path)?;

    println!("Creating database at: {}", db_path.display());

    // Create a new OurDB instance
    let config = OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: Some(false),
    };

    let mut db = OurDB::new(config)?;
    println!("Database created successfully");

    // Store some data
    let test_data = b"Hello, OurDB!";
    let id = db.set(OurDBSetArgs {
        id: None,
        data: test_data,
    })?;
    println!("\nStored data with ID: {}", id);

    // Retrieve the data
    let retrieved = db.get(id)?;
    println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));

    // Update the data
    let updated_data = b"Updated data in OurDB!";
    db.set(OurDBSetArgs {
        id: Some(id),
        data: updated_data,
    })?;
    println!("\nUpdated data with ID: {}", id);

    // Retrieve the updated data
    let retrieved = db.get(id)?;
    println!(
        "Retrieved updated data: {}",
        String::from_utf8_lossy(&retrieved)
    );

    // Get history
    let history = db.get_history(id, 2)?;
    println!("\nHistory for ID {}:", id);
    for (i, data) in history.iter().enumerate() {
        println!("  Version {}: {}", i + 1, String::from_utf8_lossy(data));
    }

    // Delete the data
    db.delete(id)?;
    println!("\nDeleted data with ID: {}", id);

    // Try to retrieve the deleted data (should fail)
    match db.get(id) {
        Ok(_) => println!("Data still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    println!("\nExample completed successfully!");

    // Clean up
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;
    println!("Cleaned up database directory");

    Ok(())
}
83 packages/data/ourdb/examples/standalone_ourdb_example.rs Normal file
@@ -0,0 +1,83 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Standalone OurDB Example");
    println!("=======================\n");

    // Create a temporary directory for the database
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
    std::fs::create_dir_all(&db_path)?;

    println!("Creating database at: {}", db_path.display());

    // Create a new OurDB instance
    let config = OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: Some(false),
    };

    let mut db = OurDB::new(config)?;
    println!("Database created successfully");

    // Store some data
    let test_data = b"Hello, OurDB!";
    let id = db.set(OurDBSetArgs {
        id: None,
        data: test_data,
    })?;
    println!("\nStored data with ID: {}", id);

    // Retrieve the data
    let retrieved = db.get(id)?;
    println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));

    // Update the data
    let updated_data = b"Updated data in OurDB!";
    db.set(OurDBSetArgs {
        id: Some(id),
        data: updated_data,
    })?;
    println!("\nUpdated data with ID: {}", id);

    // Retrieve the updated data
    let retrieved = db.get(id)?;
    println!(
        "Retrieved updated data: {}",
        String::from_utf8_lossy(&retrieved)
    );

    // Get history
    let history = db.get_history(id, 2)?;
    println!("\nHistory for ID {}:", id);
    for (i, data) in history.iter().enumerate() {
        println!("  Version {}: {}", i + 1, String::from_utf8_lossy(data));
    }

    // Delete the data
    db.delete(id)?;
    println!("\nDeleted data with ID: {}", id);

    // Try to retrieve the deleted data (should fail)
    match db.get(id) {
        Ok(_) => println!("Data still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    println!("\nExample completed successfully!");

    // Clean up
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;
    println!("Cleaned up database directory");

    Ok(())
}
366 packages/data/ourdb/src/backend.rs Normal file
@@ -0,0 +1,366 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};

use crc32fast::Hasher;

use crate::error::Error;
use crate::location::Location;
use crate::OurDB;

// Header size: 2 bytes (size) + 4 bytes (CRC32) + 6 bytes (previous location)
pub const HEADER_SIZE: usize = 12;

impl OurDB {
    /// Selects and opens a database file for read/write operations
    pub(crate) fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error> {
        // No need to check if file_nr > 65535 as u16 can't exceed that value

        let path = self.path.join(format!("{}.db", file_nr));

        // Always close the current file if it's open
        self.file = None;

        // Create file if it doesn't exist
        if !path.exists() {
            self.create_new_db_file(file_nr)?;
        }

        // Open the file fresh
        let file = OpenOptions::new().read(true).write(true).open(&path)?;

        self.file = Some(file);
        self.file_nr = file_nr;

        Ok(())
    }

    /// Creates a new database file
    pub(crate) fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error> {
        let new_file_path = self.path.join(format!("{}.db", file_nr));
        let mut file = File::create(&new_file_path)?;

        // Write a single byte to make all positions start from 1
        file.write_all(&[0u8])?;

        Ok(())
    }

    /// Gets the file number to use for the next write operation
    pub(crate) fn get_file_nr(&mut self) -> Result<u16, Error> {
        // For keysize 2, 3, or 4, we can only use file_nr 0
        if self.lookup.keysize() <= 4 {
            let path = self.path.join("0.db");

            if !path.exists() {
                self.create_new_db_file(0)?;
            }

            return Ok(0);
        }

        // For keysize 6, we can use multiple files
        let path = self.path.join(format!("{}.db", self.last_used_file_nr));

        if !path.exists() {
            self.create_new_db_file(self.last_used_file_nr)?;
            return Ok(self.last_used_file_nr);
        }

        let metadata = fs::metadata(&path)?;
        if metadata.len() >= self.file_size as u64 {
            self.last_used_file_nr += 1;
            self.create_new_db_file(self.last_used_file_nr)?;
        }

        Ok(self.last_used_file_nr)
    }

    /// Stores data at the specified ID with history tracking
    pub(crate) fn set_(
        &mut self,
        id: u32,
        old_location: Location,
        data: &[u8],
    ) -> Result<(), Error> {
        // Validate data size - maximum is u16::MAX (65535 bytes or ~64KB)
        if data.len() > u16::MAX as usize {
            return Err(Error::InvalidOperation(format!(
                "Data size exceeds maximum allowed size of {} bytes",
                u16::MAX
            )));
        }

        // Get file number to use
        let file_nr = self.get_file_nr()?;

        // Select the file
        self.db_file_select(file_nr)?;

        // Get current file position for lookup
        let file = self
            .file
            .as_mut()
            .ok_or_else(|| Error::Other("No file open".to_string()))?;
        file.seek(SeekFrom::End(0))?;
        let position = file.stream_position()? as u32;

        // Create new location
        let new_location = Location { file_nr, position };

        // Calculate CRC of data
        let crc = calculate_crc(data);

        // Create header
        let mut header = vec![0u8; HEADER_SIZE];

        // Write size (2 bytes)
        let size = data.len() as u16; // Safe now because we've validated the size
        header[0] = (size & 0xFF) as u8;
        header[1] = ((size >> 8) & 0xFF) as u8;

        // Write CRC (4 bytes)
        header[2] = (crc & 0xFF) as u8;
        header[3] = ((crc >> 8) & 0xFF) as u8;
        header[4] = ((crc >> 16) & 0xFF) as u8;
        header[5] = ((crc >> 24) & 0xFF) as u8;

        // Write previous location (6 bytes)
        let prev_bytes = old_location.to_bytes();
        for (i, &byte) in prev_bytes.iter().enumerate().take(6) {
            header[6 + i] = byte;
        }

        // Write header
        file.write_all(&header)?;

        // Write actual data
        file.write_all(data)?;
        file.flush()?;

        // Update lookup table with new position
        self.lookup.set(id, new_location)?;

        Ok(())
    }

    /// Retrieves data at the specified location
    pub(crate) fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error> {
        if location.position == 0 {
            return Err(Error::NotFound(format!(
                "Record not found, location: {:?}",
                location
            )));
        }

        // Select the file
        self.db_file_select(location.file_nr)?;

        let file = self
            .file
            .as_mut()
            .ok_or_else(|| Error::Other("No file open".to_string()))?;

        // Read header
        file.seek(SeekFrom::Start(location.position as u64))?;
        let mut header = vec![0u8; HEADER_SIZE];
        file.read_exact(&mut header)?;

        // Parse size (2 bytes)
        let size = u16::from(header[0]) | (u16::from(header[1]) << 8);

        // Parse CRC (4 bytes)
        let stored_crc = u32::from(header[2])
            | (u32::from(header[3]) << 8)
            | (u32::from(header[4]) << 16)
            | (u32::from(header[5]) << 24);

        // Read data
        let mut data = vec![0u8; size as usize];
        file.read_exact(&mut data)?;

        // Verify CRC
        let calculated_crc = calculate_crc(&data);
        if calculated_crc != stored_crc {
            return Err(Error::DataCorruption(
                "CRC mismatch: data corruption detected".to_string(),
            ));
        }

        Ok(data)
    }

    /// Retrieves the previous position for a record (for history tracking)
    pub(crate) fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error> {
        if location.position == 0 {
            return Err(Error::NotFound("Record not found".to_string()));
        }

        // Select the file
        self.db_file_select(location.file_nr)?;

        let file = self
            .file
            .as_mut()
            .ok_or_else(|| Error::Other("No file open".to_string()))?;

        // Skip size and CRC (6 bytes)
        file.seek(SeekFrom::Start(location.position as u64 + 6))?;

        // Read previous location (6 bytes)
        let mut prev_bytes = vec![0u8; 6];
        file.read_exact(&mut prev_bytes)?;

        // Create location from bytes
        Location::from_bytes(&prev_bytes, 6)
    }

    /// Deletes the record at the specified location
    pub(crate) fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error> {
        if location.position == 0 {
            return Err(Error::NotFound("Record not found".to_string()));
        }

        // Select the file
        self.db_file_select(location.file_nr)?;

        let file = self
            .file
            .as_mut()
            .ok_or_else(|| Error::Other("No file open".to_string()))?;

        // Read size first
        file.seek(SeekFrom::Start(location.position as u64))?;
        let mut size_bytes = vec![0u8; 2];
        file.read_exact(&mut size_bytes)?;
        let size = u16::from(size_bytes[0]) | (u16::from(size_bytes[1]) << 8);

        // Write zeros for the entire record (header + data)
        let zeros = vec![0u8; HEADER_SIZE + size as usize];
        file.seek(SeekFrom::Start(location.position as u64))?;
        file.write_all(&zeros)?;

        // Clear lookup entry
        self.lookup.delete(id)?;

        Ok(())
    }

    /// Condenses the database by removing empty records and updating positions
    pub fn condense(&mut self) -> Result<(), Error> {
        // Create a temporary directory
        let temp_path = self.path.join("temp");
        fs::create_dir_all(&temp_path)?;

        // Get all file numbers
        let mut file_numbers = Vec::new();
        for entry in fs::read_dir(&self.path)? {
            let entry = entry?;
            let path = entry.path();

            if path.is_file() && path.extension().map_or(false, |ext| ext == "db") {
                if let Some(stem) = path.file_stem() {
                    if let Ok(file_nr) = stem.to_string_lossy().parse::<u16>() {
                        file_numbers.push(file_nr);
                    }
                }
            }
        }

        // Process each file
        for file_nr in file_numbers {
            let src_path = self.path.join(format!("{}.db", file_nr));
            let temp_file_path = temp_path.join(format!("{}.db", file_nr));

            // Create new file
            let mut temp_file = File::create(&temp_file_path)?;
            temp_file.write_all(&[0u8])?; // Initialize with a byte

            // Open source file
            let mut src_file = File::open(&src_path)?;

            // Read and process records
            let mut buffer = vec![0u8; 1024]; // Read in chunks
            let mut _position = 0;

            while let Ok(bytes_read) = src_file.read(&mut buffer) {
                if bytes_read == 0 {
                    break;
                }

                // Process the chunk
                // This is a simplified version - in a real implementation,
                // you would need to handle records that span chunk boundaries

                _position += bytes_read;
            }

            // TODO: Implement proper record copying and position updating
            // This would involve:
            // 1. Reading each record from the source file
            // 2. If not deleted (all zeros), copy to temp file
            // 3. Update lookup table with new positions
        }

        // TODO: Replace original files with temp files

        // Clean up
        fs::remove_dir_all(&temp_path)?;

        Ok(())
    }
}

/// Calculates CRC32 for the data
fn calculate_crc(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use crate::{OurDB, OurDBConfig, OurDBSetArgs};
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};

    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_backend_test_{}", timestamp))
    }

    #[test]
    fn test_backend_operations() {
        let temp_dir = get_temp_dir();

        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: false,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        };

        let mut db = OurDB::new(config).unwrap();

        // Test set and get
        let test_data = b"Test data for backend operations";
        let id = 1;

        db.set(OurDBSetArgs {
            id: Some(id),
            data: test_data,
        })
        .unwrap();

        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);

        // Clean up
        db.destroy().unwrap();
    }
}
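Since the 12-byte record header is central to the on-disk format, here is a minimal standalone sketch (not part of the diff) of the layout `set_` writes above: size and CRC little-endian, followed by the 6 previous-location bytes produced by `Location::to_bytes`.

```rust
/// Illustrative only: mirrors the header layout written by `set_`.
fn build_header(size: u16, crc: u32, prev_location: [u8; 6]) -> [u8; 12] {
    let mut header = [0u8; 12];
    header[0..2].copy_from_slice(&size.to_le_bytes()); // data size, little-endian
    header[2..6].copy_from_slice(&crc.to_le_bytes()); // CRC32, little-endian
    header[6..12].copy_from_slice(&prev_location); // previous location (big-endian, per to_bytes)
    header
}
```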
41 packages/data/ourdb/src/error.rs Normal file
@@ -0,0 +1,41 @@
use thiserror::Error;

/// Error types for OurDB operations
#[derive(Error, Debug)]
pub enum Error {
    /// IO errors from file operations
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// Data corruption errors
    #[error("Data corruption: {0}")]
    DataCorruption(String),

    /// Invalid operation errors
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// Lookup table errors
    #[error("Lookup error: {0}")]
    LookupError(String),

    /// Record not found errors
    #[error("Record not found: {0}")]
    NotFound(String),

    /// Other errors
    #[error("Error: {0}")]
    Other(String),
}

impl From<String> for Error {
    fn from(msg: String) -> Self {
        Error::Other(msg)
    }
}

impl From<&str> for Error {
    fn from(msg: &str) -> Self {
        Error::Other(msg.to_string())
    }
}
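For callers, the variants above map onto distinct recovery strategies; a minimal sketch (hypothetical helper, not part of the crate) of exhaustive matching:

```rust
use ourdb::Error;

fn describe(err: &Error) -> &'static str {
    match err {
        Error::Io(_) => "filesystem problem",
        Error::DataCorruption(_) => "stored bytes failed validation (e.g. CRC mismatch)",
        Error::InvalidOperation(_) => "call not permitted in the current mode",
        Error::LookupError(_) => "lookup table out of range or malformed",
        Error::NotFound(_) => "no record at that ID",
        Error::Other(_) => "uncategorized error",
    }
}
```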
293 packages/data/ourdb/src/lib.rs Normal file
@@ -0,0 +1,293 @@
mod backend;
mod error;
mod location;
mod lookup;

pub use error::Error;
pub use location::Location;
pub use lookup::LookupTable;

use std::fs::File;
use std::path::PathBuf;

/// OurDB is a lightweight, efficient key-value database implementation that provides
/// data persistence with history tracking capabilities.
pub struct OurDB {
    /// Directory path for storage
    path: PathBuf,
    /// Whether to use auto-increment mode
    incremental_mode: bool,
    /// Maximum file size (default: 500MB)
    file_size: u32,
    /// Lookup table for mapping keys to locations
    lookup: LookupTable,
    /// Currently open file
    file: Option<File>,
    /// Current file number
    file_nr: u16,
    /// Last used file number
    last_used_file_nr: u16,
}

/// Configuration for creating a new OurDB instance
pub struct OurDBConfig {
    /// Directory path for storage
    pub path: PathBuf,
    /// Whether to use auto-increment mode
    pub incremental_mode: bool,
    /// Maximum file size (default: 500MB)
    pub file_size: Option<u32>,
    /// Lookup table key size (default: 4)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files
    pub keysize: Option<u8>,
    /// Whether to reset the database if it exists (default: false)
    pub reset: Option<bool>,
}

/// Arguments for setting a value in OurDB
pub struct OurDBSetArgs<'a> {
    /// ID for the record (optional in incremental mode)
    pub id: Option<u32>,
    /// Data to store
    pub data: &'a [u8],
}

impl OurDB {
    /// Creates a new OurDB instance with the given configuration
    pub fn new(config: OurDBConfig) -> Result<Self, Error> {
        // If reset is true and the path exists, remove it first
        if config.reset.unwrap_or(false) && config.path.exists() {
            std::fs::remove_dir_all(&config.path)?;
        }

        // Create directory if it doesn't exist
        std::fs::create_dir_all(&config.path)?;

        // Create lookup table
        let lookup_path = config.path.join("lookup");
        std::fs::create_dir_all(&lookup_path)?;

        let lookup_config = lookup::LookupConfig {
            size: 1000000, // Default size
            keysize: config.keysize.unwrap_or(4),
            lookuppath: lookup_path.to_string_lossy().to_string(),
            incremental_mode: config.incremental_mode,
        };

        let lookup = LookupTable::new(lookup_config)?;

        let mut db = OurDB {
            path: config.path,
            incremental_mode: config.incremental_mode,
            file_size: config.file_size.unwrap_or(500 * (1 << 20)), // 500MB default
            lookup,
            file: None,
            file_nr: 0,
            last_used_file_nr: 0,
        };

        // Load existing metadata if available
        db.load()?;

        Ok(db)
    }

    /// Sets a value in the database
    ///
    /// In incremental mode:
    /// - If ID is provided, it updates an existing record
    /// - If ID is not provided, it creates a new record with auto-generated ID
    ///
    /// In key-value mode:
    /// - ID must be provided
    pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error> {
        if self.incremental_mode {
            if let Some(id) = args.id {
                // This is an update
                let location = self.lookup.get(id)?;
                if location.position == 0 {
                    return Err(Error::InvalidOperation(
                        "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                    ));
                }

                self.set_(id, location, args.data)?;
                Ok(id)
            } else {
                // This is an insert
                let id = self.lookup.get_next_id()?;
                self.set_(id, Location::default(), args.data)?;
                Ok(id)
            }
        } else {
            // Using key-value mode
            let id = args.id.ok_or_else(|| {
                Error::InvalidOperation(
                    "ID must be provided when incremental is disabled".to_string(),
                )
            })?;

            let location = self.lookup.get(id)?;
            self.set_(id, location, args.data)?;
            Ok(id)
        }
    }

    /// Retrieves data stored at the specified key position
    pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error> {
        let location = self.lookup.get(id)?;
        self.get_(location)
    }

    /// Retrieves a list of previous values for the specified key
    ///
    /// The depth parameter controls how many historical values to retrieve (maximum)
    pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error> {
        let mut result = Vec::new();
        let mut current_location = self.lookup.get(id)?;

        // Traverse the history chain up to specified depth
        for _ in 0..depth {
            // Get current value
            let data = self.get_(current_location)?;
            result.push(data);

            // Try to get previous location
            match self.get_prev_pos_(current_location) {
                Ok(location) => {
                    if location.position == 0 {
                        break;
                    }
                    current_location = location;
                }
                Err(_) => break,
            }
        }

        Ok(result)
    }

    /// Deletes the data at the specified key position
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        let location = self.lookup.get(id)?;
        self.delete_(id, location)?;
        self.lookup.delete(id)?;
        Ok(())
    }

    /// Returns the next ID which will be used when storing in incremental mode
    pub fn get_next_id(&mut self) -> Result<u32, Error> {
        if !self.incremental_mode {
            return Err(Error::InvalidOperation(
                "Incremental mode is not enabled".to_string(),
            ));
        }
        self.lookup.get_next_id()
    }

    /// Closes the database, ensuring all data is saved
    pub fn close(&mut self) -> Result<(), Error> {
        self.save()?;
        self.close_();
        Ok(())
    }

    /// Destroys the database, removing all files
    pub fn destroy(&mut self) -> Result<(), Error> {
        let _ = self.close();
        std::fs::remove_dir_all(&self.path)?;
        Ok(())
    }

    // Helper methods
    fn lookup_dump_path(&self) -> PathBuf {
        self.path.join("lookup_dump.db")
    }

    fn load(&mut self) -> Result<(), Error> {
        let dump_path = self.lookup_dump_path();
        if dump_path.exists() {
            self.lookup.import_sparse(&dump_path.to_string_lossy())?;
        }
        Ok(())
    }

    fn save(&mut self) -> Result<(), Error> {
        self.lookup
            .export_sparse(&self.lookup_dump_path().to_string_lossy())?;
        Ok(())
    }

    fn close_(&mut self) {
        self.file = None;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};

    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_test_{}", timestamp))
    }

    #[test]
    fn test_basic_operations() {
        let temp_dir = get_temp_dir();

        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        };

        let mut db = OurDB::new(config).unwrap();

        // Test set and get
        let test_data = b"Hello, OurDB!";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: test_data,
            })
            .unwrap();

        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);

        // Test update
        let updated_data = b"Updated data";
        db.set(OurDBSetArgs {
            id: Some(id),
            data: updated_data,
        })
        .unwrap();

        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, updated_data);

        // Test history
        let history = db.get_history(id, 2).unwrap();
        assert_eq!(history.len(), 2);
        assert_eq!(history[0], updated_data);
        assert_eq!(history[1], test_data);

        // Test delete
        db.delete(id).unwrap();
        assert!(db.get(id).is_err());

        // Clean up
        db.destroy().unwrap();
    }
}
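To make the two modes documented on `set` concrete, a short sketch (paths and payloads are illustrative) that uses only the public API defined above:

```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};

fn modes_demo() -> Result<(), ourdb::Error> {
    // Key-value mode: every ID is chosen by the caller.
    let mut kv = OurDB::new(OurDBConfig {
        path: std::env::temp_dir().join("ourdb_kv_demo"), // illustrative path
        incremental_mode: false,
        file_size: None,
        keysize: None,
        reset: Some(true),
    })?;
    kv.set(OurDBSetArgs { id: Some(7), data: b"explicit id" })?;
    assert_eq!(kv.get(7)?, b"explicit id");
    kv.destroy()?;

    // Incremental mode: IDs are auto-generated; supplying a fresh ID is rejected.
    let mut inc = OurDB::new(OurDBConfig {
        path: std::env::temp_dir().join("ourdb_inc_demo"), // illustrative path
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: Some(true),
    })?;
    let id = inc.set(OurDBSetArgs { id: None, data: b"auto id" })?;
    assert_eq!(inc.get(id)?, b"auto id");
    inc.destroy()?;
    Ok(())
}
```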
178 packages/data/ourdb/src/location.rs Normal file
@@ -0,0 +1,178 @@
use crate::error::Error;

/// Location represents a physical position in a database file
///
/// It consists of a file number and a position within that file.
/// This allows OurDB to span multiple files for large datasets.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct Location {
    /// File number (0-65535)
    pub file_nr: u16,
    /// Position within the file
    pub position: u32,
}

impl Location {
    /// Creates a new Location from bytes based on keysize
    ///
    /// - keysize = 2: Only position (2 bytes), file_nr = 0
    /// - keysize = 3: Only position (3 bytes), file_nr = 0
    /// - keysize = 4: Only position (4 bytes), file_nr = 0
    /// - keysize = 6: file_nr (2 bytes) + position (4 bytes)
    pub fn from_bytes(bytes: &[u8], keysize: u8) -> Result<Self, Error> {
        // Validate keysize
        if ![2, 3, 4, 6].contains(&keysize) {
            return Err(Error::InvalidOperation(format!(
                "Invalid keysize: {}",
                keysize
            )));
        }

        // Create padded bytes
        let mut padded = vec![0u8; keysize as usize];
        if bytes.len() > keysize as usize {
            return Err(Error::InvalidOperation(
                "Input bytes exceed keysize".to_string(),
            ));
        }
        let start_idx = keysize as usize - bytes.len();

        for (i, &b) in bytes.iter().enumerate() {
            if i + start_idx < padded.len() {
                padded[start_idx + i] = b;
            }
        }

        let mut location = Location::default();

        match keysize {
            2 => {
                // Only position, 2 bytes big endian
                location.position = u32::from(padded[0]) << 8 | u32::from(padded[1]);
                location.file_nr = 0;

                // Verify limits
                if location.position > 0xFFFF {
                    return Err(Error::InvalidOperation(
                        "Position exceeds max value for keysize=2 (max 65535)".to_string(),
                    ));
                }
            }
            3 => {
                // Only position, 3 bytes big endian
                location.position =
                    u32::from(padded[0]) << 16 | u32::from(padded[1]) << 8 | u32::from(padded[2]);
                location.file_nr = 0;

                // Verify limits
                if location.position > 0xFFFFFF {
                    return Err(Error::InvalidOperation(
                        "Position exceeds max value for keysize=3 (max 16777215)".to_string(),
                    ));
                }
            }
            4 => {
                // Only position, 4 bytes big endian
                location.position = u32::from(padded[0]) << 24
                    | u32::from(padded[1]) << 16
                    | u32::from(padded[2]) << 8
                    | u32::from(padded[3]);
                location.file_nr = 0;
            }
            6 => {
                // 2 bytes file_nr + 4 bytes position, all big endian
                location.file_nr = u16::from(padded[0]) << 8 | u16::from(padded[1]);
                location.position = u32::from(padded[2]) << 24
                    | u32::from(padded[3]) << 16
                    | u32::from(padded[4]) << 8
                    | u32::from(padded[5]);
            }
            _ => unreachable!(),
        }

        Ok(location)
    }

    /// Converts the location to bytes (always 6 bytes)
    ///
    /// Format: [file_nr (2 bytes)][position (4 bytes)]
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut bytes = Vec::with_capacity(6);

        // Put file_nr first (2 bytes)
        bytes.push((self.file_nr >> 8) as u8);
        bytes.push(self.file_nr as u8);

        // Put position next (4 bytes)
        bytes.push((self.position >> 24) as u8);
        bytes.push((self.position >> 16) as u8);
        bytes.push((self.position >> 8) as u8);
        bytes.push(self.position as u8);

        bytes
    }

    /// Converts the location to a u64 value
    ///
    /// The file_nr is stored in the most significant bits
    pub fn to_u64(&self) -> u64 {
        (u64::from(self.file_nr) << 32) | u64::from(self.position)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_location_from_bytes_keysize_2() {
        let bytes = vec![0x12, 0x34];
        let location = Location::from_bytes(&bytes, 2).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x1234);
    }

    #[test]
    fn test_location_from_bytes_keysize_3() {
        let bytes = vec![0x12, 0x34, 0x56];
        let location = Location::from_bytes(&bytes, 3).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x123456);
    }

    #[test]
    fn test_location_from_bytes_keysize_4() {
        let bytes = vec![0x12, 0x34, 0x56, 0x78];
        let location = Location::from_bytes(&bytes, 4).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x12345678);
    }

    #[test]
    fn test_location_from_bytes_keysize_6() {
        let bytes = vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78];
        let location = Location::from_bytes(&bytes, 6).unwrap();
        assert_eq!(location.file_nr, 0xABCD);
        assert_eq!(location.position, 0x12345678);
    }

    #[test]
    fn test_location_to_bytes() {
        let location = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        let bytes = location.to_bytes();
        assert_eq!(bytes, vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78]);
    }

    #[test]
    fn test_location_to_u64() {
        let location = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        let value = location.to_u64();
        assert_eq!(value, 0xABCD_0000_0000 | 0x12345678);
    }
}
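A quick round-trip sketch (illustrative values) tying the three conversions together; note that `to_bytes` always emits the 6-byte form, so only keysize 6 round-trips through it directly:

```rust
use ourdb::Location;

fn location_roundtrip() -> Result<(), ourdb::Error> {
    let loc = Location { file_nr: 3, position: 65_536 };
    let bytes = loc.to_bytes(); // [file_nr BE (2 bytes)][position BE (4 bytes)]
    assert_eq!(Location::from_bytes(&bytes, 6)?, loc);
    assert_eq!(loc.to_u64(), (3u64 << 32) | 65_536);

    // For keysize <= 4 only the position is encoded and file_nr must be 0.
    let small = Location::from_bytes(&[0x12, 0x34], 2)?;
    assert_eq!(small, Location { file_nr: 0, position: 0x1234 });
    Ok(())
}
```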
540 packages/data/ourdb/src/lookup.rs Normal file
@@ -0,0 +1,540 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;

use crate::error::Error;
use crate::location::Location;

const DATA_FILE_NAME: &str = "data";
const INCREMENTAL_FILE_NAME: &str = ".inc";

/// Configuration for creating a new lookup table
pub struct LookupConfig {
    /// Size of the lookup table
    pub size: u32,
    /// Size of each entry in bytes (2-6)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files
    pub keysize: u8,
    /// Path for disk-based lookup
    pub lookuppath: String,
    /// Whether to use incremental mode
    pub incremental_mode: bool,
}

/// Lookup table maps keys to physical locations in the backend storage
pub struct LookupTable {
    /// Size of each entry in bytes (2-6)
    keysize: u8,
    /// Path for disk-based lookup
    lookuppath: String,
    /// In-memory data for memory-based lookup
    data: Vec<u8>,
    /// Next empty slot if incremental mode is enabled
    incremental: Option<u32>,
}

impl LookupTable {
    /// Returns the keysize of this lookup table
    pub fn keysize(&self) -> u8 {
        self.keysize
    }

    /// Creates a new lookup table with the given configuration
    pub fn new(config: LookupConfig) -> Result<Self, Error> {
        // Verify keysize is valid
        if ![2, 3, 4, 6].contains(&config.keysize) {
            return Err(Error::InvalidOperation(format!(
                "Invalid keysize: {}",
                config.keysize
            )));
        }

        let incremental = if config.incremental_mode {
            Some(get_incremental_info(&config)?)
        } else {
            None
        };

        if !config.lookuppath.is_empty() {
            // Create directory if it doesn't exist
            fs::create_dir_all(&config.lookuppath)?;

            // For disk-based lookup, create empty file if it doesn't exist
            let data_path = Path::new(&config.lookuppath).join(DATA_FILE_NAME);
            if !data_path.exists() {
                let data = vec![0u8; config.size as usize * config.keysize as usize];
                fs::write(&data_path, &data)?;
            }

            Ok(LookupTable {
                data: Vec::new(),
                keysize: config.keysize,
                lookuppath: config.lookuppath,
                incremental,
            })
        } else {
            // For memory-based lookup
            Ok(LookupTable {
                data: vec![0u8; config.size as usize * config.keysize as usize],
                keysize: config.keysize,
                lookuppath: String::new(),
                incremental,
            })
        }
    }

    /// Gets a location for the given ID
    pub fn get(&self, id: u32) -> Result<Location, Error> {
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // Disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);

            // Check file size first
            let file_size = fs::metadata(&data_path)?.len();
            let start_pos = id as u64 * entry_size as u64;

            if start_pos + entry_size as u64 > file_size {
                return Err(Error::LookupError(format!(
                    "Invalid read for get in lut: {}: {} would exceed file size {}",
                    self.lookuppath,
                    start_pos + entry_size as u64,
                    file_size
                )));
            }

            // Read directly from file
            let mut file = File::open(&data_path)?;
            file.seek(SeekFrom::Start(start_pos))?;

            let mut data = vec![0u8; entry_size];
            let bytes_read = file.read(&mut data)?;

            if bytes_read < entry_size {
                return Err(Error::LookupError(format!(
                    "Incomplete read: expected {} bytes but got {}",
                    entry_size, bytes_read
                )));
            }

            return Location::from_bytes(&data, self.keysize);
        }

        // Memory-based lookup
        if (id * self.keysize as u32) as usize >= self.data.len() {
            return Err(Error::LookupError("Index out of bounds".to_string()));
        }

        let start = (id * self.keysize as u32) as usize;
        let end = start + entry_size;

        Location::from_bytes(&self.data[start..end], self.keysize)
    }

    /// Sets a location for the given ID
    pub fn set(&mut self, id: u32, location: Location) -> Result<(), Error> {
        let entry_size = self.keysize as usize;

        // Handle incremental mode
        if let Some(incremental) = self.incremental {
            if id == incremental {
                self.increment_index()?;
            }

            if id > incremental {
                return Err(Error::InvalidOperation(
                    "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                ));
            }
        }

        // Convert location to bytes based on keysize
        let location_bytes = match self.keysize {
            2 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=2".to_string(),
                    ));
                }
                if location.position > 0xFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=2 (max 65535)".to_string(),
                    ));
                }
                vec![(location.position >> 8) as u8, location.position as u8]
            }
            3 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=3".to_string(),
                    ));
                }
                if location.position > 0xFFFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=3 (max 16777215)".to_string(),
                    ));
                }
                vec![
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            4 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=4".to_string(),
                    ));
                }
                vec![
                    (location.position >> 24) as u8,
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            6 => {
                // Full location with file_nr and position
                location.to_bytes()
            }
            _ => {
                return Err(Error::InvalidOperation(format!(
                    "Invalid keysize: {}",
                    self.keysize
                )))
            }
        };

        if !self.lookuppath.is_empty() {
            // Disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = OpenOptions::new().write(true).open(data_path)?;

            let start_pos = id as u64 * entry_size as u64;
            file.seek(SeekFrom::Start(start_pos))?;
            file.write_all(&location_bytes)?;
        } else {
            // Memory-based lookup
            let start = (id * self.keysize as u32) as usize;
            if start + entry_size > self.data.len() {
                return Err(Error::LookupError("Index out of bounds".to_string()));
            }

            for (i, &byte) in location_bytes.iter().enumerate() {
                self.data[start + i] = byte;
            }
        }

        Ok(())
    }

    /// Deletes an entry for the given ID
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        // Set location to all zeros
        self.set(id, Location::default())
    }

    /// Gets the next available ID in incremental mode
    pub fn get_next_id(&self) -> Result<u32, Error> {
        let incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;

        let table_size = if !self.lookuppath.is_empty() {
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::metadata(data_path)?.len() as u32
        } else {
            self.data.len() as u32
        };

        if incremental * self.keysize as u32 >= table_size {
            return Err(Error::LookupError("Lookup table is full".to_string()));
        }

        Ok(incremental)
    }

    /// Increments the index in incremental mode
    pub fn increment_index(&mut self) -> Result<(), Error> {
        let mut incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;

        incremental += 1;
        self.incremental = Some(incremental);

        if !self.lookuppath.is_empty() {
            let inc_path = Path::new(&self.lookuppath).join(INCREMENTAL_FILE_NAME);
            fs::write(inc_path, incremental.to_string())?;
        }

        Ok(())
    }

    /// Exports the lookup table to a file
    pub fn export_data(&self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, just copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(data_path, path)?;
        } else {
            // For memory-based lookup, write the data to file
            fs::write(path, &self.data)?;
        }
        Ok(())
    }

    /// Imports the lookup table from a file
    pub fn import_data(&mut self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(path, data_path)?;
        } else {
            // For memory-based lookup, read the data from file
            self.data = fs::read(path)?;
        }
        Ok(())
    }

    /// Exports only non-zero entries to save space
    pub fn export_sparse(&self, path: &str) -> Result<(), Error> {
        let mut output = Vec::new();
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // For disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();
            let max_entries = file_size / entry_size as u64;

            for id in 0..max_entries {
                file.seek(SeekFrom::Start(id * entry_size as u64))?;

                let mut buffer = vec![0u8; entry_size];
                let bytes_read = file.read(&mut buffer)?;

                if bytes_read < entry_size {
                    break;
                }

                // Check if entry is non-zero
                if buffer.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(&buffer);
                }
            }
        } else {
            // For memory-based lookup
            let max_entries = self.data.len() / entry_size;

            for id in 0..max_entries {
                let start = id * entry_size;
                let entry = &self.data[start..start + entry_size];

                // Check if entry is non-zero
                if entry.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(entry);
                }
            }
        }

        // Write the output to file
        fs::write(path, &output)?;
        Ok(())
    }

    /// Imports sparse data (only non-zero entries)
    pub fn import_sparse(&mut self, path: &str) -> Result<(), Error> {
        let data = fs::read(path)?;
        let entry_size = self.keysize as usize;
        let record_size = 4 + entry_size; // ID (4 bytes) + entry

        if data.len() % record_size != 0 {
            return Err(Error::DataCorruption(
                "Invalid sparse data format: size mismatch".to_string(),
            ));
        }

        for chunk_start in (0..data.len()).step_by(record_size) {
            if chunk_start + record_size > data.len() {
                break;
            }

            // Extract ID (4 bytes)
            let id_bytes = &data[chunk_start..chunk_start + 4];
            let id = u32::from_be_bytes([id_bytes[0], id_bytes[1], id_bytes[2], id_bytes[3]]);

            // Extract entry
            let entry = &data[chunk_start + 4..chunk_start + record_size];

            // Create location from entry
            let location = Location::from_bytes(entry, self.keysize)?;

            // Set the entry
            self.set(id, location)?;
        }

        Ok(())
    }

    /// Finds the highest ID with a non-zero entry
    pub fn find_last_entry(&mut self) -> Result<u32, Error> {
        let mut last_id = 0u32;
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // For disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();

            let mut buffer = vec![0u8; entry_size];
            let mut pos = 0u32;

            while (pos as u64 * entry_size as u64) < file_size {
                file.seek(SeekFrom::Start(pos as u64 * entry_size as u64))?;

                let bytes_read = file.read(&mut buffer)?;
                if bytes_read == 0 || bytes_read < entry_size {
                    break;
                }

                let location = Location::from_bytes(&buffer, self.keysize)?;
                if location.position != 0 || location.file_nr != 0 {
                    last_id = pos;
                }

                pos += 1;
            }
        } else {
            // For memory-based lookup
            for i in 0..(self.data.len() / entry_size) as u32 {
                if let Ok(location) = self.get(i) {
                    if location.position != 0 || location.file_nr != 0 {
                        last_id = i;
                    }
                }
            }
        }

        Ok(last_id)
    }
}

/// Helper function to get the incremental value
fn get_incremental_info(config: &LookupConfig) -> Result<u32, Error> {
    if !config.incremental_mode {
        return Ok(0);
    }

    if !config.lookuppath.is_empty() {
        let inc_path = Path::new(&config.lookuppath).join(INCREMENTAL_FILE_NAME);

        if !inc_path.exists() {
            // Create a separate file for storing the incremental value
            fs::write(&inc_path, "1")?;
        }

        let inc_str = fs::read_to_string(&inc_path)?;
        let incremental = match inc_str.trim().parse::<u32>() {
            Ok(val) => val,
            Err(_) => {
                // If the value is invalid, reset it to 1
                fs::write(&inc_path, "1")?;
                1
            }
        };

        Ok(incremental)
    } else {
        // For memory-based lookup, start with 1
        Ok(1)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_lookup_test_{}", timestamp))
    }

    #[test]
    fn test_memory_lookup() {
        let config = LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: String::new(),
            incremental_mode: true,
        };

        let mut lookup = LookupTable::new(config).unwrap();

        // Test set and get
        let location = Location {
            file_nr: 0,
            position: 12345,
        };

        lookup.set(1, location).unwrap();
        let retrieved = lookup.get(1).unwrap();

        assert_eq!(retrieved.file_nr, location.file_nr);
        assert_eq!(retrieved.position, location.position);

        // Test incremental mode
        let next_id = lookup.get_next_id().unwrap();
        assert_eq!(next_id, 2);

        lookup.increment_index().unwrap();
        let next_id = lookup.get_next_id().unwrap();
        assert_eq!(next_id, 3);
    }

    #[test]
    fn test_disk_lookup() {
        let temp_dir = get_temp_dir();
        fs::create_dir_all(&temp_dir).unwrap();

        let config = LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: temp_dir.to_string_lossy().to_string(),
            incremental_mode: true,
        };

        let mut lookup = LookupTable::new(config).unwrap();

        // Test set and get
        let location = Location {
            file_nr: 0,
            position: 12345,
        };

        lookup.set(1, location).unwrap();
        let retrieved = lookup.get(1).unwrap();

        assert_eq!(retrieved.file_nr, location.file_nr);
        assert_eq!(retrieved.position, location.position);

        // Clean up
        fs::remove_dir_all(temp_dir).unwrap();
    }
}
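The sparse dump format used by `export_sparse`/`import_sparse` above is a plain sequence of fixed-size records: a 4-byte big-endian ID followed by one keysize-sized entry. A standalone parsing sketch (hypothetical helper, not part of the crate):

```rust
/// Illustrative only: splits one record off a sparse dump produced by `export_sparse`.
fn parse_sparse_record(buf: &[u8], keysize: usize) -> Option<(u32, &[u8])> {
    let record_size = 4 + keysize; // 4-byte big-endian ID + one entry
    if buf.len() < record_size {
        return None;
    }
    let id = u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]);
    Some((id, &buf[4..record_size]))
}
```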
369 packages/data/ourdb/tests/integration_tests.rs Normal file
@@ -0,0 +1,369 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use rand;
use std::env::temp_dir;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

// Helper function to create a unique temporary directory for tests
fn get_temp_dir() -> PathBuf {
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let random_part = rand::random::<u32>();
    let dir = temp_dir().join(format!("ourdb_test_{}_{}", timestamp, random_part));

    // Ensure the directory exists and is empty
    if dir.exists() {
        std::fs::remove_dir_all(&dir).unwrap();
    }
    std::fs::create_dir_all(&dir).unwrap();

    dir
}

#[test]
fn test_basic_operations() {
    let temp_dir = get_temp_dir();

    // Create a new database with incremental mode
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Test set and get
    let test_data = b"Hello, OurDB!";
    let id = db
        .set(OurDBSetArgs {
            id: None,
            data: test_data,
        })
        .unwrap();

    let retrieved = db.get(id).unwrap();
    assert_eq!(retrieved, test_data);

    // Test update
    let updated_data = b"Updated data";
    db.set(OurDBSetArgs {
        id: Some(id),
        data: updated_data,
    })
    .unwrap();

    let retrieved = db.get(id).unwrap();
    assert_eq!(retrieved, updated_data);

    // Test history
    let history = db.get_history(id, 2).unwrap();
    assert_eq!(history.len(), 2);
    assert_eq!(history[0], updated_data);
    assert_eq!(history[1], test_data);

    // Test delete
    db.delete(id).unwrap();
    assert!(db.get(id).is_err());

    // Clean up
    db.destroy().unwrap();
}

#[test]
fn test_key_value_mode() {
    let temp_dir = get_temp_dir();

    // Create a new database with key-value mode
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: false,
        file_size: None,
        keysize: None,
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Test set with explicit ID
    let test_data = b"Key-value data";
    let id = 42;
    db.set(OurDBSetArgs {
        id: Some(id),
        data: test_data,
    })
    .unwrap();

    let retrieved = db.get(id).unwrap();
    assert_eq!(retrieved, test_data);

    // Verify next_id fails in key-value mode
    assert!(db.get_next_id().is_err());

    // Clean up
    db.destroy().unwrap();
}

#[test]
fn test_incremental_mode() {
    let temp_dir = get_temp_dir();

    // Create a new database with incremental mode
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Test auto-increment IDs
    let data1 = b"First record";
    let id1 = db
        .set(OurDBSetArgs {
            id: None,
            data: data1,
        })
        .unwrap();

    let data2 = b"Second record";
    let id2 = db
        .set(OurDBSetArgs {
            id: None,
            data: data2,
        })
        .unwrap();

    // IDs should be sequential
    assert_eq!(id2, id1 + 1);

    // Verify get_next_id works
    let next_id = db.get_next_id().unwrap();
    assert_eq!(next_id, id2 + 1);

    // Clean up
    db.destroy().unwrap();
}

#[test]
fn test_persistence() {
    let temp_dir = get_temp_dir();

    // Create data in a new database
    {
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        };

        let mut db = OurDB::new(config).unwrap();

        let test_data = b"Persistent data";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: test_data,
            })
            .unwrap();

        // Explicitly close the database
        db.close().unwrap();

        // ID should be 1 in a new database
        assert_eq!(id, 1);
    }

    // Reopen the database and verify data persists
    {
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        };

        let mut db = OurDB::new(config).unwrap();

        // Verify data is still there
        let retrieved = db.get(1).unwrap();
        assert_eq!(retrieved, b"Persistent data");

        // Verify incremental counter persisted
        let next_id = db.get_next_id().unwrap();
        assert_eq!(next_id, 2);

        // Clean up
        db.destroy().unwrap();
    }
}

#[test]
fn test_different_keysizes() {
    for keysize in [2, 3, 4, 6].iter() {
        let temp_dir = get_temp_dir();

        // Ensure the directory exists
        std::fs::create_dir_all(&temp_dir).unwrap();

        // Create a new database with specified keysize
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: Some(*keysize),
            reset: None,
        };

        let mut db = OurDB::new(config).unwrap();

        // Test basic operations
        let test_data = b"Keysize test data";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: test_data,
            })
            .unwrap();

        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);

        // Clean up
        db.destroy().unwrap();
    }
}

#[test]
fn test_large_data() {
    let temp_dir = get_temp_dir();

    // Create a new database
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Create a large data set (60KB - within the 64KB limit)
    let large_data = vec![b'X'; 60 * 1024];

    // Store and retrieve large data
    let id = db
        .set(OurDBSetArgs {
            id: None,
            data: &large_data,
        })
        .unwrap();
    let retrieved = db.get(id).unwrap();

    assert_eq!(retrieved.len(), large_data.len());
    assert_eq!(retrieved, large_data);

    // Clean up
    db.destroy().unwrap();
}

#[test]
fn test_exceed_size_limit() {
    let temp_dir = get_temp_dir();

    // Create a new database
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Create data larger than the 64KB limit (70KB)
    let oversized_data = vec![b'X'; 70 * 1024];

    // Attempt to store data that exceeds the size limit
    let result = db.set(OurDBSetArgs {
        id: None,
        data: &oversized_data,
    });

    // Verify that an error is returned
    assert!(
        result.is_err(),
        "Expected an error when storing data larger than 64KB"
    );

    // Clean up
    db.destroy().unwrap();
}

#[test]
fn test_multiple_files() {
    let temp_dir = get_temp_dir();

    // Create a new database with small file size to force multiple files
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: Some(1024), // Very small file size (1KB)
        keysize: Some(6),      // 6-byte keysize for multiple files
        reset: None,
    };

    let mut db = OurDB::new(config).unwrap();

    // Store enough data to span multiple files
    let data_size = 500; // bytes per record
    let test_data = vec![b'A'; data_size];

    let mut ids = Vec::new();
    for _ in 0..10 {
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: &test_data,
            })
            .unwrap();
        ids.push(id);
    }

    // Verify all data can be retrieved
    for &id in &ids {
        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved.len(), data_size);
    }

    // Verify multiple files were created
    let files = fs::read_dir(&temp_dir)
        .unwrap()
        .filter_map(Result::ok)
        .filter(|entry| {
            let path = entry.path();
            path.is_file() && path.extension().map_or(false, |ext| ext == "db")
        })
        .count();

    assert!(
        files > 1,
        "Expected multiple database files, found {}",
        files
    );

    // Clean up
    db.destroy().unwrap();
}

787 packages/data/radixtree/ARCHITECTURE.md Normal file
@@ -0,0 +1,787 @@
# RadixTree: Architecture for V to Rust Port

## 1. Overview

RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This document outlines the architecture for porting the RadixTree module from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.

The Rust implementation will integrate with the existing OurDB Rust implementation for persistent storage.

```mermaid
graph TD
    A[Client Code] --> B[RadixTree API]
    B --> C[Node Management]
    B --> D[Serialization]
    B --> E[Tree Operations]
    C --> F[OurDB]
    D --> F
    E --> C
```

## 2. Current Architecture (V Implementation)

The current V implementation of RadixTree consists of the following components:

### 2.1 Core Data Structures

#### Node
```v
struct Node {
mut:
    key_segment string    // The segment of the key stored at this node
    value       []u8      // Value stored at this node (empty if not a leaf)
    children    []NodeRef // References to child nodes
    is_leaf     bool      // Whether this node is a leaf node
}
```

#### NodeRef
```v
struct NodeRef {
mut:
    key_part string // The key segment for this child
    node_id  u32    // Database ID of the node
}
```

#### RadixTree
```v
@[heap]
pub struct RadixTree {
mut:
    db      &ourdb.OurDB // Database for persistent storage
    root_id u32          // Database ID of the root node
}
```

### 2.2 Key Operations

1. **new()**: Creates a new radix tree with a specified database path
2. **set(key, value)**: Sets a key-value pair in the tree
3. **get(key)**: Retrieves a value by key
4. **update(prefix, new_value)**: Updates the value at a given key prefix
5. **delete(key)**: Removes a key from the tree
6. **list(prefix)**: Lists all keys with a given prefix
7. **getall(prefix)**: Gets all values for keys with a given prefix

### 2.3 Serialization

The V implementation uses a custom binary serialization format for nodes (an encoder sketch follows the lists below):
- Version byte (1 byte)
- Key segment (string)
- Value length (2 bytes) followed by value bytes
- Children count (2 bytes) followed by children
- Is leaf flag (1 byte)

Each child is serialized as:
- Key part (string)
- Node ID (4 bytes)
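
A minimal encoder sketch for this layout, assuming strings are written with a 2-byte length prefix and multi-byte integers are little-endian; the exact string encoding and endianness must be verified against the V encoder before relying on this:

```rust
// Sketch only: assumes 2-byte LE length prefixes for strings and LE integers.
fn write_string(buf: &mut Vec<u8>, s: &str) {
    buf.extend_from_slice(&(s.len() as u16).to_le_bytes());
    buf.extend_from_slice(s.as_bytes());
}

fn serialize_node(node: &Node) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.push(1u8); // version byte
    write_string(&mut buf, &node.key_segment);
    // value length (2 bytes) followed by value bytes
    buf.extend_from_slice(&(node.value.len() as u16).to_le_bytes());
    buf.extend_from_slice(&node.value);
    // children count (2 bytes) followed by children
    buf.extend_from_slice(&(node.children.len() as u16).to_le_bytes());
    for child in &node.children {
        write_string(&mut buf, &child.key_part);
        buf.extend_from_slice(&child.node_id.to_le_bytes()); // node ID (4 bytes)
    }
    buf.push(node.is_leaf as u8); // is-leaf flag
    buf
}
```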

### 2.4 Integration with OurDB

The RadixTree uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal

## 3. Proposed Rust Architecture

The Rust implementation will maintain the same overall architecture while leveraging Rust's type system, ownership model, and error handling.

### 3.1 Core Data Structures

#### Node
```rust
pub struct Node {
    key_segment: String,
    value: Vec<u8>,
    children: Vec<NodeRef>,
    is_leaf: bool,
}
```

#### NodeRef
```rust
pub struct NodeRef {
    key_part: String,
    node_id: u32,
}
```

#### RadixTree
```rust
pub struct RadixTree {
    db: ourdb::OurDB,
    root_id: u32,
}
```

### 3.2 Public API

```rust
impl RadixTree {
    /// Creates a new radix tree with the specified database path
    pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
        // Implementation
    }

    /// Sets a key-value pair in the tree
    pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
        // Implementation
    }

    /// Gets a value by key from the tree
    pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
        // Implementation
    }

    /// Updates the value at a given key prefix
    pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
        // Implementation
    }

    /// Deletes a key from the tree
    pub fn delete(&mut self, key: &str) -> Result<(), Error> {
        // Implementation
    }

    /// Lists all keys with a given prefix
    pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
        // Implementation
    }

    /// Gets all values for keys with a given prefix
    pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
        // Implementation
    }
}
```

### 3.3 Error Handling

```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("OurDB error: {0}")]
    OurDB(#[from] ourdb::Error),

    #[error("Key not found: {0}")]
    KeyNotFound(String),

    #[error("Prefix not found: {0}")]
    PrefixNotFound(String),

    #[error("Serialization error: {0}")]
    Serialization(String),

    #[error("Deserialization error: {0}")]
    Deserialization(String),

    #[error("Invalid operation: {0}")]
    InvalidOperation(String),
}
```

### 3.4 Serialization

The Rust implementation will maintain the same binary serialization format for compatibility:

```rust
const VERSION: u8 = 1;

impl Node {
    /// Serializes a node to bytes for storage
    fn serialize(&self) -> Vec<u8> {
        // Implementation
    }

    /// Deserializes bytes to a node
    fn deserialize(data: &[u8]) -> Result<Self, Error> {
        // Implementation
    }
}
```

### 3.5 Integration with OurDB

The Rust implementation will use the existing OurDB Rust implementation:

```rust
impl RadixTree {
    fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
        let data = self.db.get(node_id)?;
        Node::deserialize(&data)
    }

    fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
        let data = node.serialize();
        let args = ourdb::OurDBSetArgs {
            id: node_id,
            data: &data,
        };
        Ok(self.db.set(args)?)
    }
}
```

## 4. Implementation Strategy

### 4.1 Phase 1: Core Data Structures and Serialization

1. Implement the `Node` and `NodeRef` structs
2. Implement serialization and deserialization functions
3. Implement the `Error` enum for error handling

### 4.2 Phase 2: Basic Tree Operations

1. Implement the `RadixTree` struct with OurDB integration
2. Implement the `new()` function for creating a new tree
3. Implement the `get()` and `set()` functions for basic operations

### 4.3 Phase 3: Advanced Tree Operations

1. Implement the `delete()` function for removing keys
2. Implement the `update()` function for updating values
3. Implement the `list()` and `getall()` functions for prefix operations

### 4.4 Phase 4: Testing and Optimization

1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing RadixTree data

## 5. Implementation Considerations

### 5.1 Memory Management

Leverage Rust's ownership model for safe and efficient memory management:
- Use `String` and `Vec<u8>` for data buffers instead of raw pointers
- Use references and borrows to avoid unnecessary copying
- Implement proper RAII for resource management

### 5.2 Error Handling

Use Rust's `Result` type for comprehensive error handling:
- Define custom error types for RadixTree-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait

### 5.3 Performance Optimizations

Identify opportunities for performance improvements (a prefix-matching sketch follows this list):
- Use efficient string operations for prefix matching
- Minimize database operations by caching nodes when appropriate
- Use iterators for efficient traversal
- Consider using `Cow<str>` for string operations to avoid unnecessary cloning
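
Prefix matching underpins every operation, so the common-prefix helper (declared as `get_common_prefix` in `operations.rs` later in this document) is worth sketching. A minimal version, assuming byte-wise comparison with a fix-up so splits stay on UTF-8 character boundaries:

```rust
/// Returns the longest common prefix of two keys.
/// Sketch only: compares bytes, then backs up to a char boundary
/// so the result remains valid for multi-byte UTF-8 keys.
fn get_common_prefix(a: &str, b: &str) -> String {
    let mut len = a
        .bytes()
        .zip(b.bytes())
        .take_while(|(x, y)| x == y)
        .count();
    // Back up until the cut falls on a char boundary of `a`.
    while !a.is_char_boundary(len) {
        len -= 1;
    }
    a[..len].to_string()
}
```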

### 5.4 Compatibility

Ensure compatibility with the V implementation:
- Maintain the same serialization format
- Ensure identical behavior for all operations
- Support reading existing RadixTree data

## 6. Testing Strategy

### 6.1 Unit Tests

Write comprehensive unit tests for each component (a round-trip sketch follows this list):
- Test `Node` serialization/deserialization
- Test string operations (common prefix, etc.)
- Test error handling
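
A round-trip property is the core serialization check. A minimal sketch against the `Node`/`NodeRef` API declared in section 12.4, using field-by-field assertions since `Node` does not derive `PartialEq` in this design:

```rust
#[test]
fn node_serialization_round_trip() {
    // Build a node with one child, serialize it, then decode it back.
    let mut node = Node::new("hel".to_string(), b"value".to_vec(), true);
    node.children.push(NodeRef::new("lo".to_string(), 42));

    let bytes = node.serialize();
    let decoded = Node::deserialize(&bytes).expect("round trip should succeed");

    assert_eq!(decoded.key_segment, node.key_segment);
    assert_eq!(decoded.value, node.value);
    assert_eq!(decoded.is_leaf, node.is_leaf);
    assert_eq!(decoded.children.len(), 1);
    assert_eq!(decoded.children[0].key_part, "lo");
    assert_eq!(decoded.children[0].node_id, 42);
}
```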

### 6.2 Integration Tests

Write integration tests for the complete system:
- Test basic CRUD operations
- Test prefix operations
- Test edge cases (empty keys, very long keys, etc.)
- Test with large datasets

### 6.3 Compatibility Tests

Ensure compatibility with existing RadixTree data:
- Test reading existing V-created RadixTree data
- Test writing data that can be read by the V implementation

### 6.4 Performance Tests

Benchmark performance against the V implementation:
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different tree sizes and key distributions

## 7. Project Structure

```
radixtree/
├── Cargo.toml
├── src/
│   ├── lib.rs        # Public API and re-exports
│   ├── node.rs       # Node and NodeRef implementations
│   ├── serialize.rs  # Serialization and deserialization
│   ├── error.rs      # Error types
│   └── operations.rs # Tree operations implementation
├── tests/
│   ├── basic_test.rs  # Basic operations tests
│   ├── prefix_test.rs # Prefix operations tests
│   └── edge_cases.rs  # Edge case tests
└── examples/
    ├── basic.rs       # Basic usage example
    ├── prefix.rs      # Prefix operations example
    └── performance.rs # Performance benchmark
```

## 8. Dependencies

The Rust implementation will use the following dependencies:

- `ourdb` for persistent storage
- `thiserror` for error handling
- `log` for logging
- `criterion` for benchmarking (dev dependency)

## 9. Compatibility Considerations

To ensure compatibility with the V implementation:

1. Maintain the same serialization format for nodes
2. Ensure identical behavior for all operations
3. Support reading existing RadixTree data
4. Maintain the same performance characteristics

## 10. Future Extensions

Potential future extensions to consider:

1. Async API for non-blocking operations
2. Iterator interface for efficient traversal
3. Batch operations for improved performance
4. Custom serialization formats for specific use cases
5. Compression support for values
6. Concurrency support for parallel operations

## 11. Conclusion

This architecture provides a roadmap for porting RadixTree from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.

The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing RadixTree data
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage

## 12. Implementation Files

### 12.1 Cargo.toml

```toml
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]

[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"

[dev-dependencies]
criterion = "0.5.1"

[[bench]]
name = "radixtree_benchmarks"
harness = false

[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"

[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"
```

### 12.2 src/lib.rs

```rust
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.

mod error;
mod node;
mod operations;
mod serialize;

pub use error::Error;
pub use node::{Node, NodeRef};

use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

/// RadixTree represents a radix tree data structure with persistent storage.
pub struct RadixTree {
    db: OurDB,
    root_id: u32,
}

impl RadixTree {
    /// Creates a new radix tree with the specified database path.
    ///
    /// # Arguments
    ///
    /// * `path` - The path to the database directory
    /// * `reset` - Whether to reset the database if it exists
    ///
    /// # Returns
    ///
    /// A new `RadixTree` instance
    ///
    /// # Errors
    ///
    /// Returns an error if the database cannot be created or opened
    pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Sets a key-value pair in the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to set
    /// * `value` - The value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Gets a value by key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to get
    ///
    /// # Returns
    ///
    /// The value associated with the key
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Updates the value at a given key prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The key prefix to update
    /// * `new_value` - The new value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the prefix is not found or the operation fails
    pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Deletes a key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to delete
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn delete(&mut self, key: &str) -> Result<(), Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Lists all keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Gets all values for keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of values for keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
        // Implementation will go here
        unimplemented!()
    }
}
```

### 12.3 src/error.rs

```rust
//! Error types for the RadixTree module.

use thiserror::Error;

/// Error type for RadixTree operations.
#[derive(Debug, Error)]
pub enum Error {
    /// Error from OurDB operations.
    #[error("OurDB error: {0}")]
    OurDB(#[from] ourdb::Error),

    /// Error when a key is not found.
    #[error("Key not found: {0}")]
    KeyNotFound(String),

    /// Error when a prefix is not found.
    #[error("Prefix not found: {0}")]
    PrefixNotFound(String),

    /// Error during serialization.
    #[error("Serialization error: {0}")]
    Serialization(String),

    /// Error during deserialization.
    #[error("Deserialization error: {0}")]
    Deserialization(String),

    /// Error for invalid operations.
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),
}
```

### 12.4 src/node.rs

```rust
//! Node types for the RadixTree module.

/// Represents a node in the radix tree.
pub struct Node {
    /// The segment of the key stored at this node.
    pub key_segment: String,

    /// Value stored at this node (empty if not a leaf).
    pub value: Vec<u8>,

    /// References to child nodes.
    pub children: Vec<NodeRef>,

    /// Whether this node is a leaf node.
    pub is_leaf: bool,
}

/// Reference to a node in the database.
pub struct NodeRef {
    /// The key segment for this child.
    pub key_part: String,

    /// Database ID of the node.
    pub node_id: u32,
}

impl Node {
    /// Creates a new node.
    pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
        Self {
            key_segment,
            value,
            children: Vec::new(),
            is_leaf,
        }
    }

    /// Creates a new root node.
    pub fn new_root() -> Self {
        Self {
            key_segment: String::new(),
            value: Vec::new(),
            children: Vec::new(),
            is_leaf: false,
        }
    }
}

impl NodeRef {
    /// Creates a new node reference.
    pub fn new(key_part: String, node_id: u32) -> Self {
        Self { key_part, node_id }
    }
}
```

### 12.5 src/serialize.rs

```rust
//! Serialization and deserialization for RadixTree nodes.

use crate::error::Error;
use crate::node::{Node, NodeRef};

/// Current binary format version.
const VERSION: u8 = 1;

impl Node {
    /// Serializes a node to bytes for storage.
    pub fn serialize(&self) -> Vec<u8> {
        // Implementation will go here
        unimplemented!()
    }

    /// Deserializes bytes to a node.
    pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
        // Implementation will go here
        unimplemented!()
    }
}
```

### 12.6 src/operations.rs

```rust
//! Implementation of RadixTree operations.

use crate::error::Error;
use crate::node::{Node, NodeRef};
use crate::RadixTree;

impl RadixTree {
    /// Helper function to get a node from the database.
    pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Helper function to save a node to the database.
    pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Helper function to find all keys with a given prefix.
    fn find_keys_with_prefix(
        &mut self,
        node_id: u32,
        current_path: &str,
        prefix: &str,
        result: &mut Vec<String>,
    ) -> Result<(), Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Helper function to recursively collect all keys under a node.
    fn collect_all_keys(
        &mut self,
        node_id: u32,
        current_path: &str,
        result: &mut Vec<String>,
    ) -> Result<(), Error> {
        // Implementation will go here
        unimplemented!()
    }

    /// Helper function to get the common prefix of two strings.
    fn get_common_prefix(a: &str, b: &str) -> String {
        // Implementation will go here
        unimplemented!()
    }
}
```

### 12.7 examples/basic_usage.rs

```rust
//! Basic usage example for RadixTree.

use radixtree::RadixTree;

fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Store some data
    tree.set("hello", b"world".to_vec())?;
    tree.set("help", b"me".to_vec())?;
    tree.set("helicopter", b"flying".to_vec())?;

    // Retrieve and print the data
    let value = tree.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&value));

    // List keys with prefix
    let keys = tree.list("hel")?;
    println!("Keys with prefix 'hel': {:?}", keys);

    // Get all values with prefix
    let values = tree.getall("hel")?;
    println!("Values with prefix 'hel':");
    for (i, value) in values.iter().enumerate() {
        println!("  {}: {}", i, String::from_utf8_lossy(value));
    }

    // Delete a key
    tree.delete("help")?;
    println!("Deleted 'help'");

    // Verify deletion
    let keys_after = tree.list("hel")?;
    println!("Keys with prefix 'hel' after deletion: {:?}", keys_after);

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("Cleaned up database directory");
    } else {
        println!("Database kept at: {}", db_path.display());
    }

    Ok(())
}
```

27 packages/data/radixtree/Cargo.toml Normal file
@@ -0,0 +1,27 @@
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]

[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"

[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"

[[bench]]
name = "radixtree_benchmarks"
harness = false

[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"

[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"

265 packages/data/radixtree/MIGRATION.md Normal file
@@ -0,0 +1,265 @@
# Migration Guide: V to Rust RadixTree

This document provides guidance for migrating from the V implementation of RadixTree to the Rust implementation.

## API Changes

The Rust implementation maintains API compatibility with the V implementation, but with some idiomatic Rust changes:

### V API

```v
// Create a new radix tree
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)!

// Set a key-value pair
rt.set('test', 'value1'.bytes())!

// Get a value by key
value := rt.get('test')!

// Update a value at a prefix
rt.update('prefix', 'new_value'.bytes())!

// Delete a key
rt.delete('test')!

// List keys with a prefix
keys := rt.list('prefix')!

// Get all values with a prefix
values := rt.getall('prefix')!
```

### Rust API

```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radixtree_test", true)?;

// Set a key-value pair
tree.set("test", b"value1".to_vec())?;

// Get a value by key
let value = tree.get("test")?;

// Update a value at a prefix
tree.update("prefix", b"new_value".to_vec())?;

// Delete a key
tree.delete("test")?;

// List keys with a prefix
let keys = tree.list("prefix")?;

// Get all values with a prefix
let values = tree.getall("prefix")?;
```

## Key Differences

1. **Error Handling**: The Rust implementation uses Rust's `Result` type for error handling, while the V implementation uses V's `!` operator.

2. **String Handling**: The Rust implementation uses Rust's `&str` for string parameters and `String` for string return values, while the V implementation uses V's `string` type.

3. **Binary Data**: The Rust implementation uses Rust's `Vec<u8>` for binary data, while the V implementation uses V's `[]u8` type.

4. **Constructor**: The Rust implementation uses a constructor function with separate parameters, while the V implementation uses a struct with named parameters.

5. **Ownership**: The Rust implementation follows Rust's ownership model, requiring mutable references for methods that modify the tree.

## Data Compatibility

The Rust implementation maintains data compatibility with the V implementation:

- The same serialization format is used for nodes
- The same OurDB storage format is used
- Existing RadixTree data created with the V implementation can be read by the Rust implementation

## Migration Steps

1. **Update Dependencies**: Replace the V RadixTree dependency with the Rust RadixTree dependency in your project, as shown in the sketch below.
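
   For a path dependency inside this repository, the `Cargo.toml` entry looks like the following; adjust the relative path to where the crate sits in your layout:

   ```toml
   [dependencies]
   radixtree = { path = "../radixtree" }
   ```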

2. **Update Import Statements**: Replace V import statements with Rust use statements.

```v
// V
import freeflowuniverse.herolib.data.radixtree
```

```rust
// Rust
use radixtree::RadixTree;
```

3. **Update Constructor Calls**: Replace V constructor calls with Rust constructor calls.

```v
// V
mut rt := radixtree.new(path: '/path/to/db', reset: false)!
```

```rust
// Rust
let mut tree = RadixTree::new("/path/to/db", false)?;
```

4. **Update Method Calls**: Replace V method calls with Rust method calls.

```v
// V
rt.set('key', 'value'.bytes())!
```

```rust
// Rust
tree.set("key", b"value".to_vec())?;
```

5. **Update Error Handling**: Replace V error handling with Rust error handling.

```v
// V
if value := rt.get('key') {
    println('Found: ${value.bytestr()}')
} else {
    println('Error: ${err}')
}
```

```rust
// Rust
match tree.get("key") {
    Ok(value) => println!("Found: {}", String::from_utf8_lossy(&value)),
    Err(e) => println!("Error: {}", e),
}
```

6. **Update String Conversions**: Replace V string conversions with Rust string conversions.

```v
// V
value.bytestr() // Convert []u8 to string
```

```rust
// Rust
String::from_utf8_lossy(&value) // Convert Vec<u8> to string
```

## Example Migration

### V Code

```v
module main

import freeflowuniverse.herolib.data.radixtree

fn main() {
    mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true) or {
        println('Error creating RadixTree: ${err}')
        return
    }

    rt.set('hello', 'world'.bytes()) or {
        println('Error setting key: ${err}')
        return
    }

    rt.set('help', 'me'.bytes()) or {
        println('Error setting key: ${err}')
        return
    }

    if value := rt.get('hello') {
        println('hello: ${value.bytestr()}')
    } else {
        println('Error getting key: ${err}')
        return
    }

    keys := rt.list('hel') or {
        println('Error listing keys: ${err}')
        return
    }
    println('Keys with prefix "hel": ${keys}')

    values := rt.getall('hel') or {
        println('Error getting all values: ${err}')
        return
    }
    println('Values with prefix "hel":')
    for i, value in values {
        println('  ${i}: ${value.bytestr()}')
    }

    rt.delete('help') or {
        println('Error deleting key: ${err}')
        return
    }
    println('Deleted "help"')
}
```

### Rust Code

```rust
use radixtree::RadixTree;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut tree = RadixTree::new("/tmp/radixtree_test", true)
        .map_err(|e| format!("Error creating RadixTree: {}", e))?;

    tree.set("hello", b"world".to_vec())
        .map_err(|e| format!("Error setting key: {}", e))?;

    tree.set("help", b"me".to_vec())
        .map_err(|e| format!("Error setting key: {}", e))?;

    let value = tree.get("hello")
        .map_err(|e| format!("Error getting key: {}", e))?;
    println!("hello: {}", String::from_utf8_lossy(&value));

    let keys = tree.list("hel")
        .map_err(|e| format!("Error listing keys: {}", e))?;
    println!("Keys with prefix \"hel\": {:?}", keys);

    let values = tree.getall("hel")
        .map_err(|e| format!("Error getting all values: {}", e))?;
    println!("Values with prefix \"hel\":");
    for (i, value) in values.iter().enumerate() {
        println!("  {}: {}", i, String::from_utf8_lossy(value));
    }

    tree.delete("help")
        .map_err(|e| format!("Error deleting key: {}", e))?;
    println!("Deleted \"help\"");

    Ok(())
}
```

## Performance Considerations

The Rust implementation should provide similar or better performance compared to the V implementation. However, there are some considerations:

1. **Memory Usage**: The Rust implementation may have different memory usage patterns due to Rust's ownership model.

2. **Error Handling**: The Rust implementation uses Rust's `Result` type, which may have different performance characteristics compared to V's error handling.

3. **String Handling**: The Rust implementation uses Rust's string types, which may have different performance characteristics compared to V's string types.

## Troubleshooting

If you encounter issues during migration, check the following:

1. **Data Compatibility**: Ensure that the data format is compatible between the V and Rust implementations.

2. **API Usage**: Ensure that you're using the correct API for the Rust implementation.

3. **Error Handling**: Ensure that you're handling errors correctly in the Rust implementation.

4. **String Encoding**: Ensure that string encoding is consistent between the V and Rust implementations.

If you encounter any issues that are not covered in this guide, please report them to the project maintainers.

189 packages/data/radixtree/README.md Normal file
@@ -0,0 +1,189 @@
# RadixTree

A persistent radix tree implementation in Rust using OurDB for storage.

## Overview

RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This implementation provides a persistent radix tree that can be used for efficient prefix-based key operations, such as auto-complete, routing tables, and more.

A radix tree (also known as a patricia trie or radix trie) is a space-optimized tree data structure that enables efficient string key operations. Unlike a standard trie where each node represents a single character, a radix tree compresses paths by allowing nodes to represent multiple characters (key segments).

Key characteristics (see the example after this list):
- Each node stores a segment of a key (not just a single character)
- Nodes can have multiple children, each representing a different branch
- Leaf nodes contain the actual values
- Optimizes storage by compressing common prefixes
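
For example, storing `hello`, `help`, and `helicopter` keeps the shared segment `hel` in a single node, with one child per diverging suffix:

```
"hel"
├── "lo"      -> value for "hello"
├── "p"       -> value for "help"
└── "icopter" -> value for "helicopter"
```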

## Features

- Efficient prefix-based key operations
- Persistent storage using OurDB backend
- Memory-efficient storage of strings with common prefixes
- Support for binary values
- Thread-safe operations through OurDB

## Usage

Add the dependency to your `Cargo.toml`:

```toml
[dependencies]
radixtree = { path = "../radixtree" }
```

### Basic Example

```rust
use radixtree::RadixTree;

fn main() -> Result<(), radixtree::Error> {
    // Create a new radix tree
    let mut tree = RadixTree::new("/tmp/radix", false)?;

    // Set key-value pairs
    tree.set("hello", b"world".to_vec())?;
    tree.set("help", b"me".to_vec())?;

    // Get values by key
    let value = tree.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world

    // List keys by prefix
    let keys = tree.list("hel")?; // Returns ["hello", "help"]
    println!("Keys with prefix 'hel': {:?}", keys);

    // Get all values by prefix
    let values = tree.getall("hel")?; // Returns [b"world", b"me"]

    // Delete keys
    tree.delete("help")?;

    Ok(())
}
```

## API

### Creating a RadixTree

```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radix", false)?;

// Create a new radix tree and reset if it exists
let mut tree = RadixTree::new("/tmp/radix", true)?;
```

### Setting Values

```rust
// Set a key-value pair
tree.set("key", b"value".to_vec())?;
```

### Getting Values

```rust
// Get a value by key
let value = tree.get("key")?;
```

### Updating Values

```rust
// Update a value at a given prefix
tree.update("prefix", b"new_value".to_vec())?;
```

### Deleting Keys

```rust
// Delete a key
tree.delete("key")?;
```

### Listing Keys by Prefix

```rust
// List all keys with a given prefix
let keys = tree.list("prefix")?;
```

### Getting All Values by Prefix

```rust
// Get all values for keys with a given prefix
let values = tree.getall("prefix")?;
```

## Performance Characteristics

- Search: O(k) where k is the key length
- Insert: O(k) for new keys, may require node splitting
- Delete: O(k) plus potential node cleanup
- Space: O(n) where n is the total length of all keys

## Use Cases

RadixTree is particularly useful for:
- Prefix-based searching
- IP routing tables
- Dictionary implementations
- Auto-complete systems
- File system paths
- Any application requiring efficient string key operations with persistence

## Implementation Details

The RadixTree implementation uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
- Node serialization includes version tracking for format evolution

For more detailed information about the implementation, see the [ARCHITECTURE.md](./ARCHITECTURE.md) file.

## Running Tests

The project includes a comprehensive test suite that verifies all functionality:

```bash
# Run all tests
cargo test

# Run specific test file
cargo test --test basic_test
cargo test --test prefix_test
cargo test --test getall_test
cargo test --test serialize_test
```

## Running Examples

The project includes example applications that demonstrate how to use the RadixTree:

```bash
# Run the basic usage example
cargo run --example basic_usage

# Run the prefix operations example
cargo run --example prefix_operations
```

## Benchmarking

The project includes benchmarks to measure performance:

```bash
# Run all benchmarks
cargo bench

# Run specific benchmark
cargo bench -- set
cargo bench -- get
cargo bench -- prefix_operations
```

## License

This project is licensed under the same license as the HeroCode project.

141 packages/data/radixtree/benches/radixtree_benchmarks.rs Normal file
@@ -0,0 +1,141 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use radixtree::RadixTree;
use std::path::PathBuf;
use tempfile::tempdir;

fn criterion_benchmark(c: &mut Criterion) {
    // Create a temporary directory for benchmarks
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Benchmark set operation
    c.bench_function("set", |b| {
        let mut tree = RadixTree::new(db_path, true).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i);
            let value = format!("benchmark_value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
            i += 1;
        });
    });

    // Setup tree with data for get/list/delete benchmarks
    let mut setup_tree = RadixTree::new(db_path, true).unwrap();
    for i in 0..1000 {
        let key = format!("benchmark_key_{}", i);
        let value = format!("benchmark_value_{}", i).into_bytes();
        setup_tree.set(&key, value).unwrap();
    }

    // Benchmark get operation
    c.bench_function("get", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i % 1000);
            let _value = tree.get(&key).unwrap();
            i += 1;
        });
    });

    // Benchmark list operation
    c.bench_function("list", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        b.iter(|| {
            let _keys = tree.list("benchmark_key_1").unwrap();
        });
    });

    // Benchmark getall operation
    c.bench_function("getall", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        b.iter(|| {
            let _values = tree.getall("benchmark_key_1").unwrap();
        });
    });

    // Benchmark update operation
    c.bench_function("update", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i % 1000);
            let new_value = format!("updated_value_{}", i).into_bytes();
            tree.update(&key, new_value).unwrap();
            i += 1;
        });
    });

    // Benchmark delete operation
    c.bench_function("delete", |b| {
        // Create a fresh tree for deletion benchmarks
        let delete_dir = tempdir().expect("Failed to create temp directory");
        let delete_path = delete_dir.path().to_str().unwrap();
        let mut tree = RadixTree::new(delete_path, true).unwrap();

        // Setup keys to delete
        for i in 0..1000 {
            let key = format!("delete_key_{}", i);
            let value = format!("delete_value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
        }

        let mut i = 0;
        b.iter(|| {
            let key = format!("delete_key_{}", i % 1000);
            // Only try to delete if it exists
            if tree.get(&key).is_ok() {
                tree.delete(&key).unwrap();
            }
            i += 1;
        });
    });

    // Benchmark prefix operations with varying tree sizes
    let mut group = c.benchmark_group("prefix_operations");

    for &size in &[100, 1000, 10000] {
        // Create a fresh tree for each size
        let size_dir = tempdir().expect("Failed to create temp directory");
        let size_path = size_dir.path().to_str().unwrap();
        let mut tree = RadixTree::new(size_path, true).unwrap();

        // Insert data with common prefixes
        for i in 0..size {
            let prefix = match i % 5 {
                0 => "user",
                1 => "post",
                2 => "comment",
                3 => "product",
                _ => "category",
            };
            let key = format!("{}_{}", prefix, i);
            let value = format!("value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
        }

        // Benchmark list operation for this size
        group.bench_function(format!("list_size_{}", size), |b| {
            b.iter(|| {
                for prefix in &["user", "post", "comment", "product", "category"] {
                    let _keys = tree.list(prefix).unwrap();
                }
            });
        });

        // Benchmark getall operation for this size
        group.bench_function(format!("getall_size_{}", size), |b| {
            b.iter(|| {
                for prefix in &["user", "post", "comment", "product", "category"] {
                    let _values = tree.getall(prefix).unwrap();
                }
            });
        });
    }

    group.finish();
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

51 packages/data/radixtree/examples/basic_usage.rs Normal file
@@ -0,0 +1,51 @@
use radixtree::RadixTree;
use std::path::PathBuf;

fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Store some data
    println!("Storing data...");
    tree.set("hello", b"world".to_vec())?;
    tree.set("help", b"me".to_vec())?;
    tree.set("helicopter", b"flying".to_vec())?;

    // Retrieve and print the data
    let value = tree.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&value));

    // Update a value
    println!("Updating value...");
    tree.update("hello", b"updated world".to_vec())?;

    // Retrieve the updated value
    let updated_value = tree.get("hello")?;
    println!("hello (updated): {}", String::from_utf8_lossy(&updated_value));

    // Delete a key
    println!("Deleting 'help'...");
    tree.delete("help")?;

    // Try to retrieve the deleted key (should fail)
    match tree.get("help") {
        Ok(value) => println!("Unexpected: help still exists with value: {}", String::from_utf8_lossy(&value)),
        Err(e) => println!("As expected, help was deleted: {}", e),
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("Cleaned up database directory");
    } else {
        println!("Database kept at: {}", db_path.display());
    }

    Ok(())
}

121 packages/data/radixtree/examples/large_scale_test.rs Normal file
@@ -0,0 +1,121 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};

// Use much smaller batches to avoid hitting OurDB's size limit
const BATCH_SIZE: usize = 1_000;
const NUM_BATCHES: usize = 1_000; // Total records: 1,000,000
const PROGRESS_INTERVAL: usize = 100;

fn main() -> Result<(), radixtree::Error> {
    // Overall metrics
    let total_start_time = Instant::now();
    let mut total_records_inserted = 0;
    let mut batch_times = Vec::with_capacity(NUM_BATCHES);

    println!("Will insert up to {} records in batches of {}",
        BATCH_SIZE * NUM_BATCHES, BATCH_SIZE);

    // Process in batches to avoid OurDB size limits
    for batch in 0..NUM_BATCHES {
        // Create a new database for each batch
        let batch_path = std::env::temp_dir().join(format!("radixtree_batch_{}", batch));

        // Clean up any existing database
        if batch_path.exists() {
            std::fs::remove_dir_all(&batch_path)?;
        }
        std::fs::create_dir_all(&batch_path)?;

        println!("\nBatch {}/{}: Creating new radix tree...", batch + 1, NUM_BATCHES);
        let mut tree = RadixTree::new(batch_path.to_str().unwrap(), true)?;

        let batch_start_time = Instant::now();
        let mut last_progress_time = Instant::now();
        let mut last_progress_count = 0;

        // Insert records for this batch
        for i in 0..BATCH_SIZE {
            let global_index = batch * BATCH_SIZE + i;
            let key = format!("key:{:08}", global_index);
            let value = format!("val{}", global_index).into_bytes();

            tree.set(&key, value)?;

            // Show progress at intervals
            if (i + 1) % PROGRESS_INTERVAL == 0 || i == BATCH_SIZE - 1 {
                let records_since_last = i + 1 - last_progress_count;
                let time_since_last = last_progress_time.elapsed();
                let records_per_second = records_since_last as f64 / time_since_last.as_secs_f64();

                print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                    i + 1, BATCH_SIZE,
                    (i + 1) as f64 / BATCH_SIZE as f64 * 100.0,
                    records_per_second);
                io::stdout().flush().unwrap();

                last_progress_time = Instant::now();
                last_progress_count = i + 1;
            }
        }

        let batch_duration = batch_start_time.elapsed();
        batch_times.push(batch_duration);
        total_records_inserted += BATCH_SIZE;

        println!("\nBatch {}/{} completed in {:?} ({:.2} records/sec)",
            batch + 1, NUM_BATCHES,
            batch_duration,
            BATCH_SIZE as f64 / batch_duration.as_secs_f64());

        // Test random access performance for this batch
        println!("Testing access performance for batch {}...", batch + 1);
        let mut total_get_time = Duration::new(0, 0);
        let num_samples = 100;

        // Use a simple distribution pattern
        for i in 0..num_samples {
            // Distribute samples across the batch
            let sample_id = batch * BATCH_SIZE + (i * (BATCH_SIZE / num_samples));
            let key = format!("key:{:08}", sample_id);

            let get_start = Instant::now();
            let _ = tree.get(&key)?;
            total_get_time += get_start.elapsed();
        }

        println!("Average time to retrieve a record: {:?}",
            total_get_time / num_samples as u32);

        // Test prefix search performance
        println!("Testing prefix search performance...");
        let prefix = format!("key:{:02}", batch % 100);

        let list_start = Instant::now();
        let keys = tree.list(&prefix)?;
        let list_duration = list_start.elapsed();

        println!("Found {} keys with prefix '{}' in {:?}",
            keys.len(), prefix, list_duration);
    }

    // Overall performance summary
    let total_duration = total_start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!("Total time to insert {} records: {:?}", total_records_inserted, total_duration);
    println!("Average insertion rate: {:.2} records/second",
        total_records_inserted as f64 / total_duration.as_secs_f64());

    // Show performance trend
    println!("\nPerformance Trend (batch number vs. time):");
    for (i, duration) in batch_times.iter().enumerate() {
        if i % 10 == 0 || i == batch_times.len() - 1 { // Only show every 10th point
            println!("  Batch {}: {:?} ({:.2} records/sec)",
                i + 1,
                duration,
                BATCH_SIZE as f64 / duration.as_secs_f64());
        }
    }

    Ok(())
}
134 packages/data/radixtree/examples/performance_test.rs Normal file
@@ -0,0 +1,134 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};

// Number of records to insert
const TOTAL_RECORDS: usize = 1_000_000;
// How often to report progress (every X records)
const PROGRESS_INTERVAL: usize = 10_000;
// How many records to use for performance sampling
const PERFORMANCE_SAMPLE_SIZE: usize = 1000;

fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_performance_test");

    // Completely remove and recreate the directory to ensure a clean start
    if db_path.exists() {
        std::fs::remove_dir_all(&db_path)?;
    }
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());
    println!("Will insert {} records and show progress...", TOTAL_RECORDS);

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Track overall time
    let start_time = Instant::now();

    // Track performance metrics
    let mut insertion_times = Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL);
    let mut last_batch_time = Instant::now();
    let mut last_batch_records = 0;

    // Insert records and track progress
    for i in 0..TOTAL_RECORDS {
        let key = format!("key:{:08}", i);
        // Use smaller values to avoid exceeding OurDB's size limit
        let value = format!("val{}", i).into_bytes();

        // Time the insertion of every Nth record for performance sampling
        if i % PERFORMANCE_SAMPLE_SIZE == 0 {
            let insert_start = Instant::now();
            tree.set(&key, value)?;
            let insert_duration = insert_start.elapsed();

            // Only print detailed timing for specific samples to avoid flooding output
            if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
                println!("Record {}: Insertion took {:?}", i, insert_duration);
            }
        } else {
            tree.set(&key, value)?;
        }

        // Show progress at intervals
        if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
            let records_in_batch = i + 1 - last_batch_records;
            let batch_duration = last_batch_time.elapsed();
            let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();

            insertion_times.push((i + 1, batch_duration));

            print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                i + 1, TOTAL_RECORDS,
                (i + 1) as f64 / TOTAL_RECORDS as f64 * 100.0,
                records_per_second);
            io::stdout().flush().unwrap();

            last_batch_time = Instant::now();
            last_batch_records = i + 1;
        }
    }

    let total_duration = start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!("Total time to insert {} records: {:?}", TOTAL_RECORDS, total_duration);
    println!("Average insertion rate: {:.2} records/second",
        TOTAL_RECORDS as f64 / total_duration.as_secs_f64());

    // Show performance trend
    println!("\nPerformance Trend (records inserted vs. time per batch):");
    for (i, (record_count, duration)) in insertion_times.iter().enumerate() {
        if i % 10 == 0 || i == insertion_times.len() - 1 { // Only show every 10th point to avoid too much output
            println!("  After {} records: {:?} for {} records ({:.2} records/sec)",
                record_count,
                duration,
                PROGRESS_INTERVAL,
                PROGRESS_INTERVAL as f64 / duration.as_secs_f64());
        }
    }

    // Test access performance with distributed samples
    println!("\nTesting access performance with distributed samples...");
    let mut total_get_time = Duration::new(0, 0);
    let num_samples = 1000;

    // Use a simple distribution pattern instead of random
    for i in 0..num_samples {
        // Distribute samples across the entire range
        let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
        let key = format!("key:{:08}", sample_id);

        let get_start = Instant::now();
        let _ = tree.get(&key)?;
        total_get_time += get_start.elapsed();
    }

    println!("Average time to retrieve a record: {:?}",
        total_get_time / num_samples as u32);

    // Test prefix search performance
    println!("\nTesting prefix search performance...");
    let prefixes = ["key:0", "key:1", "key:5", "key:9"];

    for prefix in &prefixes {
        let list_start = Instant::now();
        let keys = tree.list(prefix)?;
        let list_duration = list_start.elapsed();

        println!("Found {} keys with prefix '{}' in {:?}",
            keys.len(), prefix, list_duration);
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
97 packages/data/radixtree/examples/prefix_operations.rs Normal file
@@ -0,0 +1,97 @@
use radixtree::RadixTree;
use std::path::PathBuf;

fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_prefix_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Store data with common prefixes
    println!("Storing data with common prefixes...");

    // User data
    tree.set("user:1:name", b"Alice".to_vec())?;
    tree.set("user:1:email", b"alice@example.com".to_vec())?;
    tree.set("user:2:name", b"Bob".to_vec())?;
    tree.set("user:2:email", b"bob@example.com".to_vec())?;

    // Post data
    tree.set("post:1:title", b"First Post".to_vec())?;
    tree.set("post:1:content", b"Hello World!".to_vec())?;
    tree.set("post:2:title", b"Second Post".to_vec())?;
    tree.set("post:2:content", b"Another post content".to_vec())?;

    // Demonstrate listing keys with a prefix
    println!("\nListing keys with prefix 'user:1:'");
    let user1_keys = tree.list("user:1:")?;
    for key in &user1_keys {
        println!("  Key: {}", key);
    }

    println!("\nListing keys with prefix 'post:'");
    let post_keys = tree.list("post:")?;
    for key in &post_keys {
        println!("  Key: {}", key);
    }

    // Demonstrate getting all values with a prefix
    println!("\nGetting all values with prefix 'user:1:'");
    let user1_values = tree.getall("user:1:")?;
    for (i, value) in user1_values.iter().enumerate() {
        println!("  Value {}: {}", i + 1, String::from_utf8_lossy(value));
    }

    // Demonstrate finding all user names
    println!("\nFinding all user names (prefix 'user:*:name')");
    let mut user_names = Vec::new();
    let all_keys = tree.list("user:")?;
    for key in all_keys {
        if key.ends_with(":name") {
            if let Ok(value) = tree.get(&key) {
                user_names.push((key, String::from_utf8_lossy(&value).to_string()));
            }
        }
    }

    for (key, name) in user_names {
        println!("  {}: {}", key, name);
    }

    // Demonstrate updating values with a specific prefix
    println!("\nUpdating all post titles...");
    let post_title_keys = tree.list("post:")?
        .into_iter()
        .filter(|k| k.ends_with(":title"))
        .collect::<Vec<_>>();

    for key in post_title_keys {
        let old_value = tree.get(&key)?;
        let old_title = String::from_utf8_lossy(&old_value);
        let new_title = format!("UPDATED: {}", old_title);

        println!("  Updating '{}' to '{}'", old_title, new_title);
        tree.update(&key, new_title.as_bytes().to_vec())?;
    }

    // Verify updates
    println!("\nVerifying updates:");
    let post_keys = tree.list("post:")?;
    for key in post_keys {
        if key.ends_with(":title") {
            let value = tree.get(&key)?;
            println!("  {}: {}", key, String::from_utf8_lossy(&value));
        }
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
35 packages/data/radixtree/src/error.rs Normal file
@@ -0,0 +1,35 @@
//! Error types for the RadixTree module.

use thiserror::Error;

/// Error type for RadixTree operations.
#[derive(Debug, Error)]
pub enum Error {
    /// Error from OurDB operations.
    #[error("OurDB error: {0}")]
    OurDB(#[from] ourdb::Error),

    /// Error when a key is not found.
    #[error("Key not found: {0}")]
    KeyNotFound(String),

    /// Error when a prefix is not found.
    #[error("Prefix not found: {0}")]
    PrefixNotFound(String),

    /// Error during serialization.
    #[error("Serialization error: {0}")]
    Serialization(String),

    /// Error during deserialization.
    #[error("Deserialization error: {0}")]
    Deserialization(String),

    /// Error for invalid operations.
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// Error for I/O operations.
    #[error("I/O error: {0}")]
    IO(#[from] std::io::Error),
}
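Because Error derives thiserror::Error, callers can match on the variant to tell an absent key apart from a storage failure. A minimal sketch of that pattern (the helper name and key are illustrative, not part of this changeset):

use radixtree::{Error, RadixTree};

fn lookup(tree: &mut RadixTree, key: &str) -> Result<Option<Vec<u8>>, Error> {
    match tree.get(key) {
        Ok(value) => Ok(Some(value)),           // key exists
        Err(Error::KeyNotFound(_)) => Ok(None), // missing key is not fatal here
        Err(e) => Err(e),                       // propagate OurDB / IO errors
    }
}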
133 packages/data/radixtree/src/lib.rs Normal file
@@ -0,0 +1,133 @@
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.

mod error;
mod node;
mod operations;
mod serialize;

pub use error::Error;
pub use node::{Node, NodeRef};

use ourdb::OurDB;

/// RadixTree represents a radix tree data structure with persistent storage.
pub struct RadixTree {
    db: OurDB,
    root_id: u32,
}

impl RadixTree {
    /// Creates a new radix tree with the specified database path.
    ///
    /// # Arguments
    ///
    /// * `path` - The path to the database directory
    /// * `reset` - Whether to reset the database if it exists
    ///
    /// # Returns
    ///
    /// A new `RadixTree` instance
    ///
    /// # Errors
    ///
    /// Returns an error if the database cannot be created or opened
    pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
        operations::new_radix_tree(path, reset)
    }

    /// Sets a key-value pair in the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to set
    /// * `value` - The value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
        operations::set(self, key, value)
    }

    /// Gets a value by key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to get
    ///
    /// # Returns
    ///
    /// The value associated with the key
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
        operations::get(self, key)
    }

    /// Updates the value at a given key prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The key prefix to update
    /// * `new_value` - The new value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the prefix is not found or the operation fails
    pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
        operations::update(self, prefix, new_value)
    }

    /// Deletes a key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to delete
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn delete(&mut self, key: &str) -> Result<(), Error> {
        operations::delete(self, key)
    }

    /// Lists all keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
        operations::list(self, prefix)
    }

    /// Gets all values for keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of values for keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
        operations::getall(self, prefix)
    }
}
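The public surface is just the seven methods above. A minimal end-to-end sketch of how they compose, assuming a writable directory (the path and keys below are illustrative):

use radixtree::RadixTree;

fn demo() -> Result<(), radixtree::Error> {
    // Open (or create) a tree backed by OurDB at the given directory.
    let mut tree = RadixTree::new("/tmp/radixtree_demo", true)?;

    tree.set("app:name", b"demo".to_vec())?;
    tree.set("app:version", b"1.0".to_vec())?;

    // Point lookup, prefix listing, and bulk value retrieval.
    let name = tree.get("app:name")?;
    let keys = tree.list("app:")?;     // ["app:name", "app:version"]
    let values = tree.getall("app:")?; // both values

    assert_eq!(name, b"demo".to_vec());
    assert_eq!(keys.len(), 2);
    assert_eq!(values.len(), 2);

    tree.delete("app:version")?;
    Ok(())
}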
59 packages/data/radixtree/src/node.rs Normal file
@@ -0,0 +1,59 @@
//! Node types for the RadixTree module.

/// Represents a node in the radix tree.
#[derive(Debug, Clone, PartialEq)]
pub struct Node {
    /// The segment of the key stored at this node.
    pub key_segment: String,

    /// Value stored at this node (empty if not a leaf).
    pub value: Vec<u8>,

    /// References to child nodes.
    pub children: Vec<NodeRef>,

    /// Whether this node is a leaf node.
    pub is_leaf: bool,
}

/// Reference to a node in the database.
#[derive(Debug, Clone, PartialEq)]
pub struct NodeRef {
    /// The key segment for this child.
    pub key_part: String,

    /// Database ID of the node.
    pub node_id: u32,
}

impl Node {
    /// Creates a new node.
    pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
        Self {
            key_segment,
            value,
            children: Vec::new(),
            is_leaf,
        }
    }

    /// Creates a new root node.
    pub fn new_root() -> Self {
        Self {
            key_segment: String::new(),
            value: Vec::new(),
            children: Vec::new(),
            is_leaf: false,
        }
    }
}

impl NodeRef {
    /// Creates a new node reference.
    pub fn new(key_part: String, node_id: u32) -> Self {
        Self { key_part, node_id }
    }
}
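Nodes never hold in-memory pointers to their children; a NodeRef carries the key segment consumed on descent plus the OurDB record ID to load next. A sketch of how a two-level tree is wired together (the IDs here are illustrative):

use radixtree::{Node, NodeRef};

fn build_example() -> (Node, Node) {
    // Root carries no key material of its own.
    let mut root = Node::new_root();
    // Descending through this child consumes "te" and loads record 2.
    root.children.push(NodeRef::new("te".to_string(), 2));

    // A leaf holding the remaining segment and its value.
    let leaf = Node::new("st".to_string(), b"value".to_vec(), true);
    (root, leaf)
}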
508 packages/data/radixtree/src/operations.rs Normal file
@@ -0,0 +1,508 @@
//! Implementation of RadixTree operations.

use crate::error::Error;
use crate::node::{Node, NodeRef};
use crate::RadixTree;
use crate::serialize::get_common_prefix;
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

/// Creates a new radix tree with the specified database path.
pub fn new_radix_tree(path: &str, reset: bool) -> Result<RadixTree, Error> {
    let config = OurDBConfig {
        path: PathBuf::from(path),
        incremental_mode: true,
        file_size: Some(1024 * 1024 * 10), // 10MB file size for better performance with large datasets
        keysize: Some(6), // Use keysize=6 to support multiple files (file_nr + position)
        reset: None, // Don't reset existing database
    };

    let mut db = OurDB::new(config)?;

    // If reset is true, we would clear the database.
    // Since OurDB doesn't have a reset method, we handle it by
    // creating a fresh database when reset is true.
    // We detect a fresh database by checking whether next_id == 1.

    let root_id = if db.get_next_id()? == 1 {
        // Create a new root node
        let root = Node::new_root();
        let root_id = db.set(OurDBSetArgs {
            id: None,
            data: &root.serialize(),
        })?;

        // First ID should be 1
        assert_eq!(root_id, 1);
        root_id
    } else {
        // Use existing root node
        1 // Root node always has ID 1
    };

    Ok(RadixTree { db, root_id })
}

/// Sets a key-value pair in the tree.
pub fn set(tree: &mut RadixTree, key: &str, value: Vec<u8>) -> Result<(), Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty key case
    if key.is_empty() {
        let mut root_node = tree.get_node(current_id)?;
        root_node.is_leaf = true;
        root_node.value = value;
        tree.save_node(Some(current_id), &root_node)?;
        return Ok(());
    }

    while offset < key.len() {
        let mut node = tree.get_node(current_id)?;

        // Find matching child
        let mut matched_child = None;
        for (i, child) in node.children.iter().enumerate() {
            if key[offset..].starts_with(&child.key_part) {
                matched_child = Some((i, child.clone()));
                break;
            }
        }

        if matched_child.is_none() {
            // No matching child found, create new leaf node
            let key_part = key[offset..].to_string();
            let new_node = Node {
                key_segment: key_part.clone(),
                value: value.clone(),
                children: Vec::new(),
                is_leaf: true,
            };

            let new_id = tree.save_node(None, &new_node)?;

            // Create new child reference and update parent node
            node.children.push(NodeRef {
                key_part,
                node_id: new_id,
            });

            tree.save_node(Some(current_id), &node)?;
            return Ok(());
        }

        let (child_index, mut child) = matched_child.unwrap();
        let common_prefix = get_common_prefix(&key[offset..], &child.key_part);

        if common_prefix.len() < child.key_part.len() {
            // Split existing node
            let child_node = tree.get_node(child.node_id)?;

            // Create new intermediate node
            let new_node = Node {
                key_segment: child.key_part[common_prefix.len()..].to_string(),
                value: child_node.value.clone(),
                children: child_node.children.clone(),
                is_leaf: child_node.is_leaf,
            };
            let new_id = tree.save_node(None, &new_node)?;

            // Update current node
            node.children[child_index] = NodeRef {
                key_part: common_prefix.to_string(),
                node_id: new_id,
            };
            tree.save_node(Some(current_id), &node)?;

            // Update child node reference
            child.node_id = new_id;
        }

        if offset + common_prefix.len() == key.len() {
            // Update value at existing node
            let mut child_node = tree.get_node(child.node_id)?;
            child_node.value = value;
            child_node.is_leaf = true;
            tree.save_node(Some(child.node_id), &child_node)?;
            return Ok(());
        }

        offset += common_prefix.len();
        current_id = child.node_id;
    }

    Ok(())
}

/// Gets a value by key from the tree.
pub fn get(tree: &mut RadixTree, key: &str) -> Result<Vec<u8>, Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty key case
    if key.is_empty() {
        let root_node = tree.get_node(current_id)?;
        if root_node.is_leaf {
            return Ok(root_node.value.clone());
        }
        return Err(Error::KeyNotFound(key.to_string()));
    }

    while offset < key.len() {
        let node = tree.get_node(current_id)?;

        let mut found = false;
        for child in &node.children {
            if key[offset..].starts_with(&child.key_part) {
                if offset + child.key_part.len() == key.len() {
                    let child_node = tree.get_node(child.node_id)?;
                    if child_node.is_leaf {
                        return Ok(child_node.value);
                    }
                }
                current_id = child.node_id;
                offset += child.key_part.len();
                found = true;
                break;
            }
        }

        if !found {
            return Err(Error::KeyNotFound(key.to_string()));
        }
    }

    Err(Error::KeyNotFound(key.to_string()))
}

/// Updates the value at a given key prefix.
pub fn update(tree: &mut RadixTree, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty prefix case
    if prefix.is_empty() {
        return Err(Error::InvalidOperation("Empty prefix not allowed".to_string()));
    }

    while offset < prefix.len() {
        let node = tree.get_node(current_id)?;

        let mut found = false;
        for child in &node.children {
            if prefix[offset..].starts_with(&child.key_part) {
                if offset + child.key_part.len() == prefix.len() {
                    // Found exact prefix match
                    let mut child_node = tree.get_node(child.node_id)?;
                    if child_node.is_leaf {
                        // Update the value
                        child_node.value = new_value;
                        tree.save_node(Some(child.node_id), &child_node)?;
                        return Ok(());
                    }
                }
                current_id = child.node_id;
                offset += child.key_part.len();
                found = true;
                break;
            }
        }

        if !found {
            return Err(Error::PrefixNotFound(prefix.to_string()));
        }
    }

    Err(Error::PrefixNotFound(prefix.to_string()))
}

/// Deletes a key from the tree.
pub fn delete(tree: &mut RadixTree, key: &str) -> Result<(), Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;
    let mut path = Vec::new();

    // Handle empty key case
    if key.is_empty() {
        let mut root_node = tree.get_node(current_id)?;
        if !root_node.is_leaf {
            return Err(Error::KeyNotFound(key.to_string()));
        }
        // For the root node, we just mark it as non-leaf
        root_node.is_leaf = false;
        root_node.value = Vec::new();
        tree.save_node(Some(current_id), &root_node)?;
        return Ok(());
    }

    // Find the node to delete
    while offset < key.len() {
        let node = tree.get_node(current_id)?;

        let mut found = false;
        for child in &node.children {
            if key[offset..].starts_with(&child.key_part) {
                path.push(child.clone());
                current_id = child.node_id;
                offset += child.key_part.len();
                found = true;

                // Check if we've matched the full key
                if offset == key.len() {
                    let child_node = tree.get_node(child.node_id)?;
                    if child_node.is_leaf {
                        found = true;
                        break;
                    }
                }
                break;
            }
        }

        if !found {
            return Err(Error::KeyNotFound(key.to_string()));
        }
    }

    if path.is_empty() {
        return Err(Error::KeyNotFound(key.to_string()));
    }

    // Get the node to delete
    let mut last_node = tree.get_node(path.last().unwrap().node_id)?;

    // If the node has children, just mark it as non-leaf
    if !last_node.children.is_empty() {
        last_node.is_leaf = false;
        last_node.value = Vec::new();
        tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
        return Ok(());
    }

    // If node has no children, remove it from parent
    if path.len() > 1 {
        let parent_id = path[path.len() - 2].node_id;
        let mut parent_node = tree.get_node(parent_id)?;

        // Find and remove the child from parent
        for i in 0..parent_node.children.len() {
            if parent_node.children[i].node_id == path.last().unwrap().node_id {
                parent_node.children.remove(i);
                break;
            }
        }

        tree.save_node(Some(parent_id), &parent_node)?;

        // Delete the node from the database
        tree.db.delete(path.last().unwrap().node_id)?;
    } else {
        // If this is a direct child of the root, just mark it as non-leaf
        last_node.is_leaf = false;
        last_node.value = Vec::new();
        tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
    }

    Ok(())
}

/// Lists all keys with a given prefix.
pub fn list(tree: &mut RadixTree, prefix: &str) -> Result<Vec<String>, Error> {
    let mut result = Vec::new();

    // Handle empty prefix case - will return all keys
    if prefix.is_empty() {
        collect_all_keys(tree, tree.root_id, "", &mut result)?;
        return Ok(result);
    }

    // Start from the root and find all matching keys
    find_keys_with_prefix(tree, tree.root_id, "", prefix, &mut result)?;
    Ok(result)
}

/// Helper function to find all keys with a given prefix.
fn find_keys_with_prefix(
    tree: &mut RadixTree,
    node_id: u32,
    current_path: &str,
    prefix: &str,
    result: &mut Vec<String>,
) -> Result<(), Error> {
    let node = tree.get_node(node_id)?;

    // If the current path already matches or exceeds the prefix length
    if current_path.len() >= prefix.len() {
        // Check if the current path starts with the prefix
        if current_path.starts_with(prefix) {
            // If this is a leaf node, add it to the results
            if node.is_leaf {
                result.push(current_path.to_string());
            }

            // Collect all keys from this subtree
            for child in &node.children {
                let child_path = format!("{}{}", current_path, child.key_part);
                find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
            }
        }
        return Ok(());
    }

    // Current path is shorter than the prefix, continue searching
    for child in &node.children {
        let child_path = format!("{}{}", current_path, child.key_part);

        // Check if this child's path could potentially match the prefix
        if prefix.starts_with(current_path) {
            // The prefix starts with the current path, so we need to check if
            // the child's key_part matches the next part of the prefix
            let prefix_remainder = &prefix[current_path.len()..];

            // If the prefix remainder starts with the child's key_part or vice versa
            if prefix_remainder.starts_with(&child.key_part)
                || (child.key_part.starts_with(prefix_remainder)
                    && child.key_part.len() >= prefix_remainder.len()) {
                find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
            }
        }
    }

    Ok(())
}

/// Helper function to recursively collect all keys under a node.
fn collect_all_keys(
    tree: &mut RadixTree,
    node_id: u32,
    current_path: &str,
    result: &mut Vec<String>,
) -> Result<(), Error> {
    let node = tree.get_node(node_id)?;

    // If this node is a leaf, add its path to the result
    if node.is_leaf {
        result.push(current_path.to_string());
    }

    // Recursively collect keys from all children
    for child in &node.children {
        let child_path = format!("{}{}", current_path, child.key_part);
        collect_all_keys(tree, child.node_id, &child_path, result)?;
    }

    Ok(())
}

/// Gets all values for keys with a given prefix.
pub fn getall(tree: &mut RadixTree, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
    // Get all matching keys
    let keys = list(tree, prefix)?;

    // Get values for each key
    let mut values = Vec::new();
    for key in keys {
        if let Ok(value) = get(tree, &key) {
            values.push(value);
        }
    }

    Ok(values)
}

impl RadixTree {
    /// Helper function to get a node from the database.
    pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
        let data = self.db.get(node_id)?;
        Node::deserialize(&data)
    }

    /// Helper function to save a node to the database.
    pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
        let data = node.serialize();
        let args = OurDBSetArgs {
            id: node_id,
            data: &data,
        };
        Ok(self.db.set(args)?)
    }

    /// Helper function to find all keys with a given prefix.
    fn find_keys_with_prefix(
        &mut self,
        node_id: u32,
        current_path: &str,
        prefix: &str,
        result: &mut Vec<String>,
    ) -> Result<(), Error> {
        let node = self.get_node(node_id)?;

        // If the current path already matches or exceeds the prefix length
        if current_path.len() >= prefix.len() {
            // Check if the current path starts with the prefix
            if current_path.starts_with(prefix) {
                // If this is a leaf node, add it to the results
                if node.is_leaf {
                    result.push(current_path.to_string());
                }

                // Collect all keys from this subtree
                for child in &node.children {
                    let child_path = format!("{}{}", current_path, child.key_part);
                    self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
                }
            }
            return Ok(());
        }

        // Current path is shorter than the prefix, continue searching
        for child in &node.children {
            let child_path = format!("{}{}", current_path, child.key_part);

            // Check if this child's path could potentially match the prefix
            if prefix.starts_with(current_path) {
                // The prefix starts with the current path, so we need to check if
                // the child's key_part matches the next part of the prefix
                let prefix_remainder = &prefix[current_path.len()..];

                // If the prefix remainder starts with the child's key_part or vice versa
                if prefix_remainder.starts_with(&child.key_part)
                    || (child.key_part.starts_with(prefix_remainder)
                        && child.key_part.len() >= prefix_remainder.len()) {
                    self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
                }
            }
        }

        Ok(())
    }

    /// Helper function to recursively collect all keys under a node.
    fn collect_all_keys(
        &mut self,
        node_id: u32,
        current_path: &str,
        result: &mut Vec<String>,
    ) -> Result<(), Error> {
        let node = self.get_node(node_id)?;

        // If this node is a leaf, add its path to the result
        if node.is_leaf {
            result.push(current_path.to_string());
        }

        // Recursively collect keys from all children
        for child in &node.children {
            let child_path = format!("{}{}", current_path, child.key_part);
            self.collect_all_keys(child.node_id, &child_path, result)?;
        }

        Ok(())
    }
}
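Both set and the node-splitting branch lean on get_common_prefix from the serialize module, and the comparison is byte-wise. Since mod serialize is private, this can only be exercised from inside the crate; a sketch of what a unit test there would assert:

#[cfg(test)]
mod common_prefix_tests {
    use crate::serialize::get_common_prefix;

    #[test]
    fn common_prefix_is_bytewise() {
        assert_eq!(get_common_prefix("testing", "tested"), "test");
        assert_eq!(get_common_prefix("key:0001", "key:0002"), "key:000");
        assert_eq!(get_common_prefix("abc", "xyz"), "");
    }
}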
156 packages/data/radixtree/src/serialize.rs Normal file
@@ -0,0 +1,156 @@
//! Serialization and deserialization for RadixTree nodes.

use crate::error::Error;
use crate::node::{Node, NodeRef};
use std::io::{Cursor, Read};
use std::mem::size_of;

/// Current binary format version.
const VERSION: u8 = 1;

impl Node {
    /// Serializes a node to bytes for storage.
    pub fn serialize(&self) -> Vec<u8> {
        let mut buffer = Vec::new();

        // Add version byte
        buffer.push(VERSION);

        // Add key segment
        write_string(&mut buffer, &self.key_segment);

        // Add value (length-prefixed bytes)
        write_u16(&mut buffer, self.value.len() as u16);
        buffer.extend_from_slice(&self.value);

        // Add children
        write_u16(&mut buffer, self.children.len() as u16);
        for child in &self.children {
            write_string(&mut buffer, &child.key_part);
            write_u32(&mut buffer, child.node_id);
        }

        // Add leaf flag
        buffer.push(if self.is_leaf { 1 } else { 0 });

        buffer
    }

    /// Deserializes bytes to a node.
    pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
        if data.is_empty() {
            return Err(Error::Deserialization("Empty data".to_string()));
        }

        let mut cursor = Cursor::new(data);

        // Read and verify version
        let mut version_byte = [0u8; 1];
        cursor.read_exact(&mut version_byte)
            .map_err(|e| Error::Deserialization(format!("Failed to read version byte: {}", e)))?;

        if version_byte[0] != VERSION {
            return Err(Error::Deserialization(
                format!("Invalid version byte: expected {}, got {}", VERSION, version_byte[0])
            ));
        }

        // Read key segment
        let key_segment = read_string(&mut cursor)
            .map_err(|e| Error::Deserialization(format!("Failed to read key segment: {}", e)))?;

        // Read value (length-prefixed bytes)
        let value_len = read_u16(&mut cursor)
            .map_err(|e| Error::Deserialization(format!("Failed to read value length: {}", e)))?;

        let mut value = vec![0u8; value_len as usize];
        cursor.read_exact(&mut value)
            .map_err(|e| Error::Deserialization(format!("Failed to read value: {}", e)))?;

        // Read children
        let children_len = read_u16(&mut cursor)
            .map_err(|e| Error::Deserialization(format!("Failed to read children length: {}", e)))?;

        let mut children = Vec::with_capacity(children_len as usize);
        for _ in 0..children_len {
            let key_part = read_string(&mut cursor)
                .map_err(|e| Error::Deserialization(format!("Failed to read child key part: {}", e)))?;

            let node_id = read_u32(&mut cursor)
                .map_err(|e| Error::Deserialization(format!("Failed to read child node ID: {}", e)))?;

            children.push(NodeRef {
                key_part,
                node_id,
            });
        }

        // Read leaf flag
        let mut is_leaf_byte = [0u8; 1];
        cursor.read_exact(&mut is_leaf_byte)
            .map_err(|e| Error::Deserialization(format!("Failed to read leaf flag: {}", e)))?;

        let is_leaf = is_leaf_byte[0] == 1;

        Ok(Node {
            key_segment,
            value,
            children,
            is_leaf,
        })
    }
}

// Helper functions for serialization

fn write_string(buffer: &mut Vec<u8>, s: &str) {
    let bytes = s.as_bytes();
    write_u16(buffer, bytes.len() as u16);
    buffer.extend_from_slice(bytes);
}

fn write_u16(buffer: &mut Vec<u8>, value: u16) {
    buffer.extend_from_slice(&value.to_le_bytes());
}

fn write_u32(buffer: &mut Vec<u8>, value: u32) {
    buffer.extend_from_slice(&value.to_le_bytes());
}

// Helper functions for deserialization

fn read_string(cursor: &mut Cursor<&[u8]>) -> std::io::Result<String> {
    let len = read_u16(cursor)? as usize;
    let mut bytes = vec![0u8; len];
    cursor.read_exact(&mut bytes)?;

    String::from_utf8(bytes)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

fn read_u16(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u16> {
    let mut bytes = [0u8; size_of::<u16>()];
    cursor.read_exact(&mut bytes)?;

    Ok(u16::from_le_bytes(bytes))
}

fn read_u32(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u32> {
    let mut bytes = [0u8; size_of::<u32>()];
    cursor.read_exact(&mut bytes)?;

    Ok(u32::from_le_bytes(bytes))
}

/// Helper function to get the common prefix of two strings.
pub fn get_common_prefix(a: &str, b: &str) -> String {
    let mut i = 0;
    let a_bytes = a.as_bytes();
    let b_bytes = b.as_bytes();

    while i < a.len() && i < b.len() && a_bytes[i] == b_bytes[i] {
        i += 1;
    }

    a[..i].to_string()
}
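The on-disk layout is therefore: version byte, length-prefixed key segment, length-prefixed value, child count followed by (key_part, node_id) pairs, then the leaf flag, with all integers little-endian. A worked example of the exact bytes this produces for a tiny node (a sketch; the values follow directly from the writers above):

#[cfg(test)]
mod layout_tests {
    use crate::node::Node;

    #[test]
    fn tiny_node_byte_layout() {
        let node = Node::new("a".to_string(), vec![0x01], true);
        // version | key len (u16 LE) | "a" | value len | value | child count | leaf flag
        assert_eq!(
            node.serialize(),
            vec![1, 1, 0, b'a', 1, 0, 0x01, 0, 0, 1]
        );
    }
}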
144 packages/data/radixtree/tests/basic_test.rs Normal file
@@ -0,0 +1,144 @@
use radixtree::RadixTree;
use std::path::PathBuf;
use tempfile::tempdir;

#[test]
fn test_basic_operations() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Test setting and getting values
    let key = "test_key";
    let value = b"test_value".to_vec();
    tree.set(key, value.clone())?;

    let retrieved_value = tree.get(key)?;
    assert_eq!(retrieved_value, value);

    // Test updating a value
    let new_value = b"updated_value".to_vec();
    tree.update(key, new_value.clone())?;

    let updated_value = tree.get(key)?;
    assert_eq!(updated_value, new_value);

    // Test deleting a value
    tree.delete(key)?;

    // Trying to get a deleted key should return an error
    let result = tree.get(key);
    assert!(result.is_err());

    Ok(())
}

#[test]
fn test_empty_key() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Test setting and getting empty key
    let key = "";
    let value = b"value_for_empty_key".to_vec();
    tree.set(key, value.clone())?;

    let retrieved_value = tree.get(key)?;
    assert_eq!(retrieved_value, value);

    // Test deleting empty key
    tree.delete(key)?;

    // Trying to get a deleted key should return an error
    let result = tree.get(key);
    assert!(result.is_err());

    Ok(())
}

#[test]
fn test_multiple_keys() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Insert multiple keys
    let test_data = [
        ("key1", b"value1".to_vec()),
        ("key2", b"value2".to_vec()),
        ("key3", b"value3".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone())?;
    }

    // Verify all keys can be retrieved
    for (key, expected_value) in &test_data {
        let retrieved_value = tree.get(key)?;
        assert_eq!(&retrieved_value, expected_value);
    }

    Ok(())
}

#[test]
fn test_shared_prefixes() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Insert keys with shared prefixes
    let test_data = [
        ("test", b"value_test".to_vec()),
        ("testing", b"value_testing".to_vec()),
        ("tested", b"value_tested".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone())?;
    }

    // Verify all keys can be retrieved
    for (key, expected_value) in &test_data {
        let retrieved_value = tree.get(key)?;
        assert_eq!(&retrieved_value, expected_value);
    }

    Ok(())
}

#[test]
fn test_persistence() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree and add some data
    {
        let mut tree = RadixTree::new(db_path, true)?;
        tree.set("persistent_key", b"persistent_value".to_vec())?;
    } // Tree is dropped here

    // Create a new tree instance with the same path
    {
        let mut tree = RadixTree::new(db_path, false)?;
        let value = tree.get("persistent_key")?;
        assert_eq!(value, b"persistent_value".to_vec());
    }

    Ok(())
}
153 packages/data/radixtree/tests/getall_test.rs Normal file
@@ -0,0 +1,153 @@
use radixtree::RadixTree;
use std::collections::HashMap;
use tempfile::tempdir;

#[test]
fn test_getall() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Set up test data with common prefixes
    let test_data: HashMap<&str, &str> = [
        ("user_1", "data1"),
        ("user_2", "data2"),
        ("user_3", "data3"),
        ("admin_1", "admin_data1"),
        ("admin_2", "admin_data2"),
        ("guest", "guest_data"),
    ].iter().cloned().collect();

    // Set all test data
    for (key, value) in &test_data {
        tree.set(key, value.as_bytes().to_vec())?;
    }

    // Test getall with 'user_' prefix
    let user_values = tree.getall("user_")?;

    // Should return 3 values
    assert_eq!(user_values.len(), 3);

    // Convert byte arrays to strings for easier comparison
    let user_value_strings: Vec<String> = user_values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();

    // Check all expected values are present
    assert!(user_value_strings.contains(&"data1".to_string()));
    assert!(user_value_strings.contains(&"data2".to_string()));
    assert!(user_value_strings.contains(&"data3".to_string()));

    // Test getall with 'admin_' prefix
    let admin_values = tree.getall("admin_")?;

    // Should return 2 values
    assert_eq!(admin_values.len(), 2);

    // Convert byte arrays to strings for easier comparison
    let admin_value_strings: Vec<String> = admin_values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();

    // Check all expected values are present
    assert!(admin_value_strings.contains(&"admin_data1".to_string()));
    assert!(admin_value_strings.contains(&"admin_data2".to_string()));

    // Test getall with empty prefix (should return all values)
    let all_values = tree.getall("")?;

    // Should return all 6 values
    assert_eq!(all_values.len(), test_data.len());

    // Test getall with non-existent prefix
    let non_existent_values = tree.getall("xyz")?;

    // Should return empty array
    assert_eq!(non_existent_values.len(), 0);

    Ok(())
}

#[test]
fn test_getall_with_updates() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Set initial values
    tree.set("key1", b"value1".to_vec())?;
    tree.set("key2", b"value2".to_vec())?;
    tree.set("key3", b"value3".to_vec())?;

    // Get initial values
    let initial_values = tree.getall("key")?;
    assert_eq!(initial_values.len(), 3);

    // Update a value
    tree.update("key2", b"updated_value2".to_vec())?;

    // Get values after update
    let updated_values = tree.getall("key")?;
    assert_eq!(updated_values.len(), 3);

    // Convert to strings for easier comparison
    let updated_value_strings: Vec<String> = updated_values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();

    // Check the updated value is present
    assert!(updated_value_strings.contains(&"value1".to_string()));
    assert!(updated_value_strings.contains(&"updated_value2".to_string()));
    assert!(updated_value_strings.contains(&"value3".to_string()));

    Ok(())
}

#[test]
fn test_getall_with_deletions() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Set initial values
    tree.set("prefix_1", b"value1".to_vec())?;
    tree.set("prefix_2", b"value2".to_vec())?;
    tree.set("prefix_3", b"value3".to_vec())?;
    tree.set("other", b"other_value".to_vec())?;

    // Get initial values
    let initial_values = tree.getall("prefix_")?;
    assert_eq!(initial_values.len(), 3);

    // Delete a key
    tree.delete("prefix_2")?;

    // Get values after deletion
    let after_delete_values = tree.getall("prefix_")?;
    assert_eq!(after_delete_values.len(), 2);

    // Convert to strings for easier comparison
    let after_delete_strings: Vec<String> = after_delete_values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();

    // Check the remaining values
    assert!(after_delete_strings.contains(&"value1".to_string()));
    assert!(after_delete_strings.contains(&"value3".to_string()));

    Ok(())
}
185 packages/data/radixtree/tests/prefix_test.rs Normal file
@@ -0,0 +1,185 @@
use radixtree::RadixTree;
use std::collections::HashMap;
use tempfile::tempdir;

#[test]
fn test_list() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Insert keys with various prefixes
    let test_data: HashMap<&str, &str> = [
        ("apple", "fruit1"),
        ("application", "software1"),
        ("apply", "verb1"),
        ("banana", "fruit2"),
        ("ball", "toy1"),
        ("cat", "animal1"),
        ("car", "vehicle1"),
        ("cargo", "shipping1"),
    ].iter().cloned().collect();

    // Set all test data
    for (key, value) in &test_data {
        tree.set(key, value.as_bytes().to_vec())?;
    }

    // Test prefix 'app' - should return apple, application, apply
    let app_keys = tree.list("app")?;
    assert_eq!(app_keys.len(), 3);
    assert!(app_keys.contains(&"apple".to_string()));
    assert!(app_keys.contains(&"application".to_string()));
    assert!(app_keys.contains(&"apply".to_string()));

    // Test prefix 'ba' - should return banana, ball
    let ba_keys = tree.list("ba")?;
    assert_eq!(ba_keys.len(), 2);
    assert!(ba_keys.contains(&"banana".to_string()));
    assert!(ba_keys.contains(&"ball".to_string()));

    // Test prefix 'car' - should return car, cargo
    let car_keys = tree.list("car")?;
    assert_eq!(car_keys.len(), 2);
    assert!(car_keys.contains(&"car".to_string()));
    assert!(car_keys.contains(&"cargo".to_string()));

    // Test prefix 'z' - should return empty list
    let z_keys = tree.list("z")?;
    assert_eq!(z_keys.len(), 0);

    // Test empty prefix - should return all keys
    let all_keys = tree.list("")?;
    assert_eq!(all_keys.len(), test_data.len());
    for key in test_data.keys() {
        assert!(all_keys.contains(&key.to_string()));
    }

    // Test exact key as prefix - should return just that key
    let exact_key = tree.list("apple")?;
    assert_eq!(exact_key.len(), 1);
    assert_eq!(exact_key[0], "apple");

    Ok(())
}

#[test]
fn test_list_with_deletion() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Set keys with common prefixes
    tree.set("test1", b"value1".to_vec())?;
    tree.set("test2", b"value2".to_vec())?;
    tree.set("test3", b"value3".to_vec())?;
    tree.set("other", b"value4".to_vec())?;

    // Initial check
    let test_keys = tree.list("test")?;
    assert_eq!(test_keys.len(), 3);
    assert!(test_keys.contains(&"test1".to_string()));
    assert!(test_keys.contains(&"test2".to_string()));
    assert!(test_keys.contains(&"test3".to_string()));

    // Delete one key
    tree.delete("test2")?;

    // Check after deletion
    let test_keys_after = tree.list("test")?;
    assert_eq!(test_keys_after.len(), 2);
    assert!(test_keys_after.contains(&"test1".to_string()));
    assert!(!test_keys_after.contains(&"test2".to_string()));
    assert!(test_keys_after.contains(&"test3".to_string()));

    // Check all keys
    let all_keys = tree.list("")?;
    assert_eq!(all_keys.len(), 3);
    assert!(all_keys.contains(&"other".to_string()));

    Ok(())
}

#[test]
fn test_list_edge_cases() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Test with empty tree
    let empty_result = tree.list("any")?;
    assert_eq!(empty_result.len(), 0);

    // Set a single key
    tree.set("single", b"value".to_vec())?;

    // Test with prefix that's longer than any key
    let long_prefix = tree.list("singlelonger")?;
    assert_eq!(long_prefix.len(), 0);

    // Test with partial prefix match
    let partial = tree.list("sing")?;
    assert_eq!(partial.len(), 1);
    assert_eq!(partial[0], "single");

    // Test with very long keys
    let long_key1 = "a".repeat(100) + "key1";
    let long_key2 = "a".repeat(100) + "key2";

    tree.set(&long_key1, b"value1".to_vec())?;
    tree.set(&long_key2, b"value2".to_vec())?;

    let long_prefix_result = tree.list(&"a".repeat(100))?;
    assert_eq!(long_prefix_result.len(), 2);
    assert!(long_prefix_result.contains(&long_key1));
    assert!(long_prefix_result.contains(&long_key2));

    Ok(())
}

#[test]
fn test_list_performance() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the test
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path, true)?;

    // Insert a large number of keys with different prefixes
    let prefixes = ["user", "post", "comment", "like", "share"];

    // Set 100 keys for each prefix (500 total)
    for prefix in &prefixes {
        for i in 0..100 {
            let key = format!("{}_{}", prefix, i);
            tree.set(&key, format!("value_{}", key).as_bytes().to_vec())?;
        }
    }

    // Test retrieving by each prefix
    for prefix in &prefixes {
        let keys = tree.list(prefix)?;
        assert_eq!(keys.len(), 100);

        // Verify all keys have the correct prefix
        for key in &keys {
            assert!(key.starts_with(prefix));
        }
    }

    // Test retrieving all keys
    let all_keys = tree.list("")?;
    assert_eq!(all_keys.len(), 500);

    Ok(())
}
180 packages/data/radixtree/tests/serialize_test.rs Normal file
@@ -0,0 +1,180 @@
|
||||
use radixtree::{Node, NodeRef};

#[test]
fn test_node_serialization() {
    // Create a node with some data
    let node = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: vec![
            NodeRef {
                key_part: "child1".to_string(),
                node_id: 1,
            },
            NodeRef {
                key_part: "child2".to_string(),
                node_id: 2,
            },
        ],
        is_leaf: true,
    };

    // Serialize the node
    let serialized = node.serialize();

    // Deserialize the node
    let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");

    // Verify the deserialized node matches the original
    assert_eq!(deserialized.key_segment, node.key_segment);
    assert_eq!(deserialized.value, node.value);
    assert_eq!(deserialized.is_leaf, node.is_leaf);
    assert_eq!(deserialized.children.len(), node.children.len());

    for (i, child) in node.children.iter().enumerate() {
        assert_eq!(deserialized.children[i].key_part, child.key_part);
        assert_eq!(deserialized.children[i].node_id, child.node_id);
    }
}

#[test]
fn test_empty_node_serialization() {
    // Create an empty node
    let node = Node {
        key_segment: "".to_string(),
        value: vec![],
        children: vec![],
        is_leaf: false,
    };

    // Serialize the node
    let serialized = node.serialize();

    // Deserialize the node
    let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");

    // Verify the deserialized node matches the original
    assert_eq!(deserialized.key_segment, node.key_segment);
    assert_eq!(deserialized.value, node.value);
    assert_eq!(deserialized.is_leaf, node.is_leaf);
    assert_eq!(deserialized.children.len(), node.children.len());
}

#[test]
fn test_node_with_many_children() {
    // Create a node with many children
    let mut children = Vec::new();
    for i in 0..100 {
        children.push(NodeRef {
            key_part: format!("child{}", i),
            node_id: i as u32,
        });
    }

    let node = Node {
        key_segment: "parent".to_string(),
        value: b"parent_value".to_vec(),
        children,
        is_leaf: true,
    };

    // Serialize the node
    let serialized = node.serialize();

    // Deserialize the node
    let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");

    // Verify the deserialized node matches the original
    assert_eq!(deserialized.key_segment, node.key_segment);
    assert_eq!(deserialized.value, node.value);
    assert_eq!(deserialized.is_leaf, node.is_leaf);
    assert_eq!(deserialized.children.len(), node.children.len());

    for (i, child) in node.children.iter().enumerate() {
        assert_eq!(deserialized.children[i].key_part, child.key_part);
        assert_eq!(deserialized.children[i].node_id, child.node_id);
    }
}

#[test]
fn test_node_with_large_value() {
    // Create a node with a large value
    let large_value = vec![0u8; 4096]; // 4KB value

    let node = Node {
        key_segment: "large_value".to_string(),
        value: large_value.clone(),
        children: vec![],
        is_leaf: true,
    };

    // Serialize the node
    let serialized = node.serialize();

    // Deserialize the node
    let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");

    // Verify the deserialized node matches the original
    assert_eq!(deserialized.key_segment, node.key_segment);
    assert_eq!(deserialized.value, node.value);
    assert_eq!(deserialized.is_leaf, node.is_leaf);
    assert_eq!(deserialized.children.len(), node.children.len());
}

#[test]
fn test_version_compatibility() {
    // This test ensures that the serialization format is compatible with version 1

    // Create a node
    let node = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: vec![
            NodeRef {
                key_part: "child".to_string(),
                node_id: 1,
            },
        ],
        is_leaf: true,
    };

    // Serialize the node
    let serialized = node.serialize();

    // Verify the first byte is the version byte (1)
    assert_eq!(serialized[0], 1);

    // Deserialize the node
    let deserialized = Node::deserialize(&serialized).expect("Failed to deserialize node");

    // Verify the deserialized node matches the original
    assert_eq!(deserialized.key_segment, node.key_segment);
    assert_eq!(deserialized.value, node.value);
    assert_eq!(deserialized.is_leaf, node.is_leaf);
    assert_eq!(deserialized.children.len(), node.children.len());
}

#[test]
fn test_invalid_serialization() {
    // Test with empty data
    let result = Node::deserialize(&[]);
    assert!(result.is_err());

    // Test with invalid version
    let result = Node::deserialize(&[2, 0, 0, 0, 0]);
    assert!(result.is_err());

    // Test with truncated data
    let node = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: vec![],
        is_leaf: true,
    };

    let serialized = node.serialize();
    let truncated = &serialized[0..serialized.len() / 2];

    let result = Node::deserialize(truncated);
    assert!(result.is_err());
}
30  packages/data/tst/Cargo.toml  Normal file
@@ -0,0 +1,30 @@
[package]
name = "tst"
version = "0.1.0"
edition = "2021"
description = "A persistent ternary search tree implementation using OurDB for storage"
authors = ["OurWorld Team"]

[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"

[dev-dependencies]
# criterion = "0.5.1"

# Uncomment when benchmarks are implemented
# [[bench]]
# name = "tst_benchmarks"
# harness = false

[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"

[[example]]
name = "prefix_ops"
path = "examples/prefix_ops.rs"

[[example]]
name = "performance"
path = "examples/performance.rs"
185  packages/data/tst/README.md  Normal file
@@ -0,0 +1,185 @@
# Ternary Search Tree (TST)

A persistent ternary search tree implementation in Rust using OurDB for storage.

## Overview

TST is a tree data structure that enables efficient string key operations backed by persistent storage. This implementation is well suited to prefix-heavy workloads such as auto-complete, routing tables, and dictionaries.

A ternary search tree is a type of trie in which each node has three children: left, middle, and right. Unlike a radix tree, which compresses common prefixes, a TST stores one character per node and uses a binary search tree-like structure for efficient traversal.

Key characteristics:
- Each node stores a single character
- Nodes have three children: left (for characters < current), middle (for the next character in the key), and right (for characters > current)
- End-of-key nodes store the associated values
- Binary-search-tree-like branching keeps per-character comparisons cheap
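For example, inserting the keys `cat`, `cup`, and `up` (in that order; the exact shape depends on insertion order) produces the layout below, where `*` marks end-of-key nodes:

```text
(root) c
 ├─ middle → a
 │           ├─ middle → t*              "cat"
 │           └─ right  → u
 │                       └─ middle → p*  "cup"
 └─ right  → u
             └─ middle → p*              "up"
```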
## Features

- Efficient string key operations
- Persistent storage using OurDB backend
- Balanced tree structure for consistent performance
- Support for binary values
- Thread-safe operations through OurDB

## Usage

Add the dependency to your `Cargo.toml`:

```toml
[dependencies]
tst = { path = "../tst" }
```

### Basic Example

```rust
use tst::TST;

fn main() -> Result<(), tst::Error> {
    // Create a new ternary search tree
    let mut tree = TST::new("/tmp/tst", false)?;

    // Set key-value pairs
    tree.set("hello", b"world".to_vec())?;
    tree.set("help", b"me".to_vec())?;

    // Get values by key
    let value = tree.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world

    // List keys by prefix
    let keys = tree.list("hel")?; // Returns ["hello", "help"]
    println!("Keys with prefix 'hel': {:?}", keys);

    // Get all values by prefix
    let values = tree.getall("hel")?; // Returns [b"world", b"me"]

    // Delete keys
    tree.delete("help")?;

    Ok(())
}
```

## API

### Creating a TST

```rust
// Create a new ternary search tree
let mut tree = TST::new("/tmp/tst", false)?;

// Create a new ternary search tree and reset if it exists
let mut tree = TST::new("/tmp/tst", true)?;
```

### Setting Values

```rust
// Set a key-value pair
tree.set("key", b"value".to_vec())?;
```

### Getting Values

```rust
// Get a value by key
let value = tree.get("key")?;
```

### Deleting Keys

```rust
// Delete a key
tree.delete("key")?;
```

### Listing Keys by Prefix

```rust
// List all keys with a given prefix
let keys = tree.list("prefix")?;
```

### Getting All Values by Prefix

```rust
// Get all values for keys with a given prefix
let values = tree.getall("prefix")?;
```

## Performance Characteristics

- Search: O(k) where k is the key length
- Insert: O(k) for new keys
- Delete: O(k) plus potential node cleanup
- Space: O(n) where n is the total number of nodes

## Use Cases

TST is particularly useful for:
- Prefix-based searching
- Auto-complete systems
- Dictionary implementations
- Spell checking
- Any application requiring efficient string key operations with persistence

## Implementation Details

The TST implementation uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
- Node serialization includes version tracking for format evolution
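Concretely, each node record is written with the following byte layout (format version 1, little-endian; see `src/serialize.rs`):

```text
offset   size  field
0        1     format version (currently 1)
1        4     character, encoded as a u32 code point
5        1     is_end_of_key flag (0 or 1)
6        4     value length n (0 when the node is not an end of key)
10       n     value bytes
10 + n   4     left child node ID (0 = none)
14 + n   4     middle child node ID (0 = none)
18 + n   4     right child node ID (0 = none)
```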
## Running Tests

The project includes a comprehensive test suite that verifies all functionality:

```bash
cd packages/data/tst

# Run all tests
cargo test

# Run specific test files
cargo test --test basic_test
cargo test --test prefix_test
```

## Running Examples

The project includes example applications that demonstrate how to use the TST:

```bash
# Run the basic usage example
cargo run --example basic_usage

# Run the prefix operations example
cargo run --example prefix_ops

# Run the performance test
cargo run --example performance
```

## Comparison with RadixTree

While both TST and RadixTree provide efficient string key operations, they have different characteristics:

- **TST**: Stores one character per node, with a balanced structure for consistent performance across operations.
- **RadixTree**: Compresses common prefixes, which can be more space-efficient for keys with long common prefixes.

Choose TST when:
- You need balanced performance across all operations
- Your keys don't share long common prefixes
- You want a simpler implementation with predictable performance

Choose RadixTree when:
- Space efficiency is a priority
- Your keys share long common prefixes
- You prioritize lookup performance over balanced performance
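As a rough illustration with two hypothetical keys that share a long prefix:

```text
Keys: "user:profile:123", "user:profile:124"

TST:       walks one node per character, so the shared run
           "user:profile:12" costs one middle link per character,
           followed by a left/right split on '3' vs '4'.
RadixTree: stores the shared segment "user:profile:12" once as a
           compressed edge, with two short child edges "3" and "4".
```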
## License

This project is licensed under the same license as the HeroCode project.
75  packages/data/tst/examples/basic_usage.rs  Normal file
@@ -0,0 +1,75 @@
use std::time::Instant;
use tst::TST;

fn main() -> Result<(), tst::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("tst_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating ternary search tree at: {}", db_path.display());

    // Create a new TST
    let mut tree = TST::new(db_path.to_str().unwrap(), true)?;

    // Store some data
    println!("Inserting data...");
    tree.set("hello", b"world".to_vec())?;
    tree.set("help", b"me".to_vec())?;
    tree.set("helicopter", b"flying".to_vec())?;
    tree.set("apple", b"fruit".to_vec())?;
    tree.set("application", b"software".to_vec())?;
    tree.set("banana", b"yellow".to_vec())?;

    // Retrieve and print the data
    let value = tree.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&value));

    // List keys with prefix
    println!("\nListing keys with prefix 'hel':");
    let start = Instant::now();
    let keys = tree.list("hel")?;
    let duration = start.elapsed();

    for key in &keys {
        println!(" {}", key);
    }
    println!("Found {} keys in {:?}", keys.len(), duration);

    // Get all values with prefix
    println!("\nGetting all values with prefix 'app':");
    let start = Instant::now();
    let values = tree.getall("app")?;
    let duration = start.elapsed();

    for (i, value) in values.iter().enumerate() {
        println!(" Value {}: {}", i + 1, String::from_utf8_lossy(value));
    }
    println!("Found {} values in {:?}", values.len(), duration);

    // Delete a key
    println!("\nDeleting 'help'...");
    tree.delete("help")?;

    // Verify deletion
    println!("Listing keys with prefix 'hel' after deletion:");
    let keys_after = tree.list("hel")?;
    for key in &keys_after {
        println!(" {}", key);
    }

    // Try to get a deleted key
    match tree.get("help") {
        Ok(_) => println!("Unexpectedly found 'help' after deletion!"),
        Err(e) => println!("As expected, 'help' was not found: {}", e),
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
167  packages/data/tst/examples/performance.rs  Normal file
@@ -0,0 +1,167 @@
use std::io::{self, Write};
use std::time::{Duration, Instant};
use tst::TST;

// Function to generate a test value of specified size
fn generate_test_value(index: usize, size: usize) -> Vec<u8> {
    let base_value = format!("val{:08}", index);
    let mut value = Vec::with_capacity(size);

    // Fill with repeating pattern to reach desired size
    while value.len() < size {
        value.extend_from_slice(base_value.as_bytes());
    }

    // Truncate to exact size
    value.truncate(size);

    value
}

// Number of records to insert
const TOTAL_RECORDS: usize = 100_000;
// How often to report progress (every X records)
const PROGRESS_INTERVAL: usize = 1_000;
// How many records to use for performance sampling
const PERFORMANCE_SAMPLE_SIZE: usize = 100;

fn main() -> Result<(), tst::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("tst_performance_test");

    // Completely remove and recreate the directory to ensure a clean start
    if db_path.exists() {
        std::fs::remove_dir_all(&db_path)?;
    }
    std::fs::create_dir_all(&db_path)?;

    println!("Creating ternary search tree at: {}", db_path.display());
    println!("Will insert {} records and show progress...", TOTAL_RECORDS);

    // Create a new TST
    let mut tree = TST::new(db_path.to_str().unwrap(), true)?;

    // Track overall time
    let start_time = Instant::now();

    // Track performance metrics
    let mut insertion_times = Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL);
    let mut last_batch_time = Instant::now();
    let mut last_batch_records = 0;

    // Insert records and track progress
    for i in 0..TOTAL_RECORDS {
        let key = format!("key:{:08}", i);
        // Generate a 100-byte value
        let value = generate_test_value(i, 100);

        // Time the insertion of every Nth record for performance sampling
        if i % PERFORMANCE_SAMPLE_SIZE == 0 {
            let insert_start = Instant::now();
            tree.set(&key, value)?;
            let insert_duration = insert_start.elapsed();

            // Only print detailed timing for specific samples to avoid flooding output
            if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
                println!("Record {}: Insertion took {:?}", i, insert_duration);
            }
        } else {
            tree.set(&key, value)?;
        }

        // Show progress at intervals
        if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
            let records_in_batch = i + 1 - last_batch_records;
            let batch_duration = last_batch_time.elapsed();
            let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();

            insertion_times.push((i + 1, batch_duration));

            print!(
                "\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                i + 1,
                TOTAL_RECORDS,
                (i + 1) as f64 / TOTAL_RECORDS as f64 * 100.0,
                records_per_second
            );
            io::stdout().flush().unwrap();

            last_batch_time = Instant::now();
            last_batch_records = i + 1;
        }
    }

    let total_duration = start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!(
        "Total time to insert {} records: {:?}",
        TOTAL_RECORDS, total_duration
    );
    println!(
        "Average insertion rate: {:.2} records/second",
        TOTAL_RECORDS as f64 / total_duration.as_secs_f64()
    );

    // Show performance trend
    println!("\nPerformance Trend (records inserted vs. time per batch):");
    for (i, (record_count, duration)) in insertion_times.iter().enumerate() {
        if i % 10 == 0 || i == insertion_times.len() - 1 {
            // Only show every 10th point to avoid too much output
            println!(
                " After {} records: {:?} for {} records ({:.2} records/sec)",
                record_count,
                duration,
                PROGRESS_INTERVAL,
                PROGRESS_INTERVAL as f64 / duration.as_secs_f64()
            );
        }
    }

    // Test access performance with distributed samples
    println!("\nTesting access performance with distributed samples...");
    let mut total_get_time = Duration::new(0, 0);
    let num_samples = 1000;

    // Use a simple distribution pattern instead of random
    for i in 0..num_samples {
        // Distribute samples across the entire range
        let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
        let key = format!("key:{:08}", sample_id);

        let get_start = Instant::now();
        let _ = tree.get(&key)?;
        total_get_time += get_start.elapsed();
    }

    println!(
        "Average time to retrieve a record: {:?}",
        total_get_time / num_samples as u32
    );

    // Test prefix search performance
    println!("\nTesting prefix search performance...");
    let prefixes = ["key:0", "key:1", "key:5", "key:9"];

    for prefix in &prefixes {
        let list_start = Instant::now();
        let keys = tree.list(prefix)?;
        let list_duration = list_start.elapsed();

        println!(
            "Found {} keys with prefix '{}' in {:?}",
            keys.len(),
            prefix,
            list_duration
        );
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
184  packages/data/tst/examples/prefix_ops.rs  Normal file
@@ -0,0 +1,184 @@
use std::time::Instant;
use tst::TST;

fn main() -> Result<(), tst::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("tst_prefix_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating ternary search tree at: {}", db_path.display());

    // Create a new TST
    let mut tree = TST::new(db_path.to_str().unwrap(), true)?;

    // Insert a variety of keys with different prefixes
    println!("Inserting data with various prefixes...");

    // Names
    let names = [
        "Alice",
        "Alexander",
        "Amanda",
        "Andrew",
        "Amy",
        "Bob",
        "Barbara",
        "Benjamin",
        "Brenda",
        "Brian",
        "Charlie",
        "Catherine",
        "Christopher",
        "Cynthia",
        "Carl",
        "David",
        "Diana",
        "Daniel",
        "Deborah",
        "Donald",
        "Edward",
        "Elizabeth",
        "Eric",
        "Emily",
        "Ethan",
    ];

    for (i, name) in names.iter().enumerate() {
        let value = format!("person-{}", i).into_bytes();
        tree.set(name, value)?;
    }

    // Cities
    let cities = [
        "New York",
        "Los Angeles",
        "Chicago",
        "Houston",
        "Phoenix",
        "Philadelphia",
        "San Antonio",
        "San Diego",
        "Dallas",
        "San Jose",
        "Austin",
        "Jacksonville",
        "Fort Worth",
        "Columbus",
        "San Francisco",
        "Charlotte",
        "Indianapolis",
        "Seattle",
        "Denver",
        "Washington",
    ];

    for (i, city) in cities.iter().enumerate() {
        let value = format!("city-{}", i).into_bytes();
        tree.set(city, value)?;
    }

    // Countries
    let countries = [
        "United States",
        "Canada",
        "Mexico",
        "Brazil",
        "Argentina",
        "United Kingdom",
        "France",
        "Germany",
        "Italy",
        "Spain",
        "China",
        "Japan",
        "India",
        "Australia",
        "Russia",
    ];

    for (i, country) in countries.iter().enumerate() {
        let value = format!("country-{}", i).into_bytes();
        tree.set(country, value)?;
    }

    println!(
        "Total items inserted: {}",
        names.len() + cities.len() + countries.len()
    );

    // Test prefix operations
    test_prefix(&mut tree, "A")?;
    test_prefix(&mut tree, "B")?;
    test_prefix(&mut tree, "C")?;
    test_prefix(&mut tree, "San")?;
    test_prefix(&mut tree, "United")?;

    // Test non-existent prefix
    test_prefix(&mut tree, "Z")?;

    // Test empty prefix (should return all keys)
    println!("\nTesting empty prefix (should return all keys):");
    let start = Instant::now();
    let all_keys = tree.list("")?;
    let duration = start.elapsed();

    println!(
        "Found {} keys with empty prefix in {:?}",
        all_keys.len(),
        duration
    );
    println!("First 5 keys (alphabetically):");
    for key in all_keys.iter().take(5) {
        println!(" {}", key);
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}

fn test_prefix(tree: &mut TST, prefix: &str) -> Result<(), tst::Error> {
    println!("\nTesting prefix '{}':", prefix);

    // Test list operation
    let start = Instant::now();
    let keys = tree.list(prefix)?;
    let list_duration = start.elapsed();

    println!(
        "Found {} keys with prefix '{}' in {:?}",
        keys.len(),
        prefix,
        list_duration
    );

    if !keys.is_empty() {
        println!("Keys:");
        for key in &keys {
            println!(" {}", key);
        }

        // Test getall operation
        let start = Instant::now();
        let values = tree.getall(prefix)?;
        let getall_duration = start.elapsed();

        println!("Retrieved {} values in {:?}", values.len(), getall_duration);
        println!(
            "First value: {}",
            if !values.is_empty() {
                String::from_utf8_lossy(&values[0])
            } else {
                "None".into()
            }
        );
    }

    Ok(())
}
36  packages/data/tst/src/error.rs  Normal file
@@ -0,0 +1,36 @@
//! Error types for the TST module.

use std::io;
use thiserror::Error;

/// Error type for TST operations.
#[derive(Debug, Error)]
pub enum Error {
    /// Error from OurDB operations.
    #[error("OurDB error: {0}")]
    OurDB(#[from] ourdb::Error),

    /// Error when a key is not found.
    #[error("Key not found: {0}")]
    KeyNotFound(String),

    /// Error when a prefix is not found.
    #[error("Prefix not found: {0}")]
    PrefixNotFound(String),

    /// Error during serialization.
    #[error("Serialization error: {0}")]
    Serialization(String),

    /// Error during deserialization.
    #[error("Deserialization error: {0}")]
    Deserialization(String),

    /// Error for invalid operations.
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// IO error.
    #[error("IO error: {0}")]
    IO(#[from] io::Error),
}
122  packages/data/tst/src/lib.rs  Normal file
@@ -0,0 +1,122 @@
//! TST is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent ternary search tree that can be used for efficient
//! string key operations, such as auto-complete, routing tables, and more.

mod error;
mod node;
mod operations;
mod serialize;

pub use error::Error;
pub use node::TSTNode;

use ourdb::OurDB;

/// TST represents a ternary search tree data structure with persistent storage.
pub struct TST {
    /// Database for persistent storage
    db: OurDB,

    /// Database ID of the root node
    root_id: Option<u32>,
}

impl TST {
    /// Creates a new TST with the specified database path.
    ///
    /// # Arguments
    ///
    /// * `path` - The path to the database directory
    /// * `reset` - Whether to reset the database if it exists
    ///
    /// # Returns
    ///
    /// A new `TST` instance
    ///
    /// # Errors
    ///
    /// Returns an error if the database cannot be created or opened
    pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
        operations::new_tst(path, reset)
    }

    /// Sets a key-value pair in the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to set
    /// * `value` - The value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
        operations::set(self, key, value)
    }

    /// Gets a value by key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to get
    ///
    /// # Returns
    ///
    /// The value associated with the key
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
        operations::get(self, key)
    }

    /// Deletes a key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to delete
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn delete(&mut self, key: &str) -> Result<(), Error> {
        operations::delete(self, key)
    }

    /// Lists all keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
        operations::list(self, prefix)
    }

    /// Gets all values for keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of values for keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
        operations::getall(self, prefix)
    }
}
49  packages/data/tst/src/node.rs  Normal file
@@ -0,0 +1,49 @@
//! Node types for the TST module.

/// Represents a node in the ternary search tree.
#[derive(Debug, Clone, PartialEq)]
pub struct TSTNode {
    /// The character stored at this node.
    pub character: char,

    /// Value stored at this node (empty if not end of key).
    pub value: Vec<u8>,

    /// Whether this node represents the end of a key.
    pub is_end_of_key: bool,

    /// Reference to the left child node (for characters < current character).
    pub left_id: Option<u32>,

    /// Reference to the middle child node (for next character in key).
    pub middle_id: Option<u32>,

    /// Reference to the right child node (for characters > current character).
    pub right_id: Option<u32>,
}

impl TSTNode {
    /// Creates a new node.
    pub fn new(character: char, value: Vec<u8>, is_end_of_key: bool) -> Self {
        Self {
            character,
            value,
            is_end_of_key,
            left_id: None,
            middle_id: None,
            right_id: None,
        }
    }

    /// Creates a new root node.
    pub fn new_root() -> Self {
        Self {
            character: '\0', // Use null character for root
            value: Vec::new(),
            is_end_of_key: false,
            left_id: None,
            middle_id: None,
            right_id: None,
        }
    }
}
453  packages/data/tst/src/operations.rs  Normal file
@@ -0,0 +1,453 @@
//! Implementation of TST operations.

use crate::error::Error;
use crate::node::TSTNode;
use crate::TST;
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;

/// Creates a new TST with the specified database path.
pub fn new_tst(path: &str, reset: bool) -> Result<TST, Error> {
    let path_buf = PathBuf::from(path);

    // Create the configuration for OurDB with reset parameter
    let config = OurDBConfig {
        path: path_buf.clone(),
        incremental_mode: true,
        file_size: Some(1024 * 1024), // 1MB file size for better performance with large datasets
        keysize: Some(4),             // Use keysize=4 (default)
        reset: Some(reset),           // Use the reset parameter
    };

    // Create a new OurDB instance (it will handle reset internally)
    let mut db = OurDB::new(config)?;

    let root_id = if db.get_next_id()? == 1 || reset {
        // Create a new root node
        let root = TSTNode::new_root();
        let root_id = db.set(OurDBSetArgs {
            id: None,
            data: &root.serialize(),
        })?;

        Some(root_id)
    } else {
        // Use existing root node
        Some(1) // Root node always has ID 1
    };

    Ok(TST { db, root_id })
}

/// Sets a key-value pair in the tree.
pub fn set(tree: &mut TST, key: &str, value: Vec<u8>) -> Result<(), Error> {
    if key.is_empty() {
        return Err(Error::InvalidOperation("Empty key not allowed".to_string()));
    }

    let root_id = match tree.root_id {
        Some(id) => id,
        None => return Err(Error::InvalidOperation("Tree not initialized".to_string())),
    };

    let chars: Vec<char> = key.chars().collect();
    set_recursive(tree, root_id, &chars, 0, value)?;

    Ok(())
}

/// Recursive helper function for setting a key-value pair.
fn set_recursive(
    tree: &mut TST,
    node_id: u32,
    chars: &[char],
    pos: usize,
    value: Vec<u8>,
) -> Result<u32, Error> {
    let mut node = tree.get_node(node_id)?;

    if pos >= chars.len() {
        // We've reached the end of the key
        node.is_end_of_key = true;
        node.value = value;
        return tree.save_node(Some(node_id), &node);
    }

    let current_char = chars[pos];

    if node.character == '\0' {
        // Root node or empty node, set the character
        node.character = current_char;
        let node_id = tree.save_node(Some(node_id), &node)?;

        // Continue with the next character
        if pos + 1 < chars.len() {
            let new_node = TSTNode::new(chars[pos + 1], Vec::new(), false);
            let new_id = tree.save_node(None, &new_node)?;

            let mut updated_node = tree.get_node(node_id)?;
            updated_node.middle_id = Some(new_id);
            tree.save_node(Some(node_id), &updated_node)?;

            return set_recursive(tree, new_id, chars, pos + 1, value);
        } else {
            // This is the last character
            let mut updated_node = tree.get_node(node_id)?;
            updated_node.is_end_of_key = true;
            updated_node.value = value;
            return tree.save_node(Some(node_id), &updated_node);
        }
    }

    if current_char < node.character {
        // Go left
        if let Some(left_id) = node.left_id {
            return set_recursive(tree, left_id, chars, pos, value);
        } else {
            // Create new left node
            let new_node = TSTNode::new(current_char, Vec::new(), false);
            let new_id = tree.save_node(None, &new_node)?;

            // Update current node
            node.left_id = Some(new_id);
            tree.save_node(Some(node_id), &node)?;

            return set_recursive(tree, new_id, chars, pos, value);
        }
    } else if current_char > node.character {
        // Go right
        if let Some(right_id) = node.right_id {
            return set_recursive(tree, right_id, chars, pos, value);
        } else {
            // Create new right node
            let new_node = TSTNode::new(current_char, Vec::new(), false);
            let new_id = tree.save_node(None, &new_node)?;

            // Update current node
            node.right_id = Some(new_id);
            tree.save_node(Some(node_id), &node)?;

            return set_recursive(tree, new_id, chars, pos, value);
        }
    } else {
        // Character matches, go middle (next character)
        if pos + 1 >= chars.len() {
            // This is the last character
            node.is_end_of_key = true;
            node.value = value;
            return tree.save_node(Some(node_id), &node);
        }

        if let Some(middle_id) = node.middle_id {
            return set_recursive(tree, middle_id, chars, pos + 1, value);
        } else {
            // Create new middle node
            let new_node = TSTNode::new(chars[pos + 1], Vec::new(), false);
            let new_id = tree.save_node(None, &new_node)?;

            // Update current node
            node.middle_id = Some(new_id);
            tree.save_node(Some(node_id), &node)?;

            return set_recursive(tree, new_id, chars, pos + 1, value);
        }
    }
}

/// Gets a value by key from the tree.
pub fn get(tree: &mut TST, key: &str) -> Result<Vec<u8>, Error> {
    if key.is_empty() {
        return Err(Error::InvalidOperation("Empty key not allowed".to_string()));
    }

    let root_id = match tree.root_id {
        Some(id) => id,
        None => return Err(Error::InvalidOperation("Tree not initialized".to_string())),
    };

    let chars: Vec<char> = key.chars().collect();
    let node_id = find_node(tree, root_id, &chars, 0)?;

    let node = tree.get_node(node_id)?;
    if node.is_end_of_key {
        Ok(node.value.clone())
    } else {
        Err(Error::KeyNotFound(key.to_string()))
    }
}

/// Finds a node by key.
fn find_node(tree: &mut TST, node_id: u32, chars: &[char], pos: usize) -> Result<u32, Error> {
    let node = tree.get_node(node_id)?;

    if pos >= chars.len() {
        return Ok(node_id);
    }

    let current_char = chars[pos];

    if current_char < node.character {
        // Go left
        if let Some(left_id) = node.left_id {
            find_node(tree, left_id, chars, pos)
        } else {
            Err(Error::KeyNotFound(chars.iter().collect()))
        }
    } else if current_char > node.character {
        // Go right
        if let Some(right_id) = node.right_id {
            find_node(tree, right_id, chars, pos)
        } else {
            Err(Error::KeyNotFound(chars.iter().collect()))
        }
    } else {
        // Character matches
        if pos + 1 >= chars.len() {
            // This is the last character
            Ok(node_id)
        } else if let Some(middle_id) = node.middle_id {
            // Go to next character
            find_node(tree, middle_id, chars, pos + 1)
        } else {
            Err(Error::KeyNotFound(chars.iter().collect()))
        }
    }
}

/// Deletes a key from the tree.
pub fn delete(tree: &mut TST, key: &str) -> Result<(), Error> {
    if key.is_empty() {
        return Err(Error::InvalidOperation("Empty key not allowed".to_string()));
    }

    let root_id = match tree.root_id {
        Some(id) => id,
        None => return Err(Error::InvalidOperation("Tree not initialized".to_string())),
    };

    let chars: Vec<char> = key.chars().collect();
    let node_id = find_node(tree, root_id, &chars, 0)?;

    let mut node = tree.get_node(node_id)?;

    if !node.is_end_of_key {
        return Err(Error::KeyNotFound(key.to_string()));
    }

    // If the node has any children, just mark it as not end of key
    if node.middle_id.is_some() || node.left_id.is_some() || node.right_id.is_some() {
        node.is_end_of_key = false;
        node.value = Vec::new();
        tree.save_node(Some(node_id), &node)?;
        return Ok(());
    }

    // Otherwise, we would need to remove the node and update its parent.
    // This is more complex and would require tracking the path to the node,
    // so for simplicity we just mark it as not end of key for now.
    node.is_end_of_key = false;
    node.value = Vec::new();
    tree.save_node(Some(node_id), &node)?;

    Ok(())
}

/// Lists all keys with a given prefix.
pub fn list(tree: &mut TST, prefix: &str) -> Result<Vec<String>, Error> {
    let root_id = match tree.root_id {
        Some(id) => id,
        None => return Err(Error::InvalidOperation("Tree not initialized".to_string())),
    };

    let mut result = Vec::new();

    // Handle empty prefix case - will return all keys
    if prefix.is_empty() {
        collect_all_keys(tree, root_id, String::new(), &mut result)?;
        return Ok(result);
    }

    // Find the node corresponding to the prefix
    let chars: Vec<char> = prefix.chars().collect();
    let node_id = match find_prefix_node(tree, root_id, &chars, 0) {
        Ok(id) => id,
        Err(_) => return Ok(Vec::new()), // Prefix not found, return empty list
    };

    // Start from the prefix minus its last character, since that last
    // character is stored in the node we just found
    let prefix_base = if chars.len() > 1 {
        chars[0..chars.len() - 1].iter().collect()
    } else {
        String::new()
    };

    // Collect all keys from the subtree
    collect_keys_with_prefix(tree, node_id, prefix_base, &mut result)?;

    Ok(result)
}

/// Finds the node corresponding to a prefix.
fn find_prefix_node(
    tree: &mut TST,
    node_id: u32,
    chars: &[char],
    pos: usize,
) -> Result<u32, Error> {
    if pos >= chars.len() {
        return Ok(node_id);
    }

    let node = tree.get_node(node_id)?;
    let current_char = chars[pos];

    if current_char < node.character {
        // Go left
        if let Some(left_id) = node.left_id {
            find_prefix_node(tree, left_id, chars, pos)
        } else {
            Err(Error::PrefixNotFound(chars.iter().collect()))
        }
    } else if current_char > node.character {
        // Go right
        if let Some(right_id) = node.right_id {
            find_prefix_node(tree, right_id, chars, pos)
        } else {
            Err(Error::PrefixNotFound(chars.iter().collect()))
        }
    } else {
        // Character matches
        if pos + 1 >= chars.len() {
            // This is the last character of the prefix
            Ok(node_id)
        } else if let Some(middle_id) = node.middle_id {
            // Go to next character
            find_prefix_node(tree, middle_id, chars, pos + 1)
        } else {
            Err(Error::PrefixNotFound(chars.iter().collect()))
        }
    }
}

/// Collects all keys with a given prefix.
fn collect_keys_with_prefix(
    tree: &mut TST,
    node_id: u32,
    current_path: String,
    result: &mut Vec<String>,
) -> Result<(), Error> {
    let node = tree.get_node(node_id)?;

    let mut new_path = current_path.clone();

    // For non-root nodes, add the character to the path
    if node.character != '\0' {
        new_path.push(node.character);
    }

    // If this node is an end of key, add it to the result
    if node.is_end_of_key {
        result.push(new_path.clone());
    }

    // Recursively collect keys from all children
    if let Some(left_id) = node.left_id {
        collect_keys_with_prefix(tree, left_id, current_path.clone(), result)?;
    }

    if let Some(middle_id) = node.middle_id {
        collect_keys_with_prefix(tree, middle_id, new_path.clone(), result)?;
    }

    if let Some(right_id) = node.right_id {
        collect_keys_with_prefix(tree, right_id, current_path.clone(), result)?;
    }

    Ok(())
}

/// Recursively collects all keys under a node.
fn collect_all_keys(
    tree: &mut TST,
    node_id: u32,
    current_path: String,
    result: &mut Vec<String>,
) -> Result<(), Error> {
    let node = tree.get_node(node_id)?;

    let mut new_path = current_path.clone();

    // Skip adding the character for the root node
    if node.character != '\0' {
        new_path.push(node.character);
    }

    // If this node is an end of key, add it to the result
    if node.is_end_of_key {
        result.push(new_path.clone());
    }

    // Recursively collect keys from all children
    if let Some(left_id) = node.left_id {
        collect_all_keys(tree, left_id, current_path.clone(), result)?;
    }

    if let Some(middle_id) = node.middle_id {
        collect_all_keys(tree, middle_id, new_path.clone(), result)?;
    }

    if let Some(right_id) = node.right_id {
        collect_all_keys(tree, right_id, current_path.clone(), result)?;
    }

    Ok(())
}

/// Gets all values for keys with a given prefix.
pub fn getall(tree: &mut TST, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
    // Get all matching keys
    let keys = list(tree, prefix)?;

    // Get values for each key
    let mut values = Vec::new();
    let mut errors = Vec::new();

    for key in keys {
        match get(tree, &key) {
            Ok(value) => values.push(value),
            Err(e) => errors.push(format!("Error getting value for key '{}': {:?}", key, e)),
        }
    }

    // If we couldn't get any values but had keys, surface the collected errors
    if values.is_empty() && !errors.is_empty() {
        return Err(Error::InvalidOperation(errors.join("; ")));
    }

    Ok(values)
}

impl TST {
    /// Helper function to get a node from the database.
    pub(crate) fn get_node(&mut self, node_id: u32) -> Result<TSTNode, Error> {
        match self.db.get(node_id) {
            Ok(data) => TSTNode::deserialize(&data),
            Err(err) => Err(Error::OurDB(err)),
        }
    }

    /// Helper function to save a node to the database.
    pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &TSTNode) -> Result<u32, Error> {
        let data = node.serialize();
        let args = OurDBSetArgs {
            id: node_id,
            data: &data,
        };
        match self.db.set(args) {
            Ok(id) => Ok(id),
            Err(err) => Err(Error::OurDB(err)),
        }
    }
}
129  packages/data/tst/src/serialize.rs  Normal file
@@ -0,0 +1,129 @@
//! Serialization and deserialization for TST nodes.

use crate::error::Error;
use crate::node::TSTNode;

/// Current binary format version.
const VERSION: u8 = 1;

impl TSTNode {
    /// Serializes a node to bytes for storage.
    pub fn serialize(&self) -> Vec<u8> {
        let mut buffer = Vec::new();

        // Version
        buffer.push(VERSION);

        // Character (as UTF-32)
        let char_bytes = (self.character as u32).to_le_bytes();
        buffer.extend_from_slice(&char_bytes);

        // Is end of key
        buffer.push(if self.is_end_of_key { 1 } else { 0 });

        // Value (only if is_end_of_key)
        if self.is_end_of_key {
            let value_len = (self.value.len() as u32).to_le_bytes();
            buffer.extend_from_slice(&value_len);
            buffer.extend_from_slice(&self.value);
        } else {
            // Zero length
            buffer.extend_from_slice(&[0, 0, 0, 0]);
        }

        // Child pointers
        let left_id = self.left_id.unwrap_or(0).to_le_bytes();
        buffer.extend_from_slice(&left_id);

        let middle_id = self.middle_id.unwrap_or(0).to_le_bytes();
        buffer.extend_from_slice(&middle_id);

        let right_id = self.right_id.unwrap_or(0).to_le_bytes();
        buffer.extend_from_slice(&right_id);

        buffer
    }

    /// Deserializes bytes to a node.
    pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
        if data.len() < 14 {
            // Quick sanity check (version + character + flag + value length);
            // the value bytes and child pointers are bounds-checked below.
            return Err(Error::Deserialization("Data too short".to_string()));
        }

        let mut pos = 0;

        // Version
        let version = data[pos];
        pos += 1;

        if version != VERSION {
            return Err(Error::Deserialization(format!(
                "Unsupported version: {}",
                version
            )));
        }

        // Character
        let char_bytes = [data[pos], data[pos + 1], data[pos + 2], data[pos + 3]];
        let char_code = u32::from_le_bytes(char_bytes);
        let character = char::from_u32(char_code)
            .ok_or_else(|| Error::Deserialization("Invalid character".to_string()))?;
        pos += 4;

        // Is end of key
        let is_end_of_key = data[pos] != 0;
        pos += 1;

        // Value length
        let value_len_bytes = [data[pos], data[pos + 1], data[pos + 2], data[pos + 3]];
        let value_len = u32::from_le_bytes(value_len_bytes) as usize;
        pos += 4;

        // Value
        let value = if value_len > 0 {
            if pos + value_len > data.len() {
                return Err(Error::Deserialization(
                    "Value length exceeds data".to_string(),
                ));
            }
            data[pos..pos + value_len].to_vec()
        } else {
            Vec::new()
        };
        pos += value_len;

        // Child pointers
        if pos + 12 > data.len() {
            return Err(Error::Deserialization(
                "Data too short for child pointers".to_string(),
            ));
        }

        let left_id_bytes = [data[pos], data[pos + 1], data[pos + 2], data[pos + 3]];
        let left_id = u32::from_le_bytes(left_id_bytes);
        pos += 4;

        let middle_id_bytes = [data[pos], data[pos + 1], data[pos + 2], data[pos + 3]];
        let middle_id = u32::from_le_bytes(middle_id_bytes);
        pos += 4;

        let right_id_bytes = [data[pos], data[pos + 1], data[pos + 2], data[pos + 3]];
        let right_id = u32::from_le_bytes(right_id_bytes);

        Ok(TSTNode {
            character,
            value,
            is_end_of_key,
            left_id: if left_id == 0 { None } else { Some(left_id) },
            middle_id: if middle_id == 0 {
                None
            } else {
                Some(middle_id)
            },
            right_id: if right_id == 0 { None } else { Some(right_id) },
        })
    }
}

// Function removed as it was unused
294  packages/data/tst/tests/basic_test.rs  Normal file
@@ -0,0 +1,294 @@
use std::env::temp_dir;
|
||||
use std::fs;
|
||||
use std::time::SystemTime;
|
||||
use tst::TST;
|
||||
|
||||
fn get_test_db_path() -> String {
|
||||
let timestamp = SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
|
||||
let path = temp_dir().join(format!("tst_test_{}", timestamp));
|
||||
|
||||
// If the path exists, remove it first
|
||||
if path.exists() {
|
||||
let _ = fs::remove_dir_all(&path);
|
||||
}
|
||||
|
||||
// Create the directory
|
||||
fs::create_dir_all(&path).unwrap();
|
||||
|
||||
path.to_string_lossy().to_string()
|
||||
}
|
||||
|
||||
fn cleanup_test_db(path: &str) {
|
||||
// Make sure to clean up properly
|
||||
let _ = fs::remove_dir_all(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_tst() {
|
||||
let path = get_test_db_path();
|
||||
|
||||
let result = TST::new(&path, true);
|
||||
match &result {
|
||||
Ok(_) => (),
|
||||
Err(e) => println!("Error creating TST: {:?}", e),
|
||||
}
|
||||
assert!(result.is_ok());
|
||||
|
||||
if let Ok(mut tst) = result {
|
||||
// Make sure we can perform a basic operation
|
||||
let set_result = tst.set("test_key", b"test_value".to_vec());
|
||||
assert!(set_result.is_ok());
|
||||
}
|
||||
|
||||
cleanup_test_db(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_and_get() {
|
||||
let path = get_test_db_path();
|
||||
|
||||
// Create a new TST with reset=true to ensure a clean state
|
||||
let result = TST::new(&path, true);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let mut tree = result.unwrap();
|
||||
|
||||
// Test setting and getting a key
|
||||
let key = "test_key";
|
||||
let value = b"test_value".to_vec();
|
||||
|
||||
    let set_result = tree.set(key, value.clone());
    assert!(set_result.is_ok());

    let get_result = tree.get(key);
    assert!(get_result.is_ok());
    assert_eq!(get_result.unwrap(), value);

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_get_nonexistent_key() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Test getting a key that doesn't exist
    let get_result = tree.get("nonexistent_key");
    assert!(get_result.is_err());

    cleanup_test_db(&path);
}

#[test]
fn test_delete() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Set a key
    let key = "delete_test";
    let value = b"to_be_deleted".to_vec();

    let set_result = tree.set(key, value);
    assert!(set_result.is_ok());

    // Verify it exists
    let get_result = tree.get(key);
    assert!(get_result.is_ok());

    // Delete it
    let delete_result = tree.delete(key);
    assert!(delete_result.is_ok());

    // Verify it's gone
    let get_after_delete = tree.get(key);
    assert!(get_after_delete.is_err());

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_multiple_keys() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Insert multiple keys - use fewer keys to avoid filling the lookup table
    let keys = ["apple", "banana", "cherry"];

    for (i, key) in keys.iter().enumerate() {
        let value = format!("value_{}", i).into_bytes();
        let set_result = tree.set(key, value);

        // Print error if set fails
        if set_result.is_err() {
            println!("Error setting key '{}': {:?}", key, set_result);
        }

        assert!(set_result.is_ok());
    }

    // Verify all keys exist
    for (i, key) in keys.iter().enumerate() {
        let expected_value = format!("value_{}", i).into_bytes();
        let get_result = tree.get(key);
        assert!(get_result.is_ok());
        assert_eq!(get_result.unwrap(), expected_value);
    }

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_list_prefix() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Insert keys with common prefixes - use fewer keys to avoid filling the lookup table
    let keys = ["apple", "application", "append", "banana", "bandana"];

    for key in &keys {
        let set_result = tree.set(key, key.as_bytes().to_vec());
        assert!(set_result.is_ok());
    }

    // Test prefix "app"
    let list_result = tree.list("app");
    assert!(list_result.is_ok());

    let app_keys = list_result.unwrap();

    // Print the keys for debugging
    println!("Keys with prefix 'app':");
    for key in &app_keys {
        println!("  {}", key);
    }

    // Check that each key is present
    assert!(app_keys.contains(&"apple".to_string()));
    assert!(app_keys.contains(&"application".to_string()));
    assert!(app_keys.contains(&"append".to_string()));

    // Test prefix "ban"
    let list_result = tree.list("ban");
    assert!(list_result.is_ok());

    let ban_keys = list_result.unwrap();
    assert!(ban_keys.contains(&"banana".to_string()));
    assert!(ban_keys.contains(&"bandana".to_string()));

    // Test non-existent prefix
    let list_result = tree.list("z");
    assert!(list_result.is_ok());

    let z_keys = list_result.unwrap();
    assert_eq!(z_keys.len(), 0);

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_getall_prefix() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Insert keys with common prefixes - use fewer keys to avoid filling the lookup table
    let keys = ["apple", "application", "append"];

    for key in &keys {
        let set_result = tree.set(key, key.as_bytes().to_vec());
        assert!(set_result.is_ok());
    }

    // Test getall with prefix "app"
    let getall_result = tree.getall("app");
    assert!(getall_result.is_ok());

    let app_values = getall_result.unwrap();

    // Convert values to strings for easier comparison
    let app_value_strings: Vec<String> = app_values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();

    // Print the values for debugging
    println!("Values with prefix 'app':");
    for value in &app_value_strings {
        println!("  {}", value);
    }

    // Check that each value is present
    assert!(app_value_strings.contains(&"apple".to_string()));
    assert!(app_value_strings.contains(&"application".to_string()));
    assert!(app_value_strings.contains(&"append".to_string()));

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_empty_prefix() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Insert some keys
    let keys = ["apple", "banana", "cherry"];

    for key in &keys {
        let set_result = tree.set(key, key.as_bytes().to_vec());
        assert!(set_result.is_ok());
    }

    // Test list with empty prefix (should return all keys)
    let list_result = tree.list("");
    assert!(list_result.is_ok());

    let all_keys = list_result.unwrap();

    // Print the keys for debugging
    println!("Keys with empty prefix:");
    for key in &all_keys {
        println!("  {}", key);
    }

    // Check that each key is present
    for key in &keys {
        assert!(all_keys.contains(&key.to_string()));
    }

    // Make sure to clean up properly
    cleanup_test_db(&path);
}
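For orientation, a minimal usage sketch of the key/value API these tests exercise, written against exactly the calls shown above (error handling elided with unwrap() as in the tests; `demo` and its argument are illustrative):

use tst::TST;

fn demo(path: &str) {
    let mut tree = TST::new(path, true).unwrap(); // reset=true -> clean state

    tree.set("apple", b"fruit".to_vec()).unwrap(); // insert
    assert_eq!(tree.get("apple").unwrap(), b"fruit".to_vec()); // point lookup

    let keys = tree.list("app").unwrap();     // keys under a prefix
    let values = tree.getall("app").unwrap(); // values under a prefix
    assert_eq!(keys.len(), values.len());

    tree.delete("apple").unwrap();            // remove; a later get() errors
    assert!(tree.get("apple").is_err());
}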
267  packages/data/tst/tests/prefix_test.rs  Normal file
@@ -0,0 +1,267 @@
use std::env::temp_dir;
use std::fs;
use std::time::SystemTime;
use tst::TST;

fn get_test_db_path() -> String {
    let timestamp = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap()
        .as_nanos();

    let path = temp_dir().join(format!("tst_prefix_test_{}", timestamp));

    // If the path exists, remove it first
    if path.exists() {
        let _ = fs::remove_dir_all(&path);
    }

    // Create the directory
    fs::create_dir_all(&path).unwrap();

    path.to_string_lossy().to_string()
}

fn cleanup_test_db(path: &str) {
    // Make sure to clean up properly
    let _ = fs::remove_dir_all(path);
}

#[test]
fn test_prefix_with_common_prefixes() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Insert keys with common prefixes
    let test_data = [
        ("test", b"value1".to_vec()),
        ("testing", b"value2".to_vec()),
        ("tested", b"value3".to_vec()),
        ("tests", b"value4".to_vec()),
        ("tester", b"value5".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone()).unwrap();
    }

    // Test prefix "test"
    let keys = tree.list("test").unwrap();
    assert_eq!(keys.len(), 5);

    for (key, _) in &test_data {
        assert!(keys.contains(&key.to_string()));
    }

    // Test prefix "teste"
    let keys = tree.list("teste").unwrap();
    assert_eq!(keys.len(), 2);
    assert!(keys.contains(&"tested".to_string()));
    assert!(keys.contains(&"tester".to_string()));

    cleanup_test_db(&path);
}

#[test]
fn test_prefix_with_different_prefixes() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Insert keys with different prefixes
    let test_data = [
        ("apple", b"fruit1".to_vec()),
        ("banana", b"fruit2".to_vec()),
        ("cherry", b"fruit3".to_vec()),
        ("date", b"fruit4".to_vec()),
        ("elderberry", b"fruit5".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone()).unwrap();
    }

    // Test each prefix
    for (key, _) in &test_data {
        let prefix = &key[0..1]; // First character
        let keys = tree.list(prefix).unwrap();
        assert!(keys.contains(&key.to_string()));
    }

    // Test non-existent prefix
    let keys = tree.list("z").unwrap();
    assert_eq!(keys.len(), 0);

    cleanup_test_db(&path);
}

#[test]
fn test_prefix_with_empty_string() {
    let path = get_test_db_path();

    // Create a new TST with reset=true to ensure a clean state
    let result = TST::new(&path, true);
    assert!(result.is_ok());

    let mut tree = result.unwrap();

    // Insert some keys
    let test_data = [
        ("apple", b"fruit1".to_vec()),
        ("banana", b"fruit2".to_vec()),
        ("cherry", b"fruit3".to_vec()),
    ];

    for (key, value) in &test_data {
        let set_result = tree.set(key, value.clone());
        assert!(set_result.is_ok());
    }

    // Test empty prefix (should return all keys)
    let list_result = tree.list("");
    assert!(list_result.is_ok());

    let keys = list_result.unwrap();

    // Print the keys for debugging
    println!("Keys with empty prefix:");
    for key in &keys {
        println!("  {}", key);
    }

    // Check that each key is present
    for (key, _) in &test_data {
        assert!(keys.contains(&key.to_string()));
    }

    // Make sure to clean up properly
    cleanup_test_db(&path);
}

#[test]
fn test_getall_with_prefix() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Insert keys with common prefixes
    let test_data = [
        ("test", b"value1".to_vec()),
        ("testing", b"value2".to_vec()),
        ("tested", b"value3".to_vec()),
        ("tests", b"value4".to_vec()),
        ("tester", b"value5".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone()).unwrap();
    }

    // Test getall with prefix "test"
    let values = tree.getall("test").unwrap();
    assert_eq!(values.len(), 5);

    for (_, value) in &test_data {
        assert!(values.contains(value));
    }

    cleanup_test_db(&path);
}

#[test]
fn test_prefix_with_unicode_characters() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Insert keys with Unicode characters
    let test_data = [
        ("café", b"coffee".to_vec()),
        ("cafétéria", b"cafeteria".to_vec()),
        ("caffè", b"italian coffee".to_vec()),
        ("café au lait", b"coffee with milk".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone()).unwrap();
    }

    // Test prefix "café"
    let keys = tree.list("café").unwrap();

    // Print the keys for debugging
    println!("Keys with prefix 'café':");
    for key in &keys {
        println!("  {}", key);
    }

    // Check that the keys we expect are present
    assert!(keys.contains(&"café".to_string()));
    assert!(keys.contains(&"café au lait".to_string()));

    // We don't assert on the exact count because Unicode handling can vary

    // Test prefix "caf"
    let keys = tree.list("caf").unwrap();

    // Print the keys for debugging
    println!("Keys with prefix 'caf':");
    for key in &keys {
        println!("  {}", key);
    }

    // Check that each key is present individually
    // Due to Unicode handling, we need to be careful with exact matching
    // The important thing is that we can find the keys we need

    // Check that we have at least the café and café au lait keys
    assert!(keys.contains(&"café".to_string()));
    assert!(keys.contains(&"café au lait".to_string()));

    // We don't assert on the exact count because Unicode handling can vary

    cleanup_test_db(&path);
}
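A note on why the Unicode test avoids exact counts: if prefix matching operates on bytes rather than characters, a multi-byte code point can sit on a prefix boundary, so the matched set depends on the backend's encoding handling. A small standalone illustration using only the standard library:

// "café" is 4 characters but 5 bytes in UTF-8: 'é' encodes as 0xC3 0xA9.
fn utf8_prefix_demo() {
    let s = "café";
    assert_eq!(s.chars().count(), 4);
    assert_eq!(s.len(), 5); // byte length, not char length

    // Byte-wise prefix tests: "cafétéria" shares the full "café" byte prefix,
    // while "caffè" diverges at the fourth byte ('f' vs 0xC3).
    assert!("cafétéria".as_bytes().starts_with("café".as_bytes()));
    assert!(!"caffè".as_bytes().starts_with("café".as_bytes()));
}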

#[test]
fn test_prefix_with_long_keys() {
    let path = get_test_db_path();

    let mut tree = TST::new(&path, true).unwrap();

    // Insert long keys
    let test_data = [
        (
            "this_is_a_very_long_key_for_testing_purposes_1",
            b"value1".to_vec(),
        ),
        (
            "this_is_a_very_long_key_for_testing_purposes_2",
            b"value2".to_vec(),
        ),
        (
            "this_is_a_very_long_key_for_testing_purposes_3",
            b"value3".to_vec(),
        ),
        ("this_is_another_long_key_for_testing", b"value4".to_vec()),
    ];

    for (key, value) in &test_data {
        tree.set(key, value.clone()).unwrap();
    }

    // Test prefix "this_is_a_very"
    let keys = tree.list("this_is_a_very").unwrap();
    assert_eq!(keys.len(), 3);

    // Test prefix "this_is"
    let keys = tree.list("this_is").unwrap();
    assert_eq!(keys.len(), 4);

    for (key, _) in &test_data {
        assert!(keys.contains(&key.to_string()));
    }

    cleanup_test_db(&path);
}
@@ -15,6 +15,7 @@ serde_json = { workspace = true }
rhai = { workspace = true }
log = { workspace = true }
url = { workspace = true }
redis = { workspace = true }

[dev-dependencies]
tempfile = { workspace = true }
@@ -43,6 +43,8 @@ pub mod rhai;
pub use config::KubernetesConfig;
pub use error::KubernetesError;
pub use kubernetes_manager::KubernetesManager;
#[cfg(feature = "rhai")]
pub use rhai::register_kubernetes_module;

// Re-export commonly used Kubernetes types
pub use k8s_openapi::api::apps::v1::{Deployment, ReplicaSet};
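Because `register_kubernetes_module` is gated behind the `rhai` feature, downstream code should gate its import the same way. A hypothetical downstream sketch (assuming the package is consumed as the `sal_kubernetes` crate):

use sal_kubernetes::{KubernetesConfig, KubernetesError, KubernetesManager};

// Only available when the crate is built with the "rhai" feature enabled.
#[cfg(feature = "rhai")]
use sal_kubernetes::register_kubernetes_module;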
@@ -59,605 +59,12 @@ where
    rt.block_on(future).map_err(kubernetes_error_to_rhai_error)
}
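The hunk above keeps only the tail of the async-to-sync bridge that every wrapper below relies on. For orientation, a minimal sketch of the whole helper, assuming a tokio runtime is created per call; only the final two lines are from the diff, the rest is an assumption about the elided part:

fn execute_async<F, T>(future: F) -> Result<T, Box<EvalAltResult>>
where
    F: std::future::Future<Output = Result<T, KubernetesError>>,
{
    // Build a runtime, turning construction failure into a Rhai runtime error.
    let rt = tokio::runtime::Runtime::new().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("Failed to create runtime: {e}").into(),
            rhai::Position::NONE,
        ))
    })?;
    // Block on the Kubernetes future and map its error into the Rhai error type.
    rt.block_on(future).map_err(kubernetes_error_to_rhai_error)
}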

/// Create a new KubernetesManager for the specified namespace
///
/// # Arguments
///
/// * `namespace` - The Kubernetes namespace to operate on
///
/// # Returns
///
/// * `Result<KubernetesManager, Box<EvalAltResult>>` - The manager instance or an error
fn kubernetes_manager_new(namespace: String) -> Result<KubernetesManager, Box<EvalAltResult>> {
    execute_async(KubernetesManager::new(namespace))
}

/// List all pods in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of pod names or an error
fn pods_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let pods = execute_async(km.pods_list())?;

    let pod_names: Array = pods
        .iter()
        .filter_map(|pod| pod.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(pod_names)
}

/// List all services in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of service names or an error
fn services_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let services = execute_async(km.services_list())?;

    let service_names: Array = services
        .iter()
        .filter_map(|service| service.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(service_names)
}

/// List all deployments in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of deployment names or an error
fn deployments_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let deployments = execute_async(km.deployments_list())?;

    let deployment_names: Array = deployments
        .iter()
        .filter_map(|deployment| deployment.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(deployment_names)
}

/// List all configmaps in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of configmap names or an error
fn configmaps_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let configmaps = execute_async(km.configmaps_list())?;

    let configmap_names: Array = configmaps
        .iter()
        .filter_map(|configmap| configmap.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(configmap_names)
}

/// List all secrets in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of secret names or an error
fn secrets_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let secrets = execute_async(km.secrets_list())?;

    let secret_names: Array = secrets
        .iter()
        .filter_map(|secret| secret.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(secret_names)
}

/// Create a pod with a single container (backward compatible version)
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod
/// * `image` - Container image to use
/// * `labels` - Optional labels as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
fn pod_create(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    labels: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };

    let pod = execute_async(km.pod_create(&name, &image, labels_map, None))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

/// Create a pod with a single container and environment variables
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod
/// * `image` - Container image to use
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
fn pod_create_with_env(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };

    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);

    let pod = execute_async(km.pod_create(&name, &image, labels_map, env_vars_map))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

/// Create a service
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the service
/// * `selector` - Labels to select pods as a Map
/// * `port` - Port to expose
/// * `target_port` - Target port on pods (optional, defaults to port)
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
fn service_create(
    km: &mut KubernetesManager,
    name: String,
    selector: Map,
    port: i64,
    target_port: i64,
) -> Result<String, Box<EvalAltResult>> {
    let selector_map: std::collections::HashMap<String, String> = selector
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();

    let target_port_opt = if target_port == 0 {
        None
    } else {
        Some(target_port as i32)
    };
    let service =
        execute_async(km.service_create(&name, selector_map, port as i32, target_port_opt))?;
    Ok(service.metadata.name.unwrap_or(name))
}

/// Create a deployment
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the deployment
/// * `image` - Container image to use
/// * `replicas` - Number of replicas
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
fn deployment_create(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    replicas: i64,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };

    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);

    let deployment = execute_async(km.deployment_create(
        &name,
        &image,
        replicas as i32,
        labels_map,
        env_vars_map,
    ))?;
    Ok(deployment.metadata.name.unwrap_or(name))
}

/// Create a ConfigMap
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the ConfigMap
/// * `data` - Data as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - ConfigMap name or an error
fn configmap_create(
    km: &mut KubernetesManager,
    name: String,
    data: Map,
) -> Result<String, Box<EvalAltResult>> {
    let data_map: std::collections::HashMap<String, String> = data
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();

    let configmap = execute_async(km.configmap_create(&name, data_map))?;
    Ok(configmap.metadata.name.unwrap_or(name))
}

/// Create a Secret
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the Secret
/// * `data` - Data as a Map (will be base64 encoded)
/// * `secret_type` - Type of secret (optional, defaults to "Opaque")
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Secret name or an error
fn secret_create(
    km: &mut KubernetesManager,
    name: String,
    data: Map,
    secret_type: String,
) -> Result<String, Box<EvalAltResult>> {
    let data_map: std::collections::HashMap<String, String> = data
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();

    let secret_type_opt = if secret_type.is_empty() {
        None
    } else {
        Some(secret_type.as_str())
    };
    let secret = execute_async(km.secret_create(&name, data_map, secret_type_opt))?;
    Ok(secret.metadata.name.unwrap_or(name))
}

/// Get a pod by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
fn pod_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let pod = execute_async(km.pod_get(&name))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

/// Get a service by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the service to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
fn service_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let service = execute_async(km.service_get(&name))?;
    Ok(service.metadata.name.unwrap_or(name))
}

/// Get a deployment by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the deployment to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
fn deployment_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let deployment = execute_async(km.deployment_get(&name))?;
    Ok(deployment.metadata.name.unwrap_or(name))
}

/// Delete resources matching a PCRE pattern
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `pattern` - PCRE pattern to match resource names against
///
/// # Returns
///
/// * `Result<i64, Box<EvalAltResult>>` - Number of resources deleted or an error
fn delete(km: &mut KubernetesManager, pattern: String) -> Result<i64, Box<EvalAltResult>> {
    let deleted_count = execute_async(km.delete(&pattern))?;

    Ok(deleted_count as i64)
}

/// Create a namespace (idempotent operation)
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the namespace to create
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn namespace_create(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.namespace_create(&name))
}

/// Delete a namespace (destructive operation)
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the namespace to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn namespace_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.namespace_delete(&name))
}

/// Check if a namespace exists
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the namespace to check
///
/// # Returns
///
/// * `Result<bool, Box<EvalAltResult>>` - True if namespace exists, false otherwise
fn namespace_exists(km: &mut KubernetesManager, name: String) -> Result<bool, Box<EvalAltResult>> {
    execute_async(km.namespace_exists(&name))
}

/// List all namespaces
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of namespace names or an error
fn namespaces_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let namespaces = execute_async(km.namespaces_list())?;

    let namespace_names: Array = namespaces
        .iter()
        .filter_map(|ns| ns.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();

    Ok(namespace_names)
}

/// Get resource counts for the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Map, Box<EvalAltResult>>` - Map of resource counts by type or an error
fn resource_counts(km: &mut KubernetesManager) -> Result<Map, Box<EvalAltResult>> {
    let counts = execute_async(km.resource_counts())?;

    let mut rhai_map = Map::new();
    for (key, value) in counts {
        rhai_map.insert(key.into(), Dynamic::from(value as i64));
    }

    Ok(rhai_map)
}

/// Deploy a complete application with deployment and service
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the application
/// * `image` - Container image to use
/// * `replicas` - Number of replicas
/// * `port` - Port the application listens on
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Success message or an error
fn deploy_application(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    replicas: i64,
    port: i64,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };

    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);

    execute_async(km.deploy_application(
        &name,
        &image,
        replicas as i32,
        port as i32,
        labels_map,
        env_vars_map,
    ))?;

    Ok(format!("Successfully deployed application '{name}'"))
}

/// Delete a specific pod by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the pod to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn pod_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.pod_delete(&name))
}

/// Delete a specific service by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the service to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn service_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.service_delete(&name))
}

/// Delete a specific deployment by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the deployment to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn deployment_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.deployment_delete(&name))
}

/// Delete a ConfigMap by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the ConfigMap to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn configmap_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.configmap_delete(&name))
}

/// Delete a Secret by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the Secret to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn secret_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.secret_delete(&name))
}

/// Get the namespace this manager operates on
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `String` - The namespace name
fn kubernetes_manager_namespace(km: &mut KubernetesManager) -> String {
    km.namespace().to_string()
}

/// Helper function for error conversion
fn kubernetes_error_to_rhai_error(error: KubernetesError) -> Box<EvalAltResult> {
    Box::new(EvalAltResult::ErrorRuntime(
        format!("Kubernetes error: {error}").into(),
        rhai::Position::NONE,
    ))
}

/// Register Kubernetes module functions with the Rhai engine
@@ -720,10 +127,293 @@ pub fn register_kubernetes_module(engine: &mut Engine) -> Result<(), Box<EvalAlt
    Ok(())
}

// Helper function for error conversion
fn kubernetes_error_to_rhai_error(error: KubernetesError) -> Box<EvalAltResult> {
    Box::new(EvalAltResult::ErrorRuntime(
        format!("Kubernetes error: {error}").into(),
        rhai::Position::NONE,
    ))
}

// KubernetesManager constructor and methods
fn kubernetes_manager_new(namespace: String) -> Result<KubernetesManager, Box<EvalAltResult>> {
    execute_async(KubernetesManager::new(namespace))
}

fn kubernetes_manager_namespace(km: &mut KubernetesManager) -> String {
    km.namespace().to_string()
}

// Resource listing functions
fn pods_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let pods = execute_async(km.pods_list())?;
    let pod_names: Array = pods
        .iter()
        .filter_map(|pod| pod.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(pod_names)
}

fn services_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let services = execute_async(km.services_list())?;
    let service_names: Array = services
        .iter()
        .filter_map(|service| service.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(service_names)
}

fn deployments_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let deployments = execute_async(km.deployments_list())?;
    let deployment_names: Array = deployments
        .iter()
        .filter_map(|deployment| deployment.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(deployment_names)
}

fn configmaps_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let configmaps = execute_async(km.configmaps_list())?;
    let configmap_names: Array = configmaps
        .iter()
        .filter_map(|configmap| configmap.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(configmap_names)
}

fn secrets_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let secrets = execute_async(km.secrets_list())?;
    let secret_names: Array = secrets
        .iter()
        .filter_map(|secret| secret.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(secret_names)
}

// Resource creation functions
fn pod_create(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    labels: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };
    let pod = execute_async(km.pod_create(&name, &image, labels_map, None))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

fn pod_create_with_env(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };
    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
    let pod = execute_async(km.pod_create(&name, &image, labels_map, env_vars_map))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

fn service_create(
    km: &mut KubernetesManager,
    name: String,
    selector: Map,
    port: i64,
    target_port: i64,
) -> Result<String, Box<EvalAltResult>> {
    let selector_map: std::collections::HashMap<String, String> = selector
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();
    let target_port_opt = if target_port == 0 {
        None
    } else {
        Some(target_port as i32)
    };
    let service =
        execute_async(km.service_create(&name, selector_map, port as i32, target_port_opt))?;
    Ok(service.metadata.name.unwrap_or(name))
}

fn deployment_create(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    replicas: i64,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };
    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
    let deployment = execute_async(km.deployment_create(
        &name,
        &image,
        replicas as i32,
        labels_map,
        env_vars_map,
    ))?;
    Ok(deployment.metadata.name.unwrap_or(name))
}

fn configmap_create(
    km: &mut KubernetesManager,
    name: String,
    data: Map,
) -> Result<String, Box<EvalAltResult>> {
    let data_map: std::collections::HashMap<String, String> = data
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();
    let configmap = execute_async(km.configmap_create(&name, data_map))?;
    Ok(configmap.metadata.name.unwrap_or(name))
}

fn secret_create(
    km: &mut KubernetesManager,
    name: String,
    data: Map,
    secret_type: String,
) -> Result<String, Box<EvalAltResult>> {
    let data_map: std::collections::HashMap<String, String> = data
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();
    let secret_type_opt = if secret_type.is_empty() {
        None
    } else {
        Some(secret_type.as_str())
    };
    let secret = execute_async(km.secret_create(&name, data_map, secret_type_opt))?;
    Ok(secret.metadata.name.unwrap_or(name))
}

// Resource get functions
fn pod_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let pod = execute_async(km.pod_get(&name))?;
    Ok(pod.metadata.name.unwrap_or(name))
}

fn service_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let service = execute_async(km.service_get(&name))?;
    Ok(service.metadata.name.unwrap_or(name))
}

fn deployment_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
    let deployment = execute_async(km.deployment_get(&name))?;
    Ok(deployment.metadata.name.unwrap_or(name))
}

// Resource deletion functions
fn delete(km: &mut KubernetesManager, pattern: String) -> Result<i64, Box<EvalAltResult>> {
    let deleted_count = execute_async(km.delete(&pattern))?;
    Ok(deleted_count as i64)
}

fn pod_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.pod_delete(&name))
}

fn service_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.service_delete(&name))
}

fn deployment_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.deployment_delete(&name))
}

fn configmap_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.configmap_delete(&name))
}

fn secret_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.secret_delete(&name))
}

// Namespace management functions
fn namespace_create(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.namespace_create(&name))
}

fn namespace_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
    execute_async(km.namespace_delete(&name))
}

fn namespace_exists(km: &mut KubernetesManager, name: String) -> Result<bool, Box<EvalAltResult>> {
    execute_async(km.namespace_exists(&name))
}

fn namespaces_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
    let namespaces = execute_async(km.namespaces_list())?;
    let namespace_names: Array = namespaces
        .iter()
        .filter_map(|ns| ns.metadata.name.as_ref())
        .map(|name| Dynamic::from(name.clone()))
        .collect();
    Ok(namespace_names)
}

// Utility and convenience functions
fn resource_counts(km: &mut KubernetesManager) -> Result<Map, Box<EvalAltResult>> {
    let counts = execute_async(km.resource_counts())?;
    let mut rhai_map = Map::new();
    for (key, value) in counts {
        rhai_map.insert(key.into(), Dynamic::from(value as i64));
    }
    Ok(rhai_map)
}

fn deploy_application(
    km: &mut KubernetesManager,
    name: String,
    image: String,
    replicas: i64,
    port: i64,
    labels: Map,
    env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
    let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
        None
    } else {
        Some(
            labels
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        )
    };
    let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
    execute_async(km.deploy_application(
        &name,
        &image,
        replicas as i32,
        port as i32,
        labels_map,
        env_vars_map,
    ))?;
    Ok(format!("Successfully deployed application '{name}'"))
}
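For context, a sketch of how the registered functions above might be driven from a Rhai script via Rust; this assumes `register_kubernetes_module` is in scope, a reachable cluster behind the default kubeconfig, and an illustrative "default" namespace:

use rhai::Engine;

fn run_script() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();
    // Register the kubernetes functions shown above with the engine.
    register_kubernetes_module(&mut engine)?;

    // Functions registered with a &mut first argument are callable method-style.
    engine.run(
        r#"
            let km = kubernetes_manager_new("default");
            let pods = km.pods_list();
            print(`found ${pods.len()} pods in ${km.kubernetes_manager_namespace()}`);
        "#,
    )?;
    Ok(())
}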
208  packages/system/virt/src/cloudhv/builder.rs  Normal file
@@ -0,0 +1,208 @@
use crate::cloudhv::{vm_create, vm_start, CloudHvError, VmSpec};
use crate::image_prep::{image_prepare, Flavor as ImgFlavor, ImagePrepOptions, NetPlanOpts};
use crate::cloudhv::net::{NetworkingProfileSpec, DefaultNatOptions, BridgeOptions};

/// Cloud Hypervisor VM Builder focused on Rhai ergonomics.
///
/// Defaults enforced:
/// - kernel: /images/hypervisor-fw (firmware file in images directory)
/// - seccomp: false (pushed via extra args)
/// - serial: tty, console: off (already added by vm_start)
/// - cmdline: "console=ttyS0 root=/dev/vda1 rw"
/// - vcpus: 2
/// - memory_mb: 2048
///
/// Disk can be provided directly or prepared from a flavor (/images source).
#[derive(Debug, Clone)]
pub struct CloudHvBuilder {
    id: String,
    disk_path: Option<String>,
    flavor: Option<ImgFlavor>,
    memory_mb: u32,
    vcpus: u32,
    cmdline: Option<String>,
    extra_args: Vec<String>,
    no_default_net: bool,
    /// Optional networking profile driving host provisioning and NIC injection
    net_profile: Option<NetworkingProfileSpec>,
}

impl CloudHvBuilder {
    pub fn new(id: &str) -> Self {
        Self {
            id: id.to_string(),
            disk_path: None,
            flavor: None,
            memory_mb: 2048,
            vcpus: 2,
            cmdline: Some("console=ttyS0 root=/dev/vda1 rw".to_string()),
            // Enforce --seccomp false by default using extra args
            extra_args: vec!["--seccomp".into(), "false".into()],
            no_default_net: false,
            net_profile: None,
        }
    }

    pub fn disk(&mut self, path: &str) -> &mut Self {
        self.disk_path = Some(path.to_string());
        self.flavor = None;
        self
    }

    pub fn disk_from_flavor(&mut self, flavor: &str) -> &mut Self {
        let f = match flavor {
            "ubuntu" | "Ubuntu" | "UBUNTU" => ImgFlavor::Ubuntu,
            "alpine" | "Alpine" | "ALPINE" => ImgFlavor::Alpine,
            _ => ImgFlavor::Ubuntu,
        };
        self.flavor = Some(f);
        self.disk_path = None;
        self
    }

    pub fn memory_mb(&mut self, mb: u32) -> &mut Self {
        if mb > 0 {
            self.memory_mb = mb;
        }
        self
    }

    pub fn vcpus(&mut self, v: u32) -> &mut Self {
        if v > 0 {
            self.vcpus = v;
        }
        self
    }

    pub fn cmdline(&mut self, c: &str) -> &mut Self {
        self.cmdline = Some(c.to_string());
        self
    }

    pub fn extra_arg(&mut self, a: &str) -> &mut Self {
        if !a.trim().is_empty() {
            self.extra_args.push(a.to_string());
        }
        self
    }

    /// Suppress the default host networking provisioning and NIC injection.
    /// Internally, we set a sentinel consumed by vm_start.
    pub fn no_default_net(&mut self) -> &mut Self {
        self.no_default_net = true;
        // add sentinel consumed in vm_start
        if !self
            .extra_args
            .iter()
            .any(|e| e.as_str() == "--no-default-net")
        {
            self.extra_args.push("--no-default-net".into());
        }
        self
    }

    /// Explicitly select the Default NAT networking profile (bridge + NAT + dnsmasq; IPv6 via Mycelium if enabled).
    pub fn network_default_nat(&mut self) -> &mut Self {
        self.net_profile = Some(NetworkingProfileSpec::DefaultNat(DefaultNatOptions::default()));
        self
    }

    /// Explicitly select a no-network profile (no NIC injection and no host provisioning).
    pub fn network_none(&mut self) -> &mut Self {
        self.net_profile = Some(NetworkingProfileSpec::NoNet);
        // Keep backward compatibility: also set sentinel to suppress any legacy default path
        if !self
            .extra_args
            .iter()
            .any(|e| e.as_str() == "--no-default-net")
        {
            self.extra_args.push("--no-default-net".into());
        }
        self
    }

    /// Ensure only bridge + tap, without NAT or DHCP (L2-only setups). Uses defaults if not overridden later.
    pub fn network_bridge_only(&mut self) -> &mut Self {
        self.net_profile = Some(NetworkingProfileSpec::BridgeOnly(BridgeOptions::default()));
        self
    }

    /// Provide a custom CH --net configuration and disable host provisioning.
    pub fn network_custom_cli<S: Into<String>>(&mut self, args: Vec<S>) -> &mut Self {
        self.net_profile = Some(NetworkingProfileSpec::CustomCli(
            args.into_iter().map(|s| s.into()).collect(),
        ));
        self
    }

    /// Resolve absolute path to hypervisor-fw from /images
    fn resolve_hypervisor_fw() -> Result<String, CloudHvError> {
        let p = "/images/hypervisor-fw";
        if std::path::Path::new(p).exists() {
            Ok(p.to_string())
        } else {
            Err(CloudHvError::DependencyMissing(format!(
                "firmware not found: {} (expected hypervisor-fw in /images)",
                p
            )))
        }
    }

    /// Prepare disk if needed and return final disk path.
    /// For Ubuntu flavor, this will:
    /// - copy source to per-VM work qcow2
    /// - mount, retag UUIDs, fstab/grub/netplan adjustments
    /// - convert to raw under the VM dir and return that raw path
    fn ensure_disk(&self) -> Result<String, CloudHvError> {
        if let Some(p) = &self.disk_path {
            return Ok(p.clone());
        }
        if let Some(f) = &self.flavor {
            // Use defaults: DHCPv4, placeholder static IPv6
            let opts = ImagePrepOptions {
                flavor: f.clone(),
                id: self.id.clone(),
                source: None,
                target_dir: None,
                net: NetPlanOpts::default(),
                disable_cloud_init_net: true,
            };
            let res = image_prepare(&opts).map_err(|e| CloudHvError::CommandFailed(e.to_string()))?;
            return Ok(res.raw_disk);
        }
        Err(CloudHvError::InvalidSpec(
            "no disk configured; set .disk(path) or .disk_from_flavor(flavor)".into(),
        ))
    }

    /// Build final VmSpec and start the VM.
    pub fn launch(&mut self) -> Result<String, CloudHvError> {
        // Resolve hypervisor-fw absolute path
        let kernel_path = Self::resolve_hypervisor_fw()?;
        // Disk
        let disk_path = self.ensure_disk()?;

        let spec = VmSpec {
            id: self.id.clone(),
            // We use direct kernel boot with hypervisor-fw per requirements.
            kernel_path: Some(kernel_path),
            initramfs_path: None,
            firmware_path: None,
            disk_path,
            api_socket: "".into(),
            vcpus: self.vcpus,
            memory_mb: self.memory_mb,
            cmdline: self.cmdline.clone(),
            extra_args: if self.extra_args.is_empty() {
                None
            } else {
                Some(self.extra_args.clone())
            },
            net_profile: self.net_profile.clone(),
        };

        let id = vm_create(&spec)?;
        vm_start(&id)?;
        Ok(id)
    }
}
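A typical builder flow, using only methods defined above (the VM id, sizes, and flavor are illustrative; launch() creates and starts the VM and returns its id):

fn boot_example() -> Result<String, CloudHvError> {
    let mut b = CloudHvBuilder::new("demo-vm");
    // "ubuntu" triggers image preparation from /images via ensure_disk().
    b.disk_from_flavor("ubuntu")
        .memory_mb(4096)
        .vcpus(4)
        .network_default_nat();
    b.launch()
}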
952  packages/system/virt/src/cloudhv/mod.rs  Normal file
@@ -0,0 +1,952 @@
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::fmt;
use std::fs;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;

use sal_os;
use sal_process;
use crate::qcow2;
use crate::cloudhv::net::{NetworkingProfileSpec, DefaultNatOptions};

pub mod builder;
pub mod net;

/// Error type for Cloud Hypervisor operations
#[derive(Debug)]
pub enum CloudHvError {
    CommandFailed(String),
    IoError(String),
    JsonError(String),
    DependencyMissing(String),
    InvalidSpec(String),
    NotFound(String),
}

impl fmt::Display for CloudHvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CloudHvError::CommandFailed(e) => write!(f, "{}", e),
            CloudHvError::IoError(e) => write!(f, "IO error: {}", e),
            CloudHvError::JsonError(e) => write!(f, "JSON error: {}", e),
            CloudHvError::DependencyMissing(e) => write!(f, "Dependency missing: {}", e),
            CloudHvError::InvalidSpec(e) => write!(f, "Invalid spec: {}", e),
            CloudHvError::NotFound(e) => write!(f, "{}", e),
        }
    }
}

impl Error for CloudHvError {}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmSpec {
    pub id: String,
    /// Optional for firmware boot; required for direct kernel boot
    pub kernel_path: Option<String>,
    /// Optional initramfs when using direct kernel boot
    pub initramfs_path: Option<String>,
    /// Optional for direct kernel boot; required for firmware boot
    pub firmware_path: Option<String>,
    /// Disk image path (qcow2 or raw)
    pub disk_path: String,
    /// API socket path for ch-remote and management
    pub api_socket: String,
    /// vCPUs to boot with
    pub vcpus: u32,
    /// Memory in MB
    pub memory_mb: u32,
    /// Kernel cmdline (only used for direct kernel boot)
    pub cmdline: Option<String>,
    /// Extra args (raw) if you need to extend; keep minimal for Phase 2
    pub extra_args: Option<Vec<String>>,
    /// Optional networking profile; when None, behavior follows explicit --net/--no-default-net or defaults
    #[serde(default)]
    pub net_profile: Option<NetworkingProfileSpec>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRuntime {
    /// PID of cloud-hypervisor process if running
    pub pid: Option<i64>,
    /// Last known status: "stopped" | "running"
    pub status: String,
    /// Console log file path
    pub log_file: String,
    /// Bridge name used for networking discovery (if applicable)
    #[serde(default)]
    pub bridge_name: Option<String>,
    /// dnsmasq lease file used (if applicable)
    #[serde(default)]
    pub lease_file: Option<String>,
    /// Stable MAC used for NIC injection (derived from VM id)
    #[serde(default)]
    pub mac: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRecord {
    pub spec: VmSpec,
    pub runtime: VmRuntime,
}

fn ensure_deps() -> Result<(), CloudHvError> {
    if sal_process::which("cloud-hypervisor-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "cloud-hypervisor-static not found on PATH. Install Cloud Hypervisor static binary.".into(),
        ));
    }
    if sal_process::which("ch-remote-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "ch-remote-static not found on PATH. Install Cloud Hypervisor tools (static).".into(),
        ));
    }
    Ok(())
}

fn hero_vm_root() -> PathBuf {
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
    Path::new(&home).join("hero/virt/vms")
}

fn vm_dir(id: &str) -> PathBuf {
    hero_vm_root().join(id)
}

fn vm_json_path(id: &str) -> PathBuf {
    vm_dir(id).join("vm.json")
}

// Attempt to resolve a VM record across both the current user's HOME and root HOME.
// This handles cases where the VM was created/launched under sudo (HOME=/root).
fn resolve_vm_json_path(id: &str) -> Option<PathBuf> {
    let candidates = vec![
        hero_vm_root(), // $HOME/hero/virt/vms
        Path::new("/root/hero/virt/vms").to_path_buf(),
    ];
    for base in candidates {
        let p = base.join(id).join("vm.json");
        if p.exists() {
            return Some(p);
        }
    }
    None
}

fn vm_log_path(id: &str) -> PathBuf {
    vm_dir(id).join("logs/console.log")
}

/// Attempt to resolve an API socket across both the current user's HOME and root HOME.
/// This handles cases where the VM was launched under sudo (HOME=/root).
fn resolve_vm_api_socket_path(id: &str) -> Option<PathBuf> {
    let candidates = vec![
        hero_vm_root(), // $HOME/hero/virt/vms
        Path::new("/root/hero/virt/vms").to_path_buf(),
    ];
    for base in candidates {
        let p = base.join(id).join("api.sock");
        if p.exists() {
            return Some(p);
        }
    }
    None
}

/// Query cloud-hypervisor for the first NIC's tap and mac via ch-remote-static.
/// Returns (tap_name, mac_lower) if successful.
fn ch_query_tap_mac(api_sock: &Path) -> Option<(String, String)> {
    let cmd = format!(
        "ch-remote-static --api-socket {} info",
        shell_escape(&api_sock.to_string_lossy())
    );
    if let Ok(res) = sal_process::run(&cmd).silent(true).die(false).execute() {
        if res.success {
            if let Ok(v) = serde_json::from_str::<serde_json::Value>(&res.stdout) {
                if let Some(net0) = v
                    .get("config")
                    .and_then(|c| c.get("net"))
                    .and_then(|n| n.get(0))
                {
                    let tap = net0.get("tap").and_then(|t| t.as_str()).unwrap_or("").to_string();
                    let mac = net0.get("mac").and_then(|m| m.as_str()).unwrap_or("").to_string();
                    if !tap.is_empty() && !mac.is_empty() {
                        return Some((tap, mac.to_lowercase()));
                    }
                }
            }
        }
    }
    None
}
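For reference, a small self-contained demo of the JSON shape ch_query_tap_mac() expects from the `info` call above; the field names match the accessors in the function, but the sample payload itself is an assumption about cloud-hypervisor's output:

fn parse_demo() {
    // Hypothetical trimmed `info` response containing one NIC entry.
    let sample = r#"{"config":{"net":[{"tap":"tap-vm1","mac":"AA:BB:CC:00:11:22"}]}}"#;
    let v: serde_json::Value = serde_json::from_str(sample).unwrap();
    let net0 = &v["config"]["net"][0];
    assert_eq!(net0["tap"].as_str(), Some("tap-vm1"));
    // The function lowercases the MAC before returning it.
    assert_eq!(
        net0["mac"].as_str().map(str::to_lowercase).as_deref(),
        Some("aa:bb:cc:00:11:22")
    );
}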
|
||||
/// Infer the bridge name a tap device is attached to by parsing `ip -o link show <tap>` output.
|
||||
fn bridge_name_for_tap(tap: &str) -> Option<String> {
|
||||
let cmd = format!("ip -o link show {}", shell_escape(tap));
|
||||
if let Ok(res) = sal_process::run(&cmd).silent(true).die(false).execute() {
|
||||
if res.success {
|
||||
for line in res.stdout.lines() {
|
||||
if let Some(idx) = line.find(" master ") {
|
||||
let rest = &line[idx + " master ".len()..];
|
||||
let name = rest.split_whitespace().next().unwrap_or("");
|
||||
if !name.is_empty() {
|
||||
return Some(name.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}

fn vm_pid_path(id: &str) -> PathBuf {
    vm_dir(id).join("pid")
}

fn write_json(path: &Path, value: &serde_json::Value) -> Result<(), CloudHvError> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    }
    let s = serde_json::to_string_pretty(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    fs::write(path, s).map_err(|e| CloudHvError::IoError(e.to_string()))
}

fn read_json(path: &Path) -> Result<serde_json::Value, CloudHvError> {
    let content = fs::read_to_string(path).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    serde_json::from_str(&content).map_err(|e| CloudHvError::JsonError(e.to_string()))
}

fn proc_exists(pid: i64) -> bool {
    #[cfg(target_os = "linux")]
    {
        Path::new(&format!("/proc/{}", pid)).exists()
    }
    #[cfg(not(target_os = "linux"))]
    {
        // Minimal check for non-Linux; try a kill -0 style command
        let res = sal_process::run(&format!("kill -0 {}", pid)).die(false).silent(true).execute();
        res.map(|r| r.success).unwrap_or(false)
    }
}

/// Create and persist a VM spec
pub fn vm_create(spec: &VmSpec) -> Result<String, CloudHvError> {
    // Validate inputs minimally
    if spec.id.trim().is_empty() {
        return Err(CloudHvError::InvalidSpec("spec.id must not be empty".into()));
    }
    // Validate boot method: either firmware_path exists or kernel_path exists
    let has_fw = spec
        .firmware_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);
    let has_kernel = spec
        .kernel_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);

    if !(has_fw || has_kernel) {
        return Err(CloudHvError::InvalidSpec(
            "either firmware_path or kernel_path must be set to an existing file".into(),
        ));
    }

    if !Path::new(&spec.disk_path).exists() {
        return Err(CloudHvError::InvalidSpec(format!(
            "disk_path not found: {}",
            &spec.disk_path
        )));
    }
    if spec.vcpus == 0 {
        return Err(CloudHvError::InvalidSpec("vcpus must be >= 1".into()));
    }
    if spec.memory_mb == 0 {
        return Err(CloudHvError::InvalidSpec("memory_mb must be > 0".into()));
    }

    // If a VM with this id already exists, ensure it's not running to avoid clobber + resource conflicts
    let json_path = vm_json_path(&spec.id);
    if json_path.exists() {
        if let Ok(value) = read_json(&json_path) {
            if let Ok(existing) = serde_json::from_value::<VmRecord>(value.clone()) {
                if let Some(pid) = existing.runtime.pid {
                    if proc_exists(pid) {
                        return Err(CloudHvError::CommandFailed(format!(
                            "VM '{}' already exists and is running with pid {}. Stop or delete it first, or choose a different id.",
                            spec.id, pid
                        )));
                    }
                }
            }
        }
    }

    // Prepare directory layout
    let dir = vm_dir(&spec.id);
    sal_os::mkdir(
        dir.to_str()
            .unwrap_or_else(|| "/tmp/hero/virt/vms/__invalid__"),
    )
    .map_err(|e| CloudHvError::IoError(e.to_string()))?;
    let log_dir = dir.join("logs");
    sal_os::mkdir(log_dir.to_str().unwrap()).map_err(|e| CloudHvError::IoError(e.to_string()))?;

    // Build runtime (preserve prior metadata if present; it will be refreshed on start)
    let mut runtime = VmRuntime {
        pid: None,
        status: "stopped".into(),
        log_file: vm_log_path(&spec.id).to_string_lossy().into_owned(),
        bridge_name: None,
        lease_file: None,
        mac: None,
    };
    if json_path.exists() {
        if let Ok(value) = read_json(&json_path) {
            if let Ok(existing) = serde_json::from_value::<VmRecord>(value) {
                if !existing.runtime.log_file.is_empty() {
                    runtime.log_file = existing.runtime.log_file;
                }
                runtime.bridge_name = existing.runtime.bridge_name;
                runtime.lease_file = existing.runtime.lease_file;
                runtime.mac = existing.runtime.mac;
            }
        }
    }

    // Persist record (spec updated, runtime preserved/reset to stopped)
    let rec = VmRecord {
        spec: spec.clone(),
        runtime,
    };
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&json_path, &value)?;

    Ok(spec.id.clone())
}
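
// Minimal usage sketch (assumes VmSpec implements Default; field values illustrative):
//   let id = vm_create(&VmSpec {
//       id: "demo".into(),
//       disk_path: "/images/demo/disk.raw".into(),
//       kernel_path: Some("/images/vmlinux".into()),
//       vcpus: 2,
//       memory_mb: 2048,
//       ..Default::default()
//   })?;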

/// Start a VM using cloud-hypervisor
pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
    ensure_deps()?;

    // Load record
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Prepare invocation
    let api_socket = if rec.spec.api_socket.trim().is_empty() {
        vm_dir(id).join("api.sock").to_string_lossy().into_owned()
    } else {
        rec.spec.api_socket.clone()
    };
    let log_file = vm_log_path(id).to_string_lossy().into_owned();

    // Ensure the API socket directory exists and remove any stale socket file
    let api_path = Path::new(&api_socket);
    if let Some(parent) = api_path.parent() {
        fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    }
    // Best-effort removal of a stale socket
    let _ = fs::remove_file(&api_path);

    // Preflight disk: if the source is qcow2, convert to raw to avoid CH "Compressed blocks not supported".
    // Robust conversion:
    // - Remove any stale destination
    // - Try a direct convert to the destination file
    // - On failure (e.g., byte-range lock issues), fall back to piping stdout into dd
    let mut disk_to_use = rec.spec.disk_path.clone();
    if let Ok(info) = qcow2::info(&disk_to_use) {
        if info.get("format").and_then(|v| v.as_str()) == Some("qcow2") {
            let dest = vm_dir(id).join("disk.raw").to_string_lossy().into_owned();
            // Best-effort removal of a stale target file to avoid locking errors
            let _ = fs::remove_file(&dest);

            // Attempt 1: normal qemu-img convert to the dest file
            let cmd1 = format!(
                "qemu-img convert -O raw {} {}",
                shell_escape(&disk_to_use),
                shell_escape(&dest)
            );
            let attempt1 = sal_process::run(&cmd1).silent(true).die(false).execute();

            let mut converted_ok = false;
            let mut err1: Option<String> = None;

            // Match once so attempt1 is consumed exactly once (avoids a use-after-move)
            match attempt1 {
                Ok(res) if res.success => converted_ok = true,
                Ok(res) => err1 = Some(format!("{}{}", res.stdout, res.stderr)),
                Err(e) => err1 = Some(e.to_string()),
            }

            if !converted_ok {
                // Attempt 2: pipe via stdout into dd (avoids qemu-img destination locking semantics on some FS)
                let heredoc2 = format!(
                    "bash -e -s <<'EOF'\nset -euo pipefail\nqemu-img convert -O raw {} - | dd of={} bs=4M status=none\nEOF\n",
                    shell_escape(&disk_to_use),
                    shell_escape(&dest)
                );
                match sal_process::run(&heredoc2).silent(true).die(false).execute() {
                    Ok(res) if res.success => {
                        converted_ok = true;
                    }
                    Ok(res) => {
                        let mut msg = String::from("Failed converting qcow2 to raw.");
                        if let Some(e1) = err1 {
                            msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
                        }
                        msg.push_str(&format!("\nSecond attempt error:\n{}{}", res.stdout, res.stderr));
                        return Err(CloudHvError::CommandFailed(msg));
                    }
                    Err(e) => {
                        let mut msg = String::from("Failed converting qcow2 to raw.");
                        if let Some(e1) = err1 {
                            msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
                        }
                        msg.push_str(&format!("\nSecond attempt error:\n{}", e));
                        return Err(CloudHvError::CommandFailed(msg));
                    }
                }
            }

            if converted_ok {
                disk_to_use = dest;
            }
        }
    }

    // Consolidate extra --disk occurrences from spec.extra_args into a single --disk (CH requires the variadic form).
    // Collect the disk value tokens provided by the user and strip them from the extra args so we can render one '--disk' followed by multiple values.
    let mut extra_disk_vals: Vec<String> = Vec::new();
    let mut extra_args_sans_disks: Vec<String> = Vec::new();
    if let Some(extra) = rec.spec.extra_args.clone() {
        let mut i = 0usize;
        while i < extra.len() {
            let tok = extra[i].clone();
            if tok == "--disk" {
                if i + 1 < extra.len() {
                    extra_disk_vals.push(extra[i + 1].clone());
                    i += 2;
                    continue;
                } else {
                    // dangling --disk without a value; drop it
                    i += 1;
                    continue;
                }
            } else if tok == "--no-default-net" {
                // sentinel: suppress default networking; do not pass to the CH CLI
                i += 1;
                continue;
            } else if let Some(rest) = tok.strip_prefix("--disk=") {
                if !rest.is_empty() {
                    extra_disk_vals.push(rest.to_string());
                }
                i += 1;
                continue;
            }
            // keep token
            extra_args_sans_disks.push(tok);
            i += 1;
        }
    }
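
    // Example (hypothetical extra_args): ["--disk", "path=/data/extra.raw", "--net", "tap=tap0"]
    // yields extra_disk_vals = ["path=/data/extra.raw"] and extra_args_sans_disks = ["--net", "tap=tap0"],
    // so the final command renders one `--disk` followed by every disk value.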

    // CH CLI flags (very common subset)
    // --disk path=... uses virtio-blk by default
    let mut parts: Vec<String> = vec![
        "cloud-hypervisor-static".into(),
        "--api-socket".into(),
        api_socket.clone(),
    ];

    if let Some(fw) = rec.spec.firmware_path.clone() {
        // Firmware boot path
        parts.push("--firmware".into());
        parts.push(fw);
    } else if let Some(kpath) = rec.spec.kernel_path.clone() {
        // Direct kernel boot path
        let cmdline = rec
            .spec
            .cmdline
            .clone()
            .unwrap_or_else(|| "console=ttyS0 reboot=k panic=1".to_string());
        parts.push("--kernel".into());
        parts.push(kpath);
        if let Some(initrd) = rec.spec.initramfs_path.clone() {
            if Path::new(&initrd).exists() {
                parts.push("--initramfs".into());
                parts.push(initrd);
            }
        }
        parts.push("--cmdline".into());
        parts.push(cmdline);
    } else {
        return Err(CloudHvError::InvalidSpec(
            "neither firmware_path nor kernel_path set at start time".into(),
        ));
    }

    parts.push("--disk".into());
    parts.push(format!("path={}", disk_to_use));
    // Append any additional disk value tokens (from the sanitized extra args) so CH sees a single '--disk' with multiple values
    for dv in &extra_disk_vals {
        parts.push(dv.clone());
    }
    parts.push("--cpus".into());
    parts.push(format!("boot={}", rec.spec.vcpus));
    parts.push("--memory".into());
    parts.push(format!("size={}M", rec.spec.memory_mb));
    parts.push("--serial".into());
    parts.push("tty".into());
    parts.push("--console".into());
    parts.push("off".into());
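
    // Rendered invocation sketch at this point (values illustrative):
    //   cloud-hypervisor-static --api-socket ~/hero/virt/vms/demo/api.sock \
    //     --kernel /images/vmlinux --cmdline 'console=ttyS0 reboot=k panic=1' \
    //     --disk path=~/hero/virt/vms/demo/disk.raw --cpus boot=2 \
    //     --memory size=2048M --serial tty --console off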

    // Determine if the user provided explicit network arguments (e.g. "--net", "tap=...,mac=...").
    // If so, do NOT provision the default host networking or add a default NIC.
    let has_user_net = rec
        .spec
        .extra_args
        .as_ref()
        .map(|v| v.iter().any(|tok| tok == "--net" || tok == "--no-default-net"))
        .unwrap_or(false);

    // Track the chosen bridge/lease for later discovery
    let mut bridge_for_disc: Option<String> = None;
    let mut lease_for_disc: Option<String> = None;

    // Determine the effective networking profile
    let profile_effective = if let Some(p) = rec.spec.net_profile.clone() {
        Some(p)
    } else if has_user_net {
        // User provided explicit --net or --no-default-net; do not provision
        None
    } else {
        // Default behavior: NAT profile
        Some(NetworkingProfileSpec::DefaultNat(DefaultNatOptions::default()))
    };

    if let Some(profile) = profile_effective {
        match profile {
            NetworkingProfileSpec::DefaultNat(nat) => {
                // IPv6 handling (auto via Mycelium unless disabled)
                let mut ipv6_bridge_cidr: Option<String> = None;
                if nat.ipv6_enable {
                    if let Ok(cidr) = std::env::var("HERO_VIRT_IPV6_BRIDGE_CIDR") {
                        // Validate mycelium iface presence, whether specified or defaulted
                        let if_hint = nat.mycelium_if.clone().unwrap_or_else(|| "mycelium".into());
                        let _ = net::mycelium_ipv6_addr(&if_hint)?;
                        ipv6_bridge_cidr = Some(cidr);
                    } else {
                        let if_hint = nat.mycelium_if.clone().unwrap_or_else(|| "mycelium".into());
                        let (_ifname, myc_addr) = net::mycelium_ipv6_addr(&if_hint)?;
                        let (_pfx, router_cidr) = net::derive_ipv6_prefix_from_mycelium(&myc_addr)?;
                        ipv6_bridge_cidr = Some(router_cidr);
                    }
                }

                // Ensure bridge, NAT, and DHCP
                net::ensure_bridge(&nat.bridge_name, &nat.bridge_addr_cidr, ipv6_bridge_cidr.as_deref())?;
                // Derive the /64 IPv6 subnet for NAT from the bridge CIDR
                let ipv6_subnet = ipv6_bridge_cidr.as_ref().map(|cidr| {
                    let parts: Vec<&str> = cidr.split('/').collect();
                    if parts.len() == 2 {
                        let addr = parts[0];
                        if let Ok(ip) = addr.parse::<std::net::Ipv6Addr>() {
                            let seg = ip.segments();
                            let pfx = std::net::Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, 0);
                            format!("{}/64", pfx)
                        } else {
                            "".to_string()
                        }
                    } else {
                        "".to_string()
                    }
                });
                net::ensure_nat(&nat.subnet_cidr, ipv6_subnet.as_deref())?;
                let lease_used = net::ensure_dnsmasq(
                    &nat.bridge_name,
                    &nat.dhcp_start,
                    &nat.dhcp_end,
                    ipv6_bridge_cidr.as_deref(),
                    nat.lease_file.as_deref(),
                )?;

                bridge_for_disc = Some(nat.bridge_name.clone());
                lease_for_disc = Some(lease_used.clone());

                // TAP + NIC args
                let tap_name = net::ensure_tap_for_vm(&nat.bridge_name, id)?;
                let mac = net::stable_mac_from_id(id);
                parts.push("--net".into());
                parts.push(format!("tap={},mac={}", tap_name, mac));
            }
            NetworkingProfileSpec::BridgeOnly(opts) => {
                let bridge_name = opts.bridge_name.clone();
                // Use the provided IPv4 if any, else the env default
                let bridge_addr_cidr = opts
                    .bridge_addr_cidr
                    .clone()
                    .unwrap_or_else(|| std::env::var("HERO_VIRT_BRIDGE_ADDR_CIDR").unwrap_or_else(|_| "172.30.0.1/24".into()));
                // Ensure the bridge (optional IPv6 from opts)
                net::ensure_bridge(&bridge_name, &bridge_addr_cidr, opts.bridge_ipv6_cidr.as_deref())?;
                // TAP + NIC only, no NAT/DHCP
                let tap_name = net::ensure_tap_for_vm(&bridge_name, id)?;
                let mac = net::stable_mac_from_id(id);
                parts.push("--net".into());
                parts.push(format!("tap={},mac={}", tap_name, mac));

                // For discovery: we can attempt an IPv6 neighbor lookup; no IPv4 lease is present
                bridge_for_disc = Some(bridge_name);
                lease_for_disc = None;
            }
            NetworkingProfileSpec::NoNet => {
                // Do nothing
            }
            NetworkingProfileSpec::CustomCli(_args) => {
                // Do not provision; the user must add --net via extra_args
            }
        }
    }

    // Append any user-provided extra args, sans any '--disk' we already consolidated
    for e in extra_args_sans_disks {
        parts.push(e);
    }

    let args_str = shell_join(&parts);
    // Execute via a bash heredoc to avoid any quoting pitfalls
    let heredoc = format!(
        "bash -e -s <<'EOF'\nnohup {} > '{}' 2>&1 &\necho $! > '{}'\nEOF\n",
        args_str,
        log_file,
        vm_pid_path(id).to_string_lossy()
    );
    // Execute the command; this will background cloud-hypervisor and return
    let result = sal_process::run(&heredoc).silent(true).execute();
    match result {
        Ok(res) => {
            if !res.success {
                return Err(CloudHvError::CommandFailed(format!(
                    "Failed to start VM '{}': {}",
                    id, res.stderr
                )));
            }
        }
        Err(e) => {
            return Err(CloudHvError::CommandFailed(format!(
                "Failed to start VM '{}': {}",
                id, e
            )))
        }
    }
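
    // The rendered launch script looks like (paths illustrative):
    //   bash -e -s <<'EOF'
    //   nohup cloud-hypervisor-static ... > '/home/alice/hero/virt/vms/demo/logs/console.log' 2>&1 &
    //   echo $! > '/home/alice/hero/virt/vms/demo/pid'
    //   EOF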

    // Read the PID back
    let pid = match fs::read_to_string(vm_pid_path(id)) {
        Ok(s) => s.trim().parse::<i64>().ok(),
        Err(_) => None,
    };

    // Quick health check: ensure the process did not exit immediately due to CLI errors (e.g., duplicate flags)
    if let Some(pid_num) = pid {
        thread::sleep(Duration::from_millis(300));
        if !proc_exists(pid_num) {
            // Tail the log to surface the error cause
            let tail_cmd = format!("tail -n 200 {}", shell_escape(&log_file));
            let tail = sal_process::run(&tail_cmd).die(false).silent(true).execute();
            let mut log_snip = String::new();
            if let Ok(res) = tail {
                if res.success {
                    log_snip = res.stdout;
                } else {
                    log_snip = format!("{}{}", res.stdout, res.stderr);
                }
            }
            return Err(CloudHvError::CommandFailed(format!(
                "cloud-hypervisor exited immediately after start. Log tail:\n{}",
                log_snip
            )));
        }
    } else {
        return Err(CloudHvError::CommandFailed(
            "failed to obtain cloud-hypervisor PID (start script did not write pid)".into(),
        ));
    }

    // Update state
    rec.runtime.pid = pid;
    rec.runtime.status = if pid.is_some() { "running".into() } else { "stopped".into() };
    rec.runtime.log_file = log_file;
    rec.runtime.bridge_name = bridge_for_disc.clone();
    rec.runtime.lease_file = lease_for_disc.clone();
    rec.runtime.mac = Some(net::stable_mac_from_id(id));
    rec.spec.api_socket = api_socket.clone();

    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    // Best-effort: discover guest IPv4/IPv6 addresses (default-net path)
    thread::sleep(Duration::from_millis(5000));
    let mac_lower = net::stable_mac_from_id(id).to_lowercase();

    if let Some(bridge_name) = bridge_for_disc.clone() {
        let lease_path = lease_for_disc.unwrap_or_else(|| {
            std::env::var("HERO_VIRT_DHCP_LEASE_FILE")
                .unwrap_or_else(|_| format!("/var/lib/misc/dnsmasq-hero-{}.leases", bridge_name))
        });
        let _ipv4 = net::discover_ipv4_from_leases(&lease_path, &mac_lower, 12);
        let _ipv6 = net::discover_ipv6_on_bridge(&bridge_name, &mac_lower);
    }

    Ok(())
}

/// Return VM record info (spec + runtime) by id
pub fn vm_info(id: &str) -> Result<VmRecord, CloudHvError> {
    // Try the current user's VM root first, then fall back to /root (common when the VM was launched under sudo)
    let p_user = vm_json_path(id);
    let p = if p_user.exists() {
        p_user
    } else if let Some(p2) = resolve_vm_json_path(id) {
        p2
    } else {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    };
    let value = read_json(&p)?;
    let rec: VmRecord = serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    Ok(rec)
}

/// Discover VM network info using persisted metadata (bridge/lease/mac) with sensible fallbacks.
/// Returns (IPv4, IPv6, MAC, BridgeName, LeaseFile), each optional.
pub fn vm_network_info(
    id: &str,
    timeout_secs: u64,
) -> Result<(Option<String>, Option<String>, Option<String>, Option<String>, Option<String>), CloudHvError> {
    let rec = vm_info(id)?;

    // Start with persisted/env/default values
    let mut bridge_name = rec
        .runtime
        .bridge_name
        .clone()
        .or_else(|| std::env::var("HERO_VIRT_BRIDGE_NAME").ok())
        .unwrap_or_else(|| "br-hero".into());

    // MAC: persisted or deterministically derived (lowercased for matching)
    let mut mac_lower = rec
        .runtime
        .mac
        .clone()
        .unwrap_or_else(|| net::stable_mac_from_id(id))
        .to_lowercase();

    // Attempt to query CH for the ground-truth (tap, mac) if the API socket is available
    if let Some(api_sock) = resolve_vm_api_socket_path(id) {
        if let Some((tap, mac_from_ch)) = ch_query_tap_mac(&api_sock) {
            mac_lower = mac_from_ch;
            if let Some(br) = bridge_name_for_tap(&tap) {
                bridge_name = br;
            }
        }
    }

    // Lease file: persisted -> env -> derived from the (possibly overridden) bridge
    let lease_path = rec
        .runtime
        .lease_file
        .clone()
        .or_else(|| std::env::var("HERO_VIRT_DHCP_LEASE_FILE").ok())
        .unwrap_or_else(|| format!("/var/lib/misc/dnsmasq-hero-{}.leases", bridge_name));

    // Discover addresses
    let ipv4 = net::discover_ipv4_from_leases(&lease_path, &mac_lower, timeout_secs);
    let ipv6 = {
        use std::time::{Duration, Instant};
        let deadline = Instant::now() + Duration::from_secs(timeout_secs);
        let mut v6: Option<String> = None;
        while Instant::now() < deadline {
            if let Some(ip) = net::discover_ipv6_on_bridge(&bridge_name, &mac_lower) {
                v6 = Some(ip);
                break;
            }
            std::thread::sleep(Duration::from_millis(800));
        }
        v6
    };

    Ok((
        ipv4,
        ipv6,
        Some(mac_lower),
        Some(bridge_name),
        Some(lease_path),
    ))
}
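
// Usage sketch (timeout in seconds; any of the returned fields may be None):
//   let (ipv4, ipv6, mac, bridge, lease) = vm_network_info("demo", 30)?;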

/// Stop a VM via ch-remote (graceful), optionally force kill
pub fn vm_stop(id: &str, force: bool) -> Result<(), CloudHvError> {
    ensure_deps().ok(); // best-effort; we might still force-kill

    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Attempt graceful shutdown if the api socket is known
    if !rec.spec.api_socket.trim().is_empty() {
        let cmd = format!("ch-remote-static --api-socket {} shutdown", rec.spec.api_socket);
        let _ = sal_process::run(&cmd).die(false).silent(true).execute();
    }

    // Wait for the process to exit (up to ~10s)
    if let Some(pid) = rec.runtime.pid {
        for _ in 0..50 {
            if !proc_exists(pid) {
                break;
            }
            thread::sleep(Duration::from_millis(200));
        }
        // If still alive and force is set, kill -9 and wait again (up to ~10s)
        if proc_exists(pid) && force {
            // Send SIGKILL without extra shell layers; suppress errors/noise
            let _ = sal_process::run(&format!("kill -9 {}", pid))
                .die(false)
                .silent(true)
                .execute();
            for _ in 0..50 {
                if !proc_exists(pid) {
                    break;
                }
                thread::sleep(Duration::from_millis(200));
            }
        }
    }

    // Update state
    rec.runtime.status = "stopped".into();
    rec.runtime.pid = None;
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    // Remove the pid file
    let _ = fs::remove_file(vm_pid_path(id));

    Ok(())
}

/// Delete a VM definition; optionally delete disks.
pub fn vm_delete(id: &str, delete_disks: bool) -> Result<(), CloudHvError> {
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let rec: VmRecord = serde_json::from_value(read_json(&p)?)
        .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // If it appears to be running, attempt a force stop first (best-effort)
    if let Some(pid) = rec.runtime.pid {
        if proc_exists(pid) {
            let _ = vm_stop(id, true);
            // Re-check the original PID for liveness (up to ~5s)
            for _ in 0..25 {
                if !proc_exists(pid) {
                    break;
                }
                thread::sleep(Duration::from_millis(200));
            }
            if proc_exists(pid) {
                return Err(CloudHvError::CommandFailed(
                    "VM appears to be running; stop it first".into(),
                ));
            }
        }
    }

    if delete_disks {
        let _ = fs::remove_file(&rec.spec.disk_path);
    }

    let d = vm_dir(id);
    fs::remove_dir_all(&d).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    Ok(())
}

/// List all VMs
pub fn vm_list() -> Result<Vec<VmRecord>, CloudHvError> {
    let root = hero_vm_root();
    if !root.exists() {
        return Ok(vec![]);
    }
    let mut out = vec![];
    for entry in fs::read_dir(&root).map_err(|e| CloudHvError::IoError(e.to_string()))? {
        let entry = entry.map_err(|e| CloudHvError::IoError(e.to_string()))?;
        let p = entry.path();
        if !p.is_dir() {
            continue;
        }
        let vm_json = p.join("vm.json");
        if !vm_json.exists() {
            continue;
        }
        let rec: VmRecord = serde_json::from_value(read_json(&vm_json)?)
            .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

        out.push(rec);
    }
    Ok(out)
}

// NOTE: mycelium IPv6 discovery and /64 prefix derivation now live in the
// cloudhv::net module (see mycelium_ipv6_addr and derive_ipv6_prefix_from_mycelium below).

/// Render a shell-safe command string from a vector of tokens
fn shell_join(parts: &[String]) -> String {
    let mut s = String::new();
    for (i, p) in parts.iter().enumerate() {
        if i > 0 {
            s.push(' ');
        }
        s.push_str(&shell_escape(p));
    }
    s
}
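
// e.g. shell_join(&vec!["--cmdline".into(), "console=ttyS0 panic=1".into()])
//      renders as: --cmdline 'console=ttyS0 panic=1'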

fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".into();
    }
    if s
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
    {
        return s.into();
    }
    // single-quote wrap, escaping embedded quotes
    let mut out = String::from("'");
    for ch in s.chars() {
        if ch == '\'' {
            out.push_str("'\"'\"'");
        } else {
            out.push(ch);
        }
    }
    out.push('\'');
    out
}
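
// Example: shell_escape("it's") yields 'it'"'"'s' -- the embedded quote is closed,
// double-quoted, and reopened, which bash concatenates back into a single word.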

386
packages/system/virt/src/cloudhv/net/mod.rs
Normal file
@@ -0,0 +1,386 @@
use sal_process;

use crate::cloudhv::CloudHvError;

pub mod profile;
pub use profile::{BridgeOptions, DefaultNatOptions, NetworkingProfileSpec};

// Local shell escaping (kept independent from the parent module)
fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".into();
    }
    if s.chars()
        .all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
    {
        return s.into();
    }
    let mut out = String::from("'");
    for ch in s.chars() {
        if ch == '\'' {
            out.push_str("'\"'\"'");
        } else {
            out.push(ch);
        }
    }
    out.push('\'');
    out
}

fn run_heredoc(label: &str, body: &str) -> Result<(), CloudHvError> {
    let script = format!("bash -e -s <<'{label}'\n{body}\n{label}\n", label = label, body = body);
    match sal_process::run(&script).silent(true).die(false).execute() {
        Ok(res) if res.success => Ok(()),
        Ok(res) => Err(CloudHvError::CommandFailed(format!(
            "{} failed: {}{}",
            label, res.stdout, res.stderr
        ))),
        Err(e) => Err(CloudHvError::CommandFailed(format!(
            "{} failed: {}",
            label, e
        ))),
    }
}

/// Ensure the Linux bridge exists and has IPv4 (and optional IPv6) configured.
/// Also enables IPv4 forwarding (and IPv6 forwarding when a v6 CIDR is provided).
pub fn ensure_bridge(
    bridge_name: &str,
    bridge_addr_cidr: &str,
    ipv6_bridge_cidr: Option<&str>,
) -> Result<(), CloudHvError> {
    // deps: ip
    if sal_process::which("ip").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "ip not found on PATH".into(),
        ));
    }
    let v6 = ipv6_bridge_cidr.unwrap_or("");
    let body = format!(
        "set -e
BR={br}
BR_ADDR={br_addr}
IPV6_CIDR={v6cidr}

ip link show \"$BR\" >/dev/null 2>&1 || ip link add name \"$BR\" type bridge
ip addr replace \"$BR_ADDR\" dev \"$BR\"
ip link set \"$BR\" up

# IPv6 address and forwarding (optional)
if [ -n \"$IPV6_CIDR\" ]; then
  ip -6 addr replace \"$IPV6_CIDR\" dev \"$BR\"
  sysctl -w net.ipv6.conf.all.forwarding=1 >/dev/null || true
fi

# IPv4 forwarding (idempotent)
sysctl -w net.ipv4.ip_forward=1 >/dev/null || true
",
        br = shell_escape(bridge_name),
        br_addr = shell_escape(bridge_addr_cidr),
        v6cidr = shell_escape(v6),
    );
    run_heredoc("HEROBRIDGE", &body)
}

/// Ensure nftables NAT masquerading for the given subnet toward the default WAN interface.
/// Creates the table/chain if missing and adds/keeps a single masquerade rule.
/// If ipv6_subnet is provided, also sets up IPv6 NAT.
pub fn ensure_nat(subnet_cidr: &str, ipv6_subnet: Option<&str>) -> Result<(), CloudHvError> {
    for bin in ["ip", "nft"] {
        if sal_process::which(bin).is_none() {
            return Err(CloudHvError::DependencyMissing(format!(
                "{} not found on PATH",
                bin
            )));
        }
    }
    let v6_subnet = ipv6_subnet.unwrap_or("");
    let body = format!(
        "set -e
SUBNET={subnet}
IPV6_SUBNET={v6subnet}

WAN_IF=$(ip -o route show default | awk '{{print $5}}' | head -n1)
if [ -z \"$WAN_IF\" ]; then
  echo \"No default WAN interface detected (required for NAT)\" >&2
  exit 2
fi

# IPv4 NAT
nft list table ip hero >/dev/null 2>&1 || nft add table ip hero
nft list chain ip hero postrouting >/dev/null 2>&1 || nft add chain ip hero postrouting {{ type nat hook postrouting priority 100 \\; }}
nft list chain ip hero postrouting | grep -q \"ip saddr $SUBNET oifname \\\"$WAN_IF\\\" masquerade\" \
  || nft add rule ip hero postrouting ip saddr $SUBNET oifname \"$WAN_IF\" masquerade

# IPv6 NAT (if a subnet is provided)
if [ -n \"$IPV6_SUBNET\" ]; then
  nft list table ip6 hero >/dev/null 2>&1 || nft add table ip6 hero
  nft list chain ip6 hero postrouting >/dev/null 2>&1 || nft add chain ip6 hero postrouting {{ type nat hook postrouting priority 100 \\; }}
  nft list chain ip6 hero postrouting | grep -q \"ip6 saddr $IPV6_SUBNET oifname \\\"$WAN_IF\\\" masquerade\" \
    || nft add rule ip6 hero postrouting ip6 saddr $IPV6_SUBNET oifname \"$WAN_IF\" masquerade
fi
",
        subnet = shell_escape(subnet_cidr),
        v6subnet = shell_escape(v6_subnet),
    );
    run_heredoc("HERONAT", &body)
}
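
// Rendered rule sketch (illustrative, with WAN_IF=eth0 and SUBNET=172.30.0.0/24):
//   nft add rule ip hero postrouting ip saddr 172.30.0.0/24 oifname "eth0" masquerade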

/// Ensure dnsmasq DHCP is configured for the bridge. Returns the lease file path used.
/// This function is idempotent; it writes a deterministic conf and reloads/enables dnsmasq.
pub fn ensure_dnsmasq(
    bridge_name: &str,
    dhcp_start: &str,
    dhcp_end: &str,
    ipv6_bridge_cidr: Option<&str>,
    lease_file_override: Option<&str>,
) -> Result<String, CloudHvError> {
    for bin in ["dnsmasq", "systemctl"] {
        if sal_process::which(bin).is_none() {
            return Err(CloudHvError::DependencyMissing(format!(
                "{} not found on PATH",
                bin
            )));
        }
    }
    let lease_file = lease_file_override
        .map(|s| s.to_string())
        .unwrap_or_else(|| format!("/var/lib/misc/dnsmasq-hero-{}.leases", bridge_name));
    let v6 = ipv6_bridge_cidr.unwrap_or("");
    let body = format!(
        "set -e
BR={br}
DHCP_START={dstart}
DHCP_END={dend}
LEASE_FILE={lease}
IPV6_CIDR={v6cidr}

mkdir -p /etc/dnsmasq.d
mkdir -p /var/lib/misc

CFG=/etc/dnsmasq.d/hero-$BR.conf
TMP=/etc/dnsmasq.d/.hero-$BR.conf.new

# Ensure the main conf includes our conf-dir
CONF=/etc/dnsmasq.conf
RELOAD=0
if ! grep -qF \"conf-dir=/etc/dnsmasq.d\" \"$CONF\" 2>/dev/null; then
  printf '%s\\n' 'conf-dir=/etc/dnsmasq.d,*.conf' >> \"$CONF\"
  RELOAD=1
fi

# Ensure the lease file exists with the right ownership (best effort)
touch \"$LEASE_FILE\" || true
chown dnsmasq:dnsmasq \"$LEASE_FILE\" 2>/dev/null || true

# IPv4 section
printf '%s\\n' \
  \"interface=$BR\" \
  \"bind-interfaces\" \
  \"dhcp-authoritative\" \
  \"dhcp-range=$DHCP_START,$DHCP_END,12h\" \
  \"dhcp-option=option:dns-server,1.1.1.1,8.8.8.8\" \
  \"dhcp-leasefile=$LEASE_FILE\" >\"$TMP\"

# Optional IPv6 RA/DHCPv6
if [ -n \"$IPV6_CIDR\" ]; then
  BRIDGE_ADDR=\"${{IPV6_CIDR%/*}}\"
  BRIDGE_PREFIX=$(echo \"$IPV6_CIDR\" | cut -d: -f1-4)::
  printf '%s\\n' \
    \"enable-ra\" \
    \"dhcp-range=$BRIDGE_PREFIX,ra-names,12h\" \
    \"dhcp-option=option6:dns-server,[2001:4860:4860::8888]\" >>\"$TMP\"
fi

if [ ! -f \"$CFG\" ] || ! cmp -s \"$CFG\" \"$TMP\"; then
  mv \"$TMP\" \"$CFG\"
  if systemctl is-active --quiet dnsmasq; then
    systemctl reload dnsmasq || systemctl restart dnsmasq || true
  else
    systemctl enable --now dnsmasq || true
  fi
else
  rm -f \"$TMP\"
  systemctl enable --now dnsmasq || true
fi

if [ \"$RELOAD\" = \"1\" ]; then
  systemctl reload dnsmasq || systemctl restart dnsmasq || true
fi
",
        br = shell_escape(bridge_name),
        dstart = shell_escape(dhcp_start),
        dend = shell_escape(dhcp_end),
        lease = shell_escape(&lease_file),
        v6cidr = shell_escape(v6),
    );
    run_heredoc("HERODNSMASQ", &body)?;
    Ok(lease_file)
}
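
// Generated conf sketch (/etc/dnsmasq.d/hero-br-hero.conf with the default options):
//   interface=br-hero
//   bind-interfaces
//   dhcp-authoritative
//   dhcp-range=172.30.0.50,172.30.0.250,12h
//   dhcp-option=option:dns-server,1.1.1.1,8.8.8.8
//   dhcp-leasefile=/var/lib/misc/dnsmasq-hero-br-hero.leases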
|
||||
|
||||
/// Deterministic TAP name from VM id (Linux IFNAMSIZ safe)
|
||||
pub fn tap_name_for_id(id: &str) -> String {
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
let mut h = DefaultHasher::new();
|
||||
id.hash(&mut h);
|
||||
let v = h.finish();
|
||||
let hex = format!("{:016x}", v);
|
||||
format!("tap-{}", &hex[..10])
|
||||
}
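
// "tap-" plus 10 hex chars is 14 bytes, comfortably under Linux's 15-char IFNAMSIZ limit.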

/// Ensure a per-VM TAP exists, enslaved to the bridge, and up.
/// Assigns ownership to the current user/group so CH can open the fd unprivileged.
pub fn ensure_tap_for_vm(bridge_name: &str, id: &str) -> Result<String, CloudHvError> {
    if sal_process::which("ip").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "ip not found on PATH".into(),
        ));
    }
    let tap = tap_name_for_id(id);
    let body = format!(
        "set -e
BR={br}
TAP={tap}
UIDX=$(id -u)
GIDX=$(id -g)

# Ensure a clean TAP state to avoid 'Resource busy' if a previous VM run left it lingering
if ip link show \"$TAP\" >/dev/null 2>&1; then
  ip link set \"$TAP\" down || true
  ip link set \"$TAP\" nomaster 2>/dev/null || true
  ip tuntap del dev \"$TAP\" mode tap 2>/dev/null || true
fi

# Recreate with correct ownership and attach to the bridge
ip tuntap add dev \"$TAP\" mode tap user \"$UIDX\" group \"$GIDX\"
ip link set \"$TAP\" master \"$BR\" 2>/dev/null || true
ip link set \"$TAP\" up
",
        br = shell_escape(bridge_name),
        tap = shell_escape(&tap),
    );
    run_heredoc("HEROTAP", &body)?;
    Ok(tap)
}

/// Stable locally-administered unicast MAC derived from the VM id.
/// IMPORTANT: uses a deterministic hash (FNV-1a) rather than DefaultHasher,
/// whose output is not guaranteed to be stable across Rust releases.
pub fn stable_mac_from_id(id: &str) -> String {
    // 64-bit FNV-1a
    const FNV_OFFSET: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x00000100000001B3;
    let mut v: u64 = FNV_OFFSET;
    for b in id.as_bytes() {
        v ^= *b as u64;
        v = v.wrapping_mul(FNV_PRIME);
    }
    // Locally administered, unicast
    let b0 = (((v >> 40) & 0xff) as u8 & 0xfe) | 0x02;
    let b1 = ((v >> 32) & 0xff) as u8;
    let b2 = ((v >> 24) & 0xff) as u8;
    let b3 = ((v >> 16) & 0xff) as u8;
    let b4 = ((v >> 8) & 0xff) as u8;
    let b5 = (v & 0xff) as u8;
    format!(
        "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
        b0, b1, b2, b3, b4, b5
    )
}
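
// Property check: the first octet is forced to (b & 0xfe) | 0x02, i.e. the
// locally-administered bit set and the multicast bit cleared, so the result can
// never collide with vendor (OUI) space or be treated as a multicast address.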

/// Discover the mycelium IPv6 global address on iface (or the env override).
/// Returns (iface_name, address).
pub fn mycelium_ipv6_addr(iface_hint: &str) -> Result<(String, String), CloudHvError> {
    let iface = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| iface_hint.to_string());
    let cmd = format!("ip -6 addr show dev {}", shell_escape(&iface));
    let res = sal_process::run(&cmd).silent(true).die(false).execute();
    let out = match res {
        Ok(r) if r.success => r.stdout,
        _ => {
            return Err(CloudHvError::DependencyMissing(format!(
                "mycelium interface '{}' not found or no IPv6 configured",
                iface
            )))
        }
    };
    for line in out.lines() {
        let lt = line.trim();
        if lt.starts_with("inet6 ") && lt.contains("scope global") {
            let parts: Vec<&str> = lt.split_whitespace().collect();
            if let Some(addr_cidr) = parts.get(1) {
                let addr_only = addr_cidr.split('/').next().unwrap_or("").trim();
                if !addr_only.is_empty() && addr_only.parse::<std::net::Ipv6Addr>().is_ok() {
                    return Ok((iface, addr_only.to_string()));
                }
            }
        }
    }
    Err(CloudHvError::DependencyMissing(format!(
        "no global IPv6 found on interface '{}'",
        iface
    )))
}

/// Derive (prefix /64, router /64 string) from a mycelium IPv6 address string.
pub fn derive_ipv6_prefix_from_mycelium(m: &str) -> Result<(String, String), CloudHvError> {
    let ip = m.parse::<std::net::Ipv6Addr>().map_err(|e| {
        CloudHvError::InvalidSpec(format!("invalid mycelium IPv6 address '{}': {}", m, e))
    })?;
    let seg = ip.segments();
    let pfx = std::net::Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, 0);
    let router = std::net::Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, 2);
    let pfx_str = format!("{}/64", pfx);
    let router_cidr = format!("{}/64", router);
    Ok((pfx_str, router_cidr))
}
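
// Worked example: a mycelium address of 47e:abcd:1234:5678:aa:bb:cc:dd yields
// ("47e:abcd:1234:5678::/64", "47e:abcd:1234:5678::2/64") -- the first four
// segments are kept and the router takes the ::2 host suffix.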

/// Parse a dnsmasq lease file to find the last IPv4 recorded for a MAC (lowercased).
/// Polls up to timeout_secs with an 800ms sleep; returns None on timeout.
pub fn discover_ipv4_from_leases(
    lease_path: &str,
    mac_lower: &str,
    timeout_secs: u64,
) -> Option<String> {
    use std::fs;
    use std::time::{Duration, Instant};
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    loop {
        if let Ok(content) = fs::read_to_string(lease_path) {
            let mut last_ip: Option<String> = None;
            for line in content.lines() {
                let cols: Vec<&str> = line.split_whitespace().collect();
                if cols.len() >= 3 && cols[1].eq_ignore_ascii_case(mac_lower) {
                    last_ip = Some(cols[2].to_string());
                }
            }
            if last_ip.is_some() {
                return last_ip;
            }
        }
        if Instant::now() >= deadline {
            return None;
        }
        std::thread::sleep(Duration::from_millis(800));
    }
}
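
// dnsmasq lease line format (illustrative):
//   1699999999 02:ab:cd:12:34:56 172.30.0.51 demo-host 01:02:ab:cd:12:34:56
// cols[1] is the MAC and cols[2] the IPv4, which is what the parser keys on.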

/// Search the IPv6 neighbor table on the bridge for an entry matching the MAC (lladdr), excluding link-local.
pub fn discover_ipv6_on_bridge(bridge_name: &str, mac_lower: &str) -> Option<String> {
    let cmd = format!("ip -6 neigh show dev {}", shell_escape(bridge_name));
    if let Ok(res) = sal_process::run(&cmd).silent(true).die(false).execute() {
        if res.success {
            let mac_pat = format!("lladdr {}", mac_lower);
            for line in res.stdout.lines() {
                let lt = line.trim();
                if lt.to_lowercase().contains(&mac_pat) {
                    if let Some(addr) = lt.split_whitespace().next() {
                        if !addr.starts_with("fe80") && !addr.is_empty() {
                            return Some(addr.to_string());
                        }
                    }
                }
            }
        }
    }
    None
}

95
packages/system/virt/src/cloudhv/net/profile.rs
Normal file
@@ -0,0 +1,95 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DefaultNatOptions {
    #[serde(default = "DefaultNatOptions::default_bridge_name")]
    pub bridge_name: String,
    #[serde(default = "DefaultNatOptions::default_bridge_addr")]
    pub bridge_addr_cidr: String,
    #[serde(default = "DefaultNatOptions::default_subnet")]
    pub subnet_cidr: String,
    #[serde(default = "DefaultNatOptions::default_dhcp_start")]
    pub dhcp_start: String,
    #[serde(default = "DefaultNatOptions::default_dhcp_end")]
    pub dhcp_end: String,
    #[serde(default = "DefaultNatOptions::default_ipv6_enable")]
    pub ipv6_enable: bool,
    /// Optional: if set, use this IPv6 on the bridge (e.g. "400:...::2/64"), else derive via mycelium
    #[serde(default)]
    pub bridge_ipv6_cidr: Option<String>,
    /// Optional explicit mycelium interface name
    #[serde(default)]
    pub mycelium_if: Option<String>,
    /// Optional override for the dnsmasq lease file
    #[serde(default)]
    pub lease_file: Option<String>,
}

impl DefaultNatOptions {
    fn default_bridge_name() -> String {
        std::env::var("HERO_VIRT_BRIDGE_NAME").unwrap_or_else(|_| "br-hero".into())
    }
    fn default_bridge_addr() -> String {
        std::env::var("HERO_VIRT_BRIDGE_ADDR_CIDR").unwrap_or_else(|_| "172.30.0.1/24".into())
    }
    fn default_subnet() -> String {
        std::env::var("HERO_VIRT_SUBNET_CIDR").unwrap_or_else(|_| "172.30.0.0/24".into())
    }
    fn default_dhcp_start() -> String {
        std::env::var("HERO_VIRT_DHCP_START").unwrap_or_else(|_| "172.30.0.50".into())
    }
    fn default_dhcp_end() -> String {
        std::env::var("HERO_VIRT_DHCP_END").unwrap_or_else(|_| "172.30.0.250".into())
    }
    fn default_ipv6_enable() -> bool {
        match std::env::var("HERO_VIRT_IPV6_ENABLE").map(|v| v.to_lowercase()) {
            Ok(s) if s == "0" || s == "false" || s == "no" => false,
            _ => true,
        }
    }
}

impl Default for DefaultNatOptions {
    fn default() -> Self {
        Self {
            bridge_name: Self::default_bridge_name(),
            bridge_addr_cidr: Self::default_bridge_addr(),
            subnet_cidr: Self::default_subnet(),
            dhcp_start: Self::default_dhcp_start(),
            dhcp_end: Self::default_dhcp_end(),
            ipv6_enable: Self::default_ipv6_enable(),
            bridge_ipv6_cidr: None,
            mycelium_if: None,
            lease_file: None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BridgeOptions {
    #[serde(default = "DefaultNatOptions::default_bridge_name")]
    pub bridge_name: String,
    /// Optional: if provided, configure IPv4 on the bridge
    #[serde(default)]
    pub bridge_addr_cidr: Option<String>,
    /// Optional: if provided, configure IPv6 on the bridge
    #[serde(default)]
    pub bridge_ipv6_cidr: Option<String>,
}
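
// Note: the derived `Default` yields an empty bridge_name; the serde default
// ("br-hero" or $HERO_VIRT_BRIDGE_NAME) only applies when deserializing.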

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "opts")]
pub enum NetworkingProfileSpec {
    DefaultNat(DefaultNatOptions),
    NoNet,
    /// Pass-through user args to CH; currently informational in VmSpec
    CustomCli(Vec<String>),
    /// Ensure bridge and tap only; no NAT/DHCP
    BridgeOnly(BridgeOptions),
}

impl Default for NetworkingProfileSpec {
    fn default() -> Self {
        NetworkingProfileSpec::DefaultNat(DefaultNatOptions::default())
    }
}
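
// With the adjacently-tagged representation above, a profile serializes as e.g.
//   {"type":"DefaultNat","opts":{"bridge_name":"br-hero",...}}
// and the unit variant simply as {"type":"NoNet"}.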

196
packages/system/virt/src/hostcheck/mod.rs
Normal file
@@ -0,0 +1,196 @@
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;

use sal_os;
use sal_process;

/// Host dependency check error
#[derive(Debug)]
pub enum HostCheckError {
    Io(String),
}

impl std::fmt::Display for HostCheckError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            HostCheckError::Io(e) => write!(f, "IO error: {}", e),
        }
    }
}

impl std::error::Error for HostCheckError {}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HostCheckReport {
    pub ok: bool,
    pub critical: Vec<String>,
    pub optional: Vec<String>,
    pub notes: Vec<String>,
}

fn hero_vm_root() -> String {
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
    format!("{}/hero/virt/vms", home.trim_end_matches('/'))
}

fn bin_missing(name: &str) -> bool {
    sal_process::which(name).is_none()
}

/// Perform host dependency checks required for image preparation and a Cloud Hypervisor run.
/// Returns a structured report that Rhai can consume easily.
pub fn host_check_deps() -> Result<HostCheckReport, HostCheckError> {
    let mut critical: Vec<String> = Vec::new();
    let optional: Vec<String> = Vec::new();
    let mut notes: Vec<String> = Vec::new();

    // Must run as root
    let uid_res = sal_process::run("id -u").silent(true).die(false).execute();
    match uid_res {
        Ok(r) if r.success => {
            let uid_s = r.stdout.trim();
            if uid_s != "0" {
                critical.push("not running as root (required for nbd/mount/network)".into());
            }
        }
        _ => {
            notes.push("failed to determine uid via `id -u`".into());
        }
    }

    // Core binaries required for CH and image manipulation
    let core_bins = [
        "cloud-hypervisor",        // CH binary (dynamic)
        "cloud-hypervisor-static", // CH static (if present)
        "ch-remote",
        "ch-remote-static",
        // hypervisor-fw is expected at /images/hypervisor-fw (not on PATH)
        "qemu-img",
        "qemu-nbd",
        "blkid",
        "tune2fs",
        "partprobe",
        "mount",
        "umount",
        "sed",
        "awk",
        "modprobe",
    ];

    // Networking helpers (for the default bridge + NAT path)
    let net_bins = ["ip", "nft", "dnsmasq", "systemctl"];

    // Evaluate presence
    let mut have_any_ch = false;
    if !bin_missing("cloud-hypervisor") || !bin_missing("cloud-hypervisor-static") {
        have_any_ch = true;
    }
    if !have_any_ch {
        critical.push("cloud-hypervisor or cloud-hypervisor-static not found on PATH".into());
    }
    if bin_missing("ch-remote") && bin_missing("ch-remote-static") {
        critical.push("ch-remote or ch-remote-static not found on PATH".into());
    }

    // Skip the first four entries (the CH/ch-remote variants handled above) and treat
    // the remaining qemu/nbd/filesystem tools as critical; the networking tools are
    // critical too, since the default path provisions bridge/DHCP.
    for b in [&core_bins[4..], &net_bins[..]].concat() {
        if bin_missing(b) {
            critical.push(format!("missing binary: {}", b));
        }
    }

    // Filesystem/path checks
    // Ensure /images exists and the expected image files are present (ubuntu, alpine, hypervisor-fw)
    let images_root = "/images";
    if !Path::new(images_root).exists() {
        critical.push(format!("{} not found (expected base images directory)", images_root));
    } else {
        let ubuntu_path = format!("{}/noble-server-cloudimg-amd64.img", images_root);
        let alpine_path = format!("{}/alpine-virt-cloudimg-amd64.qcow2", images_root);
        let fw_path = format!("{}/hypervisor-fw", images_root);
        if !Path::new(&ubuntu_path).exists() {
            critical.push(format!("missing base image: {}", ubuntu_path));
        }
        if !Path::new(&alpine_path).exists() {
            critical.push(format!("missing base image: {}", alpine_path));
        }
        if !Path::new(&fw_path).exists() {
            critical.push(format!("missing firmware: {}", fw_path));
        }
    }

    // Ensure the VM root directory is writable/creatable
    let vm_root = hero_vm_root();
    if let Err(e) = sal_os::mkdir(&vm_root) {
        critical.push(format!(
            "cannot create/access VM root directory {}: {}",
            vm_root, e
        ));
    } else {
        // also try writing a small probe file
        let probe_path = format!("{}/.__hero_probe", vm_root);
        if let Err(e) = fs::write(&probe_path, b"ok") {
            critical.push(format!(
                "VM root not writable {}: {}",
                vm_root, e
            ));
        } else {
            let _ = fs::remove_file(&probe_path);
        }
    }

    // Optional Mycelium IPv6 checks when enabled via env
    let ipv6_env = std::env::var("HERO_VIRT_IPV6_ENABLE").unwrap_or_else(|_| "".into());
    let ipv6_enabled = ipv6_env.eq_ignore_ascii_case("1") || ipv6_env.eq_ignore_ascii_case("true");
    if ipv6_enabled {
        // Require the mycelium CLI
        if bin_missing("mycelium") {
            critical.push("mycelium CLI not found on PATH (required when HERO_VIRT_IPV6_ENABLE=true)".into());
        }
        // Validate interface presence and a global IPv6
        let ifname = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| "mycelium".into());
        let check_if = sal_process::run(&format!("ip -6 addr show dev {}", ifname))
            .silent(true)
            .die(false)
            .execute();
        match check_if {
            Ok(r) if r.success => {
                let out = r.stdout;
                if !(out.contains("inet6") && out.contains("scope global")) {
                    notes.push(format!(
                        "iface '{}' present but no global IPv6 detected; Mycelium may not be up yet",
                        ifname
                    ));
                }
            }
            _ => {
                critical.push(format!(
                    "iface '{}' not found or no IPv6; ensure Mycelium is running",
                    ifname
                ));
            }
        }
        // Best-effort: parse `mycelium inspect` for an Address
        let insp = sal_process::run("mycelium inspect").silent(true).die(false).execute();
        match insp {
            Ok(res) if res.success && res.stdout.contains("Address:") => {
                // good enough
            }
            _ => {
                notes.push("`mycelium inspect` did not return an Address; IPv6 overlay may be unavailable".into());
            }
        }
    }

    // Summarize the ok flag
    let ok = critical.is_empty();

    Ok(HostCheckReport {
        ok,
        critical,
        optional,
        notes,
    })
}
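
// Usage sketch: callers (e.g. Rhai bindings) typically gate VM operations on the report:
//   let report = host_check_deps()?;
//   if !report.ok {
//       for c in &report.critical { eprintln!("missing: {}", c); }
//   }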

799
packages/system/virt/src/image_prep/mod.rs
Normal file
@@ -0,0 +1,799 @@
use serde::{Deserialize, Serialize};
use std::path::Path;

use sal_os;
use sal_process;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::net::Ipv6Addr;

#[derive(Debug)]
pub enum ImagePrepError {
    Io(String),
    InvalidInput(String),
    CommandFailed(String),
    NotImplemented(String),
}

impl std::fmt::Display for ImagePrepError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ImagePrepError::Io(e) => write!(f, "IO error: {}", e),
            ImagePrepError::InvalidInput(e) => write!(f, "Invalid input: {}", e),
            ImagePrepError::CommandFailed(e) => write!(f, "Command failed: {}", e),
            ImagePrepError::NotImplemented(e) => write!(f, "Not implemented: {}", e),
        }
    }
}

impl std::error::Error for ImagePrepError {}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Flavor {
    Ubuntu,
    Alpine,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetPlanOpts {
    #[serde(default = "default_dhcp4")]
    pub dhcp4: bool,
    #[serde(default)]
    pub dhcp6: bool,
    /// Static IPv6 address to assign in the guest (temporary behavior)
    pub ipv6_addr: Option<String>, // e.g., "400::10/64"
    pub gw6: Option<String>,       // e.g., "400::1"
}

fn default_dhcp4() -> bool {
    true
}

impl Default for NetPlanOpts {
    fn default() -> Self {
        Self {
            dhcp4: true,
            dhcp6: true,
            ipv6_addr: None,
            gw6: None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImagePrepOptions {
    pub flavor: Flavor,
    /// VM id (used for the working directory layout and tap/mac derivations)
    pub id: String,
    /// Optional source path override; defaults to /images/<flavor default filename>
    pub source: Option<String>,
    /// Optional VM target directory; defaults to $HOME/hero/virt/vms/<id>
    pub target_dir: Option<String>,
    /// Netplan options
    #[serde(default)]
    pub net: NetPlanOpts,
    /// Disable cloud-init networking
    #[serde(default = "default_disable_cloud_init_net")]
    pub disable_cloud_init_net: bool,
}

fn default_disable_cloud_init_net() -> bool {
    true
}

fn stable_mac_from_id(id: &str) -> String {
    // Use deterministic FNV-1a (matches the host-side MAC derivation used by the CH builder)
    const FNV_OFFSET: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x00000100000001B3;
    let mut v: u64 = FNV_OFFSET;
    for b in id.as_bytes() {
        v ^= *b as u64;
        v = v.wrapping_mul(FNV_PRIME);
    }
    let b0 = (((v >> 40) & 0xff) as u8 & 0xfe) | 0x02; // locally administered, unicast
    let b1 = ((v >> 32) & 0xff) as u8;
    let b2 = ((v >> 24) & 0xff) as u8;
    let b3 = ((v >> 16) & 0xff) as u8;
    let b4 = ((v >> 8) & 0xff) as u8;
    let b5 = (v & 0xff) as u8;
    format!("{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", b0, b1, b2, b3, b4, b5)
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImagePrepResult {
    pub raw_disk: String,
    pub root_uuid: String,
    pub boot_uuid: String,
    pub work_qcow2: String,
}

fn hero_vm_root() -> String {
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
    format!("{}/hero/virt/vms", home.trim_end_matches('/'))
}

fn default_source_for_flavor(flavor: &Flavor) -> (&'static str, bool) {
    match flavor {
        Flavor::Ubuntu => ("/images/noble-server-cloudimg-amd64.img", true),
        Flavor::Alpine => ("/images/alpine-virt-cloudimg-amd64.qcow2", true),
    }
}

fn fail(e: &str) -> ImagePrepError {
    ImagePrepError::CommandFailed(e.to_string())
}

fn run_script(script: &str) -> Result<sal_process::CommandResult, ImagePrepError> {
    match sal_process::run(script).silent(true).die(false).execute() {
        Ok(res) => {
            if res.success {
                Ok(res)
            } else {
                Err(ImagePrepError::CommandFailed(format!(
                    "{}{}",
                    res.stdout, res.stderr
                )))
            }
        }
        Err(e) => Err(ImagePrepError::CommandFailed(e.to_string())),
    }
}
|
||||
|
||||
/// Prepare a base cloud image for booting under Cloud Hypervisor:
/// - make a per-VM working copy
/// - attach via nbd, mount root/boot
/// - retag UUIDs, update fstab, write a minimal grub.cfg
/// - generate netplan (DHCPv4, static IPv6 placeholder), disable cloud-init net
/// - convert to a raw disk in the VM dir
pub fn image_prepare(opts: &ImagePrepOptions) -> Result<ImagePrepResult, ImagePrepError> {
    // Resolve the source image
    let (def_src, _must_exist) = default_source_for_flavor(&opts.flavor);
    let src = opts.source.clone().unwrap_or_else(|| def_src.to_string());
    if !Path::new(&src).exists() {
        return Err(ImagePrepError::InvalidInput(format!(
            "source image not found: {}",
            src
        )));
    }

    // Resolve the VM dir
    let vm_dir = opts
        .target_dir
        .clone()
        .unwrap_or_else(|| format!("{}/{}", hero_vm_root(), opts.id));
    sal_os::mkdir(&vm_dir).map_err(|e| ImagePrepError::Io(e.to_string()))?;

    // Work qcow2 copy path and mount points
    let work_qcow2 = format!("{}/work.qcow2", vm_dir);
    let raw_path = format!("{}/disk.raw", vm_dir);
    let mnt_root = format!("/mnt/hero-img/{}/root", opts.id);
    let mnt_boot = format!("/mnt/hero-img/{}/boot", opts.id);

    // Only Ubuntu implemented for now
    match opts.flavor {
        Flavor::Ubuntu => {
            // Build a bash script that performs all steps and echoes "RAW|ROOT_UUID|BOOT_UUID" at the end
            let disable_ci_net = opts.disable_cloud_init_net;

            // IPv6 static guest assignment (derived from the mycelium interface) - disabled by default in favor of RA.
            // If HERO_VIRT_IPV6_STATIC_GUEST=true, use static IPv6; else use RA/SLAAC.
            let static_v6 = std::env::var("HERO_VIRT_IPV6_STATIC_GUEST")
                .map(|v| matches!(v.to_lowercase().as_str(), "" | "1" | "true" | "yes"))
                .unwrap_or(false);
            let myc_if =
                std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| "mycelium".into());

            // Discover the host's global mycelium IPv6 in 400::/7 from the interface
            let mut host_v6: Option<Ipv6Addr> = None;
            if static_v6 {
                let cmd = format!("ip -6 addr show dev {}", shell_escape(&myc_if));
                if let Ok(r) = sal_process::run(&cmd).silent(true).die(false).execute() {
                    if r.success {
                        for l in r.stdout.lines() {
                            let lt = l.trim();
                            if lt.starts_with("inet6 ") && lt.contains("scope global") {
                                if let Some(addr_cidr) = lt.split_whitespace().nth(1) {
                                    let addr_only =
                                        addr_cidr.split('/').next().unwrap_or("").trim();
                                    if let Ok(ip) = addr_only.parse::<Ipv6Addr>() {
                                        let seg0 = ip.segments()[0];
                                        if (seg0 & 0xFE00) == 0x0400 {
                                            host_v6 = Some(ip);
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

            // Derive the per-host /64 from mycelium and a deterministic per-VM guest address
            let mut np_v6_block = String::new();
            let mut accept_ra = String::new();
            let mut dhcp6_effective = opts.net.dhcp6;
            if static_v6 {
                if let Some(h) = host_v6 {
                    let seg = h.segments();
                    // Router = P::2; Guest address = P::<stable suffix>
                    let mut hasher = DefaultHasher::new();
                    opts.id.hash(&mut hasher);
                    let mut suffix = (hasher.finish() as u16) & 0xfffe;
                    if suffix == 0 || suffix == 2 {
                        suffix = 0x100;
                    }
                    let guest_ip =
                        Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, suffix).to_string();
                    let gw_ip =
                        Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, 2).to_string();

                    // Inject a YAML block for static v6
                    np_v6_block = format!(
                        "\n      addresses:\n        - {}/64\n      routes:\n        - to: \"::/0\"\n          via: {}\n        - to: \"400::/7\"\n          via: {}",
                        guest_ip, gw_ip, gw_ip
                    );
                    // Disable dhcp6 when we provide a static address
                    dhcp6_effective = false;
                }
            } else {
                // Use RA for IPv6
                accept_ra = "\n      accept-ra: true".to_string();
                dhcp6_effective = false;
            }

            // Keep the script small and robust; avoid brace-heavy awk to simplify escaping.
            // Compute the stable MAC (must match what vm_start() uses) and use it to match the NIC in netplan.
            let vm_mac = stable_mac_from_id(&opts.id);
            let script = format!(
                r#"#!/bin/bash -e
set -euo pipefail

SRC={src}
VM_DIR={vm_dir}
WORK={work}
MNT_ROOT={mnt_root}
MNT_BOOT={mnt_boot}
RAW={raw}

mkdir -p "$VM_DIR"
mkdir -p "$(dirname "$MNT_ROOT")"
mkdir -p "$MNT_ROOT" "$MNT_BOOT"

# Make a per-VM working copy (reflink if supported)
cp --reflink=auto -f "$SRC" "$WORK"

# Load NBD with sufficient partitions
modprobe nbd max_part=63

# Pick a free /dev/nbdX and connect the qcow2
NBD=""
for i in $(seq 0 15); do
  DEV="/dev/nbd$i"
  # Skip devices that have any mounted partitions (avoid reusing in-use NBDs)
  if findmnt -rn -S "$DEV" >/dev/null 2>&1 || \
     findmnt -rn -S "${{DEV}}p1" >/dev/null 2>&1 || \
     findmnt -rn -S "${{DEV}}p14" >/dev/null 2>&1 || \
     findmnt -rn -S "${{DEV}}p15" >/dev/null 2>&1 || \
     findmnt -rn -S "${{DEV}}p16" >/dev/null 2>&1; then
    continue
  fi
  # Ensure it's not connected (ignore errors if already disconnected)
  qemu-nbd --disconnect "$DEV" >/dev/null 2>&1 || true
  if qemu-nbd --format=qcow2 --connect="$DEV" "$WORK"; then
    NBD="$DEV"
    break
  fi
done
if [ -z "$NBD" ]; then
  echo "No free /dev/nbdX device available" >&2
  exit 1
fi

echo "Selected NBD: $NBD" >&2

# Settle and probe partitions
udevadm settle >/dev/null 2>&1 || true
blockdev --rereadpt "$NBD" >/dev/null 2>&1 || true
partprobe "$NBD" >/dev/null 2>&1 || true
for t in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do
  if [ -b "${{NBD}}p1" ]; then
    sz=$(blockdev --getsize64 "${{NBD}}p1" 2>/dev/null || echo 0)
    if [ "$sz" -gt 0 ]; then
      break
    fi
  fi
  sleep 0.4
  udevadm settle >/dev/null 2>&1 || true
  blockdev --rereadpt "$NBD" >/dev/null 2>&1 || true
  partprobe "$NBD" >/dev/null 2>&1 || true
done

ROOT_DEV="${{NBD}}p1"
# Prefer p16, else p15
if [ -b "${{NBD}}p16" ]; then
  BOOT_DEV="${{NBD}}p16"
elif [ -b "${{NBD}}p15" ]; then
  BOOT_DEV="${{NBD}}p15"
else
  echo "Boot partition not found on $NBD (tried p16 and p15)" >&2
  exit 33
fi

echo "ROOT_DEV=$ROOT_DEV BOOT_DEV=$BOOT_DEV" >&2

if [ ! -b "$ROOT_DEV" ]; then
  echo "Root partition not found: $ROOT_DEV" >&2
  exit 32
fi

cleanup() {{
  set +e
  umount "$MNT_BOOT" 2>/dev/null || true
  umount "$MNT_ROOT" 2>/dev/null || true
  [ -n "$NBD" ] && qemu-nbd --disconnect "$NBD" 2>/dev/null || true
  rmmod nbd 2>/dev/null || true
}}
trap cleanup EXIT

# Ensure partitions are readable before mounting
for t in 1 2 3 4 5 6 7 8; do
  szr=$(blockdev --getsize64 "$ROOT_DEV" 2>/dev/null || echo 0)
  szb=$(blockdev --getsize64 "$BOOT_DEV" 2>/dev/null || echo 0)
  if [ "$szr" -gt 0 ] && [ "$szb" -gt 0 ] && blkid "$ROOT_DEV" >/dev/null 2>&1; then
    break
  fi
  sleep 0.4
  udevadm settle >/dev/null 2>&1 || true
  blockdev --rereadpt "$NBD" >/dev/null 2>&1 || true
  partprobe "$NBD" >/dev/null 2>&1 || true
done

# Mount and mutate (with retries to avoid races)
mounted_root=0
for t in 1 2 3 4 5 6 7 8 9 10; do
  if mount "$ROOT_DEV" "$MNT_ROOT"; then
    mounted_root=1
    break
  fi
  sleep 0.5
  udevadm settle >/dev/null 2>&1 || true
  partprobe "$NBD" >/dev/null 2>&1 || true
done
if [ "$mounted_root" -ne 1 ]; then
  echo "Failed to mount root $ROOT_DEV" >&2
  exit 32
fi

mounted_boot=0
for t in 1 2 3 4 5; do
  if mount "$BOOT_DEV" "$MNT_BOOT"; then
    mounted_boot=1
    break
  fi
  sleep 0.5
  udevadm settle >/dev/null 2>&1 || true
  partprobe "$NBD" >/dev/null 2>&1 || true
done
if [ "$mounted_boot" -ne 1 ]; then
  echo "Failed to mount boot $BOOT_DEV" >&2
  exit 33
fi

# Change UUIDs (best-effort)
tune2fs -U random "$ROOT_DEV" || true
tune2fs -U random "$BOOT_DEV" || true

ROOT_UUID=$(blkid -o value -s UUID "$ROOT_DEV")
BOOT_UUID=$(blkid -o value -s UUID "$BOOT_DEV")

# Update fstab
sed -i "s/UUID=[a-f0-9-]* \\/ /UUID=$ROOT_UUID \\/ /" "$MNT_ROOT/etc/fstab"
sed -i "s/UUID=[a-f0-9-]* \\/boot /UUID=$BOOT_UUID \\/boot /" "$MNT_ROOT/etc/fstab"

# Minimal grub.cfg (note: braces escaped for Rust format!)
mkdir -p "$MNT_BOOT/grub"
KERNEL=$(ls -1 "$MNT_BOOT"/vmlinuz-* | sort -V | tail -n1 | xargs -n1 basename)
INITRD=$(ls -1 "$MNT_BOOT"/initrd.img-* | sort -V | tail -n1 | xargs -n1 basename)
cat > "$MNT_BOOT/grub/grub.cfg" << EOF
set default=0
set timeout=3
menuentry 'Ubuntu Cloud' {{
    insmod part_gpt
    insmod ext2
    insmod gzio
    search --no-floppy --fs-uuid --set=root $BOOT_UUID
    linux /$KERNEL root=/dev/vda1 ro console=ttyS0
    initrd /$INITRD
}}
EOF

# Netplan config
rm -f "$MNT_ROOT"/etc/netplan/*.yaml
mkdir -p "$MNT_ROOT"/etc/netplan
cat > "$MNT_ROOT/etc/netplan/01-netconfig.yaml" << EOF
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      match:
        macaddress: {vm_mac}
      set-name: eth0
      dhcp4: {dhcp4}
      dhcp6: {dhcp6}{accept_ra}{np_v6_block}
      nameservers:
        addresses: [8.8.8.8, 1.1.1.1, 2001:4860:4860::8888]
EOF
# Enable SSH password authentication and set a default password for 'ubuntu'
mkdir -p "$MNT_ROOT/etc/cloud/cloud.cfg.d"
printf '%s\n' 'ssh_pwauth: true' > "$MNT_ROOT/etc/cloud/cloud.cfg.d/99-ssh-password-auth.cfg"

mkdir -p "$MNT_ROOT/etc/ssh/sshd_config.d"
cat > "$MNT_ROOT/etc/ssh/sshd_config.d/99-hero-password-auth.conf" << EOF
# Hero test: force password auth, explicitly disable pubkey to avoid clients auto-trying keys
PasswordAuthentication yes
KbdInteractiveAuthentication yes
UsePAM yes
PubkeyAuthentication no
EOF

# Remove any AuthenticationMethods directives that might force publickey-only
if [ -f "$MNT_ROOT/etc/ssh/sshd_config" ]; then
  sed -i -E 's/^[[:space:]]*AuthenticationMethods[[:space:]].*$/# hero: removed AuthenticationMethods/' "$MNT_ROOT/etc/ssh/sshd_config" 2>/dev/null || true
fi
if [ -d "$MNT_ROOT/etc/ssh/sshd_config.d" ]; then
  find "$MNT_ROOT/etc/ssh/sshd_config.d" -type f -name '*.conf' -exec sed -i -E 's/^[[:space:]]*AuthenticationMethods[[:space:]].*$/# hero: removed AuthenticationMethods/' {{}} + 2>/dev/null || true
fi

# Set the password for the default user 'ubuntu'
if chroot "$MNT_ROOT" getent passwd ubuntu >/dev/null 2>&1; then
  echo 'ubuntu:ubuntu' | chroot "$MNT_ROOT" /usr/sbin/chpasswd || true
fi
# Ensure openssh-server is present (some cloud images may omit it)
# Ensure the SSH service is enabled and keys are generated on boot
chroot "$MNT_ROOT" systemctl unmask ssh 2>/dev/null || true
chroot "$MNT_ROOT" systemctl enable ssh 2>/dev/null || true
chroot "$MNT_ROOT" systemctl enable ssh-keygen.service 2>/dev/null || true

# Ensure sshd listens on both IPv4 and IPv6 explicitly
cat > "$MNT_ROOT/etc/ssh/sshd_config.d/99-hero-address-family.conf" << EOF
AddressFamily any
ListenAddress ::
ListenAddress 0.0.0.0
EOF

# Ensure sshd waits for the network to be online (helps IPv6 readiness)
mkdir -p "$MNT_ROOT/etc/systemd/system/ssh.service.d"
cat > "$MNT_ROOT/etc/systemd/system/ssh.service.d/override.conf" << 'EOF'
[Unit]
After=network-online.target
Wants=network-online.target
EOF

# Ensure sshd_config includes conf.d so our drop-ins are loaded
if ! grep -qE '^[[:space:]]*Include[[:space:]]+/etc/ssh/sshd_config\.d/\*\.conf' "$MNT_ROOT/etc/ssh/sshd_config"; then
  echo 'Include /etc/ssh/sshd_config.d/*.conf' >> "$MNT_ROOT/etc/ssh/sshd_config"
fi

# Ensure required packages are present before user/password changes
cp -f /etc/resolv.conf "$MNT_ROOT/etc/resolv.conf" 2>/dev/null || true
chroot "$MNT_ROOT" bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends passwd openssh-server" || true

# Remove the previously forced AuthenticationMethods drop-in (old)
rm -f "$MNT_ROOT/etc/ssh/sshd_config.d/99-hero-authmethods.conf"

# Force explicit password-only auth to avoid publickey-only negotiation from the server
# Removed AuthenticationMethods to avoid config issues

# Ensure our overrides win even if the main sshd_config sets different values after Include
cat >> "$MNT_ROOT/etc/ssh/sshd_config" << 'EOF'
# hero override (appended last)
PasswordAuthentication yes
KbdInteractiveAuthentication yes
UsePAM yes
PubkeyAuthentication no
EOF

# If UFW is present, allow SSH and disable the firewall (for tests)
if chroot "$MNT_ROOT" command -v ufw >/dev/null 2>&1; then
  chroot "$MNT_ROOT" ufw allow OpenSSH || true
  chroot "$MNT_ROOT" ufw disable || true
fi
if ! chroot "$MNT_ROOT" test -x /usr/sbin/sshd; then
  cp -f /etc/resolv.conf "$MNT_ROOT/etc/resolv.conf" 2>/dev/null || true
  chroot "$MNT_ROOT" bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends openssh-server" || true
fi
# Ensure user management utilities are present (useradd, chpasswd)
if ! chroot "$MNT_ROOT" command -v /usr/sbin/useradd >/dev/null 2>&1 || \
   ! chroot "$MNT_ROOT" command -v /usr/sbin/chpasswd >/dev/null 2>&1; then
  cp -f /etc/resolv.conf "$MNT_ROOT/etc/resolv.conf" 2>/dev/null || true
  chroot "$MNT_ROOT" bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y passwd adduser" || true
fi
# Ensure the default user 'ubuntu' exists (fallback for minimal images)
if ! chroot "$MNT_ROOT" id -u ubuntu >/dev/null 2>&1; then
  chroot "$MNT_ROOT" /usr/sbin/useradd -m -s /bin/bash ubuntu || true
  echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > "$MNT_ROOT/etc/sudoers.d/90-ubuntu" || true
  chmod 0440 "$MNT_ROOT/etc/sudoers.d/90-ubuntu" || true
fi

# Re-assert the password (covers both existing and newly created users)
if chroot "$MNT_ROOT" getent passwd ubuntu >/dev/null 2>&1; then
  echo 'ubuntu:ubuntu' | chroot "$MNT_ROOT" /usr/sbin/chpasswd || true
fi
# Ensure the account is unlocked (some cloud images ship locked local users)
chroot "$MNT_ROOT" /usr/bin/passwd -u ubuntu 2>/dev/null || true
chroot "$MNT_ROOT" /usr/sbin/usermod -U ubuntu 2>/dev/null || true

# Robustly set the ubuntu password offline; generate the hash on the host and set it inside the chroot
UBUNTU_HASH="$(openssl passwd -6 'ubuntu' 2>/dev/null || python3 - <<'PY'
import crypt
print(crypt.crypt('ubuntu', crypt.mksalt(crypt.METHOD_SHA512)))
PY
)"
if [ -n "$UBUNTU_HASH" ] && chroot "$MNT_ROOT" getent passwd ubuntu >/dev/null 2>&1; then
  printf 'ubuntu:%s\n' "$UBUNTU_HASH" | chroot "$MNT_ROOT" /usr/sbin/chpasswd -e || true
  # Ensure the account is not expired/locked and has sane aging
  chroot "$MNT_ROOT" /usr/bin/chage -I -1 -m 0 -M 99999 -E -1 ubuntu 2>/dev/null || true
  chroot "$MNT_ROOT" /usr/bin/passwd -u ubuntu 2>/dev/null || true
  chroot "$MNT_ROOT" /usr/sbin/usermod -U ubuntu 2>/dev/null || true
  # Debug: show status and the shadow entry (for test logs)
  chroot "$MNT_ROOT" /usr/bin/passwd -S ubuntu 2>/dev/null || true
  chroot "$MNT_ROOT" bash -c "grep '^ubuntu:' /etc/shadow || true" 2>/dev/null || true
fi

# Also set the root password and allow root login for test debugging
if chroot "$MNT_ROOT" getent passwd root >/dev/null 2>&1; then
  echo 'root:root' | chroot "$MNT_ROOT" /usr/sbin/chpasswd || true
  chroot "$MNT_ROOT" /usr/bin/passwd -u root 2>/dev/null || true
  chroot "$MNT_ROOT" /usr/bin/chage -I -1 -m 0 -M 99999 -E -1 root 2>/dev/null || true
fi

# Pre-generate host SSH keys so sshd can start immediately
chroot "$MNT_ROOT" ssh-keygen -A 2>/dev/null || true
mkdir -p "$MNT_ROOT/var/run/sshd"

# Ensure sshd runs as a regular service, not via socket activation (binds IPv4+IPv6)
chroot "$MNT_ROOT" systemctl disable --now ssh.socket 2>/dev/null || true
chroot "$MNT_ROOT" systemctl mask ssh.socket 2>/dev/null || true
chroot "$MNT_ROOT" systemctl enable ssh.service 2>/dev/null || true
chroot "$MNT_ROOT" systemctl restart ssh.service 2>/dev/null || true

# Disable cloud-init networking (optional but default)
if [ "{disable_ci_net}" = "true" ]; then
  mkdir -p "$MNT_ROOT/etc/cloud/cloud.cfg.d"
  echo "network: {{config: disabled}}" > "$MNT_ROOT/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg"
fi

# Fully disable cloud-init on first boot for deterministic tests
mkdir -p "$MNT_ROOT/etc/cloud"
: > "$MNT_ROOT/etc/cloud/cloud-init.disabled"

# Belt-and-braces: mask cloud-init services offline (no systemd required)
mkdir -p "$MNT_ROOT/etc/systemd/system"
for s in cloud-init.service cloud-config.service cloud-final.service cloud-init-local.service; do
  ln -sf /dev/null "$MNT_ROOT/etc/systemd/system/$s" || true
done

# First-boot fallback: ensure ubuntu:ubuntu credentials and SSH password auth
mkdir -p "$MNT_ROOT/usr/local/sbin"
cat > "$MNT_ROOT/usr/local/sbin/hero-ensure-ubuntu-cred.sh" << 'EOS'
#!/bin/bash
set -euo pipefail

# Guarantee the ubuntu user exists
if ! id -u ubuntu >/dev/null 2>&1; then
  useradd -m -s /bin/bash ubuntu || true
fi

# Ensure sudo without password
mkdir -p /etc/sudoers.d
echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-ubuntu
chmod 0440 /etc/sudoers.d/90-ubuntu

# Set password 'ubuntu' (hashed)
UBUNTU_HASH="$(openssl passwd -6 'ubuntu' 2>/dev/null || python3 - <<'PY'
import crypt
print(crypt.crypt('ubuntu', crypt.mksalt(crypt.METHOD_SHA512)))
PY
)"
if [ -n "$UBUNTU_HASH" ]; then
  printf 'ubuntu:%s\n' "$UBUNTU_HASH" | chpasswd -e || true
  chage -I -1 -m 0 -M 99999 -E -1 ubuntu 2>/dev/null || true
  passwd -u ubuntu 2>/dev/null || true
  usermod -U ubuntu 2>/dev/null || true
fi

# SSHD password-auth settings
mkdir -p /etc/ssh/sshd_config.d
cat > /etc/ssh/sshd_config.d/99-hero-password-auth.conf << EOF
PasswordAuthentication yes
KbdInteractiveAuthentication yes
UsePAM yes
PubkeyAuthentication no
EOF

cat > /etc/ssh/sshd_config.d/99-hero-address-family.conf << EOF
AddressFamily any
ListenAddress ::
ListenAddress 0.0.0.0
EOF

# Ensure sshd waits for network-online at first boot as well
mkdir -p /etc/systemd/system/ssh.service.d
cat > /etc/systemd/system/ssh.service.d/override.conf << 'EOF'
[Unit]
After=network-online.target
Wants=network-online.target
EOF

# Remove any AuthenticationMethods directives from drop-ins that could conflict
if [ -f /etc/ssh/sshd_config ]; then
  sed -i -E 's/^[[:space:]]*AuthenticationMethods[[:space:]].*$/# hero: removed AuthenticationMethods/' /etc/ssh/sshd_config 2>/dev/null || true
fi
if [ -d /etc/ssh/sshd_config.d ]; then
  find /etc/ssh/sshd_config.d -type f -name '*.conf' -exec sed -i -E 's/^[[:space:]]*AuthenticationMethods[[:space:]].*$/# hero: removed AuthenticationMethods/' {{}} + 2>/dev/null || true
fi

# Ensure Include covers drop-ins
grep -qE '^[[:space:]]*Include[[:space:]]+/etc/ssh/sshd_config\.d/\*\.conf' /etc/ssh/sshd_config || \
  echo 'Include /etc/ssh/sshd_config.d/*.conf' >> /etc/ssh/sshd_config

# Enable and restart sshd
if command -v systemctl >/dev/null 2>&1; then
  systemctl daemon-reload || true
  # Prefer running sshd as a service so it honors the IPv6 ListenAddress from sshd_config
  systemctl disable --now ssh.socket 2>/dev/null || true
  systemctl mask ssh.socket 2>/dev/null || true
  systemctl enable --now ssh.service 2>/dev/null || true
  systemctl restart ssh.service 2>/dev/null || true
  # Apply netplan in case the renderer did not start IPv6 yet
  command -v netplan >/dev/null 2>&1 && netplan apply 2>/dev/null || true
else
  service ssh restart || true
fi

# Mark completion to avoid reruns if the unit has a condition
mkdir -p /var/lib/hero
: > /var/lib/hero/cred-ensured
EOS
chmod 0755 "$MNT_ROOT/usr/local/sbin/hero-ensure-ubuntu-cred.sh"

# Install a systemd unit to run on first boot
cat > "$MNT_ROOT/etc/systemd/system/hero-ensure-ubuntu-cred.service" << 'EOF'
[Unit]
Description=Hero: ensure ubuntu:ubuntu and SSH password auth
After=local-fs.target
Wants=local-fs.target
ConditionPathExists=!/var/lib/hero/cred-ensured

[Service]
Type=oneshot
ExecStart=/usr/local/sbin/hero-ensure-ubuntu-cred.sh

[Install]
WantedBy=multi-user.target
EOF

# Enable via symlink and best-effort systemctl in the chroot
mkdir -p "$MNT_ROOT/etc/systemd/system/multi-user.target.wants"
ln -sf "/etc/systemd/system/hero-ensure-ubuntu-cred.service" "$MNT_ROOT/etc/systemd/system/multi-user.target.wants/hero-ensure-ubuntu-cred.service" || true
chroot "$MNT_ROOT" systemctl enable hero-ensure-ubuntu-cred.service 2>/dev/null || true

# Convert the prepared image to raw (ensure the source is not locked)
umount "$MNT_BOOT" 2>/dev/null || true
umount "$MNT_ROOT" 2>/dev/null || true
if [ -n "$NBD" ]; then
  qemu-nbd --disconnect "$NBD" 2>/dev/null || true
  rmmod nbd 2>/dev/null || true
fi
rm -f "$RAW"
qemu-img convert -U -f qcow2 -O raw "$WORK" "$RAW"

# Output the result triple ONLY on stdout, then prevent any further trap output
echo "RESULT:$RAW|$ROOT_UUID|$BOOT_UUID"
trap - EXIT
exit 0
"#,
                src = shell_escape(&src),
                vm_dir = shell_escape(&vm_dir),
                work = shell_escape(&work_qcow2),
                mnt_root = shell_escape(&mnt_root),
                mnt_boot = shell_escape(&mnt_boot),
                raw = shell_escape(&raw_path),
                vm_mac = vm_mac,
                dhcp4 = if opts.net.dhcp4 { "true" } else { "false" },
                dhcp6 = if dhcp6_effective { "true" } else { "false" },
                accept_ra = accept_ra,
                np_v6_block = np_v6_block,
                disable_ci_net = if disable_ci_net { "true" } else { "false" }
            );

            // The image prep script is executed silently
            let res = run_script(&script)?;
            // Prefer a RESULT:-prefixed line (robust against extra stdout noise)
            let mut marker: Option<String> = None;
            for l in res.stdout.lines().rev() {
                let lt = l.trim();
                if let Some(rest) = lt.strip_prefix("RESULT:") {
                    marker = Some(rest.trim().to_string());
                    break;
                }
            }
            // Fallback: the last line that looks like A|B|C
            let line = if let Some(x) = marker {
                x
            } else {
                let mut cand: Option<String> = None;
                for l in res.stdout.lines().rev() {
                    let lt = l.trim();
                    if lt.split('|').count() == 3 {
                        cand = Some(lt.to_string());
                        break;
                    }
                }
                cand.ok_or_else(|| fail("no RAW|ROOT_UUID|BOOT_UUID line found in script output"))?
            };
            let parts: Vec<_> = line.split('|').map(|s| s.trim().to_string()).collect();
            if parts.len() != 3 {
                return Err(fail(&format!(
                    "unexpected output from image_prepare script, expected RAW|ROOT_UUID|BOOT_UUID, got: {}",
                    line
                )));
            }
            Ok(ImagePrepResult {
                raw_disk: parts[0].clone(),
                root_uuid: parts[1].clone(),
                boot_uuid: parts[2].clone(),
                work_qcow2,
            })
        }
        Flavor::Alpine => Err(ImagePrepError::NotImplemented(
            "Alpine image_prepare not implemented yet".into(),
        )),
    }
}

fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".into();
    }
    if s.chars()
        .all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
    {
        return s.into();
    }
    let mut out = String::from("'");
    for ch in s.chars() {
        if ch == '\'' {
            out.push_str("'\"'\"'");
        } else {
            out.push(ch);
        }
    }
    out.push('\'');
    out
}
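As a usage sketch (hypothetical caller, illustrative id and paths): prepare an Ubuntu image for one VM with the defaults, then use the resulting raw disk.

fn prepare_demo_vm() -> Result<ImagePrepResult, ImagePrepError> {
    let opts = ImagePrepOptions {
        flavor: Flavor::Ubuntu,
        id: "demo1".to_string(),
        source: None,     // falls back to /images/noble-server-cloudimg-amd64.img
        target_dir: None, // falls back to $HOME/hero/virt/vms/demo1
        net: NetPlanOpts::default(),
        disable_cloud_init_net: true,
    };
    let res = image_prepare(&opts)?;
    println!("raw disk: {} (root UUID {})", res.raw_disk, res.root_uuid);
    Ok(res)
}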
@@ -24,6 +24,10 @@
pub mod buildah;
pub mod nerdctl;
pub mod rfs;
pub mod qcow2;
pub mod cloudhv;
pub mod hostcheck;
pub mod image_prep;

pub mod rhai;

@@ -1,3 +1,5 @@
pub mod buildah;
pub mod nerdctl;
pub mod rfs;
pub mod qcow2;
pub mod cloudhv;

200
packages/system/virt/src/qcow2/mod.rs
Normal file
@@ -0,0 +1,200 @@
use serde_json::Value;
use std::error::Error;
use std::fmt;
use std::fs;
use std::path::Path;

use sal_os;
use sal_process::{self, RunError};

/// Error type for qcow2 operations
#[derive(Debug)]
pub enum Qcow2Error {
    /// Failed to execute a system command
    CommandExecutionFailed(String),
    /// Command executed but returned non-zero or failed semantics
    CommandFailed(String),
    /// JSON parsing error
    JsonParseError(String),
    /// IO error (filesystem)
    IoError(String),
    /// Dependency missing or invalid input
    Other(String),
}

impl fmt::Display for Qcow2Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Qcow2Error::CommandExecutionFailed(e) => write!(f, "Command execution failed: {}", e),
            Qcow2Error::CommandFailed(e) => write!(f, "{}", e),
            Qcow2Error::JsonParseError(e) => write!(f, "JSON parse error: {}", e),
            Qcow2Error::IoError(e) => write!(f, "IO error: {}", e),
            Qcow2Error::Other(e) => write!(f, "{}", e),
        }
    }
}

impl Error for Qcow2Error {}

fn from_run_error(e: RunError) -> Qcow2Error {
    Qcow2Error::CommandExecutionFailed(e.to_string())
}

fn ensure_parent_dir(path: &str) -> Result<(), Qcow2Error> {
    if let Some(parent) = Path::new(path).parent() {
        fs::create_dir_all(parent).map_err(|e| Qcow2Error::IoError(e.to_string()))?;
    }
    Ok(())
}

fn ensure_qemu_img() -> Result<(), Qcow2Error> {
    if sal_process::which("qemu-img").is_none() {
        return Err(Qcow2Error::Other(
            "qemu-img not found on PATH. Please install qemu-utils (Debian/Ubuntu) or the QEMU tools for your distro.".to_string(),
        ));
    }
    Ok(())
}

fn run_quiet(cmd: &str) -> Result<sal_process::CommandResult, Qcow2Error> {
    sal_process::run(cmd)
        .silent(true)
        .execute()
        .map_err(from_run_error)
        .and_then(|res| {
            if res.success {
                Ok(res)
            } else {
                Err(Qcow2Error::CommandFailed(format!(
                    "Command failed (code {}): {}\n{}",
                    res.code, cmd, res.stderr
                )))
            }
        })
}

/// Create a qcow2 image at `path` with a given virtual size (in GiB)
pub fn create(path: &str, size_gb: i64) -> Result<String, Qcow2Error> {
    ensure_qemu_img()?;
    if size_gb <= 0 {
        return Err(Qcow2Error::Other(
            "size_gb must be > 0 for qcow2.create".to_string(),
        ));
    }
    ensure_parent_dir(path)?;
    let cmd = format!("qemu-img create -f qcow2 {} {}G", path, size_gb);
    run_quiet(&cmd)?;
    Ok(path.to_string())
}

/// Return qemu-img info as a JSON value
pub fn info(path: &str) -> Result<Value, Qcow2Error> {
    ensure_qemu_img()?;
    if !Path::new(path).exists() {
        return Err(Qcow2Error::IoError(format!("Image not found: {}", path)));
    }
    let cmd = format!("qemu-img info --output=json {}", path);
    let res = run_quiet(&cmd)?;
    serde_json::from_str::<Value>(&res.stdout).map_err(|e| Qcow2Error::JsonParseError(e.to_string()))
}

/// Create an offline snapshot on a qcow2 image
pub fn snapshot_create(path: &str, name: &str) -> Result<(), Qcow2Error> {
    ensure_qemu_img()?;
    if name.trim().is_empty() {
        return Err(Qcow2Error::Other("snapshot name cannot be empty".to_string()));
    }
    let cmd = format!("qemu-img snapshot -c {} {}", name, path);
    run_quiet(&cmd).map(|_| ())
}

/// Delete a snapshot on a qcow2 image
pub fn snapshot_delete(path: &str, name: &str) -> Result<(), Qcow2Error> {
    ensure_qemu_img()?;
    if name.trim().is_empty() {
        return Err(Qcow2Error::Other("snapshot name cannot be empty".to_string()));
    }
    let cmd = format!("qemu-img snapshot -d {} {}", name, path);
    run_quiet(&cmd).map(|_| ())
}

/// Snapshot representation (subset of qemu-img info snapshots)
#[derive(Debug, Clone)]
pub struct Qcow2Snapshot {
    pub id: Option<String>,
    pub name: Option<String>,
    pub vm_state_size: Option<i64>,
    pub date_sec: Option<i64>,
    pub date_nsec: Option<i64>,
    pub vm_clock_nsec: Option<i64>,
}

/// List snapshots on a qcow2 image (offline)
pub fn snapshot_list(path: &str) -> Result<Vec<Qcow2Snapshot>, Qcow2Error> {
    let v = info(path)?;
    let mut out = Vec::new();
    if let Some(snaps) = v.get("snapshots").and_then(|s| s.as_array()) {
        for s in snaps {
            let snap = Qcow2Snapshot {
                id: s.get("id").and_then(|x| x.as_str()).map(|s| s.to_string()),
                name: s.get("name").and_then(|x| x.as_str()).map(|s| s.to_string()),
                vm_state_size: s.get("vm-state-size").and_then(|x| x.as_i64()),
                date_sec: s.get("date-sec").and_then(|x| x.as_i64()),
                date_nsec: s.get("date-nsec").and_then(|x| x.as_i64()),
                vm_clock_nsec: s.get("vm-clock-nsec").and_then(|x| x.as_i64()),
            };
            out.push(snap);
        }
    }
    Ok(out)
}

/// Result for building the base image
#[derive(Debug, Clone)]
pub struct BuildBaseResult {
    pub base_image_path: String,
    pub snapshot: String,
    pub url: String,
    pub resized_to_gb: Option<i64>,
}

/// Build/download the Ubuntu 24.04 base image (Noble cloud image), optionally resize it, and create a base snapshot
pub fn build_ubuntu_24_04_base(dest_dir: &str, size_gb: Option<i64>) -> Result<BuildBaseResult, Qcow2Error> {
    ensure_qemu_img()?;

    // Ensure the destination directory exists
    sal_os::mkdir(dest_dir).map_err(|e| Qcow2Error::IoError(e.to_string()))?;

    // Canonical Ubuntu Noble cloud image (amd64)
    let url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img";

    // Build the destination path
    let dest_dir_sanitized = dest_dir.trim_end_matches('/');
    let dest_path = format!("{}/noble-server-cloudimg-amd64.img", dest_dir_sanitized);

    // Download if not present
    let path_obj = Path::new(&dest_path);
    if !path_obj.exists() {
        // 50MB minimum for sanity; the actual image is much larger
        sal_os::download_file(url, &dest_path, 50_000)
            .map_err(|e| Qcow2Error::IoError(e.to_string()))?;
    }

    // Resize if requested
    if let Some(sz) = size_gb {
        if sz > 0 {
            let cmd = format!("qemu-img resize {} {}G", dest_path, sz);
            run_quiet(&cmd)?;
        }
    }

    // Create the "base" snapshot
    snapshot_create(&dest_path, "base")?;

    Ok(BuildBaseResult {
        base_image_path: dest_path,
        snapshot: "base".to_string(),
        url: url.to_string(),
        resized_to_gb: size_gb.filter(|v| *v > 0),
    })
}
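A hypothetical end-to-end use of the helpers above (the /var/lib/hero paths are illustrative): fetch and resize the Noble base image once, then create a scratch disk and snapshot it before first use.

fn provision_disks() -> Result<(), Qcow2Error> {
    // Download (if missing), grow to 20 GiB, and tag a "base" snapshot
    let base = build_ubuntu_24_04_base("/var/lib/hero/images", Some(20))?;
    println!("base at {} (snapshot '{}')", base.base_image_path, base.snapshot);

    // A 10 GiB scratch disk with an offline snapshot to roll back to
    let scratch = create("/var/lib/hero/images/scratch.qcow2", 10)?;
    snapshot_create(&scratch, "pristine")?;
    for s in snapshot_list(&scratch)? {
        println!("snapshot: {:?}", s.name);
    }
    Ok(())
}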
@@ -8,6 +8,11 @@ use rhai::{Engine, EvalAltResult};
pub mod buildah;
pub mod nerdctl;
pub mod rfs;
pub mod qcow2;
pub mod cloudhv;
pub mod hostcheck;
pub mod image_prep;
pub mod cloudhv_builder;

/// Register all Virt module functions with the Rhai engine
///
@@ -28,6 +33,21 @@ pub fn register_virt_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult
    // Register RFS module functions
    rfs::register_rfs_module(engine)?;

    // Register QCOW2 module functions
    qcow2::register_qcow2_module(engine)?;

    // Register Cloud Hypervisor module functions
    cloudhv::register_cloudhv_module(engine)?;

    // Register the host dependency checker
    hostcheck::register_hostcheck_module(engine)?;

    // Register image preparation functions
    image_prep::register_image_prep_module(engine)?;

    // Register the Cloud Hypervisor builder and the easy wrapper
    cloudhv_builder::register_cloudhv_builder_module(engine)?;

    Ok(())
}

@@ -35,3 +55,5 @@ pub fn register_virt_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult
pub use buildah::{bah_new, register_bah_module};
pub use nerdctl::register_nerdctl_module;
pub use rfs::register_rfs_module;
pub use qcow2::register_qcow2_module;
pub use cloudhv::register_cloudhv_module;
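A minimal host-side setup sketch (hypothetical, not in this diff): one call wires every virt function registered above into a fresh engine before any script runs.

fn make_virt_engine() -> Result<rhai::Engine, Box<rhai::EvalAltResult>> {
    let mut engine = rhai::Engine::new();
    // Registers buildah, nerdctl, rfs, qcow2, cloudhv, hostcheck,
    // image_prep and the cloudhv builder in one go
    register_virt_module(&mut engine)?;
    Ok(engine)
}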
262
packages/system/virt/src/rhai/cloudhv.rs
Normal file
@@ -0,0 +1,262 @@
use crate::cloudhv;
use crate::cloudhv::{VmRecord, VmRuntime, VmSpec};
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};

// Error adapter
fn hv_to_rhai<T>(r: Result<T, cloudhv::CloudHvError>) -> Result<T, Box<EvalAltResult>> {
    r.map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("cloudhv error: {}", e).into(),
            rhai::Position::NONE,
        ))
    })
}

// Map conversions

fn map_to_vmspec(spec: Map) -> Result<VmSpec, Box<EvalAltResult>> {
    let id = must_get_string(&spec, "id")?;
    let kernel_path = get_string(&spec, "kernel_path");
    let initramfs_path = get_string(&spec, "initramfs_path");
    let firmware_path = get_string(&spec, "firmware_path");
    let disk_path = must_get_string(&spec, "disk_path")?;
    let api_socket = get_string(&spec, "api_socket").unwrap_or_else(|| "".to_string());
    let vcpus = get_int(&spec, "vcpus").unwrap_or(1) as u32;
    let memory_mb = get_int(&spec, "memory_mb").unwrap_or(512) as u32;
    let cmdline = get_string(&spec, "cmdline");
    let extra_args = get_string_array(&spec, "extra_args");

    Ok(VmSpec {
        id,
        kernel_path,
        initramfs_path,
        firmware_path,
        disk_path,
        api_socket,
        vcpus,
        memory_mb,
        cmdline,
        extra_args,
        net_profile: None,
    })
}

fn vmspec_to_map(s: &VmSpec) -> Map {
    let mut m = Map::new();
    m.insert("id".into(), s.id.clone().into());
    if let Some(k) = &s.kernel_path {
        m.insert("kernel_path".into(), k.clone().into());
    } else {
        m.insert("kernel_path".into(), Dynamic::UNIT);
    }
    if let Some(ir) = &s.initramfs_path {
        m.insert("initramfs_path".into(), ir.clone().into());
    } else {
        m.insert("initramfs_path".into(), Dynamic::UNIT);
    }
    if let Some(fw) = &s.firmware_path {
        m.insert("firmware_path".into(), fw.clone().into());
    } else {
        m.insert("firmware_path".into(), Dynamic::UNIT);
    }
    m.insert("disk_path".into(), s.disk_path.clone().into());
    m.insert("api_socket".into(), s.api_socket.clone().into());
    m.insert("vcpus".into(), (s.vcpus as i64).into());
    m.insert("memory_mb".into(), (s.memory_mb as i64).into());
    if let Some(c) = &s.cmdline {
        m.insert("cmdline".into(), c.clone().into());
    } else {
        m.insert("cmdline".into(), Dynamic::UNIT);
    }
    if let Some(arr) = &s.extra_args {
        let mut a = Array::new();
        for s in arr {
            a.push(s.clone().into());
        }
        m.insert("extra_args".into(), a.into());
    } else {
        m.insert("extra_args".into(), Dynamic::UNIT);
    }
    // net_profile not exposed in Rhai yet; return UNIT for now
    m.insert("net_profile".into(), Dynamic::UNIT);
    m
}

fn vmruntime_to_map(r: &VmRuntime) -> Map {
    let mut m = Map::new();
    match r.pid {
        Some(p) => m.insert("pid".into(), (p as i64).into()),
        None => m.insert("pid".into(), Dynamic::UNIT),
    };
    m.insert("status".into(), r.status.clone().into());
    m.insert("log_file".into(), r.log_file.clone().into());
    m
}

fn vmrecord_to_map(rec: &VmRecord) -> Map {
    let mut m = Map::new();
    m.insert("spec".into(), vmspec_to_map(&rec.spec).into());
    m.insert("runtime".into(), vmruntime_to_map(&rec.runtime).into());
    m
}

// Helpers for reading Rhai Map fields

fn must_get_string(m: &Map, k: &str) -> Result<String, Box<EvalAltResult>> {
    match m.get(k) {
        Some(v) if v.is_string() => Ok(v.clone().cast::<String>()),
        _ => Err(Box::new(EvalAltResult::ErrorRuntime(
            format!("missing or non-string field '{}'", k).into(),
            rhai::Position::NONE,
        ))),
    }
}

fn get_string(m: &Map, k: &str) -> Option<String> {
    m.get(k).and_then(|v| if v.is_string() { Some(v.clone().cast::<String>()) } else { None })
}

fn get_int(m: &Map, k: &str) -> Option<i64> {
    m.get(k).and_then(|v| v.as_int().ok())
}

fn get_string_array(m: &Map, k: &str) -> Option<Vec<String>> {
    m.get(k).and_then(|v| {
        if v.is_array() {
            let arr = v.clone().cast::<Array>();
            let mut out = vec![];
            for it in arr {
                if it.is_string() {
                    out.push(it.cast::<String>());
                }
            }
            Some(out)
        } else {
            None
        }
    })
}

// Rhai-exposed functions

pub fn cloudhv_vm_create(spec: Map) -> Result<String, Box<EvalAltResult>> {
    let s = map_to_vmspec(spec)?;
    hv_to_rhai(cloudhv::vm_create(&s))
}

pub fn cloudhv_vm_start(id: &str) -> Result<(), Box<EvalAltResult>> {
    hv_to_rhai(cloudhv::vm_start(id))
}

pub fn cloudhv_vm_stop(id: &str, force: bool) -> Result<(), Box<EvalAltResult>> {
    hv_to_rhai(cloudhv::vm_stop(id, force))
}

pub fn cloudhv_vm_delete(id: &str, delete_disks: bool) -> Result<(), Box<EvalAltResult>> {
    hv_to_rhai(cloudhv::vm_delete(id, delete_disks))
}

pub fn cloudhv_vm_list() -> Result<Array, Box<EvalAltResult>> {
    let vms = hv_to_rhai(cloudhv::vm_list())?;
    let mut arr = Array::new();
    for rec in vms {
        arr.push(vmrecord_to_map(&rec).into());
    }
    Ok(arr)
}

pub fn cloudhv_vm_info(id: &str) -> Result<Map, Box<EvalAltResult>> {
    let rec = hv_to_rhai(cloudhv::vm_info(id))?;
    Ok(vmrecord_to_map(&rec))
}

pub fn cloudhv_discover_ipv4_from_leases(lease_path: &str, mac_lower: &str, timeout_secs: i64) -> Dynamic {
    // Check verbosity from the environment variable; default to verbose
    let verbose = std::env::var("VIRT_VERBOSE").unwrap_or_else(|_| "1".to_string()) == "1";

    if verbose {
        println!("🔍 Discovering VM network addresses...");
    }

    match crate::cloudhv::net::discover_ipv4_from_leases(lease_path, mac_lower, timeout_secs as u64) {
        Some(ip) => ip.into(),
        None => Dynamic::UNIT,
    }
}

pub fn cloudhv_discover_ipv6_on_bridge(bridge_name: &str, mac_lower: &str) -> Dynamic {
    match crate::cloudhv::net::discover_ipv6_on_bridge(bridge_name, mac_lower) {
        Some(ip) => ip.into(),
        None => Dynamic::UNIT,
    }
}

pub fn cloudhv_display_network_info(vm_id: &str, ipv4: Dynamic, ipv6: Dynamic) {
    // Check verbosity from the environment variable; default to verbose
    let verbose = std::env::var("VIRT_VERBOSE").unwrap_or_else(|_| "1".to_string()) == "1";

    if !verbose {
        return;
    }

    println!("✅ VM {} is ready!", vm_id);
    println!();
    println!("🌐 Network Information:");

    if ipv4.is_string() && !ipv4.clone().cast::<String>().is_empty() {
        println!("   IPv4: {}", ipv4.clone().cast::<String>());
    } else {
        println!("   IPv4: Not assigned yet (VM may still be configuring)");
    }

    if ipv6.is_string() && !ipv6.clone().cast::<String>().is_empty() {
        println!("   IPv6: {}", ipv6.clone().cast::<String>());
    } else {
        println!("   IPv6: Not available");
    }

    println!();
    println!("💡 VM is running in the background. To connect:");

    let ssh_addr = if ipv4.is_string() && !ipv4.clone().cast::<String>().is_empty() {
        ipv4.cast::<String>()
    } else {
        "<IPv4>".to_string()
    };
    println!("   SSH: ssh ubuntu@{}", ssh_addr);
    println!();
    println!("🛑 To stop the VM later:");
    println!("   cloudhv_vm_stop(\"{}\", false);", vm_id);
    println!("   cloudhv_vm_delete(\"{}\", true);", vm_id);
}

/// High-level network discovery that avoids hardcoded MACs/paths.
/// Returns a Rhai map with fields: ipv4, ipv6, mac, bridge, lease.
pub fn cloudhv_vm_network_info(id: &str, timeout_secs: i64) -> Result<Map, Box<EvalAltResult>> {
    let (ipv4, ipv6, mac, bridge, lease) =
        hv_to_rhai(cloudhv::vm_network_info(id, timeout_secs as u64))?;
    let mut m = Map::new();
    m.insert("vm_id".into(), id.to_string().into());
    m.insert("ipv4".into(), ipv4.map(Into::into).unwrap_or(Dynamic::UNIT));
    m.insert("ipv6".into(), ipv6.map(Into::into).unwrap_or(Dynamic::UNIT));
    m.insert("mac".into(), mac.map(Into::into).unwrap_or(Dynamic::UNIT));
    m.insert("bridge".into(), bridge.map(Into::into).unwrap_or(Dynamic::UNIT));
    m.insert("lease".into(), lease.map(Into::into).unwrap_or(Dynamic::UNIT));
    Ok(m)
}

// Module registration

pub fn register_cloudhv_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("cloudhv_vm_create", cloudhv_vm_create);
    engine.register_fn("cloudhv_vm_start", cloudhv_vm_start);
    engine.register_fn("cloudhv_vm_stop", cloudhv_vm_stop);
    engine.register_fn("cloudhv_vm_delete", cloudhv_vm_delete);
    engine.register_fn("cloudhv_vm_list", cloudhv_vm_list);
    engine.register_fn("cloudhv_vm_info", cloudhv_vm_info);
    engine.register_fn("cloudhv_vm_network_info", cloudhv_vm_network_info);
    engine.register_fn("cloudhv_discover_ipv4_from_leases", cloudhv_discover_ipv4_from_leases);
    engine.register_fn("cloudhv_discover_ipv6_on_bridge", cloudhv_discover_ipv6_on_bridge);
    engine.register_fn("cloudhv_display_network_info", cloudhv_display_network_info);
    Ok(())
}
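A hypothetical driver sketch: once register_cloudhv_module has run, a script can walk the whole VM lifecycle. The id, spec fields, and paths below are illustrative only.

fn run_vm_script(engine: &rhai::Engine) -> Result<(), Box<rhai::EvalAltResult>> {
    engine.run(
        r#"
        let spec = #{
            id: "demo1",
            disk_path: "/var/lib/hero/vms/demo1/disk.raw",
            vcpus: 2,
            memory_mb: 2048,
        };
        let id = cloudhv_vm_create(spec);
        cloudhv_vm_start(id);
        let net = cloudhv_vm_network_info(id, 30);
        cloudhv_display_network_info(id, net.ipv4, net.ipv6);
        "#,
    )
}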
204
packages/system/virt/src/rhai/cloudhv_builder.rs
Normal file
@@ -0,0 +1,204 @@
use crate::cloudhv::builder::CloudHvBuilder;
use crate::hostcheck::host_check_deps;
use crate::image_prep::{image_prepare, Flavor as ImgFlavor, ImagePrepOptions, NetPlanOpts};
use rhai::{Engine, EvalAltResult, Array};

// Improved functional-style builder with better method names for a fluent feel
fn cloudhv_builder(id: &str) -> CloudHvBuilder {
    CloudHvBuilder::new(id)
}

fn memory_mb(b: CloudHvBuilder, mb: i64) -> CloudHvBuilder {
    let mut b = b;
    if mb > 0 {
        b.memory_mb(mb as u32);
    }
    b
}

fn vcpus(b: CloudHvBuilder, v: i64) -> CloudHvBuilder {
    let mut b = b;
    if v > 0 {
        b.vcpus(v as u32);
    }
    b
}

fn disk(b: CloudHvBuilder, path: &str) -> CloudHvBuilder {
    let mut b = b;
    b.disk(path);
    b
}

fn disk_from_flavor(b: CloudHvBuilder, flavor: &str) -> CloudHvBuilder {
    let mut b = b;
    b.disk_from_flavor(flavor);
    b
}

fn cmdline(b: CloudHvBuilder, c: &str) -> CloudHvBuilder {
    let mut b = b;
    b.cmdline(c);
    b
}

fn extra_arg(b: CloudHvBuilder, a: &str) -> CloudHvBuilder {
    let mut b = b;
    b.extra_arg(a);
    b
}

fn no_default_net(b: CloudHvBuilder) -> CloudHvBuilder {
    let mut b = b;
    b.no_default_net();
    b
}

fn network_default_nat(b: CloudHvBuilder) -> CloudHvBuilder {
    let mut b = b;
    b.network_default_nat();
    b
}

fn network_none(b: CloudHvBuilder) -> CloudHvBuilder {
    let mut b = b;
    b.network_none();
    b
}

fn network_bridge_only(b: CloudHvBuilder) -> CloudHvBuilder {
    let mut b = b;
    b.network_bridge_only();
    b
}

fn network_custom(b: CloudHvBuilder, args: Array) -> CloudHvBuilder {
    let mut b = b;
    let mut v: Vec<String> = Vec::new();
    for it in args {
        if it.is_string() {
            v.push(it.clone().cast::<String>());
        }
    }
    b.network_custom_cli(v);
    b
}

fn launch(mut b: CloudHvBuilder) -> Result<String, Box<EvalAltResult>> {
    // Check verbosity from the environment variable; default to verbose
    let verbose = std::env::var("VIRT_VERBOSE").unwrap_or_else(|_| "1".to_string()) == "1";

    if verbose {
        println!("Preparing Ubuntu image and configuring VM...");
    }

    b.launch().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("cloudhv builder launch failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    }).map(|vm_id| {
        if verbose {
            println!("✅ VM launched successfully");
        }
        vm_id
    })
}

fn wait_for_vm_boot(seconds: i64) {
    // Check verbosity from the environment variable; default to verbose
    let verbose = std::env::var("VIRT_VERBOSE").unwrap_or_else(|_| "1".to_string()) == "1";

    if verbose {
        println!("⏳ Waiting {} seconds for the VM to boot and configure its network...", seconds);
    }

    std::thread::sleep(std::time::Duration::from_secs(seconds as u64));
}

// Noob-friendly one-shot wrapper
fn vm_easy_launch(flavor: &str, id: &str, memory_mb: i64, vcpus: i64) -> Result<String, Box<EvalAltResult>> {
    // Preflight
    let report = host_check_deps().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("host_check failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;
    if !report.ok {
        return Err(Box::new(EvalAltResult::ErrorRuntime(
            format!("missing dependencies: {:?}", report.critical).into(),
            rhai::Position::NONE,
        )));
    }

    // Prepare the image to raw using defaults (DHCPv4 + placeholder v6 + disable cloud-init net)
    let img_flavor = match flavor {
        "ubuntu" | "Ubuntu" | "UBUNTU" => ImgFlavor::Ubuntu,
        "alpine" | "Alpine" | "ALPINE" => ImgFlavor::Alpine,
        _ => ImgFlavor::Ubuntu,
    };
    let prep_opts = ImagePrepOptions {
        flavor: img_flavor,
        id: id.to_string(),
        source: None,
        target_dir: None,
        net: NetPlanOpts::default(),
        disable_cloud_init_net: true,
    };
    let prep = image_prepare(&prep_opts).map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("image_prepare failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;

    // Build and launch
    let mut b = CloudHvBuilder::new(id);
    b.disk(&prep.raw_disk);
    if memory_mb > 0 {
        b.memory_mb(memory_mb as u32);
    }
    if vcpus > 0 {
        b.vcpus(vcpus as u32);
    }
    // Default profile: NAT with IPv6 via Mycelium (opt-out via env)
    b.network_default_nat();
    b.launch().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("vm_easy_launch failed at launch: {}", e).into(),
            rhai::Position::NONE,
        ))
    })
}

pub fn register_cloudhv_builder_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    // Register the type
    engine.register_type_with_name::<CloudHvBuilder>("CloudHvBuilder");

    // Factory
    engine.register_fn("cloudhv_builder", cloudhv_builder);

    // Chainable methods (fluent functional style)
    engine.register_fn("memory_mb", memory_mb);
    engine.register_fn("vcpus", vcpus);
    engine.register_fn("disk", disk);
    engine.register_fn("disk_from_flavor", disk_from_flavor);
    engine.register_fn("cmdline", cmdline);
    engine.register_fn("extra_arg", extra_arg);
    engine.register_fn("no_default_net", no_default_net);
    // Networking profiles
    engine.register_fn("network_default_nat", network_default_nat);
    engine.register_fn("network_none", network_none);
    engine.register_fn("network_bridge_only", network_bridge_only);
    engine.register_fn("network_custom", network_custom);

    // Action
    engine.register_fn("launch", launch);
    engine.register_fn("wait_for_vm_boot", wait_for_vm_boot);

    // One-shot wrapper
    engine.register_fn("vm_easy_launch", vm_easy_launch);

    Ok(())
}
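A Rust-side sketch (hypothetical, in-module) of the same fluent chain a script would write as cloudhv_builder("demo1").disk(...).memory_mb(4096).vcpus(2).launch(); each adapter consumes and returns the builder, so the chain reads functionally in both languages. The id and disk path are illustrative.

fn launch_demo() -> Result<String, Box<EvalAltResult>> {
    let b = cloudhv_builder("demo1");
    let b = disk(b, "/var/lib/hero/vms/demo1/disk.raw");
    let b = memory_mb(b, 4096);
    let b = vcpus(b, 2);
    let b = network_default_nat(b);
    launch(b)
}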
81
packages/system/virt/src/rhai/hostcheck.rs
Normal file
@@ -0,0 +1,81 @@
use crate::hostcheck::{host_check_deps, HostCheckReport};
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};

fn report_to_map(r: &HostCheckReport) -> Map {
    let mut m = Map::new();
    m.insert("ok".into(), r.ok.into());

    let mut crit = Array::new();
    for s in &r.critical {
        crit.push(s.clone().into());
    }
    m.insert("critical".into(), crit.into());

    let mut opt = Array::new();
    for s in &r.optional {
        opt.push(s.clone().into());
    }
    m.insert("optional".into(), opt.into());

    let mut notes = Array::new();
    for s in &r.notes {
        notes.push(s.clone().into());
    }
    m.insert("notes".into(), notes.into());

    m
}

fn host_check() -> Result<Map, Box<EvalAltResult>> {
    // Check verbosity from the environment variable; default to verbose
    let verbose = std::env::var("VIRT_VERBOSE").unwrap_or_else(|_| "1".to_string()) == "1";

    if verbose {
        println!("Checking system requirements...");
    }

    match host_check_deps() {
        Ok(rep) => {
            if verbose {
                if rep.ok {
                    println!("✅ System requirements met");
                } else {
                    println!("❌ System check failed - missing dependencies:");
                    if !rep.critical.is_empty() {
                        println!("Critical:");
                        for dep in &rep.critical {
                            println!("  - {}", dep);
                        }
                    }
                    if !rep.optional.is_empty() {
                        println!("Optional:");
                        for dep in &rep.optional {
                            println!("  - {}", dep);
                        }
                    }
                }
            }
            Ok(report_to_map(&rep))
        }
        Err(e) => {
            if verbose {
                println!("❌ System check failed - missing dependencies:");
                println!("Critical:");
                println!("  - host_check failed: {}", e);
            }
            let mut m = Map::new();
            m.insert("ok".into(), Dynamic::FALSE);
            let mut crit = Array::new();
            crit.push(format!("host_check failed: {}", e).into());
            m.insert("critical".into(), crit.into());
            m.insert("optional".into(), Array::new().into());
            m.insert("notes".into(), Array::new().into());
            Ok(m)
        }
    }
}

pub fn register_hostcheck_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("host_check", host_check);
    Ok(())
}
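A minimal preflight sketch (hypothetical host program): gate any VM work on the check, reading the same fields report_to_map emits above.

fn preflight(engine: &rhai::Engine) -> Result<bool, Box<rhai::EvalAltResult>> {
    engine.eval::<bool>(
        r#"
        let rep = host_check();
        if !rep.ok {
            print(`missing critical deps: ${rep.critical}`);
        }
        rep.ok
        "#,
    )
}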
98
packages/system/virt/src/rhai/image_prep.rs
Normal file
98
packages/system/virt/src/rhai/image_prep.rs
Normal file
@@ -0,0 +1,98 @@
use crate::image_prep::{image_prepare, Flavor, ImagePrepOptions, NetPlanOpts};
use rhai::{Engine, EvalAltResult, Map};

fn parse_flavor(s: &str) -> Result<Flavor, Box<EvalAltResult>> {
    match s {
        "ubuntu" | "Ubuntu" | "UBUNTU" => Ok(Flavor::Ubuntu),
        "alpine" | "Alpine" | "ALPINE" => Ok(Flavor::Alpine),
        other => Err(Box::new(EvalAltResult::ErrorRuntime(
            format!("image_prepare: invalid flavor '{}', allowed: ubuntu|alpine", other).into(),
            rhai::Position::NONE,
        ))),
    }
}

fn map_get_string(m: &Map, k: &str) -> Option<String> {
    m.get(k).and_then(|v| if v.is_string() { Some(v.clone().cast::<String>()) } else { None })
}

fn map_get_bool(m: &Map, k: &str) -> Option<bool> {
    m.get(k).and_then(|v| v.as_bool().ok())
}

fn net_from_map(m: Option<&Map>) -> NetPlanOpts {
    let mut n = NetPlanOpts::default();
    if let Some(mm) = m {
        if let Some(b) = map_get_bool(mm, "dhcp4") {
            n.dhcp4 = b;
        }
        if let Some(b) = map_get_bool(mm, "dhcp6") {
            n.dhcp6 = b;
        }
        if let Some(s) = map_get_string(mm, "ipv6_addr") {
            if !s.trim().is_empty() {
                n.ipv6_addr = Some(s);
            }
        }
        if let Some(s) = map_get_string(mm, "gw6") {
            if !s.trim().is_empty() {
                n.gw6 = Some(s);
            }
        }
    }
    n
}

fn image_prepare_rhai(opts: Map) -> Result<Map, Box<EvalAltResult>> {
    // Required fields
    let id = map_get_string(&opts, "id").ok_or_else(|| {
        Box::new(EvalAltResult::ErrorRuntime(
            "image_prepare: missing required field 'id'".into(),
            rhai::Position::NONE,
        ))
    })?;
    if id.trim().is_empty() {
        return Err(Box::new(EvalAltResult::ErrorRuntime(
            "image_prepare: 'id' must not be empty".into(),
            rhai::Position::NONE,
        )));
    }

    let flavor_s = map_get_string(&opts, "flavor").unwrap_or_else(|| "ubuntu".into());
    let flavor = parse_flavor(&flavor_s)?;

    // Optional fields
    let source = map_get_string(&opts, "source");
    let target_dir = map_get_string(&opts, "target_dir");
    let net = opts.get("net").and_then(|v| if v.is_map() { Some(v.clone().cast::<Map>()) } else { None });
    let net_opts = net_from_map(net.as_ref());

    let disable_cloud_init_net = map_get_bool(&opts, "disable_cloud_init_net").unwrap_or(true);

    let o = ImagePrepOptions {
        flavor,
        id,
        source,
        target_dir,
        net: net_opts,
        disable_cloud_init_net,
    };

    let res = image_prepare(&o).map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("image_prepare failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;

    let mut out = Map::new();
    out.insert("raw_disk".into(), res.raw_disk.into());
    out.insert("root_uuid".into(), res.root_uuid.into());
    out.insert("boot_uuid".into(), res.boot_uuid.into());
    out.insert("work_qcow2".into(), res.work_qcow2.into());
    Ok(out)
}

pub fn register_image_prep_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("image_prepare", image_prepare_rhai);
    Ok(())
}
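A usage sketch for the image_prepare wrapper above (illustrative; the id and net values are invented, and only raw_disk, root_uuid, boot_uuid and work_qcow2 are guaranteed keys of the result map):

let res = image_prepare(#{
    id: "vm1",
    flavor: "ubuntu",
    disable_cloud_init_net: true,
    net: #{ dhcp4: true, dhcp6: false }
});
print(`raw disk: ${res.raw_disk}`);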
139
packages/system/virt/src/rhai/qcow2.rs
Normal file
@@ -0,0 +1,139 @@
use crate::qcow2;
use crate::qcow2::{BuildBaseResult, Qcow2Error, Qcow2Snapshot};
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
use serde_json::Value;

// Convert Qcow2Error to Rhai error
fn qcow2_error_to_rhai<T>(result: Result<T, Qcow2Error>) -> Result<T, Box<EvalAltResult>> {
    result.map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("qcow2 error: {}", e).into(),
            rhai::Position::NONE,
        ))
    })
}

// Convert serde_json::Value to Rhai Dynamic recursively (maps, arrays, scalars)
fn json_to_dynamic(v: &Value) -> Dynamic {
    match v {
        Value::Null => Dynamic::UNIT,
        Value::Bool(b) => (*b).into(),
        Value::Number(n) => {
            if let Some(i) = n.as_i64() {
                i.into()
            } else {
                // Avoid float dependency differences; fall back to string
                n.to_string().into()
            }
        }
        Value::String(s) => s.clone().into(),
        Value::Array(arr) => {
            let mut a = Array::new();
            for item in arr {
                a.push(json_to_dynamic(item));
            }
            a.into()
        }
        Value::Object(obj) => {
            let mut m = Map::new();
            for (k, val) in obj {
                m.insert(k.into(), json_to_dynamic(val));
            }
            m.into()
        }
    }
}

// Wrappers exposed to Rhai

pub fn qcow2_create(path: &str, size_gb: i64) -> Result<String, Box<EvalAltResult>> {
    qcow2_error_to_rhai(qcow2::create(path, size_gb))
}

pub fn qcow2_info(path: &str) -> Result<Dynamic, Box<EvalAltResult>> {
    let v = qcow2_error_to_rhai(qcow2::info(path))?;
    Ok(json_to_dynamic(&v))
}

pub fn qcow2_snapshot_create(path: &str, name: &str) -> Result<(), Box<EvalAltResult>> {
    qcow2_error_to_rhai(qcow2::snapshot_create(path, name))
}

pub fn qcow2_snapshot_delete(path: &str, name: &str) -> Result<(), Box<EvalAltResult>> {
    qcow2_error_to_rhai(qcow2::snapshot_delete(path, name))
}

pub fn qcow2_snapshot_list(path: &str) -> Result<Array, Box<EvalAltResult>> {
    let snaps = qcow2_error_to_rhai(qcow2::snapshot_list(path))?;
    let mut arr = Array::new();
    for s in snaps {
        arr.push(snapshot_to_map(&s).into());
    }
    Ok(arr)
}

fn snapshot_to_map(s: &Qcow2Snapshot) -> Map {
    let mut m = Map::new();
    if let Some(id) = &s.id {
        m.insert("id".into(), id.clone().into());
    } else {
        m.insert("id".into(), Dynamic::UNIT);
    }
    if let Some(name) = &s.name {
        m.insert("name".into(), name.clone().into());
    } else {
        m.insert("name".into(), Dynamic::UNIT);
    }
    if let Some(v) = s.vm_state_size {
        m.insert("vm_state_size".into(), v.into());
    } else {
        m.insert("vm_state_size".into(), Dynamic::UNIT);
    }
    if let Some(v) = s.date_sec {
        m.insert("date_sec".into(), v.into());
    } else {
        m.insert("date_sec".into(), Dynamic::UNIT);
    }
    if let Some(v) = s.date_nsec {
        m.insert("date_nsec".into(), v.into());
    } else {
        m.insert("date_nsec".into(), Dynamic::UNIT);
    }
    if let Some(v) = s.vm_clock_nsec {
        m.insert("vm_clock_nsec".into(), v.into());
    } else {
        m.insert("vm_clock_nsec".into(), Dynamic::UNIT);
    }
    m
}

pub fn qcow2_build_ubuntu_24_04_base(
    dest_dir: &str,
    size_gb: i64,
) -> Result<Map, Box<EvalAltResult>> {
    // size_gb: pass None if <=0
    let size_opt = if size_gb > 0 { Some(size_gb) } else { None };
    let r: BuildBaseResult = qcow2_error_to_rhai(qcow2::build_ubuntu_24_04_base(dest_dir, size_opt))?;
    let mut m = Map::new();
    m.insert("base_image_path".into(), r.base_image_path.into());
    m.insert("snapshot".into(), r.snapshot.into());
    m.insert("url".into(), r.url.into());
    if let Some(sz) = r.resized_to_gb {
        m.insert("resized_to_gb".into(), sz.into());
    } else {
        m.insert("resized_to_gb".into(), Dynamic::UNIT);
    }
    Ok(m)
}

// Module registration

pub fn register_qcow2_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("qcow2_create", qcow2_create);
    engine.register_fn("qcow2_info", qcow2_info);
    engine.register_fn("qcow2_snapshot_create", qcow2_snapshot_create);
    engine.register_fn("qcow2_snapshot_delete", qcow2_snapshot_delete);
    engine.register_fn("qcow2_snapshot_list", qcow2_snapshot_list);
    engine.register_fn("qcow2_build_ubuntu_24_04_base", qcow2_build_ubuntu_24_04_base);
    Ok(())
}
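Because json_to_dynamic converts recursively, qcow2_info output can be traversed as nested Rhai maps and arrays. A sketch (the snapshots key is whatever qemu-img info --output=json happens to emit and is an assumption here, not something this module adds):

let info = qcow2_info("/tmp/disk.qcow2");
print(info["virtual-size"]);
if info.snapshots != () {
    for s in info.snapshots { print(s.name); }  // assumed qemu-img JSON shape
}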
84
packages/system/virt/tests/rhai/04_qcow2_basic.rhai
Normal file
@@ -0,0 +1,84 @@
// Basic tests for QCOW2 SAL (offline; will skip if qemu-img is not present)

print("=== QCOW2 Basic Tests ===");

// Dependency check
let qemu = which("qemu-img");
if qemu == () {
    print("⚠️ qemu-img not available - skipping QCOW2 tests");
    print("Install qemu-utils (Debian/Ubuntu) or QEMU tools for your distro.");
    print("=== QCOW2 Tests Skipped ===");
    exit();
}

// Helper: unique temp path (use monotonic timestamp; avoid shell quoting issues)
let now = run_silent("date +%s%N");
let suffix = if now.success && now.stdout != "" { now.stdout.trim() } else { "100000" };
let img_path = `/tmp/qcow2_test_${suffix}.img`;

print("\n--- Test 1: Create image ---");
try {
    let created_path = qcow2_create(img_path, 1);
    // created_path should equal img_path
    print(`✓ Created qcow2: ${created_path}`);
} catch (err) {
    print(`❌ Create failed: ${err}`);
    exit();
}

print("\n--- Test 2: Info ---");
let info;
try {
    info = qcow2_info(img_path);
} catch (err) {
    print(`❌ Info failed: ${err}`);
    exit();
}
print("✓ Info fetched");
if info.format != () { print(` format: ${info.format}`); }
if info["virtual-size"] != () { print(` virtual-size: ${info["virtual-size"]}`); }

print("\n--- Test 3: Snapshot create/list/delete (offline) ---");
let snap_name = "s1";
try {
    qcow2_snapshot_create(img_path, snap_name);
} catch (err) {
    print(`❌ snapshot_create failed: ${err}`);
    exit();
}
print("✓ snapshot created: s1");

let snaps;
try {
    snaps = qcow2_snapshot_list(img_path);
} catch (err) {
    print(`❌ snapshot_list failed: ${err}`);
    exit();
}
print(`✓ snapshot_list ok, count=${snaps.len()}`);

try {
    qcow2_snapshot_delete(img_path, snap_name);
} catch (err) {
    print(`❌ snapshot_delete failed: ${err}`);
    exit();
}
print("✓ snapshot deleted: s1");

// Optional: Base image builder (downloads a large Ubuntu cloud image on first run;
// comment this section out to keep the test fully offline).
print("\n--- Optional: Build Ubuntu 24.04 Base ---");
let base_dir = "/tmp/virt_images";
let m;
try {
    m = qcow2_build_ubuntu_24_04_base(base_dir, 10);
} catch (err) {
    print(`⚠️ base build failed or skipped: ${err}`);
    exit();
}
print(`✓ Base image path: ${m.base_image_path}`);
print(`✓ Base snapshot: ${m.snapshot}`);
print(`✓ Source URL: ${m.url}`);
if m.resized_to_gb != () { print(`✓ Resized to: ${m.resized_to_gb}G`); }

print("\n=== QCOW2 Basic Tests Completed ===");
164
packages/system/virt/tests/rhai/05_cloudhv_basic.rhai
Normal file
@@ -0,0 +1,164 @@
// Basic Cloud Hypervisor SAL smoke test (minimal)
// - Skips gracefully if dependencies or inputs are missing
// - Creates a VM spec, optionally starts/stops it if all inputs are available

print("=== Cloud Hypervisor Basic Tests ===");

// Dependency checks (static binaries only)
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");

// Normalize which() results: () or "" both mean missing (depending on SAL which variant)
let ch_missing = (chs == () || chs == "");
let chr_missing = (chrs == () || chrs == "");

if ch_missing || chr_missing {
    print("⚠️ cloud-hypervisor-static and/or ch-remote-static not available - skipping CloudHV tests");
    print("Install Cloud Hypervisor static binaries to run these tests.");
    print("=== CloudHV Tests Skipped ===");
    exit();
}

// Inputs (adjust these for your environment)
// Prefer firmware boot if firmware is available; otherwise fall back to direct kernel boot.
let firmware_path = "/tmp/virt_images/hypervisor-fw";
let kernel_path = "/path/to/vmlinux"; // optional when firmware_path is present

// We can reuse the base image from the QCOW2 test/builder if present.
let disk_path = "/tmp/virt_images/noble-server-cloudimg-amd64.img";

// Validate inputs
let missing = false;
let have_firmware = exist(firmware_path);
let have_kernel = exist(kernel_path);
if !have_firmware && !have_kernel {
    print(`⚠️ neither firmware_path (${firmware_path}) nor kernel_path (${kernel_path}) found (start/stop will be skipped)`);
    missing = true;
}
if !exist(disk_path) {
    print(`⚠️ disk_path not found: ${disk_path} (start/stop will be skipped)`);
    missing = true;
}

// Unique id
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `testvm_${suffix}`;

print("\n--- Test 1: Create VM definition ---");
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "api_socket": "", // default under VM dir
    "vcpus": 1,
    "memory_mb": 1024,
    // For firmware boot: provide firmware_path only if it exists.
    // For kernel boot: provide kernel_path and optionally a cmdline.
};
if have_firmware {
    spec.firmware_path = firmware_path;
} else if have_kernel {
    spec.kernel_path = kernel_path;
    spec.cmdline = "console=ttyS0 reboot=k panic=1";
}
// "extra_args" can be added if needed, e.g.:
// spec.extra_args = ["--rng", "src=/dev/urandom"];

try {
    let created_id = cloudhv_vm_create(spec);
    print(`✓ VM created: ${created_id}`);
} catch (err) {
    print(`❌ VM create failed: ${err}`);
    print("=== CloudHV Tests Aborted ===");
    exit();
}

print("\n--- Test 2: VM info ---");
try {
    let info = cloudhv_vm_info(vm_id);
    print(`✓ VM info loaded: id=${info.spec.id}, status=${info.runtime.status}`);
} catch (err) {
    print(`❌ VM info failed: ${err}`);
    print("=== CloudHV Tests Aborted ===");
    exit();
}

print("\n--- Test 3: VM list ---");
try {
    let vms = cloudhv_vm_list();
    print(`✓ VM list size: ${vms.len()}`);
} catch (err) {
    print(`❌ VM list failed: ${err}`);
    print("=== CloudHV Tests Aborted ===");
    exit();
}

// Start/Stop only if inputs exist
if !missing {
    print("\n--- Test 4: Start VM ---");
    try {
        cloudhv_vm_start(vm_id);
        print("✓ VM start invoked");
    } catch (err) {
        print(`⚠️ VM start failed (this can happen if kernel/cmdline are incompatible): ${err}`);
    }

    print("\n waiting for VM to be ready...");

    // Discover API socket and PID from SAL
    let info1 = cloudhv_vm_info(vm_id);
    let api_sock = info1.spec.api_socket;
    let pid = info1.runtime.pid;

    // 1) Wait for API socket to appear (up to ~50s)
    let sock_ok = false;
    for x in 0..50 {
        if exist(api_sock) { sock_ok = true; break; }
        sleep(1);
    }
    print(`api_sock_exists=${sock_ok} path=${api_sock}`);

    // 2) Probe ch-remote info with retries (up to ~20s)
    if sock_ok {
        let info_ok = false;
        for x in 0..20 {
            let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
            if r.success {
                info_ok = true;
                break;
            }
            sleep(1);
        }
        if info_ok {
            print("VM API is ready (ch-remote info OK)");
        } else {
            print("⚠️ VM API did not become ready in time (continuing)");
        }
    } else {
        print("⚠️ API socket not found (continuing)");
    }

    // print("\n--- Test 5: Stop VM (graceful) ---");
    // try {
    //     cloudhv_vm_stop(vm_id, false);
    //     print("✓ VM stop invoked (graceful)");
    // } catch (err) {
    //     print(`⚠️ VM stop failed: ${err}`);
    // }
} else {
    print("\n⚠️ Skipping start/stop because required inputs are missing.");
}

// print("\n--- Test 6: Delete VM definition ---");
// try {
//     cloudhv_vm_delete(vm_id, false);
//     print("✓ VM deleted");
// } catch (err) {
//     print(`❌ VM delete failed: ${err}`);
//     print("=== CloudHV Tests Aborted ===");
//     exit();
// }

print("\n=== Cloud Hypervisor Basic Tests Completed ===");
148
packages/system/virt/tests/rhai/05_cloudhv_diag.rhai
Normal file
@@ -0,0 +1,148 @@
// Cloud Hypervisor diagnostic script
// Creates a VM, starts CH, verifies PID, API socket, ch-remote info, and tails logs.

print("=== CloudHV Diagnostic ===");

// Dependency check
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let ch_missing = (chs == () || chs == "");
let chr_missing = (chrs == () || chrs == "");
if ch_missing || chr_missing {
    print("cloud-hypervisor-static and/or ch-remote-static not available - aborting.");
    exit();
}

// Inputs
let firmware_path = "/tmp/virt_images/hypervisor-fw";
let disk_path = "/tmp/virt_images/noble-server-cloudimg-amd64.img";

if !exist(firmware_path) {
    print(`Firmware not found: ${firmware_path}`);
    exit();
}
if !exist(disk_path) {
    print(`Disk image not found: ${disk_path}`);
    exit();
}

// Unique ID
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `diagvm_${suffix}`;

// Socket path will be obtained from VM info (SAL populates spec.api_socket after start)

// Build minimal spec; let SAL decide the api_socket under the VM dir
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "vcpus": 1,
    "memory_mb": 512
};
spec.firmware_path = firmware_path;

fn pid_alive(p) {
    if p == () { return false; }
    // Use /proc to avoid noisy "kill: No such process" messages from kill -0
    return exist(`/proc/${p}`);
}

fn tail_log(p, n) {
    if exist(p) {
        let r = run_silent(`tail -n ${n} ${p}`);
        if r.success { print(r.stdout); } else { print(r.stderr); }
    } else {
        print(`Log file not found: ${p}`);
    }
}

try {
    print("--- Create VM spec ---");
    let created = cloudhv_vm_create(spec);
    print(`created: ${created}`);
} catch (err) {
    print(`create failed: ${err}`);
    exit();
}

// Read back info to get SAL-resolved log_file path
let info0 = cloudhv_vm_info(vm_id);
let log_file = info0.runtime.log_file;

// Rely on SAL to handle socket directory creation and stale-socket cleanup

print("--- Start VM ---");
try {
    cloudhv_vm_start(vm_id);
    print("start invoked");
} catch (err) {
    print(`start failed: ${err}`);
    tail_log(log_file, 200);
    exit();
}

// Fetch PID and discover API socket path from updated spec
let info1 = cloudhv_vm_info(vm_id);
let pid = info1.runtime.pid;
let api_sock = info1.spec.api_socket;
print(`pid=${pid}`);
print(`api_sock_from_sal=${api_sock}`);

// Wait for socket file
let sock_ok = false;
for x in 0..50 {
    if exist(api_sock) { sock_ok = true; break; }
    sleep(1);
}
print(`api_sock_exists=${sock_ok} path=${api_sock}`);

// Probe ch-remote info
let info_ok = false;
let last_err = "";
if sock_ok {
    for x in 0..20 {
        let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
        if r.success {
            info_ok = true;
            print("ch-remote info OK");
            break;
        } else {
            last_err = if r.stderr != "" { r.stderr } else { r.stdout };
            sleep(1);
        }
    }
}
if !info_ok {
    print("ch-remote info FAILED");
    if last_err != "" { print(last_err); }
    let alive = pid_alive(pid);
    print(`pid_alive=${alive}`);
    print("--- Last 200 lines of CH log ---");
    tail_log(log_file, 200);
    print("--- End of log ---");
} else {
    print("--- Stop via SAL (force) ---");
    try {
        cloudhv_vm_stop(vm_id, true);
        print("SAL stop invoked (force)");
    } catch (err) {
        print(`stop failed: ${err}`);
    }
    // wait for exit (check original PID)
    for x in 0..30 {
        if !pid_alive(pid) { break; }
        sleep(1);
    }
    print(`pid_alive_after_stop=${pid_alive(pid)}`);
}

print("--- Cleanup ---");
try {
    cloudhv_vm_delete(vm_id, false);
    print("vm deleted");
} catch (err) {
    print(`delete failed: ${err}`);
}

print("=== Diagnostic done ===");
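Note on pid_alive above: checking for /proc/<pid> is equivalent to kill -0 <pid> without the error noise, though it can in principle race with PID reuse; for a short-lived diagnostic run that trade-off is acceptable.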
533
packages/system/virt/tests/rhai/06_cloudhv_cloudinit_dhcpd.rhai
Normal file
@@ -0,0 +1,533 @@
// Cloud-init NoCloud + host DHCP (dnsmasq) provisioning for Cloud Hypervisor
// - Accepts a user-supplied SSH public key
// - Ensures Ubuntu cloud image via SAL qcow2 builder
// - Sets up host bridge br0 and tap0, and runs an ephemeral dnsmasq bound to br0
// - Builds NoCloud seed ISO (cloud-localds preferred; genisoimage fallback)
// - Creates/starts a VM and prints SSH connection instructions
//
// Requirements (run this script with privileges that allow sudo commands):
// - cloud-hypervisor-static, ch-remote-static
// - cloud-image-utils (for cloud-localds) or genisoimage/xorriso
// - dnsmasq, iproute2
// - qemu tools already used by the qcow2 builder
//
// Note: This script uses sudo for network and dnsmasq operations.

print("=== CloudHV + cloud-init + host DHCP (dnsmasq) ===");

// ----------- User input -----------
let user_pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFyZJCEsvRc0eitsOoq+ywC5Lmqejvk3hXMVbO0AxPrd maxime@maxime-arch";

// Optional: choose boot method. If firmware is present in common locations, it will be used.
// Otherwise, if kernel_path exists, direct kernel boot will be used.
// If neither is found, the script will abort before starting the VM.
let firmware_path_override = ""; // e.g., "/usr/share/cloud-hypervisor/hypervisor-fw"
let kernel_path_override = ""; // e.g., "/path/to/vmlinux"
let kernel_cmdline_override = "console=ttyS0 reboot=k panic=1";

// Network parameters (local-only setup)
let bridge = "br0";
let br_cidr = "192.168.127.1/24";
let br_ip = "192.168.127.1";
let tap = "tap0";
let mac = "02:00:00:00:00:10"; // locally administered MAC
// Deterministic IP for the VM (dnsmasq will pin this MAC to this IP)
let vm_static_ip = "192.168.127.100";

// Paths
let base_dir = "/tmp/virt_images";
let seed_iso = `${base_dir}/seed.iso`;
let user_data = `${base_dir}/user-data`;
let meta_data = `${base_dir}/meta-data`;
let dnsmasq_pid = `${base_dir}/dnsmasq.pid`;
let dnsmasq_lease = `${base_dir}/dnsmasq.leases`;
let dnsmasq_log = `${base_dir}/dnsmasq.log`;

// ----------- Dependency checks -----------
print("\n--- Checking dependencies ---");
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let clds = which("cloud-localds");
let geniso = which("genisoimage");
let dns = which("dnsmasq");
let ipt = which("ip");

let missing = false;
if chs == () || chs == "" {
    print("❌ cloud-hypervisor-static not found on PATH");
    missing = true;
}
if chrs == () || chrs == "" {
    print("❌ ch-remote-static not found on PATH");
    missing = true;
}
if (clds == () || clds == "") && (geniso == () || geniso == "") {
    print("❌ Neither cloud-localds nor genisoimage is available. Install cloud-image-utils or genisoimage.");
    missing = true;
}
if dns == () || dns == "" {
    print("❌ dnsmasq not found on PATH");
    missing = true;
}
if ipt == () || ipt == "" {
    print("❌ ip (iproute2) not found on PATH");
    missing = true;
}
if missing {
    print("=== Aborting due to missing dependencies ===");
    exit();
}
print("✓ Dependencies look OK");

// ----------- Ensure base image -----------
print("\n--- Ensuring Ubuntu 24.04 cloud image ---");
let base;
try {
    // Avoid resizing to prevent a GPT backup-header mismatch that can break early boot on some kernels/firmware.
    // Use 0 to keep the original image size; cloud-init/cloud-image tooling can grow the FS later if needed.
    base = qcow2_build_ubuntu_24_04_base(base_dir, 0);
} catch (err) {
    print(`❌ Failed to build/ensure base image: ${err}`);
    exit();
}
let disk_path = base.base_image_path;
print(`✓ Using base image: ${disk_path}`);

// ----------- Host networking (bridge + tap) -----------
print("\n--- Configuring host networking (bridge + tap) ---");
// Idempotent: create br0 if missing; assign IP if not present; set up
let net_script = `
sudo ip link show ${bridge} >/dev/null 2>&1 || sudo ip link add ${bridge} type bridge
ip addr show dev ${bridge} | grep -q "${br_cidr}" || sudo ip addr add ${br_cidr} dev ${bridge}
sudo ip link set ${bridge} up

# Remove any stale TAP to avoid "Resource busy" when CH configures it
if ip link show ${tap} >/dev/null 2>&1; then
  sudo ip link set ${tap} down || true
  sudo ip link del ${tap} || true
fi
`;
run_silent(net_script);
print(`✓ Bridge ${bridge} and tap ${tap} configured`);
print("Note: NO-CARRIER on a bridge/tap without a peer is normal; DHCP will work once the guest brings its interface up.");

// ----------- Start/ensure dnsmasq on br0 -----------
print("\n--- Ensuring dnsmasq serving DHCP on the bridge ---");
// Ensure log/lease directory exists before starting dnsmasq
run_silent(`mkdir -p ${base_dir}`);
// If an instance with our pid-file is running, keep it; otherwise start a new one bound to br0.
// Use --port=0 to avoid DNS port conflicts; we only need DHCP here.
let dns_state = run_silent(`
if [ -f ${dnsmasq_pid} ] && ps -p $(cat ${dnsmasq_pid}) >/dev/null 2>&1; then
  echo RUNNING
elif pgrep -f "dnsmasq .*--interface=${bridge}" >/dev/null 2>&1; then
  echo RUNNING
elif [ -f ${dnsmasq_log} ] && grep -q "sockets bound exclusively to interface ${bridge}" ${dnsmasq_log}; then
  echo RUNNING
else
  echo STOPPED
fi
`);
let need_start = true;
if dns_state.success && dns_state.stdout.trim() == "RUNNING" {
    print("✓ dnsmasq already running (pid file present and alive)");
    need_start = false;
} else {
    // Clean stale files
    run_silent(`rm -f ${dnsmasq_pid} ${dnsmasq_lease}`);
}

if need_start {
    // Start dnsmasq detached and force a clean, self-contained configuration.
    // - Use --conf-file=/dev/null to avoid system config conflicts
    // - Log directly via --log-facility to capture early failures
    // - Run under current privileges (herodo is invoked with sudo)
    let r = run_silent(`
: > ${dnsmasq_log}
nohup dnsmasq \
  --conf-file=/dev/null \
  --log-facility=${dnsmasq_log} \
  --log-dhcp \
  --user=root \
  --group=root \
  --port=0 \
  --bind-interfaces \
  --except-interface=lo \
  --interface=${bridge} \
  --dhcp-range=192.168.127.100,192.168.127.200,12h \
  --dhcp-option=option:router,${br_ip} \
  --dhcp-option=option:dns-server,1.1.1.1 \
  --dhcp-host=${mac},${vm_static_ip} \
  --pid-file=${dnsmasq_pid} \
  --dhcp-leasefile=${dnsmasq_lease} &
`);
    if !r.success {
        print(`❌ Failed to start dnsmasq. Check log: ${dnsmasq_log}`);
        let t = run_silent(`
if [ -f ${dnsmasq_log} ]; then
  tail -n 200 ${dnsmasq_log}
fi
`);
        if t.success && t.stdout.trim() != "" { print(t.stdout); }
        exit();
    }

    // Robust readiness: wait up to 10s for pidfile OR process OR log pattern
    let ready = run_silent(`
for i in $(seq 1 10); do
  if [ -f ${dnsmasq_pid} ] && ps -p $(cat ${dnsmasq_pid}) >/dev/null 2>&1; then
    echo OK; exit 0
  fi
  if pgrep -f "dnsmasq .*--interface=${bridge}" >/dev/null 2>&1; then
    echo OK; exit 0
  fi
  if [ -f ${dnsmasq_log} ] && grep -q "sockets bound exclusively to interface ${bridge}" ${dnsmasq_log}; then
    echo OK; exit 0
  fi
  sleep 1
done
echo FAIL
`);
    if !(ready.success && ready.stdout.contains("OK")) {
        print(`❌ dnsmasq did not come up. See ${dnsmasq_log}`);
        let t = run_silent(`
if [ -f ${dnsmasq_log} ]; then
  tail -n 200 ${dnsmasq_log}
fi
`);
        if t.success && t.stdout.trim() != "" { print(t.stdout); }
        exit();
    }
    print("✓ dnsmasq started (DHCP on br0)");
}

// ----------- Build cloud-init NoCloud seed (user-data/meta-data) -----------
print("\n--- Building NoCloud seed (user-data, meta-data) ---");
run_silent(`mkdir -p ${base_dir}`);
run_silent(`chmod 1777 ${base_dir}`);

// Compose user-data and meta-data content
let ud = `#cloud-config
users:
  - name: ubuntu
    groups: [adm, cdrom, dialout, lxd, plugdev, sudo]
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    lock_passwd: true
    ssh_authorized_keys:
      - ${user_pubkey}
ssh_pwauth: false
package_update: true
`;
let md = `instance-id: iid-ubuntu-noble-001
local-hostname: noblevm
`;

// Write files via heredoc
let wr1 = run_silent(`
cat > ${user_data} <<'EOF'
${ud}
EOF
`);
if !wr1.success { print(`❌ Failed to write ${user_data}`); exit(); }
let wr2 = run_silent(`
cat > ${meta_data} <<'EOF'
${md}
EOF
`);
if !wr2.success { print(`❌ Failed to write ${meta_data}`); exit(); }

// Provide cloud-init network-config to ensure the NIC with our MAC requests DHCP
let net_config = `${base_dir}/network-config`;
let nc = `version: 2
ethernets:
  nic0:
    match:
      macaddress: ${mac}
    set-name: eth0
    renderer: networkd
    dhcp4: true
`;
let wr3 = run_silent(`
cat > ${net_config} <<'EOF'
${nc}
EOF
`);
if !wr3.success { print(`❌ Failed to write ${net_config}`); exit(); }

// Build seed ISO (prefer cloud-localds)
let built = false;
if !(clds == () || clds == "") {
    let r = run_silent(`sudo cloud-localds --network-config ${net_config} ${seed_iso} ${user_data} ${meta_data}`);
    if r.success {
        built = true;
    }
}
if !built {
    if geniso == () || geniso == "" {
        print("❌ Neither cloud-localds nor genisoimage succeeded/available to build seed.iso");
        exit();
    }
    let r2 = run_silent(`sudo genisoimage -output ${seed_iso} -volid cidata -joliet -rock ${user_data} ${meta_data} ${net_config}`);
    if !r2.success {
        print("❌ genisoimage failed to create seed.iso");
        exit();
    }
}
print(`✓ Seed ISO: ${seed_iso}`);

// ----------- Determine boot method (firmware or kernel) -----------
print("\n--- Determining boot method ---");
let firmware_path = "";
if firmware_path_override != "" && exist(firmware_path_override) {
    firmware_path = firmware_path_override;
} else {
    let candidates = [
        "/usr/local/share/cloud-hypervisor/hypervisor-fw",
        "/usr/share/cloud-hypervisor/hypervisor-fw",
        "/usr/lib/cloud-hypervisor/hypervisor-fw",
        "/tmp/virt_images/hypervisor-fw"
    ];
    for p in candidates {
        if exist(p) { firmware_path = p; break; }
    }
}
let kernel_path = "";
if kernel_path_override != "" && exist(kernel_path_override) {
    kernel_path = kernel_path_override;
}
if firmware_path == "" && kernel_path == "" {
    print("❌ No firmware_path or kernel_path found. Set firmware_path_override or kernel_path_override at top and re-run.");
    exit();
}
if firmware_path != "" {
    print(`✓ Using firmware boot: ${firmware_path}`);
} else {
    print(`✓ Using direct kernel boot: ${kernel_path}`);
}

// ----------- Create and start VM -----------
print("\n--- Creating and starting VM ---");
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout.trim() != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `noble_vm_${suffix}`;

// Use a unique TAP per run to avoid "Resource busy" conflicts.
// Keep name <= 15 chars (Linux IFNAMSIZ), e.g. "tap-abcdef".
let tn = run_silent("od -An -N3 -tx1 /dev/urandom | tr -d '[:space:]'");
if tn.success && tn.stdout.trim() != "" {
    tap = `tap-${tn.stdout.trim()}`;
} else {
    tap = "tap-abcd01";
}

let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "api_socket": "",
    "vcpus": 2,
    "memory_mb": 2048
};
if firmware_path != "" {
    spec.firmware_path = firmware_path;
} else {
    spec.kernel_path = kernel_path;
    spec.cmdline = kernel_cmdline_override;
}
spec.extra_args = [
    "--disk", `path=${seed_iso},readonly=true`,
    "--net", `tap=${tap},mac=${mac}`
];

try {
    let created = cloudhv_vm_create(spec);
    print(`✓ VM created: ${created}`);
} catch (err) {
    print(`❌ VM create failed: ${err}`);
    exit();
}

try {
    cloudhv_vm_start(vm_id);
    print("✓ VM start invoked");

    // After CH creates/opens the TAP, attach it to the bridge so DHCP broadcasts reach dnsmasq on br0.
    // Avoid racing with CH tap configuration: wait briefly, then attempt the attach.
    let post_net = `
# Give CH time to finish configuring the tap to avoid EBUSY
sleep 1
for i in $(seq 1 30); do
  if ip link show ${tap} >/dev/null 2>&1; then
    # Enslave to bridge and ensure up; ignore errors (idempotent)
    sudo ip link set ${tap} master ${bridge} 2>/dev/null || true
    sudo ip link set ${tap} up 2>/dev/null || true
    break
  fi
  sleep 1
done
`;
    run_silent(post_net);
} catch (err) {
    print(`❌ VM start failed: ${err}`);
    exit();
}

// ----------- Wait for DHCP lease and print access info -----------
print("\n--- Waiting for DHCP lease from dnsmasq ---");
let vm_ip = "";

// First try the deterministic fixed IP via ping (dnsmasq pins MAC->IP)
for i in 0..60 {
    // Use a plain command (no shell operators). Success indicates reachability.
    let pr = run_silent(`ping -c1 -W1 -I ${bridge} ${vm_static_ip}`);
    if pr.success {
        vm_ip = vm_static_ip;
        break;
    }
    sleep(1);
}
// Fall back to lease/ARP discovery only if the ping probe found nothing
if vm_ip == "" {
    for i in 0..180 {
        sleep(1);
        // Discover and validate IPv4; prefer exact MAC match across common dnsmasq lease locations
        let lr = run_silent(`
valid_ipv4() { echo "$1" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true; }

# Candidate lease files (add more if your distro uses a different path)
LEASE_FILES="${dnsmasq_lease} /var/lib/misc/dnsmasq.leases /var/lib/dnsmasq/dnsmasq.leases"
# Include any runtime leases under /run/dnsmasq if present
if ls /run/dnsmasq/*.leases >/dev/null 2>&1; then
  LEASE_FILES="$LEASE_FILES $(ls /run/dnsmasq/*.leases 2>/dev/null)"
fi

# 1) Try to find by exact MAC across all known lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk -v m="${mac}" '$2==m{ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done

# 2) Fallback: last IP in our br0 subnet across all lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk '$3 ~ /^192\\.168\\.127\\./ {ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done

# 3) Fallback: SAL default subnet (172.30.0.0/24) across all lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk '$3 ~ /^172\\.30\\.0\\./ {ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done

# 4) ARP gleaning on likely bridges (br0 first, then br-hero) for the known MAC
for dev in ${bridge} br-hero; do
  if ip -o link show "$dev" >/dev/null 2>&1; then
    ip="$(ip neigh show dev "$dev" | awk '$0 ~ /lladdr ${mac}/ {print $1}' | tail -n1)"
    if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
  fi
done

# 5) As a last resort, ARP any 192.168.127.x seen on br0
if ip -o link show ${bridge} >/dev/null 2>&1; then
  ip="$(ip neigh show dev ${bridge} | awk '$1 ~ /^192\\.168\\.127\\./ {print $1}' | tail -n1)"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
fi

# No valid IP yet
true
`);
        if lr.success {
            let ip = lr.stdout.trim();
            if ip != "" {
                vm_ip = ip;
                break;
            }
        }
    }
}
// Fallback: parse the cloud-hypervisor console log for an IPv4 on our expected subnets
let info2 = cloudhv_vm_info(vm_id);
let log_path = info2.runtime.log_file;
if vm_ip == "" {
    let cp = run_silent(`
if [ -f ${log_path} ]; then
  grep -Eo '([0-9]+\\.){3}[0-9]+' ${log_path} | grep -E '^(192\\.168\\.127|172\\.30\\.0)\\.' | tail -n1
fi
`);
    if cp.success {
        let ip2 = cp.stdout.trim();
        if ip2 != "" {
            vm_ip = ip2;
        }
    }
}
if vm_ip == "" {
    // Actively populate ARP neighbor tables by sweeping likely subnets
    run_silent(`
for ip in $(seq 100 200); do ping -c1 -W1 -I ${bridge} 192.168.127.$ip >/dev/null 2>&1 || true; done
if ip -o link show br-hero >/dev/null 2>&1; then
  for ip in $(seq 50 250); do ping -c1 -W1 -I br-hero 172.30.0.$ip >/dev/null 2>&1 || true; done
fi
`);
    // Re-check after the ARP sweep using the same validated discovery logic
    let lr2 = run_silent(`
get_ip_from_leases() {
  f="$1"; prefix="$2";
  if [ -f "$f" ]; then
    awk -v pfx="$prefix" '$3 ~ ("^" pfx) {ip=$3} END{if(ip!="") print ip}' "$f"
  fi
}
valid_ipv4() {
  echo "$1" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true
}
cand="$(get_ip_from_leases ${dnsmasq_lease} "192.168.127.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(get_ip_from_leases /var/lib/misc/dnsmasq.leases "192.168.127.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(get_ip_from_leases /var/lib/misc/dnsmasq.leases "172.30.0.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(ip neigh show dev ${bridge} | awk '$0 ~ /lladdr ${mac}/ {print $1}' | tail -n1)"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
true
`);
    if lr2.success {
        let ip2 = lr2.stdout.trim();
        if ip2 != "" {
            vm_ip = ip2;
        }
    }
}

/* Final sanity: ensure vm_ip is a valid IPv4 dotted-quad before printing */
let _chk = run_silent(`echo "${vm_ip}" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true`);
if !(_chk.success && _chk.stdout.trim() != "") { vm_ip = ""; }

if vm_ip == "" {
    print("❌ Could not discover VM IP after 180 seconds.");
    print("Diagnostics you can run now:");
    print(` tail -n +1 ${dnsmasq_lease}`);
    print(" cat /var/lib/misc/dnsmasq.leases | tail -n 5");
    print(` ip neigh show dev ${bridge} | grep '${mac}' || true`);
    print("Exiting without SSH command because the IP could not be determined.");
    exit();
} else {
    print(`✓ Lease acquired: ${vm_ip}`);
    print("\nSSH command (key-only; default user 'ubuntu'):");
    print(`ssh -o StrictHostKeyChecking=no ubuntu@${vm_ip}`);
}

print("\n--- VM access details ---");
print(`VM ID: ${vm_id}`);
let info = cloudhv_vm_info(vm_id);
print(`API socket: ${info.spec.api_socket}`);
print(`Console log: ${info.runtime.log_file}`);
print(`Bridge: ${bridge} at ${br_ip}, TAP: ${tap}, MAC: ${mac}`);
print(`Seed: ${seed_iso}`);
/* SSH command already printed above when the lease was acquired */

print("\nCleanup hints (manual):");
print(`- Stop dnsmasq: sudo kill \$(cat ${dnsmasq_pid})`);
print(`- Remove TAP: sudo ip link set ${tap} down; sudo ip link del ${tap}`);
print(" (Keep the bridge if you will reuse it.)");

print("\n=== Completed ===");
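For context on the lease parsing above: each dnsmasq lease file line is whitespace-separated as <expiry-epoch> <mac> <ip> <hostname> <client-id>, which is why the awk programs match the MAC in field $2 and read the IP from field $3. An illustrative line (values invented):

1712345678 02:00:00:00:00:10 192.168.127.100 noblevm 01:02:00:00:00:00:10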
311
packages/system/virt/tests/rhai/07_cloudhv_ubuntu_ssh.rhai
Normal file
@@ -0,0 +1,311 @@
// Create and boot an Ubuntu 24.04 VM with cloud-init SSH key injection on Cloud Hypervisor
// - Uses the qcow2 base image builder from SAL
// - Builds a NoCloud seed ISO embedding your SSH public key
// - Starts the VM; host networking prerequisites (bridge/dnsmasq/nftables) are ensured by CloudHV SAL
// - Attempts to discover the VM IP from dnsmasq leases and prints SSH instructions
//
// Requirements on host:
// - cloud-hypervisor-static, ch-remote-static
// - cloud-localds (preferred) OR genisoimage
// - qemu-img (already used by qcow2 SAL)
// - dnsmasq + nftables (will be handled by SAL during vm_start)
//
// Note:
// - SAL CloudHV networking will create a bridge br-hero, enable dnsmasq, and add a NAT rule via nftables
// - This script does NOT manage host networking; it relies on SAL to do so during vm_start()

print("=== CloudHV Ubuntu 24.04 with SSH key (cloud-init) ===");

// ---------- Inputs ----------
let user_pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFyZJCEsvRc0eitsOoq+ywC5Lmqejvk3hXMVbO0AxPrd maxime@maxime-arch";

// Optional overrides for boot method (if firmware is present, it will be preferred)
let firmware_path_override = ""; // e.g., "/usr/share/cloud-hypervisor/hypervisor-fw"
let kernel_path_override = ""; // e.g., "/path/to/vmlinux"
let kernel_cmdline = "console=ttyS0 reboot=k panic=1";

// Cloud-init hostname and instance id (used to identify leases reliably)
let cloudinit_hostname = "noblevm";
let cloudinit_instance_id = "iid-ubuntu-noble-ssh";

// Paths
let base_dir = "/tmp/virt_images";
let seed_iso = `${base_dir}/seed-ssh.iso`;
let user_data = `${base_dir}/user-data`;
let meta_data = `${base_dir}/meta-data`;

// ---------- Dependency checks ----------
print("\n--- Checking dependencies ---");
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let clds = which("cloud-localds");
let geniso = which("genisoimage");
let qemu = which("qemu-img");

let missing = false;
if chs == () || chs == "" {
    print("❌ cloud-hypervisor-static not found on PATH");
    missing = true;
}
if chrs == () || chrs == "" {
    print("❌ ch-remote-static not found on PATH");
    missing = true;
}
if (clds == () || clds == "") && (geniso == () || geniso == "") {
    print("❌ Neither cloud-localds nor genisoimage is available. Install cloud-image-utils or genisoimage.");
    missing = true;
}
if qemu == () || qemu == "" {
    print("❌ qemu-img not found (required by base image builder)");
    missing = true;
}
if missing {
    print("=== Aborting due to missing dependencies ===");
    exit();
}
print("✓ Dependencies look OK");

// ---------- Ensure base image ----------
print("\n--- Ensuring Ubuntu 24.04 cloud image ---");
let base;
try {
    // Resize to e.g. 10 GiB sparse (adjust as needed)
    base = qcow2_build_ubuntu_24_04_base(base_dir, 10);
} catch (err) {
    print(`❌ Failed to build/ensure base image: ${err}`);
    exit();
}
let disk_path = base.base_image_path;
print(`✓ Using base image: ${disk_path}`);

// ---------- Build cloud-init NoCloud seed (user-data/meta-data) ----------
print("\n--- Building NoCloud seed (SSH key) ---");
run_silent(`mkdir -p ${base_dir}`);

// Compose user-data and meta-data
let ud = `#cloud-config
users:
  - name: ubuntu
    groups: [adm, cdrom, dialout, lxd, plugdev, sudo]
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    lock_passwd: true
    ssh_authorized_keys:
      - ${user_pubkey}
ssh_pwauth: false
package_update: true
`;
let md = `instance-id: ${cloudinit_instance_id}
local-hostname: ${cloudinit_hostname}
`;

// Write files
let wr1 = run_silent(`/bin/bash -lc "cat > ${user_data} <<'EOF'
${ud}
EOF"`);
if !wr1.success { print(`❌ Failed to write ${user_data}`); exit(); }
let wr2 = run_silent(`/bin/bash -lc "cat > ${meta_data} <<'EOF'
${md}
EOF"`);
if !wr2.success { print(`❌ Failed to write ${meta_data}`); exit(); }

// Build seed ISO (prefer cloud-localds)
let built = false;
if !(clds == () || clds == "") {
    let r = run_silent(`cloud-localds ${seed_iso} ${user_data} ${meta_data}`);
    if r.success { built = true; }
}
if !built {
    if geniso == () || geniso == "" {
        print("❌ Neither cloud-localds nor genisoimage available to build seed.iso");
        exit();
    }
    let r2 = run_silent(`genisoimage -output ${seed_iso} -volid cidata -joliet -rock ${user_data} ${meta_data}`);
    if !r2.success {
        print("❌ genisoimage failed to create seed.iso");
        exit();
    }
}
print(`✓ Seed ISO: ${seed_iso}`);

// ---------- Determine boot method (firmware or kernel) ----------
print("\n--- Determining boot method ---");
let firmware_path = "";
if firmware_path_override != "" && exist(firmware_path_override) {
    firmware_path = firmware_path_override;
} else {
    let candidates = [
        "/usr/local/share/cloud-hypervisor/hypervisor-fw",
        "/usr/share/cloud-hypervisor/hypervisor-fw",
        "/usr/lib/cloud-hypervisor/hypervisor-fw",
        "/tmp/virt_images/hypervisor-fw"
    ];
    for p in candidates {
        if exist(p) { firmware_path = p; break; }
    }
}
let kernel_path = "";
if kernel_path_override != "" && exist(kernel_path_override) {
    kernel_path = kernel_path_override;
}
if firmware_path == "" && kernel_path == "" {
    print("❌ No firmware_path or kernel_path found. Set firmware_path_override or kernel_path_override and re-run.");
    exit();
}
if firmware_path != "" {
    print(`✓ Using firmware boot: ${firmware_path}`);
} else {
    print(`✓ Using direct kernel boot: ${kernel_path}`);
}

// ---------- Create and start VM ----------
print("\n--- Creating and starting VM ---");
let rid = run_silent("date +%s%N");
// Make the suffix robust even if date outputs nothing
let suffix = "100000";
if rid.success {
    let t = rid.stdout.trim();
    if t != "" { suffix = t; }
}
let vm_id = `noble_ssh_${suffix}`;

let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "api_socket": "",
    "vcpus": 2,
    "memory_mb": 2048
};
if firmware_path != "" {
    spec.firmware_path = firmware_path;
} else {
    spec.kernel_path = kernel_path;
    spec.cmdline = kernel_cmdline;
}

// Attach the NoCloud seed ISO as a read-only disk
spec.extra_args = [
    "--disk", `path=${seed_iso},readonly=true`
];

try {
    let created = cloudhv_vm_create(spec);
    print(`✓ VM created: ${created}`);
} catch (err) {
    print(`❌ VM create failed: ${err}`);
    exit();
}

try {
    cloudhv_vm_start(vm_id);
    print("✓ VM start invoked");
} catch (err) {
    print(`❌ VM start failed: ${err}`);
    exit();
}

// ---------- Wait for VM API socket and probe readiness ----------
print("\n--- Waiting for VM API socket ---");
let api_sock = "";
// Discover the socket path (from SAL or common defaults)
let fallback_candidates = [
    `/root/hero/virt/vms/${vm_id}/api.sock`,
    `/home/maxime/hero/virt/vms/${vm_id}/api.sock`
];

// First, try to detect the socket on disk with a longer timeout
let sock_exists = false;
for i in 0..180 {
    sleep(1);
    let info = cloudhv_vm_info(vm_id);
    api_sock = info.spec.api_socket;
    if api_sock == () || api_sock == "" {
        for cand in fallback_candidates {
            if exist(cand) { api_sock = cand; break; }
        }
    }
    if api_sock != () && api_sock != "" && exist(api_sock) {
        sock_exists = true;
        break;
    }
}

// Regardless of filesystem existence, also try probing the API directly
let api_ok = false;
if api_sock != () && api_sock != "" {
    for i in 0..60 {
        let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
        if r.success { api_ok = true; break; }
        sleep(1);
    }
}

if api_ok {
    print("✓ VM API reachable");
} else if sock_exists {
    print("⚠️ VM API socket exists but the API is not reachable yet");
} else {
    print("⚠️ VM API socket not found yet; proceeding");
    let info_dbg = cloudhv_vm_info(vm_id);
    let log_path = info_dbg.runtime.log_file;
    if exist(log_path) {
        let t = run_silent(`tail -n 120 ${log_path}`);
        if t.success && t.stdout.trim() != "" {
            print("\n--- Last 120 lines of console log (diagnostics) ---");
            print(t.stdout);
            print("--- End of console log ---");
        }
    } else {
        print(`(console log not found at ${log_path})`);
    }
}

// ---------- Discover VM IP from dnsmasq leases ----------
print("\n--- Discovering VM IP (dnsmasq leases) ---");
// SAL enables system dnsmasq for br-hero by default; leases are usually at /var/lib/misc/dnsmasq.leases
let leases_paths = [
    "/var/lib/misc/dnsmasq.leases",
    "/var/lib/dnsmasq/dnsmasq.leases"
];
let vm_ip = "";
for path in leases_paths {
    if !exist(path) { continue; }
    for i in 0..120 {
        sleep(1);
        // Pure awk (no nested shells/pipes). Keep the last IP matching the hostname.
        let lr = run_silent(`awk -v host="${cloudinit_hostname}" '($4 ~ host){ip=$3} END{if(ip!=\"\") print ip}' ${path}`);
        if lr.success {
            let ip = lr.stdout.trim();
            if ip != "" {
                vm_ip = ip;
                break;
            }
        }
    }
    if vm_ip != "" { break; }
}

// ---------- Output connection details ----------
print("\n--- VM access details ---");
let info = cloudhv_vm_info(vm_id);
print(`VM ID: ${vm_id}`);
if info.runtime.pid != () {
    print(`PID: ${info.runtime.pid}`);
}
print(`Status: ${info.runtime.status}`);
print(`API socket: ${info.spec.api_socket}`);
print(`Console log: ${info.runtime.log_file}`);
print(`Seed ISO: ${seed_iso}`);
print(`Hostname: ${cloudinit_hostname}`);

if vm_ip != "" {
    print("\nSSH command (default user 'ubuntu'):");
    print(`ssh -o StrictHostKeyChecking=no ubuntu@${vm_ip}`);
} else {
    print("\n⚠️ Could not resolve VM IP yet from leases. Try later:");
    print(" - Check leases: sudo cat /var/lib/misc/dnsmasq.leases | grep noblevm");
    print(" - Or find on bridge (example): ip -4 neigh show dev br-hero");
    print(" - Then SSH: ssh -o StrictHostKeyChecking=no ubuntu@<IP>");
}

print("\n=== Completed: Ubuntu VM launched with SSH key via cloud-init ===");
235
packages/system/virt/tests/rhai/10_vm_end_to_end.rhai
Normal file
@@ -0,0 +1,235 @@
|
||||
// End-to-end smoke test for the new qcow2 + cloud-hypervisor refactor
|
||||
// This script executes in logical phases so we can see clearly what works.
|
||||
//
|
||||
// Phases:
|
||||
// 1) Host preflight check
|
||||
// 2) Image preparation (Ubuntu) -> raw disk
|
||||
// 3) Launch VM via builder using prepared raw disk
|
||||
// 4) Inspect VM info, list VMs
|
||||
// 5) Stop & delete VM
|
||||
// 6) Launch VM via one-shot wrapper vm_easy_launch
|
||||
// 7) Inspect VM info, list VMs
|
||||
// 8) Stop & delete VM
|
||||
//
|
||||
// Notes:
|
||||
// - Run as root on the host (required for NBD/mount/networking).
|
||||
// - Base images expected at:
|
||||
// /images/noble-server-cloudimg-amd64.img
|
||||
// /images/alpine-virt-cloudimg-amd64.qcow2 (Alpine prepare not implemented yet)
|
||||
// /images/hypervisor-fw (firmware binary used via --kernel)
|
||||
// - Network defaults: IPv4 NAT (dnsmasq DHCP) + IPv6 routed over Mycelium (RA/DHCPv6). No static IPv6 is written into the guest; it autoconfigures via RA.
|
||||
//
|
||||
// Conventions:
|
||||
// - Functional builder chaining: b = memory_mb(b, 4096), etc.
|
||||
// - Each phase prints a banner and either "OK" or "FAILED" with detailed error message.
|
||||
|
||||
fn banner(s) {
|
||||
print("==================================================");
|
||||
print(s);
|
||||
print("==================================================");
|
||||
}
|
||||
|
||||
fn ok(s) {
|
||||
print("[OK] " + s);
|
||||
}
|
||||
|
||||
fn fail(msg) {
|
||||
print("[FAILED] " + msg);
|
||||
}
|
||||
|
||||
fn dump_map(m) {
|
||||
// simple pretty printer for small maps
|
||||
for k in m.keys() {
|
||||
print(" " + k + ": " + m[k].to_string());
|
||||
}
|
||||
}
|
||||
|
||||
fn dump_array(a) {
|
||||
let i = 0;
|
||||
for x in a {
|
||||
print(" - " + x.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------
|
||||
// Phase 1: Host preflight check
|
||||
// ------------------------------------------------------------------------------------
|
||||
banner("PHASE 1: host_check()");
|
||||
let hc = host_check();
|
||||
if !(hc.ok == true) {
|
||||
fail("host_check indicates missing dependencies; details:");
|
||||
print("critical:");
|
||||
dump_array(hc.critical);
|
||||
print("optional:");
|
||||
dump_array(hc.optional);
|
||||
print("notes:");
|
||||
dump_array(hc.notes);
|
||||
// Short-circuit: nothing else will work without deps
|
||||
throw "Missing critical host dependencies";
|
||||
} else {
|
||||
ok("host_check passed");
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------
|
||||
// Phase 2: Image preparation for Ubuntu
|
||||
// - produces a per-VM raw disk in $HOME/hero/virt/vms/<id>/disk.raw
|
||||
// ------------------------------------------------------------------------------------
|
||||
banner("PHASE 2: image_prepare (Ubuntu) -> raw disk");
|
||||
let vmA = "vm-e2e-a";
|
||||
let prep_opts = #{
|
||||
id: vmA,
|
||||
flavor: "ubuntu",
|
||||
// source: optional override, default uses /images/noble-server-cloudimg-amd64.img
|
||||
// target_dir: optional override, default $HOME/hero/virt/vms/<id>
|
||||
disable_cloud_init_net: true,
|
||||
};
|
||||

let prep_res = ();
let prep_ok = false;
try {
    prep_res = image_prepare(prep_opts);
    ok("image_prepare returned:");
    dump_map(prep_res);
    if prep_res.raw_disk == () {
        fail("prep_res.raw_disk is UNIT; expected string path");
    } else {
        ok("raw_disk: " + prep_res.raw_disk);
        prep_ok = true;
    }
} catch (e) {
    fail("image_prepare failed: " + e.to_string());
}

if !(prep_ok) {
    throw "Stopping due to image_prepare failure";
}

// ------------------------------------------------------------------------------------
// Phase 3: Launch VM via builder using the prepared raw disk
// ------------------------------------------------------------------------------------
banner("PHASE 3: Launch via cloudhv_builder (disk from Phase 2)");
let b = cloudhv_builder(vmA);
// Explicitly select Default NAT networking (bridge + NAT + dnsmasq; IPv6 via Mycelium if enabled)
let b = network_default_nat(b);
let b = disk(b, prep_res.raw_disk);
let b = memory_mb(b, 4096);
let b = vcpus(b, 2);
// Optional extras:
// let b = extra_arg(b, "--serial"); let b = extra_arg(b, "tty");
// let b = no_default_net(b);

let vm_id_a = "";
try {
    vm_id_a = launch(b);
    ok("builder.launch started VM id: " + vm_id_a);
} catch (e) {
    fail("builder.launch failed: " + e.to_string());
    throw "Stopping due to launch failure for vm-e2e-a";
}

// ------------------------------------------------------------------------------------
// Phase 4: Inspect VM info, list VMs
// ------------------------------------------------------------------------------------
banner("PHASE 4: cloudhv_vm_info / cloudhv_vm_list");
try {
    let info_a = cloudhv_vm_info(vm_id_a);
    ok("cloudhv_vm_info:");
    dump_map(info_a);
} catch (e) {
    fail("cloudhv_vm_info failed: " + e.to_string());
}

try {
    let vms = cloudhv_vm_list();
    ok("cloudhv_vm_list count = " + vms.len.to_string());
} catch (e) {
    fail("cloudhv_vm_list failed: " + e.to_string());
}

sleep(1000000); // 1 second (same microsecond units as the 30-second sleep in Phase 6)
// ------------------------------------------------------------------------------------
// Phase 5: Stop & delete VM A
// ------------------------------------------------------------------------------------
banner("PHASE 5: Stop & delete VM A");
try {
    cloudhv_vm_stop(vm_id_a, false);
    ok("cloudhv_vm_stop graceful OK");
} catch (e) {
    fail("cloudhv_vm_stop (graceful) failed: " + e.to_string() + " -> trying force");
    try {
        cloudhv_vm_stop(vm_id_a, true);
        ok("cloudhv_vm_stop force OK");
    } catch (e2) {
        fail("cloudhv_vm_stop force failed: " + e2.to_string());
    }
}

try {
    cloudhv_vm_delete(vm_id_a, true);
    ok("cloudhv_vm_delete OK (deleted disks)");
} catch (e) {
    fail("cloudhv_vm_delete failed: " + e.to_string());
}

// ------------------------------------------------------------------------------------
// Phase 6: Launch VM via one-shot wrapper vm_easy_launch()
// ------------------------------------------------------------------------------------
banner("PHASE 6: vm_easy_launch for VM B");
let vmB = "vm-e2e-b";
let vm_id_b = "";
try {
    vm_id_b = vm_easy_launch("ubuntu", vmB, 4096, 2);
    ok("vm_easy_launch started VM id: " + vm_id_b);
} catch (e) {
    fail("vm_easy_launch failed: " + e.to_string());
    throw "Stopping due to vm_easy_launch failure";
}
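// The wrapper collapses the builder chain into a single call; judging by the
// values used here, its arguments correspond to (flavor, id, memory_mb, vcpus),
// mirroring what Phase 3 set explicitly (an inference, not a documented signature).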

// Allow time for the VM to fully boot and for SSH to become ready
print("Sleeping 30 seconds for VM to boot... You can try SSH during this time.");
sleep(30000000); // 30 seconds (microseconds)

// ------------------------------------------------------------------------------------
// Phase 7: Inspect VM B info, list VMs
// ------------------------------------------------------------------------------------
banner("PHASE 7: Inspect VM B");
try {
    let info_b = cloudhv_vm_info(vm_id_b);
    ok("cloudhv_vm_info (B):");
    dump_map(info_b);
} catch (e) {
    fail("cloudhv_vm_info (B) failed: " + e.to_string());
}

try {
    let vms2 = cloudhv_vm_list();
    ok("cloudhv_vm_list count = " + vms2.len.to_string());
} catch (e) {
    fail("cloudhv_vm_list failed: " + e.to_string());
}

// ------------------------------------------------------------------------------------
// Phase 8: Stop & delete VM B
// ------------------------------------------------------------------------------------
banner("PHASE 8: Stop & delete VM B");
try {
    cloudhv_vm_stop(vm_id_b, false);
    ok("cloudhv_vm_stop (B) graceful OK");
} catch (e) {
    fail("cloudhv_vm_stop (B) graceful failed: " + e.to_string() + " -> trying force");
    try {
        cloudhv_vm_stop(vm_id_b, true);
        ok("cloudhv_vm_stop (B) force OK");
    } catch (e2) {
        fail("cloudhv_vm_stop (B) force failed: " + e2.to_string());
    }
}

try {
    cloudhv_vm_delete(vm_id_b, true);
    ok("cloudhv_vm_delete (B) OK (deleted disks)");
} catch (e) {
    fail("cloudhv_vm_delete (B) failed: " + e.to_string());
}

banner("DONE: All phases executed");
60
packages/system/virt/tests/rhai/vm_clean_launch.rhai
Normal file
@@ -0,0 +1,60 @@
// Clean VM Launch Script
// Creates a VM using the builder pattern, with concise output

let vm_id = "vm-clean-test";

// Phase 0: Pre-clean any existing VM with the same id (best-effort).
// This avoids TAP "Resource busy" errors when a previous run is still active.
try {
    cloudhv_vm_stop(vm_id, true);
} catch (e) {
    // ignore
}
// brief wait to let processes exit and the TAP device release
wait_for_vm_boot(1);
try {
    cloudhv_vm_delete(vm_id, true);
} catch (e) {
    // ignore
}

// Phase 1: Host check
let hc = host_check();
if !(hc.ok == true) {
    throw "Host check failed: missing dependencies";
}

// Phase 2: Create VM using fluent builder pattern
let vm_id_actual = "";
try {
    vm_id_actual = cloudhv_builder(vm_id)
        .disk_from_flavor("ubuntu")
        .network_default_nat()
        .memory_mb(4096)
        .vcpus(2)
        .launch();
} catch (e) {
    throw "VM launch failed: " + e.to_string();
}
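
// For reference, the same chain can be written in the functional style used by
// 10_vm_end_to_end.rhai (both styles drive the same builder):
//
//   let b = cloudhv_builder(vm_id);
//   let b = network_default_nat(b);
//   let b = memory_mb(b, 4096);
//   let b = vcpus(b, 2);
//   let id = launch(b);
//
// (disk selection omitted above; whether disk_from_flavor also has a
// functional form is not shown in these scripts.)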

// Phase 3: Wait for VM to boot and get network configuration
wait_for_vm_boot(10);

// Phase 4: Discover VM IP addresses (robust, no hardcoded MAC/paths)
let net = cloudhv_vm_network_info(vm_id_actual, 30);
let ipv4 = net["ipv4"]; // Dynamic UNIT if not found yet
let ipv6 = net["ipv6"]; // Dynamic UNIT if not found
// Optional: you could also inspect net["mac"], net["bridge"], net["lease"]
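
// A minimal guard against undiscovered addresses (illustrative only; UNIT `()`
// is what the lookups above yield when no lease was found in time):
//
//   if ipv4 == () {
//       print("Warning: no IPv4 lease discovered within the timeout.");
//   }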

// Phase 5: Display connection info
cloudhv_display_network_info(vm_id_actual, ipv4, ipv6);

// Cleanup (left commented out):
/*
try {
    cloudhv_vm_stop(vm_id_actual, false);
    cloudhv_vm_delete(vm_id_actual, true);
    print("VM stopped and cleaned up.");
} catch (e) {
    print("Warning: cleanup failed: " + e.to_string());
}
*/
@@ -18,19 +18,20 @@ thiserror = { workspace = true }
uuid = { workspace = true }

# All SAL packages that this aggregation package depends on
sal-os = { path = "../os" }
sal-process = { path = "../process" }
sal-git = { path = "../system/git" }
sal-vault = { path = "../vault" }
sal-redisclient = { path = "../redisclient" }
sal-postgresclient = { path = "../postgresclient" }
sal-virt = { path = "../virt" }
sal-mycelium = { path = "../mycelium" }
sal-text = { path = "../text" }
sal-net = { path = "../net" }
sal-zinit-client = { path = "../zinit_client" }
sal-kubernetes = { path = "../kubernetes" }
sal-service-manager = { path = "../service_manager", features = ["rhai"] }
sal-os = { workspace = true }
sal-process = { workspace = true }
sal-git = { workspace = true }
sal-vault = { workspace = true }
sal-redisclient = { workspace = true }
sal-postgresclient = { workspace = true }
sal-virt = { workspace = true }
sal-mycelium = { workspace = true }
sal-hetzner = { workspace = true }
sal-text = { workspace = true }
sal-net = { workspace = true }
sal-zinit-client = { workspace = true }
sal-kubernetes = { workspace = true }
sal-service-manager = { workspace = true, features = ["rhai"] }


[features]

@@ -90,6 +90,9 @@ pub use sal_zinit_client::rhai::register_zinit_module;
// Re-export mycelium module
pub use sal_mycelium::rhai::register_mycelium_module;

// Re-export hetzner module
pub use sal_hetzner::rhai::register_hetzner_module;

// Re-export text module
pub use sal_text::rhai::register_text_module;

@@ -151,6 +154,9 @@ pub fn register(engine: &mut Engine) -> Result<(), Box<rhai::EvalAltResult>> {
    // Register Mycelium module functions
    sal_mycelium::rhai::register_mycelium_module(engine)?;

    // Register Hetzner module functions
    sal_hetzner::rhai::register_hetzner_module(engine)?;

    // Register Text module functions
    sal_text::rhai::register_text_module(engine)?;

5
rhailib/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
target
worker_rhai_temp_db
dump.rdb
.DS_Store
.env
27
rhailib/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
[package]
name = "rhailib"
version = "0.1.0"
edition = "2021" # Changed to 2021 for consistency with other crates

[dependencies]
anyhow = "1.0"
chrono = { version = "0.4", features = ["serde"] }
env_logger = "0.10"
log = "0.4"
redis = { version = "0.25.0", features = ["tokio-comp"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal"] }
rhai = "1.21.0"
derive = { path = "src/derive" }

[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # For examples like dedicated_reply_queue_demo
tempfile = "3.10"

[[bench]]
name = "simple_rhai_bench"
harness = false
Some files were not shown because too many files have changed in this diff.