31 Commits

Author SHA1 Message Date
Timur Gordon
7afa5ea1c0 Merge branch 'development' of https://git.ourworld.tf/herocode/sal into development 2025-09-01 15:57:49 +02:00
Timur Gordon
6c2d96c9a5 service manager impl 2025-09-01 15:54:34 +02:00
Sameh Abouel-saad
b2fc0976bd style: format code and reorganize imports across rfsclient codebase 2025-08-28 03:50:07 +03:00
Sameh Abouel-saad
e114404ca7 fix: refactor rhai functions to use Map parameters 2025-08-28 03:43:00 +03:00
Sameh Abouel-saad
536779f521 fix rfsclient rhai example and update docs 2025-08-27 22:29:25 +03:00
Sameh Abouelsaad
c2969621b1 feat: implement RFS client with authentication and file management APIs 2025-08-27 17:59:20 +03:00
Timur Gordon
b39f24ca8f add install script and compilation fixes 2025-08-27 13:17:52 +02:00
f87a1d7f80 Merge branch 'development' of git.ourworld.tf:herocode/herolib_rust into development 2025-08-25 07:06:52 +02:00
17e5924e0b ... 2025-08-25 07:06:50 +02:00
Maxime Van Hees
768e3e176d fixed overlapping workspace roots 2025-08-21 16:20:15 +02:00
Timur Gordon
aa0248ef17 move rhailib to herolib 2025-08-21 14:32:24 +02:00
Maxime Van Hees
aab2b6f128 fixed cloud hypervisor issues + updated test script (working now) 2025-08-21 13:32:03 +02:00
Maxime Van Hees
d735316b7f cloud-hypervisor SAL + rhai test script for it 2025-08-20 18:01:21 +02:00
Maxime Van Hees
d1c80863b8 fixed test script errors 2025-08-20 15:42:12 +02:00
Maxime Van Hees
169c62da47 Merge branch 'development' of https://git.ourworld.tf/herocode/herolib_rust into development 2025-08-20 14:45:57 +02:00
Maxime Van Hees
33a5f24981 qcow2 SAL + rhai script to test functionality 2025-08-20 14:44:29 +02:00
Timur Gordon
d7562ce466 add data packages and remove empty submodule 2025-08-07 12:13:37 +02:00
ca736d62f3 /// 2025-08-06 03:27:49 +02:00
Maxime Van Hees
078c6f723b merging changes 2025-08-05 20:28:20 +02:00
Maxime Van Hees
9fdb8d8845 integrated hetzner client in repo + showcase of using scope for 'cleaner' scripts 2025-08-05 20:27:14 +02:00
8203a3b1ff Merge branch 'development' of git.ourworld.tf:herocode/herolib_rust into development 2025-08-05 16:39:01 +02:00
1770ac561e ... 2025-08-05 16:39:00 +02:00
Maxime Van Hees
eed6dbf8dc added robot hetzner code to research for later importing it into codebase 2025-08-05 16:32:29 +02:00
4cd4e04028 ... 2025-08-05 16:22:25 +02:00
8cc828fc0e ...... 2025-08-05 16:21:33 +02:00
56af312aad ... 2025-08-05 16:04:55 +02:00
dfd6931c5b ... 2025-08-05 16:00:24 +02:00
6e01f99958 ... 2025-08-05 15:43:13 +02:00
0c02d0e99f ... 2025-08-05 15:33:03 +02:00
7856fc0a4e ... 2025-07-14 13:53:01 +04:00
Mahmoud-Emad
758e59e921 docs: Improve README.md with clearer structure and installation
- Update README.md to provide a clearer structure and improved
  installation instructions.  This makes it easier for users to
  understand and use the library.
- Remove outdated and unnecessary sections like the workspace
  structure details, publishing status, and detailed features
  lists. The information is either not relevant anymore or can be
  found elsewhere.
- Simplify installation instructions to focus on the core aspects
  of installing individual packages or the meta-package with
  features.
- Add a dedicated section for building and running tests,
  improving developer experience and making the process more
  transparent.
- Modernize the overall layout and formatting for better
  readability.
2025-07-13 12:51:08 +03:00
687 changed files with 41303 additions and 1144 deletions

.gitignore (vendored, 5 lines changed)

@@ -63,4 +63,7 @@ sidebars.ts
tsconfig.json
Cargo.toml.bak
for_augment
for_augment
myenv.sh

Cargo.toml

@@ -12,22 +12,27 @@ readme = "README.md"
[workspace]
members = [
".",
"vault",
"git",
"redisclient",
"mycelium",
"text",
"os",
"net",
"zinit_client",
"process",
"virt",
"postgresclient",
"kubernetes",
"packages/clients/myceliumclient",
"packages/clients/postgresclient",
"packages/clients/redisclient",
"packages/clients/zinitclient",
"packages/clients/rfsclient",
"packages/core/net",
"packages/core/text",
"packages/crypt/vault",
"packages/data/ourdb",
"packages/data/radixtree",
"packages/data/tst",
"packages/system/git",
"packages/system/kubernetes",
"packages/system/os",
"packages/system/process",
"packages/system/virt",
"rhai",
"rhailib",
"herodo",
"service_manager",
"packages/clients/hetznerclient",
"packages/ai/codemonkey",
]
resolver = "2"
@@ -39,6 +44,7 @@ rust-version = "1.70.0"
# Core shared dependencies with consistent versions
anyhow = "1.0.98"
base64 = "0.22.1"
bytes = "1.7.1"
dirs = "6.0.0"
env_logger = "0.11.8"
futures = "0.3.30"
@@ -49,7 +55,7 @@ log = "0.4"
once_cell = "1.18.0"
rand = "0.8.5"
regex = "1.8.1"
reqwest = { version = "0.12.15", features = ["json"] }
reqwest = { version = "0.12.15", features = ["json", "blocking"] }
rhai = { version = "1.12.0", features = ["sync"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -70,6 +76,10 @@ chacha20poly1305 = "0.10.1"
k256 = { version = "0.13.4", features = ["ecdsa", "ecdh"] }
sha2 = "0.10.7"
hex = "0.4"
bincode = { version = "2.0.1", features = ["serde"] }
pbkdf2 = "0.12.2"
getrandom = { version = "0.3.3", features = ["wasm_js"] }
tera = "1.19.0"
# Ethereum dependencies
ethers = { version = "2.0.7", features = ["legacy"] }
@@ -86,27 +96,54 @@ windows = { version = "0.61.1", features = [
zinit-client = "0.4.0"
urlencoding = "2.1.3"
tokio-test = "0.4.4"
kube = { version = "0.95.0", features = ["client", "config", "derive"] }
k8s-openapi = { version = "0.23.0", features = ["latest"] }
tokio-retry = "0.3.0"
governor = "0.6.3"
tower = { version = "0.5.2", features = ["timeout", "limit"] }
serde_yaml = "0.9"
postgres-types = "0.2.5"
r2d2 = "0.8.10"
# SAL dependencies
sal-git = { path = "packages/system/git" }
sal-kubernetes = { path = "packages/system/kubernetes" }
sal-redisclient = { path = "packages/clients/redisclient" }
sal-mycelium = { path = "packages/clients/myceliumclient" }
sal-hetzner = { path = "packages/clients/hetznerclient" }
sal-rfs-client = { path = "packages/clients/rfsclient" }
sal-text = { path = "packages/core/text" }
sal-os = { path = "packages/system/os" }
sal-net = { path = "packages/core/net" }
sal-zinit-client = { path = "packages/clients/zinitclient" }
sal-process = { path = "packages/system/process" }
sal-virt = { path = "packages/system/virt" }
sal-postgresclient = { path = "packages/clients/postgresclient" }
sal-vault = { path = "packages/crypt/vault" }
sal-rhai = { path = "rhai" }
sal-service-manager = { path = "_archive/service_manager" }
[dependencies]
thiserror = "2.0.12" # For error handling in the main Error enum
tokio = { workspace = true } # For async examples
thiserror = { workspace = true }
tokio = { workspace = true }
# Optional dependencies - users can choose which modules to include
sal-git = { path = "git", optional = true }
sal-kubernetes = { path = "kubernetes", optional = true }
sal-redisclient = { path = "redisclient", optional = true }
sal-mycelium = { path = "mycelium", optional = true }
sal-text = { path = "text", optional = true }
sal-os = { path = "os", optional = true }
sal-net = { path = "net", optional = true }
sal-zinit-client = { path = "zinit_client", optional = true }
sal-process = { path = "process", optional = true }
sal-virt = { path = "virt", optional = true }
sal-postgresclient = { path = "postgresclient", optional = true }
sal-vault = { path = "vault", optional = true }
sal-rhai = { path = "rhai", optional = true }
sal-service-manager = { path = "service_manager", optional = true }
zinit-client.workspace = true
sal-git = { workspace = true, optional = true }
sal-kubernetes = { workspace = true, optional = true }
sal-redisclient = { workspace = true, optional = true }
sal-mycelium = { workspace = true, optional = true }
sal-hetzner = { workspace = true, optional = true }
sal-rfs-client = { workspace = true, optional = true }
sal-text = { workspace = true, optional = true }
sal-os = { workspace = true, optional = true }
sal-net = { workspace = true, optional = true }
sal-zinit-client = { workspace = true, optional = true }
sal-process = { workspace = true, optional = true }
sal-virt = { workspace = true, optional = true }
sal-postgresclient = { workspace = true, optional = true }
sal-vault = { workspace = true, optional = true }
sal-rhai = { workspace = true, optional = true }
sal-service-manager = { workspace = true, optional = true }
[features]
default = []
@@ -116,6 +153,8 @@ git = ["dep:sal-git"]
kubernetes = ["dep:sal-kubernetes"]
redisclient = ["dep:sal-redisclient"]
mycelium = ["dep:sal-mycelium"]
hetzner = ["dep:sal-hetzner"]
rfsclient = ["dep:sal-rfs-client"]
text = ["dep:sal-text"]
os = ["dep:sal-os"]
net = ["dep:sal-net"]
@@ -125,18 +164,20 @@ virt = ["dep:sal-virt"]
postgresclient = ["dep:sal-postgresclient"]
vault = ["dep:sal-vault"]
rhai = ["dep:sal-rhai"]
service_manager = ["dep:sal-service-manager"]
# service_manager is removed as it's not a direct member anymore
# Convenience feature groups
core = ["os", "process", "text", "net"]
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"]
infrastructure = ["git", "vault", "kubernetes", "virt", "service_manager"]
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium", "hetzner", "rfsclient"]
infrastructure = ["git", "vault", "kubernetes", "virt"]
scripting = ["rhai"]
all = [
"git",
"kubernetes",
"redisclient",
"mycelium",
"hetzner",
"rfsclient",
"text",
"os",
"net",
@@ -146,7 +187,6 @@ all = [
"postgresclient",
"vault",
"rhai",
"service_manager",
]
# Examples
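For downstream users, these groups compose like any other Cargo features; a minimal sketch of a consumer `Cargo.toml` (version number assumed for illustration):

```toml
[dependencies]
# "core" pulls in os, process, text and net; "clients" adds the client crates.
sal = { version = "0.1.0", features = ["core", "clients"] }
```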

README.md (456 lines changed)

@@ -1,404 +1,136 @@
# SAL (System Abstraction Layer)
# Herocode Herolib Rust Repository
**Version: 0.1.0**
## Overview
SAL is a comprehensive Rust library designed to provide a unified and simplified interface for a wide array of system-level operations and interactions. It abstracts platform-specific details, enabling developers to write robust, cross-platform code with greater ease. SAL also includes `herodo`, a powerful command-line tool for executing Rhai scripts that leverage SAL's capabilities for automation and system management tasks.
This repository contains the **Herocode Herolib** Rust library and a collection of scripts, examples, and utilities for building, testing, and publishing the SAL (System Abstraction Layer) crates. The repository includes:
## 🏗️ **Cargo Workspace Structure**
- **Rust crates** for various system components (e.g., `os`, `process`, `text`, `git`, `vault`, `kubernetes`, etc.).
- **Rhai scripts** and test suites for each crate.
- **Utility scripts** to automate common development tasks.
SAL is organized as a **Cargo workspace** with 15 specialized crates:
## Scripts
- **Root Package**: `sal` - Umbrella crate that re-exports all modules
- **12 Library Crates**: Core SAL modules (os, process, text, net, git, vault, kubernetes, virt, redisclient, postgresclient, zinit_client, mycelium)
- **1 Binary Crate**: `herodo` - Rhai script execution engine
- **1 Integration Crate**: `rhai` - Rhai scripting integration layer
The repository provides three primary helper scripts located in the repository root:
This workspace structure provides excellent build performance, dependency management, and maintainability.
| Script | Description | Typical Usage |
|--------|-------------|--------------|
| `scripts/publish-all.sh` | Publishes all SAL crates to **crates.io** in the correct dependency order. Handles version bumping, dependency updates, dry-run mode, and rate limiting. | `./scripts/publish-all.sh [--dry-run] [--wait <seconds>] [--version <ver>]` |
| `build_herodo.sh` | Builds the `herodo` binary from the `herodo` package and optionally runs a specified Rhai script. | `./build_herodo.sh [script_name]` |
| `run_rhai_tests.sh` | Executes all Rhai test suites across the repository, logging results and providing a summary. | `./run_rhai_tests.sh` |
### **🚀 Workspace Benefits**
- **Unified Dependency Management**: Shared dependencies across all crates with consistent versions
- **Optimized Build Performance**: Parallel compilation and shared build artifacts
- **Simplified Testing**: Run tests across all modules with a single command
- **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure
- **Production Ready**: 100% test coverage with comprehensive Rhai integration tests
Below are detailed usage instructions for each script.
## 📦 Installation
---
SAL is designed to be modular - install only the components you need!
## 1. `scripts/publish-all.sh`
### Option 1: Individual Crates (Recommended)
### Purpose
Install only the modules you need:
- Publishes each SAL crate in the correct dependency order.
- Updates crate versions (if `--version` is supplied).
- Updates path dependencies to version dependencies before publishing.
- Supports **dry-run** mode to preview actions without publishing.
- Handles rate limiting between crate publishes.
### Options
| Option | Description |
|--------|-------------|
| `--dry-run` | Shows what would be published without actually publishing. |
| `--wait <seconds>` | Wait time between publishes (default: 15s). |
| `--version <ver>` | Set a new version for all crates (updates `Cargo.toml` files). |
| `-h, --help` | Show help message. |
### Example Usage
```bash
# Currently available packages
cargo add sal-os sal-process sal-text sal-net sal-git sal-vault sal-kubernetes sal-virt
# Dry run: no crates will be published
./scripts/publish-all.sh --dry-run
# Coming soon (rate limited)
# cargo add sal-redisclient sal-postgresclient sal-zinit-client sal-mycelium sal-rhai
# Publish with a custom wait time and version bump
./scripts/publish-all.sh --wait 30 --version 1.2.3
# Normal publish (no dry run)
./scripts/publish-all.sh
```
### Option 2: Meta-crate with Features
### Notes
Use the main `sal` crate with specific features:
- Must be run from the repository root (where `Cargo.toml` lives).
- Requires `cargo` and a logged-in `cargo` session (`cargo login`).
- The script automatically updates dependencies in each crate's `Cargo.toml` to use the new version before publishing.
```bash
# Coming soon - meta-crate with features (rate limited)
# cargo add sal --features os,process,text
# cargo add sal --features core # os, process, text, net
# cargo add sal --features infrastructure # git, vault, kubernetes, virt
# cargo add sal --features all
---
# For now, use individual crates (see Option 1 above)
```
## 2. `build_herodo.sh`
### Quick Start Examples
### Purpose
#### Using Individual Crates (Recommended)
```rust
use sal_os::fs;
use sal_process::run;
fn main() -> Result<(), Box<dyn std::error::Error>> {
// File system operations
let files = fs::list_files(".")?;
println!("Found {} files", files.len());
// Process execution
let result = run::command("echo hello")?;
println!("Output: {}", result.stdout);
Ok(())
}
```
#### Using Meta-crate with Features
```rust
// In Cargo.toml: sal = { version = "0.1.0", features = ["os", "process"] }
use sal::os::fs;
use sal::process::run;
fn main() -> Result<(), Box<dyn std::error::Error>> {
// File system operations
let files = fs::list_files(".")?;
println!("Found {} files", files.len());
// Process execution
let result = run::command("echo hello")?;
println!("Output: {}", result.stdout);
Ok(())
}
```
#### Using Herodo for Scripting
```bash
# Build and install herodo
git clone https://github.com/PlanetFirst/sal.git
cd sal
./build_herodo.sh
# Create a script file
cat > example.rhai << 'EOF'
// File operations
let files = find_files(".", "*.rs");
print("Found " + files.len() + " Rust files");
// Process execution
let result = run("echo 'Hello from SAL!'");
print("Output: " + result.stdout);
// Network operations
let reachable = http_check("https://github.com");
print("GitHub reachable: " + reachable);
EOF
# Execute the script
herodo example.rhai
```
## 📦 Available Packages
SAL is published as individual crates, allowing you to install only what you need:
| Package | Description | Install Command |
|---------|-------------|-----------------|
| [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations | `cargo add sal-os` |
| [`sal-process`](https://crates.io/crates/sal-process) | Process management | `cargo add sal-process` |
| [`sal-text`](https://crates.io/crates/sal-text) | Text processing utilities | `cargo add sal-text` |
| [`sal-net`](https://crates.io/crates/sal-net) | Network operations | `cargo add sal-net` |
| [`sal-git`](https://crates.io/crates/sal-git) | Git repository management | `cargo add sal-git` |
| [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations | `cargo add sal-vault` |
| [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management | `cargo add sal-kubernetes` |
| [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools | `cargo add sal-virt` |
| `sal-redisclient` | Redis database client | `cargo add sal-redisclient` ⏳ |
| `sal-postgresclient` | PostgreSQL client | `cargo add sal-postgresclient` ⏳ |
| `sal-zinit-client` | Zinit process supervisor | `cargo add sal-zinit-client` ⏳ |
| `sal-mycelium` | Mycelium network client | `cargo add sal-mycelium` ⏳ |
| `sal-rhai` | Rhai scripting integration | `cargo add sal-rhai` ⏳ |
| `sal` | Meta-crate with features | `cargo add sal --features all` ⏳ |
| `herodo` | Script executor binary | Build from source ⏳ |
**Legend**: ✅ Published | ⏳ Publishing soon (rate limited)
### 📢 **Publishing Status**
**Currently Available on crates.io:**
- ✅ [`sal-os`](https://crates.io/crates/sal-os) - Operating system operations
- ✅ [`sal-process`](https://crates.io/crates/sal-process) - Process management
- ✅ [`sal-text`](https://crates.io/crates/sal-text) - Text processing utilities
- ✅ [`sal-net`](https://crates.io/crates/sal-net) - Network operations
- ✅ [`sal-git`](https://crates.io/crates/sal-git) - Git repository management
- ✅ [`sal-vault`](https://crates.io/crates/sal-vault) - Cryptographic operations
- ✅ [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) - Kubernetes management
- ✅ [`sal-virt`](https://crates.io/crates/sal-virt) - Virtualization tools
**Publishing Soon** (hit crates.io rate limit):
- `sal-redisclient`, `sal-postgresclient`, `sal-zinit-client`, `sal-mycelium`
- `sal-rhai`
- `sal` (meta-crate), `herodo` (binary)
**Estimated Timeline**: Remaining packages will be published within 24 hours once the rate limit resets.
## Core Features
SAL offers a broad spectrum of functionalities, including:
- **System Operations**: File and directory management, environment variable access, system information retrieval, and OS-specific commands.
- **Process Management**: Create, monitor, control, and interact with system processes.
- **Containerization Tools**:
- Integration with **Buildah** for building OCI/Docker-compatible container images.
- Integration with **nerdctl** for managing containers (run, stop, list, build, etc.).
- **Version Control**: Programmatic interaction with Git repositories (clone, commit, push, pull, status, etc.).
- **Database Clients**:
- **Redis**: Robust client for interacting with Redis servers.
- **PostgreSQL**: Client for executing queries and managing PostgreSQL databases.
- **Scripting Engine**: In-built support for the **Rhai** scripting language, allowing SAL functionalities to be scripted and automated, primarily through the `herodo` tool.
- **Networking & Services**:
- **Mycelium**: Tools for Mycelium network peer management and message passing.
- **Zinit**: Client for interacting with the Zinit process supervision system.
- **RFS (Remote/Virtual Filesystem)**: Mount, manage, pack, and unpack various types of filesystems (local, SSH, S3, WebDAV).
- **Text Processing**: A suite of utilities for text manipulation, formatting, and regular expressions.
- **Cryptography (`vault`)**: Functions for common cryptographic operations.
## `herodo`: The SAL Scripting Tool
`herodo` is a command-line utility bundled with SAL that executes Rhai scripts. It empowers users to automate tasks and orchestrate complex workflows by leveraging SAL's diverse modules directly from scripts.
- Builds the `herodo` binary from the `herodo` package.
- Copies the binary to a system-wide location (`/usr/local/bin`) if run as root, otherwise to `~/hero/bin`.
- Optionally runs a specified Rhai script after building.
### Usage
```bash
# Execute a single Rhai script
herodo script.rhai
# Build only
./build_herodo.sh
# Execute a script with arguments
herodo script.rhai arg1 arg2
# Execute all .rhai scripts in a directory
herodo /path/to/scripts/
# Build and run a specific Rhai script (e.g., `example`):
./build_herodo.sh example
```
If a directory is provided, `herodo` will execute all `.rhai` scripts within that directory (and its subdirectories) in alphabetical order.
### Details
### Scriptable SAL Modules via `herodo`
- The script changes to its own directory, builds the `herodo` crate (`cargo build`), and copies the binary.
- If a script name is provided, it looks for the script in:
- `src/rhaiexamples/<name>.rhai`
- `src/herodo/scripts/<name>.rhai`
- If the script is not found, the script exits with an error.
The following SAL modules and functionalities are exposed to the Rhai scripting environment through `herodo`:
---
- **OS (`os`)**: Comprehensive file system operations, file downloading & installation, and system package management. [Documentation](os/README.md)
- **Process (`process`)**: Robust command and script execution, plus process management (listing, finding, killing, checking command existence). [Documentation](process/README.md)
- **Text (`text`)**: String manipulation, prefixing, path/name fixing, text replacement, and templating. [Documentation](text/README.md)
- **Net (`net`)**: Network operations, HTTP requests, and connectivity utilities. [Documentation](net/README.md)
- **Git (`git`)**: High-level repository management and generic Git command execution with Redis-backed authentication (clone, pull, push, commit, etc.). [Documentation](git/README.md)
- **Vault (`vault`)**: Cryptographic operations, keypair management, encryption, decryption, hashing, etc. [Documentation](vault/README.md)
- **Redis Client (`redisclient`)**: Execute Redis commands (`redis_get`, `redis_set`, `redis_execute`, etc.). [Documentation](redisclient/README.md)
- **PostgreSQL Client (`postgresclient`)**: Execute SQL queries against PostgreSQL databases. [Documentation](postgresclient/README.md)
- **Zinit (`zinit_client`)**: Client for Zinit process supervisor (service management, logs). [Documentation](zinit_client/README.md)
- **Mycelium (`mycelium`)**: Client for Mycelium decentralized networking API (node info, peer management, messaging). [Documentation](mycelium/README.md)
- **Virtualization (`virt`)**:
- **Buildah**: OCI/Docker image building functions. [Documentation](virt/README.md)
- **nerdctl**: Container lifecycle management (`nerdctl_run`, `nerdctl_stop`, `nerdctl_images`, `nerdctl_image_build`, etc.)
- **RFS**: Mount various filesystems (local, SSH, S3, etc.), pack/unpack filesystem layers.
## 3. `run_rhai_tests.sh`
### Example `herodo` Rhai Script
### Purpose
```rhai
// file: /opt/scripts/example_task.rhai
- Runs **all** Rhai test suites across the repository.
- Supports both the legacy `rhai_tests` directory and the newer `*/tests/rhai` layout.
- Logs output to `run_rhai_tests.log` and prints a summary.
// OS operations
println("Checking for /tmp/my_app_data...");
if !exist("/tmp/my_app_data") {
mkdir("/tmp/my_app_data");
println("Created directory /tmp/my_app_data");
}
// Redis operations
println("Setting Redis key 'app_status' to 'running'");
redis_set("app_status", "running");
let status = redis_get("app_status");
println("Current app_status from Redis: " + status);
// Process execution
println("Listing files in /tmp:");
let output = run("ls -la /tmp");
println(output.stdout);
println("Script finished.");
```
Run with: `herodo /opt/scripts/example_task.rhai`
For more examples, check the individual module test directories (e.g., `text/tests/rhai/`, `os/tests/rhai/`, etc.) in this repository.
## Using SAL as a Rust Library
### Option 1: Individual Crates (Recommended)
Add only the SAL modules you need:
```toml
[dependencies]
sal-os = "0.1.0"
sal-process = "0.1.0"
sal-text = "0.1.0"
```
```rust
use sal_os::fs;
use sal_process::run;
use sal_text::template;
fn main() -> Result<(), Box<dyn std::error::Error>> {
// File operations
let files = fs::list_files(".")?;
println!("Found {} files", files.len());
// Process execution
let result = run::command("echo 'Hello SAL!'")?;
println!("Output: {}", result.stdout);
// Text templating
let template_str = "Hello {{name}}!";
let mut vars = std::collections::HashMap::new();
vars.insert("name".to_string(), "World".to_string());
let rendered = template::render(template_str, &vars)?;
println!("Rendered: {}", rendered);
Ok(())
}
```
### Option 2: Meta-crate with Features (Coming Soon)
```toml
[dependencies]
sal = { version = "0.1.0", features = ["os", "process", "text"] }
```
```rust
use sal::os::fs;
use sal::process::run;
use sal::text::template;
// Same code as above, but using the meta-crate
```
*(Note: The meta-crate `sal` will be available once all individual packages are published.)*
## 🎯 **Why Choose SAL?**
### **Modular Architecture**
- **Install Only What You Need**: Each package is independent - no bloated dependencies
- **Faster Compilation**: Smaller dependency trees mean faster build times
- **Smaller Binaries**: Only include the functionality you actually use
- **Clear Dependencies**: Explicit about what functionality your project uses
### **Developer Experience**
- **Consistent APIs**: All packages follow the same design patterns and conventions
- **Comprehensive Documentation**: Each package has detailed documentation and examples
- **Real-World Tested**: All functionality is production-tested, no placeholder code
- **Type Safety**: Leverages Rust's type system for safe, reliable operations
### **Scripting Power**
- **Herodo Integration**: Execute Rhai scripts with full access to SAL functionality
- **Cross-Platform**: Works consistently across Windows, macOS, and Linux
- **Automation Ready**: Perfect for DevOps, CI/CD, and system administration tasks
## 📦 **Workspace Modules Overview**
SAL is organized as a Cargo workspace with the following crates:
### **Core Library Modules**
- **`sal-os`**: Core OS interactions, file system operations, environment access
- **`sal-process`**: Process creation, management, and control
- **`sal-text`**: Utilities for text processing and manipulation
- **`sal-net`**: Network operations, HTTP requests, and connectivity utilities
### **Integration Modules**
- **`sal-git`**: Git repository management and operations
- **`sal-vault`**: Cryptographic functions and keypair management
- **`sal-rhai`**: Integration layer for the Rhai scripting engine, used by `herodo`
### **Client Modules**
- **`sal-redisclient`**: Client for Redis database interactions
- **`sal-postgresclient`**: Client for PostgreSQL database interactions
- **`sal-zinit-client`**: Client for Zinit process supervisor
- **`sal-mycelium`**: Client for Mycelium network operations
### **Specialized Modules**
- **`sal-virt`**: Virtualization-related utilities (buildah, nerdctl, rfs)
### **Root Package & Binary**
- **`sal`**: Root umbrella crate that re-exports all modules
- **`herodo`**: Command-line binary for executing Rhai scripts
## 🔨 **Building SAL**
Build the entire workspace (all crates) using Cargo:
### Usage
```bash
# Build all workspace members
cargo build --workspace
# Build for release
cargo build --workspace --release
# Build specific crate
cargo build -p sal-text
cargo build -p herodo
```
The `herodo` executable will be located at `target/debug/herodo` or `target/release/herodo`.
## 🧪 **Running Tests**
### **Rust Unit Tests**
```bash
# Run all workspace tests
cargo test --workspace
# Run tests for specific crate
cargo test -p sal-text
cargo test -p sal-os
# Run only library tests (faster)
cargo test --workspace --lib
```
### **Rhai Integration Tests**
Run comprehensive Rhai script tests that exercise `herodo` and SAL's scripted functionalities:
```bash
# Run all Rhai integration tests (16 modules)
# Run all tests
./run_rhai_tests.sh
# Results: 16/16 modules pass with 100% success rate
```
The Rhai tests validate real-world functionality across all SAL modules and provide comprehensive integration testing.
### Output
- Colored console output for readability.
- Log file (`run_rhai_tests.log`) contains full output for later review.
- Summary includes total modules, passed, and failed counts.
- Exit code `0` if all tests pass, `1` otherwise.
---
## General Development Workflow
1. **Build**: Use `build_herodo.sh` to compile the `herodo` binary.
2. **Test**: Run `run_rhai_tests.sh` to ensure all Rhai scripts pass.
3. **Publish**: When ready to release, use `scripts/publish-all.sh` (with `--dry-run` first to verify).
## Prerequisites
- **Rust toolchain** (`cargo`, `rustc`) installed.
- **Rhai** interpreter (`herodo`) built and available.
- **Git** for version control.
- **Cargo login** for publishing to crates.io.
## License
SAL is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details.
See `LICENSE` for details.
---
**Happy coding!**

Cargo.toml (service manager crate)

@@ -10,7 +10,7 @@ license = "Apache-2.0"
[dependencies]
# Use workspace dependencies for consistency
thiserror = "1.0"
tokio = { workspace = true }
tokio = { workspace = true, features = ["process", "time", "sync"] }
log = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
@@ -20,6 +20,9 @@ once_cell = { workspace = true }
zinit-client = { version = "0.4.0" }
# Optional Rhai integration
rhai = { workspace = true, optional = true }
# Process manager dependencies
async-trait = "0.1"
chrono = "0.4"
[target.'cfg(target_os = "macos")'.dependencies]

lib.rs (service manager crate)

@@ -83,20 +83,34 @@ pub trait ServiceManager: Send + Sync {
fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError>;
}
// Platform-specific implementations
#[cfg(target_os = "macos")]
mod launchctl;
#[cfg(target_os = "macos")]
pub use launchctl::LaunchctlServiceManager;
// Platform-specific implementations (commented out for now to simplify)
// #[cfg(target_os = "macos")]
// mod launchctl;
// #[cfg(target_os = "macos")]
// pub use launchctl::LaunchctlServiceManager;
#[cfg(target_os = "linux")]
mod systemd;
#[cfg(target_os = "linux")]
pub use systemd::SystemdServiceManager;
// #[cfg(target_os = "linux")]
// mod systemd;
// #[cfg(target_os = "linux")]
// pub use systemd::SystemdServiceManager;
mod zinit;
pub use zinit::ZinitServiceManager;
// Process manager module for actor lifecycle management
pub mod process_manager;
pub use process_manager::{
ProcessManager, ProcessConfig, ProcessStatus, ProcessManagerError, ProcessManagerResult,
SimpleProcessManager, LogInfo,
};
pub mod tmux_manager;
pub use tmux_manager::TmuxProcessManager;
// Re-export process managers for easier access
pub use process_manager::SimpleProcessManager as SimpleManager;
pub use tmux_manager::TmuxProcessManager as TmuxManager;
#[cfg(feature = "rhai")]
pub mod rhai;
@@ -206,7 +220,11 @@ fn test_zinit_socket(socket_path: &str) -> bool {
pub fn create_service_manager() -> Result<Box<dyn ServiceManager>, ServiceManagerError> {
#[cfg(target_os = "macos")]
{
Ok(Box::new(LaunchctlServiceManager::new()))
// LaunchctlServiceManager is commented out for now
// For now, return an error on macOS since launchctl is disabled
Err(ServiceManagerError::Other(
"Service manager not available on macOS (launchctl disabled for simplification)".to_string(),
))
}
#[cfg(target_os = "linux")]
{
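A minimal consumer sketch of the factory above, assuming the crate is imported as `sal_service_manager` and that `ServiceManagerError` boxes cleanly (service name is illustrative):

```rust
use sal_service_manager::create_service_manager;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // On Linux this resolves to a concrete backend; on macOS it now returns
    // an error because the launchctl backend is disabled.
    let manager = create_service_manager()?;
    manager.remove("example-service")?; // `remove` comes from the ServiceManager trait
    Ok(())
}
```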

process_manager.rs (new file)

@@ -0,0 +1,371 @@
//! # Process Manager
//!
//! This module provides process management abstractions specifically designed for
//! actor lifecycle management. It bridges the gap between the service manager
//! and actor-specific process requirements.
//!
//! The ProcessManager trait provides a unified interface for managing actor processes
//! across different process management systems (tmux, zinit, simple spawning, etc.).
use async_trait::async_trait;
use std::collections::HashMap;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
/// Errors that can occur during process management operations
#[derive(Error, Debug)]
pub enum ProcessManagerError {
#[error("Process '{0}' not found")]
ProcessNotFound(String),
#[error("Process '{0}' already running")]
ProcessAlreadyRunning(String),
#[error("Failed to start process '{0}': {1}")]
StartupFailed(String, String),
#[error("Failed to stop process '{0}': {1}")]
StopFailed(String, String),
#[error("Failed to get process status '{0}': {1}")]
StatusFailed(String, String),
#[error("Failed to get logs for process '{0}': {1}")]
LogsFailed(String, String),
#[error("Process manager error: {0}")]
Other(String),
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
}
/// Result type for process manager operations
pub type ProcessManagerResult<T> = Result<T, ProcessManagerError>;
/// Represents the current status of a process
#[derive(Debug, Clone, PartialEq)]
pub enum ProcessStatus {
/// Process is not running
Stopped,
/// Process is currently starting up
Starting,
/// Process is running and ready
Running,
/// Process is in the process of stopping
Stopping,
/// Process has encountered an error
Error(String),
}
/// Configuration for a process
#[derive(Debug, Clone)]
pub struct ProcessConfig {
/// Unique identifier for the process
pub process_id: String,
/// Path to the binary to execute
pub binary_path: PathBuf,
/// Command line arguments
pub args: Vec<String>,
/// Working directory (optional)
pub working_dir: Option<PathBuf>,
/// Environment variables
pub env_vars: HashMap<String, String>,
}
impl ProcessConfig {
/// Create a new process configuration
pub fn new(process_id: String, binary_path: PathBuf) -> Self {
Self {
process_id,
binary_path,
args: Vec::new(),
working_dir: None,
env_vars: HashMap::new(),
}
}
/// Add a command line argument
pub fn with_arg(mut self, arg: String) -> Self {
self.args.push(arg);
self
}
/// Add multiple command line arguments
pub fn with_args(mut self, args: Vec<String>) -> Self {
self.args.extend(args);
self
}
/// Set the working directory
pub fn with_working_dir(mut self, working_dir: PathBuf) -> Self {
self.working_dir = Some(working_dir);
self
}
/// Add an environment variable
pub fn with_env_var(mut self, key: String, value: String) -> Self {
self.env_vars.insert(key, value);
self
}
}
/// Log information for a process
#[derive(Debug, Clone)]
pub struct LogInfo {
/// Timestamp of the log entry
pub timestamp: String,
/// Log level (info, warn, error, etc.)
pub level: String,
/// Log message content
pub message: String,
}
/// Process manager abstraction for different process management systems
#[async_trait]
pub trait ProcessManager: Send + Sync {
/// Start a process with the given configuration
async fn start_process(&mut self, config: &ProcessConfig) -> ProcessManagerResult<()>;
/// Stop a process by process ID
async fn stop_process(&mut self, process_id: &str, force: bool) -> ProcessManagerResult<()>;
/// Get the status of a process
async fn process_status(&self, process_id: &str) -> ProcessManagerResult<ProcessStatus>;
/// Get logs for a process
async fn process_logs(&self, process_id: &str, lines: Option<usize>, follow: bool) -> ProcessManagerResult<Vec<LogInfo>>;
/// Check if the process manager is available and working
async fn health_check(&self) -> ProcessManagerResult<()>;
/// List all managed processes
async fn list_processes(&self) -> ProcessManagerResult<Vec<String>>;
}
/// Simple process manager implementation using direct process spawning
/// This is useful for development and testing, but production should use
/// more robust process managers like tmux or zinit.
pub struct SimpleProcessManager {
processes: Arc<Mutex<HashMap<String, Child>>>,
}
impl SimpleProcessManager {
/// Create a new simple process manager
pub fn new() -> Self {
Self {
processes: Arc::new(Mutex::new(HashMap::new())),
}
}
fn build_command(&self, config: &ProcessConfig) -> Command {
let mut cmd = Command::new(&config.binary_path);
// Add arguments
for arg in &config.args {
cmd.arg(arg);
}
// Set working directory
if let Some(working_dir) = &config.working_dir {
cmd.current_dir(working_dir);
}
// Set environment variables
for (key, value) in &config.env_vars {
cmd.env(key, value);
}
// Configure stdio
cmd.stdout(Stdio::piped())
.stderr(Stdio::piped())
.stdin(Stdio::null());
cmd
}
}
impl Default for SimpleProcessManager {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProcessManager for SimpleProcessManager {
async fn start_process(&mut self, config: &ProcessConfig) -> ProcessManagerResult<()> {
let mut processes = self.processes.lock().await;
if processes.contains_key(&config.process_id) {
return Err(ProcessManagerError::ProcessAlreadyRunning(config.process_id.clone()));
}
let mut cmd = self.build_command(config);
log::debug!("Starting process for {}: {:?}", config.process_id, cmd);
let child = cmd.spawn().map_err(|e| ProcessManagerError::StartupFailed(
config.process_id.clone(),
format!("Failed to spawn process: {}", e),
))?;
processes.insert(config.process_id.clone(), child);
// Wait a moment to ensure the process started successfully
drop(processes);
tokio::time::sleep(Duration::from_millis(100)).await;
let mut processes = self.processes.lock().await;
// Check if the process is still running
if let Some(child) = processes.get_mut(&config.process_id) {
match child.try_wait() {
Ok(Some(status)) => {
processes.remove(&config.process_id);
return Err(ProcessManagerError::StartupFailed(
config.process_id.clone(),
format!("Process exited immediately with status: {}", status),
));
}
Ok(None) => {
// Process is still running
log::info!("Successfully started process {}", config.process_id);
}
Err(e) => {
processes.remove(&config.process_id);
return Err(ProcessManagerError::StartupFailed(
config.process_id.clone(),
format!("Failed to check process status: {}", e),
));
}
}
}
Ok(())
}
async fn stop_process(&mut self, process_id: &str, force: bool) -> ProcessManagerResult<()> {
let mut processes = self.processes.lock().await;
let mut child = processes.remove(process_id)
.ok_or_else(|| ProcessManagerError::ProcessNotFound(process_id.to_string()))?;
if force {
child.kill().await.map_err(|e| ProcessManagerError::StopFailed(
process_id.to_string(),
format!("Failed to kill process: {}", e),
))?;
} else {
// Try graceful shutdown first
if let Some(id) = child.id() {
#[cfg(unix)]
{
use std::process::Command as StdCommand;
let _ = StdCommand::new("kill")
.arg("-TERM")
.arg(id.to_string())
.output();
// Wait a bit for graceful shutdown
tokio::time::sleep(Duration::from_secs(5)).await;
}
}
// Force kill if still running
let _ = child.kill().await;
}
// Wait for the process to exit
let _ = child.wait().await;
log::info!("Successfully stopped process {}", process_id);
Ok(())
}
async fn process_status(&self, process_id: &str) -> ProcessManagerResult<ProcessStatus> {
let mut processes = self.processes.lock().await;
if let Some(child) = processes.get_mut(process_id) {
match child.try_wait() {
Ok(Some(_)) => {
// Process has exited
processes.remove(process_id);
Ok(ProcessStatus::Stopped)
}
Ok(None) => {
// Process is still running
Ok(ProcessStatus::Running)
}
Err(e) => {
Ok(ProcessStatus::Error(format!("Failed to check status: {}", e)))
}
}
} else {
Ok(ProcessStatus::Stopped)
}
}
async fn process_logs(&self, process_id: &str, _lines: Option<usize>, _follow: bool) -> ProcessManagerResult<Vec<LogInfo>> {
// Simple process manager doesn't capture logs by default
// This would require more sophisticated process management
log::warn!("Log retrieval not implemented for SimpleProcessManager");
Ok(vec![LogInfo {
timestamp: chrono::Utc::now().to_rfc3339(),
level: "info".to_string(),
message: format!("Log retrieval not available for process {}", process_id),
}])
}
async fn health_check(&self) -> ProcessManagerResult<()> {
// Simple process manager is always healthy if we can lock the processes
let _processes = self.processes.lock().await;
Ok(())
}
async fn list_processes(&self) -> ProcessManagerResult<Vec<String>> {
let processes = self.processes.lock().await;
Ok(processes.keys().cloned().collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
#[tokio::test]
async fn test_process_config_creation() {
let config = ProcessConfig::new(
"test_process".to_string(),
PathBuf::from("/usr/bin/echo"),
)
.with_arg("hello".to_string())
.with_arg("world".to_string())
.with_env_var("TEST_VAR".to_string(), "test_value".to_string());
assert_eq!(config.process_id, "test_process");
assert_eq!(config.binary_path, PathBuf::from("/usr/bin/echo"));
assert_eq!(config.args, vec!["hello", "world"]);
assert_eq!(config.env_vars.get("TEST_VAR"), Some(&"test_value".to_string()));
}
#[tokio::test]
async fn test_simple_process_manager_creation() {
let pm = SimpleProcessManager::new();
assert!(pm.health_check().await.is_ok());
}
#[tokio::test]
async fn test_process_status_types() {
let status1 = ProcessStatus::Running;
let status2 = ProcessStatus::Stopped;
let status3 = ProcessStatus::Error("test error".to_string());
assert_eq!(status1, ProcessStatus::Running);
assert_eq!(status2, ProcessStatus::Stopped);
assert_ne!(status1, status2);
if let ProcessStatus::Error(msg) = status3 {
assert_eq!(msg, "test error");
} else {
panic!("Expected Error status");
}
}
}
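A short usage sketch for `SimpleProcessManager`, assuming a tokio runtime with macros enabled and that `/usr/bin/sleep` exists on the host:

```rust
use sal_service_manager::{ProcessConfig, ProcessManager, SimpleProcessManager};
use std::path::PathBuf;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut pm = SimpleProcessManager::new();
    let config = ProcessConfig::new("sleeper".to_string(), PathBuf::from("/usr/bin/sleep"))
        .with_arg("30".to_string());

    pm.start_process(&config).await?; // spawns and verifies the process stayed up
    println!("status: {:?}", pm.process_status("sleeper").await?);
    pm.stop_process("sleeper", false).await?; // SIGTERM first, force kill as fallback
    Ok(())
}
```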

tmux_manager.rs (new file)

@@ -0,0 +1,404 @@
//! # Tmux Process Manager
//!
//! This module provides a tmux-based process manager implementation that manages
//! processes within tmux sessions and windows. This is useful for production
//! environments where you need persistent, manageable processes.
use async_trait::async_trait;
use chrono::Utc;
use std::process::Output;
use tokio::process::Command;
use crate::process_manager::{
LogInfo, ProcessConfig, ProcessManager, ProcessManagerError, ProcessManagerResult,
ProcessStatus,
};
/// Tmux-based process manager implementation
///
/// This manager creates and manages processes within tmux sessions, providing
/// better process isolation and management capabilities compared to simple spawning.
pub struct TmuxProcessManager {
/// Name of the tmux session to use
session_name: String,
}
impl TmuxProcessManager {
/// Create a new tmux process manager with the specified session name
pub fn new(session_name: String) -> Self {
Self { session_name }
}
/// Execute a tmux command and return the output
async fn tmux_command(&self, args: &[&str]) -> ProcessManagerResult<Output> {
let output = Command::new("tmux")
.args(args)
.output()
.await
.map_err(|e| ProcessManagerError::Other(format!("Failed to execute tmux command: {}", e)))?;
log::debug!("Tmux command: tmux {}", args.join(" "));
log::debug!("Tmux output: {}", String::from_utf8_lossy(&output.stdout));
if !output.stderr.is_empty() {
log::debug!("Tmux stderr: {}", String::from_utf8_lossy(&output.stderr));
}
Ok(output)
}
/// Create the tmux session if it doesn't exist
async fn create_session_if_needed(&self) -> ProcessManagerResult<()> {
// Check if session exists
let output = self
.tmux_command(&["has-session", "-t", &self.session_name])
.await?;
if !output.status.success() {
// Session doesn't exist, create it
log::info!("Creating tmux session: {}", self.session_name);
let output = self
.tmux_command(&["new-session", "-d", "-s", &self.session_name])
.await?;
if !output.status.success() {
return Err(ProcessManagerError::Other(format!(
"Failed to create tmux session '{}': {}",
self.session_name,
String::from_utf8_lossy(&output.stderr)
)));
}
}
Ok(())
}
/// Build the command string for running a process
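/// NOTE: arguments are joined with single spaces and are not shell-quoted.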
fn build_process_command(&self, config: &ProcessConfig) -> String {
let mut cmd_parts = vec![config.binary_path.to_string_lossy().to_string()];
cmd_parts.extend(config.args.clone());
cmd_parts.join(" ")
}
/// Get the window name for a process
fn get_window_name(&self, process_id: &str) -> String {
format!("proc-{}", process_id)
}
}
#[async_trait]
impl ProcessManager for TmuxProcessManager {
async fn start_process(&mut self, config: &ProcessConfig) -> ProcessManagerResult<()> {
self.create_session_if_needed().await?;
let window_name = self.get_window_name(&config.process_id);
let command = self.build_process_command(config);
// Check if window already exists
let check_output = self
.tmux_command(&[
"list-windows",
"-t",
&self.session_name,
"-F",
"#{window_name}",
])
.await?;
let existing_windows = String::from_utf8_lossy(&check_output.stdout);
if existing_windows.lines().any(|line| line.trim() == window_name) {
return Err(ProcessManagerError::ProcessAlreadyRunning(config.process_id.clone()));
}
// Create new window and run the process
let mut tmux_args = vec![
"new-window",
"-t",
&self.session_name,
"-n",
&window_name,
];
// Set working directory if specified
let working_dir_arg;
if let Some(working_dir) = &config.working_dir {
working_dir_arg = working_dir.to_string_lossy().to_string();
tmux_args.extend(&["-c", &working_dir_arg]);
}
tmux_args.push(&command);
let output = self.tmux_command(&tmux_args).await?;
if !output.status.success() {
return Err(ProcessManagerError::StartupFailed(
config.process_id.clone(),
format!(
"Failed to create tmux window: {}",
String::from_utf8_lossy(&output.stderr)
),
));
}
// Wait a moment and check if the process is still running
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
match self.process_status(&config.process_id).await? {
ProcessStatus::Running => {
log::info!("Successfully started process {} in tmux window {}", config.process_id, window_name);
Ok(())
}
ProcessStatus::Stopped => {
Err(ProcessManagerError::StartupFailed(
config.process_id.clone(),
"Process exited immediately after startup".to_string(),
))
}
ProcessStatus::Error(msg) => {
Err(ProcessManagerError::StartupFailed(
config.process_id.clone(),
format!("Process failed to start: {}", msg),
))
}
_ => Ok(()),
}
}
async fn stop_process(&mut self, process_id: &str, force: bool) -> ProcessManagerResult<()> {
let window_name = self.get_window_name(process_id);
// Check if window exists
let check_output = self
.tmux_command(&[
"list-windows",
"-t",
&self.session_name,
"-F",
"#{window_name}",
])
.await?;
let existing_windows = String::from_utf8_lossy(&check_output.stdout);
if !existing_windows.lines().any(|line| line.trim() == window_name) {
return Err(ProcessManagerError::ProcessNotFound(process_id.to_string()));
}
if force {
// Kill the window immediately
let output = self
.tmux_command(&["kill-window", "-t", &format!("{}:{}", self.session_name, window_name)])
.await?;
if !output.status.success() {
return Err(ProcessManagerError::StopFailed(
process_id.to_string(),
format!(
"Failed to kill tmux window: {}",
String::from_utf8_lossy(&output.stderr)
),
));
}
} else {
// Send SIGTERM to the process in the window
let output = self
.tmux_command(&[
"send-keys",
"-t",
&format!("{}:{}", self.session_name, window_name),
"C-c",
])
.await?;
if !output.status.success() {
log::warn!("Failed to send SIGTERM, trying force kill");
// Fallback to force kill
return self.stop_process(process_id, true).await;
}
// Wait a bit for graceful shutdown
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
// Check if process is still running, force kill if needed
if let Ok(ProcessStatus::Running) = self.process_status(process_id).await {
log::info!("Process {} didn't stop gracefully, force killing", process_id);
return self.stop_process(process_id, true).await;
}
}
log::info!("Successfully stopped process {}", process_id);
Ok(())
}
async fn process_status(&self, process_id: &str) -> ProcessManagerResult<ProcessStatus> {
let window_name = self.get_window_name(process_id);
// Check if window exists
let check_output = self
.tmux_command(&[
"list-windows",
"-t",
&self.session_name,
"-F",
"#{window_name}",
])
.await?;
let existing_windows = String::from_utf8_lossy(&check_output.stdout);
if !existing_windows.lines().any(|line| line.trim() == window_name) {
return Ok(ProcessStatus::Stopped);
}
// Check if there are any panes in the window (process running)
let pane_output = self
.tmux_command(&[
"list-panes",
"-t",
&format!("{}:{}", self.session_name, window_name),
"-F",
"#{pane_pid}",
])
.await?;
if pane_output.status.success() && !pane_output.stdout.is_empty() {
Ok(ProcessStatus::Running)
} else {
Ok(ProcessStatus::Stopped)
}
}
async fn process_logs(&self, process_id: &str, lines: Option<usize>, _follow: bool) -> ProcessManagerResult<Vec<LogInfo>> {
let window_name = self.get_window_name(process_id);
// Capture the pane content (this is the best we can do with tmux)
let target_window = format!("{}:{}", self.session_name, window_name);
let mut tmux_args = vec![
"capture-pane",
"-t",
&target_window,
"-p",
];
// Add line limit if specified (-S and its value are passed as separate args)
let lines_arg;
if let Some(line_count) = lines {
lines_arg = format!("-{}", line_count);
tmux_args.extend(&["-S", &lines_arg]);
}
let output = self.tmux_command(&tmux_args).await?;
if !output.status.success() {
return Err(ProcessManagerError::LogsFailed(
process_id.to_string(),
format!(
"Failed to capture tmux pane: {}",
String::from_utf8_lossy(&output.stderr)
),
));
}
let content = String::from_utf8_lossy(&output.stdout);
let timestamp = Utc::now().to_rfc3339();
let logs = content
.lines()
.filter(|line| !line.trim().is_empty())
.map(|line| LogInfo {
timestamp: timestamp.clone(),
level: "info".to_string(),
message: line.to_string(),
})
.collect();
Ok(logs)
}
async fn health_check(&self) -> ProcessManagerResult<()> {
// Check if tmux is available
let output = Command::new("tmux")
.arg("list-sessions")
.output()
.await
.map_err(|e| ProcessManagerError::Other(format!("Tmux not available: {}", e)))?;
if !output.status.success() {
let error_msg = String::from_utf8_lossy(&output.stderr);
if error_msg.contains("no server running") {
// This is fine, tmux server will start when needed
Ok(())
} else {
Err(ProcessManagerError::Other(format!("Tmux health check failed: {}", error_msg)))
}
} else {
Ok(())
}
}
async fn list_processes(&self) -> ProcessManagerResult<Vec<String>> {
// List all windows in our session that match our process naming pattern
let output = self
.tmux_command(&[
"list-windows",
"-t",
&self.session_name,
"-F",
"#{window_name}",
])
.await?;
if !output.status.success() {
// Session might not exist
return Ok(Vec::new());
}
let windows = String::from_utf8_lossy(&output.stdout);
let processes = windows
.lines()
.filter_map(|line| {
let window_name = line.trim();
if window_name.starts_with("proc-") {
Some(window_name.strip_prefix("proc-").unwrap().to_string())
} else {
None
}
})
.collect();
Ok(processes)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
#[tokio::test]
async fn test_tmux_manager_creation() {
let manager = TmuxProcessManager::new("test_session".to_string());
assert_eq!(manager.session_name, "test_session");
}
#[tokio::test]
async fn test_window_name_generation() {
let manager = TmuxProcessManager::new("test_session".to_string());
let window_name = manager.get_window_name("test_process");
assert_eq!(window_name, "proc-test_process");
}
#[tokio::test]
async fn test_command_building() {
let manager = TmuxProcessManager::new("test_session".to_string());
let config = ProcessConfig::new(
"test_process".to_string(),
PathBuf::from("/usr/bin/echo"),
)
.with_arg("hello".to_string())
.with_arg("world".to_string());
let command = manager.build_process_command(&config);
assert!(command.contains("/usr/bin/echo"));
assert!(command.contains("hello"));
assert!(command.contains("world"));
}
}
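A comparable sketch for `TmuxProcessManager` (assumes `tmux` is installed; session, process, and file names are illustrative):

```rust
use sal_service_manager::{ProcessConfig, ProcessManager, TmuxProcessManager};
use std::path::PathBuf;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut pm = TmuxProcessManager::new("sal".to_string());
    pm.health_check().await?; // fails only if the tmux binary is unavailable

    let config = ProcessConfig::new("worker".to_string(), PathBuf::from("/usr/bin/tail"))
        .with_args(vec!["-f".to_string(), "/var/log/syslog".to_string()]);

    pm.start_process(&config).await?; // runs in window "proc-worker" of session "sal"
    for log in pm.process_logs("worker", Some(20), false).await? {
        println!("{}", log.message);
    }
    pm.stop_process("worker", false).await?; // Ctrl-C first, kill-window as fallback
    Ok(())
}
```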

cargo_instructions.md (new empty file)

config/README.md (new file, 14 lines)

@@ -0,0 +1,14 @@
# Environment Configuration
To set up your environment variables:
1. Copy the template file to `env.sh`:
```bash
cp config/myenv_templ.sh config/env.sh
```
2. Edit `config/env.sh` and fill in your specific values for the variables.
3. This file (`config/env.sh`) is excluded from version control by the project's `.gitignore` configuration, ensuring your sensitive information remains local and is never committed to the repository.
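Once filled in, the file is sourced so the variables reach the current shell (a sketch, assuming a POSIX-compatible shell):

```bash
source config/env.sh
echo "${OPENAI_API_KEY:+key loaded}"   # prints "key loaded" once the variable is set
```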

config/myenv_templ.sh (new file, 6 lines)

@@ -0,0 +1,6 @@
export OPENROUTER_API_KEY=""
export GROQ_API_KEY=""
export CEREBRAS_API_KEY=""
export OPENAI_API_KEY="sk-xxxxxxx"


@@ -1,6 +1,7 @@
// Example of using the network modules in SAL through Rhai
// Shows TCP port checking, HTTP URL validation, and SSH command execution
// Function to print section header
fn section(title) {
print("\n");
@@ -19,14 +20,14 @@ let host = "localhost";
let port = 22;
print(`Checking if port ${port} is open on ${host}...`);
let is_open = tcp.check_port(host, port);
print(`Port ${port} is ${is_open ? "open" : "closed"}`);
print(`Port ${port} is ${if is_open { "open" } else { "closed" }}`);
// Check multiple ports
let ports = [22, 80, 443];
print(`Checking multiple ports on ${host}...`);
let port_results = tcp.check_ports(host, ports);
for result in port_results {
print(`Port ${result.port} is ${result.is_open ? "open" : "closed"}`);
print(`Port ${result.port} is ${if result.is_open { "open" } else { "closed" }}`);
}
// HTTP connectivity checks
@@ -39,7 +40,7 @@ let http = net::new_http_connector();
let url = "https://www.example.com";
print(`Checking if ${url} is reachable...`);
let is_reachable = http.check_url(url);
print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`);
print(`${url} is ${if is_reachable { "reachable" } else { "unreachable" }}`);
// Check the status code of a URL
print(`Checking status code of ${url}...`);
@@ -68,7 +69,7 @@ if is_open {
let ssh = net::new_ssh_builder()
.host("localhost")
.port(22)
.user(os::get_env("USER") || "root")
.user(if os::get_env("USER") != () { os::get_env("USER") } else { "root" })
.timeout(10)
.build();


@@ -1,7 +1,7 @@
print("Running a basic command using run().do()...");
print("Running a basic command using run().execute()...");
// Execute a simple command
let result = run("echo Hello from run_basic!").do();
let result = run("echo Hello from run_basic!").execute();
// Print the command result
print(`Command: echo Hello from run_basic!`);
@@ -13,6 +13,6 @@ print(`Stderr:\n${result.stderr}`);
// Example of a command that might fail (if 'nonexistent_command' doesn't exist)
// This will halt execution by default because ignore_error() is not used.
// print("Running a command that will fail (and should halt)...");
// let fail_result = run("nonexistent_command").do(); // This line will cause the script to halt if the command doesn't exist
// let fail_result = run("nonexistent_command").execute(); // This line will cause the script to halt if the command doesn't exist
print("Basic run() example finished.");


@@ -2,7 +2,7 @@ print("Running a command that will fail, but ignoring the error...");
// Run a command that exits with a non-zero code (will fail)
// Using .ignore_error() prevents the script from halting
let result = run("exit 1").ignore_error().do();
let result = run("exit 1").ignore_error().execute();
print(`Command finished.`);
print(`Success: ${result.success}`); // This should be false
@@ -22,7 +22,7 @@ print("\nScript continued execution after the potentially failing command.");
// Example of a command that might fail due to OS error (e.g., command not found)
// This *might* still halt depending on how the underlying Rust function handles it,
// as ignore_error() primarily prevents halting on *command* non-zero exit codes.
// let os_error_result = run("nonexistent_command_123").ignore_error().do();
// let os_error_result = run("nonexistent_command_123").ignore_error().execute();
// print(`OS Error Command Success: ${os_error_result.success}`);
// print(`OS Error Command Exit Code: ${os_error_result.code}`);


@@ -1,4 +1,4 @@
print("Running a command using run().log().do()...");
print("Running a command using run().log().execute()...");
// The .log() method will print the command string to the console before execution.
// This is useful for debugging or tracing which commands are being run.


@@ -1,8 +1,8 @@
print("Running a command using run().silent().do()...\n");
print("Running a command using run().silent().execute()...\n");
// This command will print to standard output and standard error
// However, because .silent() is used, the output will not appear in the console directly
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().do();
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().execute();
// The output is still captured in the CommandResult
print(`Command finished.`);
@@ -12,7 +12,7 @@ print(`Captured Stdout:\\n${result.stdout}`);
print(`Captured Stderr:\\n${result.stderr}`);
// Example of a silent command that fails (but won't halt because we only suppress output)
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().do();
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().execute();
// print(`Failed command finished (silent):`);
// print(`Success: ${fail_result.success}`);
// print(`Exit Code: ${fail_result.code}`);


@@ -0,0 +1,43 @@
# RFS Client Rhai Examples
This folder contains Rhai examples that use the SAL RFS client wrappers registered by `sal::rhai::register(&mut engine)`; the scripts are executed by the `herodo` binary.
## Quick start
Run the auth + upload + download example (uses hardcoded credentials and `/etc/hosts` as input):
```bash
cargo run -p herodo -- examples/rfsclient/auth_and_upload.rhai
```
By default, the script:
- Uses base URL `http://127.0.0.1:8080`
- Uses credentials `user` / `password`
- Uploads the file `/etc/hosts`
- Downloads to `/tmp/rfs_example_out.txt`
To customize, edit `examples/rfsclient/auth_and_upload.rhai` near the top and change `BASE_URL`, `USER`, `PASS`, and file paths.
## What the example does
- Creates the RFS client: `rfs_create_client(BASE_URL, USER, PASS, TIMEOUT)`
- Health check: `rfs_health_check()`
- Authenticates: `rfs_authenticate()`
- Uploads a file: `rfs_upload_file(local_path, chunk_size, verify)` → returns file hash
- Downloads it back: `rfs_download_file(file_id_or_hash, dest_path, verify)` → returns unit (throws on error)
See `examples/rfsclient/auth_and_upload.rhai` for details.
## Using the Rust client directly (optional)
If you want to use the Rust API (without Rhai), depend on `sal-rfs-client` and see:
- `packages/clients/rfsclient/src/client.rs` (`RfsClient`)
- `packages/clients/rfsclient/src/types.rs` (config and option types)
- `packages/clients/rfsclient/examples/` (example usage)
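As a minimal sketch of the Rust path (mirroring the package README's Quick Start; adjust the base URL and credentials to your server):

```rust
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same parameters as the Rhai example above
    let config = ClientConfig {
        base_url: "http://127.0.0.1:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 30,
    };
    let mut client = RfsClient::new(config);
    client.authenticate().await?;

    // Upload /etc/hosts, then fetch it back by hash
    let hash = client.upload_file("/etc/hosts", None).await?;
    client.download_file(&hash, "/tmp/rfs_example_out.txt", None).await?;
    Ok(())
}
```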
## Troubleshooting
- Auth failures: verify credentials and that the server requires/authenticates them.
- Connection errors: verify the base URL is reachable from your machine.


@@ -0,0 +1,41 @@
// RFS Client: Auth + Upload + Download example
// Prereqs:
// - RFS server reachable at RFS_BASE_URL
// - Valid credentials in env: RFS_USER, RFS_PASS
// - Run with herodo so the SAL Rhai modules are registered
// NOTE: env_get not available in this runtime; hardcode or replace with your env loader
let BASE_URL = "http://127.0.0.1:8080";
let USER = "user";
let PASS = "password";
let TIMEOUT = 30; // seconds
if BASE_URL == "" { throw "Set BASE_URL in the script"; }
// Create client
let ok = rfs_create_client(BASE_URL, USER, PASS, TIMEOUT);
if !ok { throw "Failed to create RFS client"; }
// Optional health check
let health = rfs_health_check();
print(`RFS health: ${health}`);
// Authenticate (required for some operations)
let auth_ok = rfs_authenticate();
if !auth_ok { throw "Authentication failed"; }
// Upload a local file
// Use an existing readable file to avoid needing os_write_file module
let local_file = "/etc/hosts";
// rfs_upload_file(file_path, chunk_size, verify)
let hash = rfs_upload_file(local_file, 0, false);
print(`Uploaded file hash: ${hash}`);
// Download it back
let out_path = "/tmp/rfs_example_out.txt";
// rfs_download_file(file_id, output_path, verify) returns unit and throws on error
rfs_download_file(hash, out_path, false);
print(`Downloaded to: ${out_path}`);
true


@@ -0,0 +1,15 @@
[package]
name = "openrouter_example"
version = "0.1.0"
edition = "2021"
[workspace]
[[bin]]
name = "openrouter_example"
path = "openrouter_example.rs"
[dependencies]
codemonkey = { path = "../../packages/ai/codemonkey" }
openai-api-rs = "6.0.8"
tokio = { version = "1.0", features = ["full"] }


@@ -0,0 +1,47 @@
use codemonkey::{create_ai_provider, AIProviderType, CompletionRequestBuilder, Message, MessageRole, Content};
use std::error::Error;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let (mut provider, provider_type) = create_ai_provider(AIProviderType::OpenRouter)?;
let messages = vec![Message {
role: MessageRole::user,
content: Content::Text("Explain the concept of a factory design pattern in Rust.".to_string()),
name: None,
tool_calls: None,
tool_call_id: None,
}];
println!("Sending request to OpenRouter...");
let response = CompletionRequestBuilder::new(
&mut *provider,
"openai/gpt-oss-120b".to_string(), // Model name as specified by the user
messages,
provider_type, // Pass the provider_type
)
.temperature(1.0)
.max_tokens(8192)
.top_p(1.0)
.reasoning_effort("medium")
.stream(false)
.openrouter_options(|builder| {
builder.provider(
codemonkey::OpenRouterProviderOptionsBuilder::new()
.order(vec!["cerebras"])
.build(),
)
})
.completion()
.await?;
for choice in response.choices {
if let Some(content) = choice.message.content {
print!("{}", content);
}
}
println!();
Ok(())
}

examples_rust/ai/run.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash
set -e
# Change to directory where this script is located
cd "$(dirname "${BASH_SOURCE[0]}")"
source ../../config/myenv.sh
# Build the example
cargo build
# Run the example
cargo run --bin openrouter_example


@@ -3,7 +3,7 @@
//! This library loads the Rhai engine, registers all SAL modules,
//! and executes Rhai scripts from a specified directory in sorted order.
use rhai::Engine;
use rhai::{Engine, Scope};
use std::error::Error;
use std::fs;
use std::path::{Path, PathBuf};
@@ -29,6 +29,19 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
// Create a new Rhai engine
let mut engine = Engine::new();
// TODO: if we create a scope here we could clean up all the different functions and types registered with the engine
// We should generalize the way we add things to the scope for each module separately
let mut scope = Scope::new();
// Conditionally add Hetzner client only when env config is present
if let Ok(cfg) = sal::hetzner::config::Config::from_env() {
let hetzner_client = sal::hetzner::api::Client::new(cfg);
scope.push("hetzner", hetzner_client);
}
// This makes it easy to call e.g. `hetzner.get_server()` or `mycelium.get_connected_peers()`
// --> without the need to manually create a client for each one first
// --> could be conditionally compiled to only use those we need (we only push the things to the scope that we actually need to run the script)
// Register println function for output
engine.register_fn("println", |s: &str| println!("{}", s));
@@ -78,19 +91,20 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
let script = fs::read_to_string(&script_file)?;
// Execute the script
match engine.eval::<rhai::Dynamic>(&script) {
Ok(result) => {
println!("Script executed successfully");
if !result.is_unit() {
println!("Result: {}", result);
}
}
Err(err) => {
eprintln!("Error executing script: {}", err);
// Exit with error code when a script fails
process::exit(1);
}
}
// match engine.eval::<rhai::Dynamic>(&script) {
// Ok(result) => {
// println!("Script executed successfully");
// if !result.is_unit() {
// println!("Result: {}", result);
// }
// }
// Err(err) => {
// eprintln!("Error executing script: {}", err);
// // Exit with error code when a script fails
// process::exit(1);
// }
// }
engine.run_with_scope(&mut scope, &script)?;
}
println!("\nAll scripts executed successfully!");


@@ -0,0 +1,10 @@
[package]
name = "codemonkey"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1", features = ["full"] }
async-trait = "0.1.80"
openrouter-rs = "0.4.5"
serde = { version = "1.0", features = ["derive"] }


@@ -0,0 +1,216 @@
use async_trait::async_trait;
use openrouter_rs::{OpenRouterClient, api::chat::{ChatCompletionRequest, Message}, types::completion::CompletionsResponse};
use std::env;
use std::error::Error;
// Re-export MessageRole for easier use in client code
pub use openrouter_rs::types::Role as MessageRole;
#[async_trait]
pub trait AIProvider {
async fn completion(
&mut self,
request: CompletionRequest,
) -> Result<CompletionsResponse, Box<dyn Error>>;
}
pub struct CompletionRequest {
pub model: String,
pub messages: Vec<Message>,
pub temperature: Option<f64>,
pub max_tokens: Option<i64>,
pub top_p: Option<f64>,
pub stream: Option<bool>,
pub stop: Option<Vec<String>>,
}
pub struct CompletionRequestBuilder<'a> {
provider: &'a mut dyn AIProvider,
model: String,
messages: Vec<Message>,
temperature: Option<f64>,
max_tokens: Option<i64>,
top_p: Option<f64>,
stream: Option<bool>,
stop: Option<Vec<String>>,
provider_type: AIProviderType,
}
impl<'a> CompletionRequestBuilder<'a> {
pub fn new(provider: &'a mut dyn AIProvider, model: String, messages: Vec<Message>, provider_type: AIProviderType) -> Self {
Self {
provider,
model,
messages,
temperature: None,
max_tokens: None,
top_p: None,
stream: None,
stop: None,
provider_type,
}
}
pub fn temperature(mut self, temperature: f64) -> Self {
self.temperature = Some(temperature);
self
}
pub fn max_tokens(mut self, max_tokens: i64) -> Self {
self.max_tokens = Some(max_tokens);
self
}
pub fn top_p(mut self, top_p: f64) -> Self {
self.top_p = Some(top_p);
self
}
pub fn stream(mut self, stream: bool) -> Self {
self.stream = Some(stream);
self
}
pub fn stop(mut self, stop: Vec<String>) -> Self {
self.stop = Some(stop);
self
}
pub async fn completion(self) -> Result<CompletionsResponse, Box<dyn Error>> {
let request = CompletionRequest {
model: self.model,
messages: self.messages,
temperature: self.temperature,
max_tokens: self.max_tokens,
top_p: self.top_p,
stream: self.stream,
stop: self.stop,
};
self.provider.completion(request).await
}
}
pub struct GroqAIProvider {
client: OpenRouterClient,
}
#[async_trait]
impl AIProvider for GroqAIProvider {
async fn completion(
&mut self,
request: CompletionRequest,
) -> Result<CompletionsResponse, Box<dyn Error>> {
let chat_request = ChatCompletionRequest::builder()
.model(request.model)
.messages(request.messages)
.temperature(request.temperature.unwrap_or(1.0))
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
.top_p(request.top_p.unwrap_or(1.0))
.build()?;
let result = self.client.send_chat_completion(&chat_request).await?;
Ok(result)
}
}
pub struct OpenAIProvider {
client: OpenRouterClient,
}
#[async_trait]
impl AIProvider for OpenAIProvider {
async fn completion(
&mut self,
request: CompletionRequest,
) -> Result<CompletionsResponse, Box<dyn Error>> {
let chat_request = ChatCompletionRequest::builder()
.model(request.model)
.messages(request.messages)
.temperature(request.temperature.unwrap_or(1.0))
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
.top_p(request.top_p.unwrap_or(1.0))
.build()?;
let result = self.client.send_chat_completion(&chat_request).await?;
Ok(result)
}
}
pub struct OpenRouterAIProvider {
client: OpenRouterClient,
}
#[async_trait]
impl AIProvider for OpenRouterAIProvider {
async fn completion(
&mut self,
request: CompletionRequest,
) -> Result<CompletionsResponse, Box<dyn Error>> {
let chat_request = ChatCompletionRequest::builder()
.model(request.model)
.messages(request.messages)
.temperature(request.temperature.unwrap_or(1.0))
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
.top_p(request.top_p.unwrap_or(1.0))
.build()?;
let result = self.client.send_chat_completion(&chat_request).await?;
Ok(result)
}
}
pub struct CerebrasAIProvider {
client: OpenRouterClient,
}
#[async_trait]
impl AIProvider for CerebrasAIProvider {
async fn completion(
&mut self,
request: CompletionRequest,
) -> Result<CompletionsResponse, Box<dyn Error>> {
let chat_request = ChatCompletionRequest::builder()
.model(request.model)
.messages(request.messages)
.temperature(request.temperature.unwrap_or(1.0))
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
.top_p(request.top_p.unwrap_or(1.0))
.build()?;
let result = self.client.send_chat_completion(&chat_request).await?;
Ok(result)
}
}
#[derive(PartialEq)]
pub enum AIProviderType {
Groq,
OpenAI,
OpenRouter,
Cerebras,
}
pub fn create_ai_provider(provider_type: AIProviderType) -> Result<(Box<dyn AIProvider>, AIProviderType), Box<dyn Error>> {
match provider_type {
AIProviderType::Groq => {
let api_key = env::var("GROQ_API_KEY")?;
let client = OpenRouterClient::builder().api_key(api_key).build()?;
Ok((Box::new(GroqAIProvider { client }), AIProviderType::Groq))
}
AIProviderType::OpenAI => {
let api_key = env::var("OPENAI_API_KEY")?;
let client = OpenRouterClient::builder().api_key(api_key).build()?;
Ok((Box::new(OpenAIProvider { client }), AIProviderType::OpenAI))
}
AIProviderType::OpenRouter => {
let api_key = env::var("OPENROUTER_API_KEY")?;
let client = OpenRouterClient::builder().api_key(api_key).build()?;
Ok((Box::new(OpenRouterAIProvider { client }), AIProviderType::OpenRouter))
}
AIProviderType::Cerebras => {
let api_key = env::var("CEREBRAS_API_KEY")?;
let client = OpenRouterClient::builder().api_key(api_key).build()?;
Ok((Box::new(CerebrasAIProvider { client }), AIProviderType::Cerebras))
}
}
}


@@ -0,0 +1,12 @@
[package]
name = "sal-hetzner"
version = "0.1.0"
edition = "2024"
[dependencies]
prettytable = "0.10.0"
reqwest.workspace = true
rhai = { workspace = true, features = ["serde"] }
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
thiserror.workspace = true


@@ -0,0 +1,54 @@
use std::fmt;
use serde::Deserialize;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum AppError {
#[error("Request failed: {0}")]
RequestError(#[from] reqwest::Error),
#[error("API error: {0}")]
ApiError(ApiError),
#[error("Deserialization Error: {0:?}")]
SerdeJsonError(#[from] serde_json::Error),
}
#[derive(Debug, Deserialize)]
pub struct ApiError {
pub status: u16,
pub message: String,
}
impl From<reqwest::blocking::Response> for ApiError {
fn from(value: reqwest::blocking::Response) -> Self {
ApiError {
status: value.status().into(),
message: value.text().unwrap_or_else(|_| "The API call returned an error.".to_string()),
}
}
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[derive(Deserialize)]
struct HetznerApiError {
code: String,
message: String,
}
#[derive(Deserialize)]
struct HetznerApiErrorWrapper {
error: HetznerApiError,
}
if let Ok(wrapper) = serde_json::from_str::<HetznerApiErrorWrapper>(&self.message) {
write!(
f,
"Status: {}, Code: {}, Message: {}",
self.status, wrapper.error.code, wrapper.error.message
)
} else {
write!(f, "Status: {}: {}", self.status, self.message)
}
}
}
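To illustrate the two `Display` paths, a small sketch (the JSON body is a hypothetical example of the wrapper shape deserialized above):

```rust
use sal_hetzner::api::error::ApiError;

fn main() {
    // Hypothetical Robot-style body matching HetznerApiErrorWrapper
    let body = r#"{"error":{"code":"SERVER_NOT_FOUND","message":"server not found"}}"#;
    let err = ApiError { status: 404, message: body.to_string() };
    // Prints: Status: 404, Code: SERVER_NOT_FOUND, Message: server not found
    println!("{err}");

    // Non-JSON bodies fall back to the plain form
    let raw = ApiError { status: 500, message: "upstream timeout".to_string() };
    // Prints: Status: 500: upstream timeout
    println!("{raw}");
}
```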


@@ -0,0 +1,513 @@
pub mod error;
pub mod models;
use self::models::{
Boot, Rescue, Server, SshKey, ServerAddonProduct, ServerAddonProductWrapper,
AuctionServerProduct, AuctionServerProductWrapper, AuctionTransaction,
AuctionTransactionWrapper, BootWrapper, Cancellation, CancellationWrapper,
OrderServerBuilder, OrderServerProduct, OrderServerProductWrapper, RescueWrapped,
ServerWrapper, SshKeyWrapper, Transaction, TransactionWrapper,
ServerAddonTransaction, ServerAddonTransactionWrapper,
OrderServerAddonBuilder,
};
use crate::api::error::ApiError;
use crate::config::Config;
use error::AppError;
use reqwest::blocking::Client as HttpClient;
use serde_json::json;
#[derive(Clone)]
pub struct Client {
http_client: HttpClient,
config: Config,
}
impl Client {
pub fn new(config: Config) -> Self {
Self {
http_client: HttpClient::new(),
config,
}
}
fn handle_response<T>(&self, response: reqwest::blocking::Response) -> Result<T, AppError>
where
T: serde::de::DeserializeOwned,
{
let status = response.status();
let body = response.text()?;
if status.is_success() {
serde_json::from_str::<T>(&body).map_err(Into::into)
} else {
Err(AppError::ApiError(ApiError {
status: status.as_u16(),
message: body,
}))
}
}
pub fn get_server(&self, server_number: i32) -> Result<Server, AppError> {
let response = self
.http_client
.get(format!("{}/server/{}", self.config.api_url, server_number))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: ServerWrapper = self.handle_response(response)?;
Ok(wrapped.server)
}
pub fn get_servers(&self) -> Result<Vec<Server>, AppError> {
let response = self
.http_client
.get(format!("{}/server", self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<ServerWrapper> = self.handle_response(response)?;
let servers = wrapped.into_iter().map(|sw| sw.server).collect();
Ok(servers)
}
pub fn update_server_name(&self, server_number: i32, name: &str) -> Result<Server, AppError> {
let params = [("server_name", name)];
let response = self
.http_client
.post(format!("{}/server/{}", self.config.api_url, server_number))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: ServerWrapper = self.handle_response(response)?;
Ok(wrapped.server)
}
pub fn get_cancellation_data(&self, server_number: i32) -> Result<Cancellation, AppError> {
let response = self
.http_client
.get(format!(
"{}/server/{}/cancellation",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: CancellationWrapper = self.handle_response(response)?;
Ok(wrapped.cancellation)
}
pub fn cancel_server(
&self,
server_number: i32,
cancellation_date: &str,
) -> Result<Cancellation, AppError> {
let params = [("cancellation_date", cancellation_date)];
let response = self
.http_client
.post(format!(
"{}/server/{}/cancellation",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: CancellationWrapper = self.handle_response(response)?;
Ok(wrapped.cancellation)
}
pub fn withdraw_cancellation(&self, server_number: i32) -> Result<(), AppError> {
self.http_client
.delete(format!(
"{}/server/{}/cancellation",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
Ok(())
}
pub fn get_ssh_keys(&self) -> Result<Vec<SshKey>, AppError> {
let response = self
.http_client
.get(format!("{}/key", self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<SshKeyWrapper> = self.handle_response(response)?;
let keys = wrapped.into_iter().map(|sk| sk.key).collect();
Ok(keys)
}
pub fn get_ssh_key(&self, fingerprint: &str) -> Result<SshKey, AppError> {
let response = self
.http_client
.get(format!("{}/key/{}", self.config.api_url, fingerprint))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: SshKeyWrapper = self.handle_response(response)?;
Ok(wrapped.key)
}
pub fn add_ssh_key(&self, name: &str, data: &str) -> Result<SshKey, AppError> {
let params = [("name", name), ("data", data)];
let response = self
.http_client
.post(format!("{}/key", self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: SshKeyWrapper = self.handle_response(response)?;
Ok(wrapped.key)
}
pub fn update_ssh_key_name(&self, fingerprint: &str, name: &str) -> Result<SshKey, AppError> {
let params = [("name", name)];
let response = self
.http_client
.post(format!("{}/key/{}", self.config.api_url, fingerprint))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: SshKeyWrapper = self.handle_response(response)?;
Ok(wrapped.key)
}
pub fn delete_ssh_key(&self, fingerprint: &str) -> Result<(), AppError> {
self.http_client
.delete(format!("{}/key/{}", self.config.api_url, fingerprint))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
Ok(())
}
pub fn get_boot_configuration(&self, server_number: i32) -> Result<Boot, AppError> {
let response = self
.http_client
.get(format!("{}/boot/{}", self.config.api_url, server_number))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: BootWrapper = self.handle_response(response)?;
Ok(wrapped.boot)
}
pub fn get_rescue_boot_configuration(&self, server_number: i32) -> Result<Rescue, AppError> {
let response = self
.http_client
.get(format!(
"{}/boot/{}/rescue",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: RescueWrapped = self.handle_response(response)?;
Ok(wrapped.rescue)
}
pub fn enable_rescue_mode(
&self,
server_number: i32,
os: &str,
authorized_keys: Option<&[String]>,
) -> Result<Rescue, AppError> {
let mut params = vec![("os", os)];
if let Some(keys) = authorized_keys {
for key in keys {
params.push(("authorized_key[]", key));
}
}
let response = self
.http_client
.post(format!(
"{}/boot/{}/rescue",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: RescueWrapped = self.handle_response(response)?;
Ok(wrapped.rescue)
}
pub fn disable_rescue_mode(&self, server_number: i32) -> Result<Rescue, AppError> {
let response = self
.http_client
.delete(format!(
"{}/boot/{}/rescue",
self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: RescueWrapped = self.handle_response(response)?;
Ok(wrapped.rescue)
}
pub fn get_server_products(
&self,
) -> Result<Vec<OrderServerProduct>, AppError> {
let response = self
.http_client
.get(format!("{}/order/server/product", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<OrderServerProductWrapper> = self.handle_response(response)?;
let products = wrapped.into_iter().map(|sop| sop.product).collect();
Ok(products)
}
pub fn get_server_product_by_id(
&self,
product_id: &str,
) -> Result<OrderServerProduct, AppError> {
let response = self
.http_client
.get(format!(
"{}/order/server/product/{}",
&self.config.api_url, product_id
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: OrderServerProductWrapper = self.handle_response(response)?;
Ok(wrapped.product)
}
pub fn order_server(&self, order: OrderServerBuilder) -> Result<Transaction, AppError> {
let mut params = json!({
"product_id": order.product_id,
"dist": order.dist,
"location": order.location,
"authorized_key": order.authorized_keys.unwrap_or_default(),
});
if let Some(addons) = order.addons {
params["addon"] = json!(addons);
}
if let Some(test) = order.test {
if test {
params["test"] = json!(test);
}
}
let response = self
.http_client
.post(format!("{}/order/server/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.json(&params)
.send()?;
let wrapped: TransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
pub fn get_transaction_by_id(&self, transaction_id: &str) -> Result<Transaction, AppError> {
let response = self
.http_client
.get(format!(
"{}/order/server/transaction/{}",
&self.config.api_url, transaction_id
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: TransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
pub fn get_transactions(&self) -> Result<Vec<Transaction>, AppError> {
let response = self
.http_client
.get(format!("{}/order/server/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<TransactionWrapper> = self.handle_response(response)?;
let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
Ok(transactions)
}
pub fn get_auction_server_products(&self) -> Result<Vec<AuctionServerProduct>, AppError> {
let response = self
.http_client
.get(format!(
"{}/order/server_market/product",
&self.config.api_url
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<AuctionServerProductWrapper> = self.handle_response(response)?;
let products = wrapped.into_iter().map(|asp| asp.product).collect();
Ok(products)
}
pub fn get_auction_server_product_by_id(&self, product_id: &str) -> Result<AuctionServerProduct, AppError> {
let response = self
.http_client
.get(format!("{}/order/server_market/product/{}", &self.config.api_url, product_id))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: AuctionServerProductWrapper = self.handle_response(response)?;
Ok(wrapped.product)
}
pub fn get_auction_transactions(&self) -> Result<Vec<AuctionTransaction>, AppError> {
let response = self
.http_client
.get(format!("{}/order/server_market/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<AuctionTransactionWrapper> = self.handle_response(response)?;
let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
Ok(transactions)
}
pub fn get_auction_transaction_by_id(&self, transaction_id: &str) -> Result<AuctionTransaction, AppError> {
let response = self
.http_client
.get(format!("{}/order/server_market/transaction/{}", &self.config.api_url, transaction_id))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
pub fn get_server_addon_products(
&self,
server_number: i64,
) -> Result<Vec<ServerAddonProduct>, AppError> {
let response = self
.http_client
.get(format!(
"{}/order/server_addon/{}/product",
&self.config.api_url, server_number
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<ServerAddonProductWrapper> = self.handle_response(response)?;
let products = wrapped.into_iter().map(|sap| sap.product).collect();
Ok(products)
}
pub fn order_auction_server(
&self,
product_id: i64,
authorized_keys: Vec<String>,
dist: Option<String>,
arch: Option<String>,
lang: Option<String>,
comment: Option<String>,
addons: Option<Vec<String>>,
test: Option<bool>,
) -> Result<AuctionTransaction, AppError> {
let mut params: Vec<(&str, String)> = Vec::new();
params.push(("product_id", product_id.to_string()));
for key in &authorized_keys {
params.push(("authorized_key[]", key.clone()));
}
if let Some(dist) = dist {
params.push(("dist", dist));
}
if let Some(arch) = arch {
params.push(("@deprecated arch", arch));
}
if let Some(lang) = lang {
params.push(("lang", lang));
}
if let Some(comment) = comment {
params.push(("comment", comment));
}
if let Some(addons) = addons {
for addon in addons {
params.push(("addon[]", addon));
}
}
if let Some(test) = test {
params.push(("test", test.to_string()));
}
let response = self
.http_client
.post(format!("{}/order/server_market/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
pub fn get_server_addon_transactions(&self) -> Result<Vec<ServerAddonTransaction>, AppError> {
let response = self
.http_client
.get(format!("{}/order/server_addon/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: Vec<ServerAddonTransactionWrapper> = self.handle_response(response)?;
let transactions = wrapped.into_iter().map(|satw| satw.transaction).collect();
Ok(transactions)
}
pub fn get_server_addon_transaction_by_id(
&self,
transaction_id: &str,
) -> Result<ServerAddonTransaction, AppError> {
let response = self
.http_client
.get(format!(
"{}/order/server_addon/transaction/{}",
&self.config.api_url, transaction_id
))
.basic_auth(&self.config.username, Some(&self.config.password))
.send()?;
let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
pub fn order_server_addon(
&self,
order: OrderServerAddonBuilder,
) -> Result<ServerAddonTransaction, AppError> {
let mut params = json!({
"server_number": order.server_number,
"product_id": order.product_id,
});
if let Some(reason) = order.reason {
params["reason"] = json!(reason);
}
if let Some(gateway) = order.gateway {
params["gateway"] = json!(gateway);
}
if let Some(test) = order.test {
if test {
params["test"] = json!(test);
}
}
let response = self
.http_client
.post(format!("{}/order/server_addon/transaction", &self.config.api_url))
.basic_auth(&self.config.username, Some(&self.config.password))
.form(&params)
.send()?;
let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
Ok(wrapped.transaction)
}
}

(File diff suppressed because it is too large.)


@@ -0,0 +1,25 @@
use std::env;
#[derive(Clone)]
pub struct Config {
pub username: String,
pub password: String,
pub api_url: String,
}
impl Config {
pub fn from_env() -> Result<Self, String> {
let username = env::var("HETZNER_USERNAME")
.map_err(|_| "HETZNER_USERNAME environment variable not set".to_string())?;
let password = env::var("HETZNER_PASSWORD")
.map_err(|_| "HETZNER_PASSWORD environment variable not set".to_string())?;
let api_url = env::var("HETZNER_API_URL")
.unwrap_or_else(|_| "https://robot-ws.your-server.de".to_string());
Ok(Config {
username,
password,
api_url,
})
}
}
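A minimal sketch wiring `Config` into the API client (assumes the `HETZNER_*` variables described above are exported):

```rust
use sal_hetzner::api::Client;
use sal_hetzner::config::Config;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Reads HETZNER_USERNAME / HETZNER_PASSWORD; HETZNER_API_URL falls back to the default
    let cfg = Config::from_env()?;
    let client = Client::new(cfg);
    let servers = client.get_servers()?;
    println!("{} servers on this account", servers.len());
    Ok(())
}
```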


@@ -0,0 +1,3 @@
pub mod api;
pub mod config;
pub mod rhai;


@@ -0,0 +1,63 @@
use crate::api::{
models::{Boot, Rescue},
Client,
};
use rhai::{plugin::*, Engine};
pub fn register(engine: &mut Engine) {
let boot_module = exported_module!(boot_api);
engine.register_global_module(boot_module.into());
}
#[export_module]
pub mod boot_api {
use super::*;
use rhai::EvalAltResult;
#[rhai_fn(name = "get_boot_configuration", return_raw)]
pub fn get_boot_configuration(
client: &mut Client,
server_number: i64,
) -> Result<Boot, Box<EvalAltResult>> {
client
.get_boot_configuration(server_number as i32)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "get_rescue_boot_configuration", return_raw)]
pub fn get_rescue_boot_configuration(
client: &mut Client,
server_number: i64,
) -> Result<Rescue, Box<EvalAltResult>> {
client
.get_rescue_boot_configuration(server_number as i32)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "enable_rescue_mode", return_raw)]
pub fn enable_rescue_mode(
client: &mut Client,
server_number: i64,
os: &str,
authorized_keys: rhai::Array,
) -> Result<Rescue, Box<EvalAltResult>> {
let keys: Vec<String> = authorized_keys
.into_iter()
.map(|k| k.into_string().unwrap())
.collect();
client
.enable_rescue_mode(server_number as i32, os, Some(&keys))
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "disable_rescue_mode", return_raw)]
pub fn disable_rescue_mode(
client: &mut Client,
server_number: i64,
) -> Result<Rescue, Box<EvalAltResult>> {
client
.disable_rescue_mode(server_number as i32)
.map_err(|e| e.to_string().into())
}
}


@@ -0,0 +1,54 @@
use rhai::{Engine, EvalAltResult};
use crate::api::models::{
AuctionServerProduct, AuctionTransaction, AuctionTransactionProduct, AuthorizedKey, Boot,
Cancellation, Cpanel, HostKey, Linux, OrderAuctionServerBuilder, OrderServerAddonBuilder,
OrderServerBuilder, OrderServerProduct, Plesk, Rescue, Server, ServerAddonProduct,
ServerAddonResource, ServerAddonTransaction, SshKey, Transaction, TransactionProduct, Vnc,
Windows,
};
pub mod boot;
pub mod printing;
pub mod server;
pub mod server_ordering;
pub mod ssh_keys;
// Register the hetzner module: custom types first, then the API sub-modules
pub fn register_hetzner_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
engine.build_type::<Server>();
engine.build_type::<SshKey>();
engine.build_type::<Boot>();
engine.build_type::<Rescue>();
engine.build_type::<Linux>();
engine.build_type::<Vnc>();
engine.build_type::<Windows>();
engine.build_type::<Plesk>();
engine.build_type::<Cpanel>();
engine.build_type::<Cancellation>();
engine.build_type::<OrderServerProduct>();
engine.build_type::<Transaction>();
engine.build_type::<AuthorizedKey>();
engine.build_type::<TransactionProduct>();
engine.build_type::<HostKey>();
engine.build_type::<AuctionServerProduct>();
engine.build_type::<AuctionTransaction>();
engine.build_type::<AuctionTransactionProduct>();
engine.build_type::<OrderAuctionServerBuilder>();
engine.build_type::<OrderServerBuilder>();
engine.build_type::<ServerAddonProduct>();
engine.build_type::<ServerAddonTransaction>();
engine.build_type::<ServerAddonResource>();
engine.build_type::<OrderServerAddonBuilder>();
server::register(engine);
ssh_keys::register(engine);
boot::register(engine);
server_ordering::register(engine);
// TODO: push hetzner to scope as value client:
// scope.push("hetzner", client);
Ok(())
}


@@ -0,0 +1,43 @@
use rhai::{Array, Engine};
use crate::api::models::{OrderServerProduct, AuctionServerProduct, AuctionTransaction, ServerAddonProduct, ServerAddonTransaction, Server, SshKey};
mod servers_table;
mod ssh_keys_table;
mod server_ordering_table;
// This will be called when we print(...) or pretty_print() an Array (with Dynamic values)
pub fn pretty_print_dispatch(array: Array) {
if array.is_empty() {
println!("<empty table>");
return;
}
let first = &array[0];
if first.is::<Server>() {
println!("Yeah first is server!");
servers_table::pretty_print_servers(array);
} else if first.is::<SshKey>() {
ssh_keys_table::pretty_print_ssh_keys(array);
}
else if first.is::<OrderServerProduct>() {
server_ordering_table::pretty_print_server_products(array);
} else if first.is::<AuctionServerProduct>() {
server_ordering_table::pretty_print_auction_server_products(array);
} else if first.is::<AuctionTransaction>() {
server_ordering_table::pretty_print_auction_transactions(array);
} else if first.is::<ServerAddonProduct>() {
server_ordering_table::pretty_print_server_addon_products(array);
} else if first.is::<ServerAddonTransaction>() {
server_ordering_table::pretty_print_server_addon_transactions(array);
} else {
// Generic fallback for other types
for item in array {
println!("{}", item.to_string());
}
}
}
pub fn register(engine: &mut Engine) {
engine.register_fn("pretty_print", pretty_print_dispatch);
}


@@ -0,0 +1,293 @@
use prettytable::{row, Table};
use crate::api::models::{OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, ServerAddonResource};
pub fn pretty_print_server_products(products: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"ID",
"Name",
"Description",
"Traffic",
"Location",
"Price (Net)",
"Price (Gross)",
]);
for product_dyn in products {
if let Some(product) = product_dyn.try_cast::<OrderServerProduct>() {
let mut price_net = "N/A".to_string();
let mut price_gross = "N/A".to_string();
if let Some(first_price) = product.prices.first() {
price_net = first_price.price.net.clone();
price_gross = first_price.price.gross.clone();
}
table.add_row(row![
product.id,
product.name,
product.description.join(", "),
product.traffic,
product.location.join(", "),
price_net,
price_gross,
]);
}
}
table.printstd();
}
pub fn pretty_print_auction_server_products(products: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"ID",
"Name",
"Description",
"Traffic",
"Distributions",
"Architectures",
"Languages",
"CPU",
"CPU Benchmark",
"Memory Size (GB)",
"HDD Size (GB)",
"HDD Text",
"HDD Count",
"Datacenter",
"Network Speed",
"Price (Net)",
"Price (Hourly Net)",
"Price (Setup Net)",
"Price (VAT)",
"Price (Hourly VAT)",
"Price (Setup VAT)",
"Fixed Price",
"Next Reduce (seconds)",
"Next Reduce Date",
"Orderable Addons",
]);
for product_dyn in products {
if let Some(product) = product_dyn.try_cast::<crate::api::models::AuctionServerProduct>() {
let mut addons_table = Table::new();
addons_table.add_row(row![b => "ID", "Name", "Min", "Max", "Prices"]);
for addon in &product.orderable_addons {
let mut addon_prices_table = Table::new();
addon_prices_table.add_row(row![b => "Location", "Net", "Gross", "Hourly Net", "Hourly Gross", "Setup Net", "Setup Gross"]);
for price in &addon.prices {
addon_prices_table.add_row(row![
price.location,
price.price.net,
price.price.gross,
price.price.hourly_net,
price.price.hourly_gross,
price.price_setup.net,
price.price_setup.gross
]);
}
addons_table.add_row(row![
addon.id,
addon.name,
addon.min,
addon.max,
addon_prices_table
]);
}
table.add_row(row![
product.id,
product.name,
product.description.join(", "),
product.traffic,
product.dist.join(", "),
product.arch.join(", "),
product.lang.join(", "),
product.cpu,
product.cpu_benchmark,
product.memory_size,
product.hdd_size,
product.hdd_text,
product.hdd_count,
product.datacenter,
product.network_speed,
product.price,
product.price_hourly.as_deref().unwrap_or("N/A"),
product.price_setup,
product.price_with_vat,
product.price_hourly_with_vat.as_deref().unwrap_or("N/A"),
product.price_setup_with_vat,
product.fixed_price,
product.next_reduce,
product.next_reduce_date,
addons_table,
]);
}
}
table.printstd();
}
pub fn pretty_print_server_addon_products(products: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"ID",
"Name",
"Type",
"Location",
"Price (Net)",
"Price (Gross)",
"Hourly Net",
"Hourly Gross",
"Setup Net",
"Setup Gross",
]);
for product_dyn in products {
if let Some(product) = product_dyn.try_cast::<ServerAddonProduct>() {
table.add_row(row![
product.id,
product.name,
product.product_type,
product.price.location,
product.price.price.net,
product.price.price.gross,
product.price.price.hourly_net,
product.price.price.hourly_gross,
product.price.price_setup.net,
product.price.price_setup.gross,
]);
}
}
table.printstd();
}
pub fn pretty_print_auction_transactions(transactions: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"ID",
"Date",
"Status",
"Server Number",
"Server IP",
"Comment",
"Product ID",
"Product Name",
"Product Traffic",
"Product Distributions",
"Product Architectures",
"Product Languages",
"Product CPU",
"Product CPU Benchmark",
"Product Memory Size (GB)",
"Product HDD Size (GB)",
"Product HDD Text",
"Product HDD Count",
"Product Datacenter",
"Product Network Speed",
"Product Fixed Price",
"Product Next Reduce (seconds)",
"Product Next Reduce Date",
"Addons",
]);
for transaction_dyn in transactions {
if let Some(transaction) = transaction_dyn.try_cast::<crate::api::models::AuctionTransaction>() {
let _authorized_keys_table = {
let mut table = Table::new();
table.add_row(row![b => "Name", "Fingerprint", "Type", "Size"]);
for key in &transaction.authorized_key {
table.add_row(row![
key.key.name.as_deref().unwrap_or("N/A"),
key.key.fingerprint.as_deref().unwrap_or("N/A"),
key.key.key_type.as_deref().unwrap_or("N/A"),
key.key.size.map_or("N/A".to_string(), |s| s.to_string())
]);
}
table
};
let _host_keys_table = {
let mut table = Table::new();
table.add_row(row![b => "Fingerprint", "Type", "Size"]);
for key in &transaction.host_key {
table.add_row(row![
key.key.fingerprint.as_deref().unwrap_or("N/A"),
key.key.key_type.as_deref().unwrap_or("N/A"),
key.key.size.map_or("N/A".to_string(), |s| s.to_string())
]);
}
table
};
table.add_row(row![
transaction.id,
transaction.date,
transaction.status,
transaction.server_number.map_or("N/A".to_string(), |id| id.to_string()),
transaction.server_ip.as_deref().unwrap_or("N/A"),
transaction.comment.as_deref().unwrap_or("N/A"),
transaction.product.id,
transaction.product.name,
transaction.product.traffic,
transaction.product.dist,
transaction.product.arch.as_deref().unwrap_or("N/A"),
transaction.product.lang,
transaction.product.cpu,
transaction.product.cpu_benchmark,
transaction.product.memory_size,
transaction.product.hdd_size,
transaction.product.hdd_text,
transaction.product.hdd_count,
transaction.product.datacenter,
transaction.product.network_speed,
transaction.product.fixed_price.unwrap_or_default().to_string(),
transaction
.product
.next_reduce
.map_or("N/A".to_string(), |r| r.to_string()),
transaction
.product
.next_reduce_date
.as_deref()
.unwrap_or("N/A"),
transaction.addons.join(", "),
]);
}
}
table.printstd();
}
pub fn pretty_print_server_addon_transactions(transactions: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"ID",
"Date",
"Status",
"Server Number",
"Product ID",
"Product Name",
"Product Price",
"Resources",
]);
for transaction_dyn in transactions {
if let Some(transaction) = transaction_dyn.try_cast::<ServerAddonTransaction>() {
let mut resources_table = Table::new();
resources_table.add_row(row![b => "Type", "ID"]);
for resource in &transaction.resources {
resources_table.add_row(row![resource.resource_type, resource.id]);
}
table.add_row(row![
transaction.id,
transaction.date,
transaction.status,
transaction.server_number,
transaction.product.id,
transaction.product.name,
transaction.product.price.to_string(),
resources_table,
]);
}
}
table.printstd();
}


@@ -0,0 +1,30 @@
use prettytable::{row, Table};
use rhai::Array;
use super::Server;
pub fn pretty_print_servers(servers: Array) {
let mut table = Table::new();
table.add_row(row![b =>
"Number",
"Name",
"IP",
"Product",
"DC",
"Status"
]);
for server_dyn in servers {
if let Some(server) = server_dyn.try_cast::<Server>() {
table.add_row(row![
server.server_number.to_string(),
server.server_name,
server.server_ip.unwrap_or("N/A".to_string()),
server.product,
server.dc,
server.status
]);
}
}
table.printstd();
}


@@ -0,0 +1,26 @@
use prettytable::{row, Table};
use super::SshKey;
pub fn pretty_print_ssh_keys(keys: rhai::Array) {
let mut table = Table::new();
table.add_row(row![b =>
"Name",
"Fingerprint",
"Type",
"Size",
"Created At"
]);
for key_dyn in keys {
if let Some(key) = key_dyn.try_cast::<SshKey>() {
table.add_row(row![
key.name,
key.fingerprint,
key.key_type,
key.size.to_string(),
key.created_at
]);
}
}
table.printstd();
}


@@ -0,0 +1,76 @@
use crate::api::{Client, models::Server};
use rhai::{Array, Dynamic, plugin::*};
pub fn register(engine: &mut Engine) {
let server_module = exported_module!(server_api);
engine.register_global_module(server_module.into());
}
#[export_module]
pub mod server_api {
use crate::api::models::Cancellation;
use super::*;
use rhai::EvalAltResult;
#[rhai_fn(name = "get_server", return_raw)]
pub fn get_server(
client: &mut Client,
server_number: i64,
) -> Result<Server, Box<EvalAltResult>> {
client
.get_server(server_number as i32)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "get_servers", return_raw)]
pub fn get_servers(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
let servers = client
.get_servers()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
println!("number of SERVERS we got: {:#?}", servers.len());
Ok(servers.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "update_server_name", return_raw)]
pub fn update_server_name(
client: &mut Client,
server_number: i64,
name: &str,
) -> Result<Server, Box<EvalAltResult>> {
client
.update_server_name(server_number as i32, name)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "get_cancellation_data", return_raw)]
pub fn get_cancellation_data(
client: &mut Client,
server_number: i64,
) -> Result<Cancellation, Box<EvalAltResult>> {
client
.get_cancellation_data(server_number as i32)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "cancel_server", return_raw)]
pub fn cancel_server(
client: &mut Client,
server_number: i64,
cancellation_date: &str,
) -> Result<Cancellation, Box<EvalAltResult>> {
client
.cancel_server(server_number as i32, cancellation_date)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "withdraw_cancellation", return_raw)]
pub fn withdraw_cancellation(
client: &mut Client,
server_number: i64,
) -> Result<(), Box<EvalAltResult>> {
client
.withdraw_cancellation(server_number as i32)
.map_err(|e| e.to_string().into())
}
}


@@ -0,0 +1,170 @@
use crate::api::{
Client,
models::{
AuctionServerProduct, AuctionTransaction, OrderAuctionServerBuilder, OrderServerBuilder,
OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, Transaction,
},
};
use rhai::{Array, Dynamic, plugin::*};
pub fn register(engine: &mut Engine) {
let server_order_module = exported_module!(server_order_api);
engine.register_global_module(server_order_module.into());
}
#[export_module]
pub mod server_order_api {
use crate::api::models::OrderServerAddonBuilder;
#[rhai_fn(name = "get_server_products", return_raw)]
pub fn get_server_ordering_product_overview(
client: &mut Client,
) -> Result<Array, Box<EvalAltResult>> {
let overview_servers = client
.get_server_products()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(overview_servers.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_server_product_by_id", return_raw)]
pub fn get_server_ordering_product_by_id(
client: &mut Client,
product_id: &str,
) -> Result<OrderServerProduct, Box<EvalAltResult>> {
let product = client
.get_server_product_by_id(product_id)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(product)
}
#[rhai_fn(name = "order_server", return_raw)]
pub fn order_server(
client: &mut Client,
order: OrderServerBuilder,
) -> Result<Transaction, Box<EvalAltResult>> {
let transaction = client
.order_server(order)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
#[rhai_fn(name = "get_transaction_by_id", return_raw)]
pub fn get_transaction_by_id(
client: &mut Client,
transaction_id: &str,
) -> Result<Transaction, Box<EvalAltResult>> {
let transaction = client
.get_transaction_by_id(transaction_id)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
#[rhai_fn(name = "get_transactions", return_raw)]
pub fn get_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
let transactions = client
.get_transactions()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transactions.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_auction_server_products", return_raw)]
pub fn get_auction_server_products(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
let products = client
.get_auction_server_products()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(products.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_auction_server_product_by_id", return_raw)]
pub fn get_auction_server_product_by_id(
client: &mut Client,
product_id: &str,
) -> Result<AuctionServerProduct, Box<EvalAltResult>> {
let product = client
.get_auction_server_product_by_id(product_id)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(product)
}
#[rhai_fn(name = "get_auction_transactions", return_raw)]
pub fn get_auction_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
let transactions = client
.get_auction_transactions()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transactions.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_auction_transaction_by_id", return_raw)]
pub fn get_auction_transaction_by_id(
client: &mut Client,
transaction_id: &str,
) -> Result<AuctionTransaction, Box<EvalAltResult>> {
let transaction = client
.get_auction_transaction_by_id(transaction_id)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
#[rhai_fn(name = "get_server_addon_products", return_raw)]
pub fn get_server_addon_products(
client: &mut Client,
server_number: i64,
) -> Result<Array, Box<EvalAltResult>> {
let products = client
.get_server_addon_products(server_number)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(products.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_server_addon_transactions", return_raw)]
pub fn get_server_addon_transactions(
client: &mut Client,
) -> Result<Array, Box<EvalAltResult>> {
let transactions = client
.get_server_addon_transactions()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transactions.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_server_addon_transaction_by_id", return_raw)]
pub fn get_server_addon_transaction_by_id(
client: &mut Client,
transaction_id: &str,
) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
let transaction = client
.get_server_addon_transaction_by_id(transaction_id)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
#[rhai_fn(name = "order_auction_server", return_raw)]
pub fn order_auction_server(
client: &mut Client,
order: OrderAuctionServerBuilder,
) -> Result<AuctionTransaction, Box<EvalAltResult>> {
println!("Builder struct being used to order server: {:#?}", order);
let transaction = client.order_auction_server(
order.product_id,
order.authorized_keys.unwrap_or(vec![]),
order.dist,
None,
order.lang,
order.comment,
order.addon,
order.test,
).map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
#[rhai_fn(name = "order_server_addon", return_raw)]
pub fn order_server_addon(
client: &mut Client,
order: OrderServerAddonBuilder,
) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
println!("Builder struct being used to order server addon: {:#?}", order);
let transaction = client
.order_server_addon(order)
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(transaction)
}
}
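The same flow from Rhai, assuming the `hetzner` client is in scope via `herodo` (the product id below is a placeholder; pick a real one from the listing):

```rhai
let products = hetzner.get_server_products();
pretty_print(products);

// "EX44" is a hypothetical id taken from the listing above
let product = hetzner.get_server_product_by_id("EX44");
pretty_print([product]);
```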


@@ -0,0 +1,89 @@
use crate::api::{Client, models::SshKey};
use prettytable::{Table, row};
use rhai::{Array, Dynamic, Engine, plugin::*};
pub fn register(engine: &mut Engine) {
let ssh_keys_module = exported_module!(ssh_keys_api);
engine.register_global_module(ssh_keys_module.into());
}
#[export_module]
pub mod ssh_keys_api {
use super::*;
use rhai::EvalAltResult;
#[rhai_fn(name = "get_ssh_keys", return_raw)]
pub fn get_ssh_keys(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
let ssh_keys = client
.get_ssh_keys()
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
Ok(ssh_keys.into_iter().map(Dynamic::from).collect())
}
#[rhai_fn(name = "get_ssh_key", return_raw)]
pub fn get_ssh_key(
client: &mut Client,
fingerprint: &str,
) -> Result<SshKey, Box<EvalAltResult>> {
client
.get_ssh_key(fingerprint)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "add_ssh_key", return_raw)]
pub fn add_ssh_key(
client: &mut Client,
name: &str,
data: &str,
) -> Result<SshKey, Box<EvalAltResult>> {
client
.add_ssh_key(name, data)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "update_ssh_key_name", return_raw)]
pub fn update_ssh_key_name(
client: &mut Client,
fingerprint: &str,
name: &str,
) -> Result<SshKey, Box<EvalAltResult>> {
client
.update_ssh_key_name(fingerprint, name)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "delete_ssh_key", return_raw)]
pub fn delete_ssh_key(
client: &mut Client,
fingerprint: &str,
) -> Result<(), Box<EvalAltResult>> {
client
.delete_ssh_key(fingerprint)
.map_err(|e| e.to_string().into())
}
#[rhai_fn(name = "pretty_print")]
pub fn pretty_print_ssh_keys(keys: Array) {
let mut table = Table::new();
table.add_row(row![b =>
"Name",
"Fingerprint",
"Type",
"Size",
"Created At"
]);
for key_dyn in keys {
if let Some(key) = key_dyn.try_cast::<SshKey>() {
table.add_row(row![
key.name,
key.fingerprint,
key.key_type,
key.size.to_string(),
key.created_at
]);
}
}
table.printstd();
}
}
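And the script-side equivalents for key management (the key material is a truncated placeholder; pass a full public-key line as `data`):

```rhai
let key = hetzner.add_ssh_key("workstation", "ssh-ed25519 AAAAC3Nz... user@host");
pretty_print(hetzner.get_ssh_keys());
```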


@@ -9,22 +9,22 @@ license = "Apache-2.0"
[dependencies]
# HTTP client for async requests
reqwest = { version = "0.12.15", features = ["json"] }
reqwest = { workspace = true }
# JSON handling
serde_json = "1.0"
serde_json = { workspace = true }
# Base64 encoding/decoding for message payloads
base64 = "0.22.1"
base64 = { workspace = true }
# Async runtime
tokio = { version = "1.45.0", features = ["full"] }
tokio = { workspace = true }
# Rhai scripting support
rhai = { version = "1.12.0", features = ["sync"] }
rhai = { workspace = true }
# Logging
log = "0.4"
log = { workspace = true }
# URL encoding for API parameters
urlencoding = "2.1.3"
urlencoding = { workspace = true }
[dev-dependencies]
# For async testing
tokio-test = "0.4.4"
tokio-test = { workspace = true }
# For temporary files in tests
tempfile = "3.5"
tempfile = { workspace = true }


@@ -11,24 +11,24 @@ categories = ["database", "api-bindings"]
[dependencies]
# PostgreSQL client dependencies
postgres = "0.19.4"
postgres-types = "0.2.5"
tokio-postgres = "0.7.8"
postgres = { workspace = true }
postgres-types = { workspace = true }
tokio-postgres = { workspace = true }
# Connection pooling
r2d2 = "0.8.10"
r2d2_postgres = "0.18.2"
r2d2 = { workspace = true }
r2d2_postgres = { workspace = true }
# Utility dependencies
lazy_static = "1.4.0"
thiserror = "2.0.12"
lazy_static = { workspace = true }
thiserror = { workspace = true }
# Rhai scripting support
rhai = { version = "1.12.0", features = ["sync"] }
rhai = { workspace = true }
# SAL dependencies
sal-virt = { path = "../virt" }
sal-virt = { workspace = true }
[dev-dependencies]
tempfile = "3.5"
tokio-test = "0.4.4"
tempfile = { workspace = true }
tokio-test = { workspace = true }


@@ -11,11 +11,11 @@ categories = ["database", "caching", "api-bindings"]
[dependencies]
# Core Redis functionality
redis = "0.31.0"
lazy_static = "1.4.0"
redis = { workspace = true }
lazy_static = { workspace = true }
# Rhai integration (optional)
rhai = { version = "1.12.0", features = ["sync"], optional = true }
rhai = { workspace = true, optional = true }
[features]
default = ["rhai"]
@@ -23,4 +23,4 @@ rhai = ["dep:rhai"]
[dev-dependencies]
# For testing
tempfile = "3.5"
tempfile = { workspace = true }


@@ -0,0 +1,26 @@
[package]
name = "sal-rfs-client"
version = "0.1.0"
edition = "2021"
description = "SAL RFS Client - Client library for Remote File System server"
repository = "https://git.threefold.info/herocode/sal"
license = "Apache-2.0"
keywords = ["rfs", "client", "filesystem", "remote"]
categories = ["filesystem", "api-bindings"]
[dependencies]
openapi = { path = "./openapi" }
thiserror.workspace = true
url.workspace = true
reqwest = { workspace = true, features = ["json", "multipart"] }
tokio = { workspace = true, features = ["full"] }
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
log.workspace = true
bytes.workspace = true
futures.workspace = true
rhai.workspace = true
lazy_static.workspace = true
[dev-dependencies]
tempfile = "3.0"


@@ -0,0 +1,195 @@
# RFS Client
A Rust client library for interacting with the Remote File System (RFS) server.
## Overview
This client library provides a user-friendly wrapper around the OpenAPI-generated client code. It offers high-level abstractions for common operations such as:
- Authentication and session management
- File uploads and downloads with progress tracking
- Block-level operations and verification
- FList creation, monitoring, and management
- Timeout configuration and error handling
## Structure
The library is organized as follows:
- `client.rs`: Main client implementation with methods for interacting with the RFS server
- `error.rs`: Error types and handling
- `types.rs`: Type definitions and utilities
## Quick Start
```rust
use rfs_client::RfsClient;
use rfs_client::types::{ClientConfig, Credentials};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with custom configuration
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate
client.authenticate().await?;
println!("Authentication successful");
// Upload a file
let file_path = "/path/to/file.txt";
let file_hash = client.upload_file(file_path, None).await?;
println!("File uploaded with hash: {}", file_hash);
// Download the file
let output_path = "/path/to/output.txt";
client.download_file(&file_hash, output_path, None).await?;
println!("File downloaded to {}", output_path);
Ok(())
}
```
## Feature Examples
### Authentication
```rust
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 30,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
if client.is_authenticated() {
println!("Authentication successful");
}
```
### File Management
```rust
// Upload a file with options
let upload_options = UploadOptions {
chunk_size: Some(1024 * 1024), // 1MB chunks
verify: true,
};
let file_hash = client.upload_file("/path/to/file.txt", Some(upload_options)).await?;
// Download the file
let download_options = DownloadOptions {
verify: true,
};
client.download_file(&file_hash, "/path/to/output.txt", Some(download_options)).await?;
```
### FList Operations
```rust
use sal_rfs_client::types::{FlistOptions, WaitOptions};

// Create an FList from a Docker image
let options = FlistOptions {
    auth: None,
    username: None,
    password: None,
    email: None,
    server_address: Some("docker.io".to_string()),
    identity_token: None,
    registry_token: None,
};
let job_id = client.create_flist("alpine:latest", Some(options)).await?;

// Wait for FList creation with progress tracking
let wait_options = WaitOptions {
    timeout_seconds: 60,
    poll_interval_ms: 1000,
    progress_callback: Some(Box::new(|state| {
        println!("Progress: FList state is now {:?}", state);
    })),
};
let final_state = client.wait_for_flist_creation(&job_id, Some(wait_options)).await?;

// List available FLists
let flists = client.list_flists().await?;

// Preview an FList
let preview = client.preview_flist("flists/user/alpine-latest.fl").await?;

// Download an FList
client.download_flist("flists/user/alpine-latest.fl", "/tmp/downloaded_flist.fl").await?;
```
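`create_flist` returns a Conflict error if an FList for the image already exists. A sketch of tolerating that case, mirroring `flist_operations.rs` (which matches on the error's `Display` output):
```rust
// Treat "Conflict" as "already exists" rather than a fatal error
let job_id = match client.create_flist("alpine:latest", None).await {
    Ok(id) => Some(id),
    Err(e) if e.to_string().contains("Conflict") => {
        println!("FList already exists");
        None
    }
    Err(e) => return Err(e.into()),
};
```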
### Block Management
```rust
use openapi::models::{VerifyBlock, VerifyBlocksRequest};

// List blocks
let blocks_list = client.list_blocks(None).await?;
// Check if a block exists
let exists = client.check_block("block_hash").await?;
// Get block content
let block_content = client.get_block("block_hash").await?;
// Upload a block (raw bytes, tied to a file hash and block index)
let data = b"block contents".to_vec();
let block_hash = client.upload_block("file_hash", 0, data).await?;
// Verify blocks: each entry carries the block hash, its index, and the owning file hash
let verify_blocks = vec![VerifyBlock {
    block_hash: block_hash.clone(),
    block_index: 0,
    file_hash: "file_hash".to_string(),
}];
let request = VerifyBlocksRequest { blocks: verify_blocks };
let verify_result = client.verify_blocks(request).await?;
```
## Complete Examples
For more detailed examples, check the `examples` directory:
- `authentication.rs`: Authentication and health check examples
- `file_management.rs`: File upload and download with verification
- `flist_operations.rs`: Complete FList creation, monitoring, listing, preview, and download
- `block_management.rs`: Block-level operations including listing, verification, and upload
- `wait_for_flist.rs`: Advanced FList creation with progress monitoring
Run an example with:
```bash
cargo run --example flist_operations
```
## Development
This library wraps the OpenAPI-generated client located in the `openapi` directory. The OpenAPI client was generated using the OpenAPI Generator CLI.
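The generated code ships with a VERSION file pinning OpenAPI Generator 7.13.0, so regeneration should look roughly like the following (a sketch: the spec path `openapi.yaml` is an assumption, point it at the RFS server's actual OpenAPI spec):
```bash
# Hypothetical regeneration command; assumes openapi-generator-cli 7.13.0 is installed
openapi-generator-cli generate -i openapi.yaml -g rust -o openapi
```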
To build the library:
```bash
cargo build
```
To run tests:
```bash
cargo test -- --test-threads=1
```
## License
Apache-2.0


@@ -0,0 +1,42 @@
use sal_rfs_client::types::{ClientConfig, Credentials};
use sal_rfs_client::RfsClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication credentials
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 30,
    };

    let mut client = RfsClient::new(config);
    println!("Client created with authentication credentials");

    // Authenticate with the server
    client.authenticate().await?;
    if client.is_authenticated() {
        println!("Authentication successful");
    } else {
        println!("Authentication failed");
    }

    // Create a client without authentication
    let config_no_auth = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: None,
        timeout_seconds: 30,
    };

    let client_no_auth = RfsClient::new(config_no_auth);
    println!("Client created without authentication credentials");

    // Check health endpoint (doesn't require authentication)
    let health = client_no_auth.health_check().await?;
    println!("Server health: {:?}", health);

    Ok(())
}


@@ -0,0 +1,153 @@
use openapi::models::{VerifyBlock, VerifyBlocksRequest};
use sal_rfs_client::types::{ClientConfig, Credentials};
use sal_rfs_client::RfsClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 60,
    };

    let mut client = RfsClient::new(config);

    // Authenticate with the server
    client.authenticate().await?;
    println!("Authentication successful");

    // Create a test file to upload for block testing
    let test_file_path = "/tmp/block_test.txt";
    let test_content = "This is a test file for RFS client block management";
    std::fs::write(test_file_path, test_content)?;
    println!("Created test file at {}", test_file_path);

    // Upload the file to get blocks
    println!("Uploading file to get blocks...");
    let file_hash = client.upload_file(test_file_path, None).await?;
    println!("File uploaded with hash: {}", file_hash);

    // Get blocks by file hash
    println!("Getting blocks for file hash: {}", file_hash);
    let blocks = client.get_blocks_by_hash(&file_hash).await?;
    println!("Found {} blocks for the file", blocks.blocks.len());

    // Print block information
    for (i, block_data) in blocks.blocks.iter().enumerate() {
        println!(
            "Block {}: Hash={}, Index={}",
            i, block_data.hash, block_data.index
        );
    }

    // Verify blocks with complete information
    println!("Verifying blocks...");

    // Create a list of VerifyBlock objects with complete information
    let verify_blocks = blocks
        .blocks
        .iter()
        .map(|block| {
            VerifyBlock {
                block_hash: block.hash.clone(),
                block_index: block.index,
                file_hash: file_hash.clone(), // Using the actual file hash
            }
        })
        .collect::<Vec<_>>();

    // Print the block information being verified
    for block in verify_blocks.iter() {
        println!("Block: {}", block.block_hash);
        println!("Block index: {}", block.block_index);
        println!("File hash: {}", block.file_hash);
    }

    // Create the request with the complete block information
    let request = VerifyBlocksRequest {
        blocks: verify_blocks,
    };

    // Send the verification request
    let verify_result = client.verify_blocks(request).await?;
    println!(
        "Verification result: {} missing blocks",
        verify_result.missing.len()
    );
    for block in verify_result.missing.iter() {
        println!("Missing block: {}", block);
    }

    // List blocks (list_blocks_handler)
    println!("\n1. Listing all blocks with pagination...");
    let blocks_list = client.list_blocks(None).await?;
    println!("Server has {} blocks in total", blocks_list.len());
    if !blocks_list.is_empty() {
        let first_few = blocks_list
            .iter()
            .take(3)
            .map(|s| s.as_str())
            .collect::<Vec<_>>()
            .join(", ");
        println!("First few blocks: {}", first_few);
    }

    // Check if a block exists (check_block_handler)
    if !blocks.blocks.is_empty() {
        let block_to_check = &blocks.blocks[0].hash;
        println!("\n2. Checking if block exists: {}", block_to_check);
        let exists = client.check_block(block_to_check).await?;
        println!("Block exists: {}", exists);
    }

    // Get block downloads statistics (get_block_downloads_handler)
    if !blocks.blocks.is_empty() {
        let block_to_check = &blocks.blocks[0].hash;
        println!(
            "\n3. Getting download statistics for block: {}",
            block_to_check
        );
        let downloads = client.get_block_downloads(block_to_check).await?;
        println!(
            "Block has been downloaded {} times",
            downloads.downloads_count
        );
    }

    // Get a specific block content (get_block_handler)
    if !blocks.blocks.is_empty() {
        let block_to_get = &blocks.blocks[0].hash;
        println!("\n4. Getting content for block: {}", block_to_get);
        let block_content = client.get_block(block_to_get).await?;
        println!("Retrieved block with {} bytes", block_content.len());
    }

    // Get user blocks (get_user_blocks_handler)
    println!("\n5. Listing user blocks...");
    let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?;
    println!(
        "User has {} blocks (showing page 1 with 10 per page)",
        user_blocks.total
    );
    for block in user_blocks.blocks.iter().take(3) {
        println!(" - Block: {}, Size: {}", block.hash, block.size);
    }

    // Upload a block (upload_block_handler)
    println!("\n6. Uploading a new test block...");
    let test_block_data = b"This is test block data for direct block upload";
    let new_file_hash = "test_file_hash_for_block_upload";
    let block_index = 0;
    let block_hash = client
        .upload_block(new_file_hash, block_index, test_block_data.to_vec())
        .await?;
    println!("Uploaded block with hash: {}", block_hash);

    // Clean up
    std::fs::remove_file(test_file_path)?;
    println!("Test file cleaned up");

    Ok(())
}


@@ -0,0 +1,66 @@
use sal_rfs_client::types::{ClientConfig, Credentials, DownloadOptions, UploadOptions};
use sal_rfs_client::RfsClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 60,
    };

    let mut client = RfsClient::new(config);

    // Authenticate with the server
    client.authenticate().await?;
    println!("Authentication successful");

    // Create a test file to upload
    let test_file_path = "/tmp/test_upload.txt";
    std::fs::write(test_file_path, "This is a test file for RFS client upload")?;
    println!("Created test file at {}", test_file_path);

    // Upload the file with options
    println!("Uploading file...");
    let upload_options = UploadOptions {
        chunk_size: Some(1024 * 1024), // 1MB chunks
        verify: true,
    };
    let file_hash = client
        .upload_file(test_file_path, Some(upload_options))
        .await?;
    println!("File uploaded with hash: {}", file_hash);

    // Download the file
    let download_path = "/tmp/test_download.txt";
    println!("Downloading file to {}...", download_path);
    let download_options = DownloadOptions { verify: true };
    client
        .download_file(&file_hash, download_path, Some(download_options))
        .await?;
    println!("File downloaded to {}", download_path);

    // Verify the downloaded file matches the original
    let original_content = std::fs::read_to_string(test_file_path)?;
    let downloaded_content = std::fs::read_to_string(download_path)?;
    if original_content == downloaded_content {
        println!("File contents match! Download successful.");
    } else {
        println!("ERROR: File contents do not match!");
    }

    // Clean up test files
    std::fs::remove_file(test_file_path)?;
    std::fs::remove_file(download_path)?;
    println!("Test files cleaned up");

    Ok(())
}


@@ -0,0 +1,176 @@
use sal_rfs_client::types::{ClientConfig, Credentials, FlistOptions, WaitOptions};
use sal_rfs_client::RfsClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let parent_dir = "flists";

    // Create a client with authentication
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 60,
    };

    let mut client = RfsClient::new(config);

    // Authenticate with the server
    client.authenticate().await?;
    println!("Authentication successful");

    println!("\n1. CREATE FLIST - Creating an FList from a Docker image");
    let image_name = "alpine:latest";
    println!("Creating FList for image: {}", image_name);

    // Use FlistOptions to specify additional parameters
    let options = FlistOptions {
        auth: None,
        username: None,
        password: None,
        email: None,
        server_address: Some("docker.io".to_string()),
        identity_token: None,
        registry_token: None,
    };

    // Create the FList and handle potential conflict error
    let job_id = match client.create_flist(&image_name, Some(options)).await {
        Ok(id) => {
            println!("FList creation started with job ID: {}", id);
            Some(id)
        }
        Err(e) => {
            if e.to_string().contains("Conflict") {
                println!("FList already exists");
                None
            } else {
                return Err(e.into());
            }
        }
    };

    // 2. Check FList state if we have a job ID
    if let Some(job_id) = &job_id {
        println!("\n2. GET FLIST STATE - Checking FList creation state");
        let state = client.get_flist_state(job_id).await?;
        println!("Current FList state: {:?}", state.flist_state);

        // 3. Wait for FList creation with progress reporting
        println!("\n3. WAIT FOR FLIST CREATION - Waiting for FList to be created with progress reporting");
        let wait_options = WaitOptions {
            timeout_seconds: 60, // Shorter timeout for the example
            poll_interval_ms: 1000,
            progress_callback: Some(Box::new(|state| {
                println!("Progress: FList state is now {:?}", state);
                // No return value needed (returns unit type)
            })),
        };

        // Wait for the FList to be created (with a timeout)
        match client
            .wait_for_flist_creation(job_id, Some(wait_options))
            .await
        {
            Ok(final_state) => {
                println!("FList creation completed with state: {:?}", final_state);
            }
            Err(e) => {
                println!("Error waiting for FList creation: {}", e);
                // Continue with the example even if waiting fails
            }
        };
    }

    // 4. List all available FLists
    println!("\n4. LIST FLISTS - Listing all available FLists");

    // Variable to store the FList path for preview and download
    let mut flist_path_for_preview: Option<String> = None;

    match client.list_flists().await {
        Ok(flists) => {
            println!("Found {} FList categories", flists.len());
            for (category, files) in &flists {
                println!("Category: {}", category);
                for file in files.iter().take(2) {
                    // Show only first 2 files per category
                    println!(" - {} (size: {} bytes)", file.name, file.size);

                    // Save the first FList path for preview
                    if flist_path_for_preview.is_none() {
                        let path = format!("{}/{}/{}", parent_dir, category, file.name);
                        flist_path_for_preview = Some(path);
                    }
                }
                if files.len() > 2 {
                    println!(" - ... and {} more files", files.len() - 2);
                }
            }

            // 5. Preview an FList if we found one
            if let Some(ref flist_path) = flist_path_for_preview {
                println!("\n5. PREVIEW FLIST - Previewing FList: {}", flist_path);
                match client.preview_flist(flist_path).await {
                    Ok(preview) => {
                        println!("FList preview for {}:", flist_path);
                        println!(" - Checksum: {}", preview.checksum);
                        println!(" - Metadata: {}", preview.metadata);

                        // Display content (list of strings)
                        if !preview.content.is_empty() {
                            println!(" - Content entries:");
                            for (i, entry) in preview.content.iter().enumerate().take(5) {
                                println!(" {}. {}", i + 1, entry);
                            }
                            if preview.content.len() > 5 {
                                println!(" ... and {} more entries", preview.content.len() - 5);
                            }
                        }
                    }
                    Err(e) => println!("Error previewing FList: {}", e),
                }
            } else {
                println!("No FLists available for preview");
            }
        }
        Err(e) => println!("Error listing FLists: {}", e),
    }

    // 6. DOWNLOAD FLIST - Downloading an FList to a local file
    if let Some(ref flist_path) = flist_path_for_preview {
        println!("\n6. DOWNLOAD FLIST - Downloading FList: {}", flist_path);

        // Create a temporary output path for the downloaded FList
        let output_path = "/tmp/downloaded_flist.fl";

        match client.download_flist(flist_path, output_path).await {
            Ok(_) => {
                println!("FList successfully downloaded to {}", output_path);

                // Get file size
                match std::fs::metadata(output_path) {
                    Ok(metadata) => println!("Downloaded file size: {} bytes", metadata.len()),
                    Err(e) => println!("Error getting file metadata: {}", e),
                }
            }
            Err(e) => println!("Error downloading FList: {}", e),
        }
    } else {
        println!("\n6. DOWNLOAD FLIST - No FList available for download");
    }

    println!("\nAll FList operations demonstrated:");
    println!("1. create_flist - Create a new FList from a Docker image");
    println!("2. get_flist_state - Check the state of an FList creation job");
    println!(
        "3. wait_for_flist_creation - Wait for an FList to be created with progress reporting"
    );
    println!("4. list_flists - List all available FLists");
    println!("5. preview_flist - Preview the content of an FList");
    println!("6. download_flist - Download an FList to a local file");

    Ok(())
}


@@ -0,0 +1,64 @@
use openapi::models::FlistState;
use sal_rfs_client::types::{ClientConfig, Credentials, WaitOptions};
use sal_rfs_client::RfsClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 60,
    };

    let mut client = RfsClient::new(config);

    // Authenticate with the server
    client.authenticate().await?;
    println!("Authentication successful");

    // Create an FList from a Docker image
    let image_name = "redis:latest";
    println!("Creating FList for image: {}", image_name);
    let job_id = client.create_flist(&image_name, None).await?;
    println!("FList creation started with job ID: {}", job_id);

    // Set up options for waiting with progress reporting
    let options = WaitOptions {
        timeout_seconds: 600,   // 10 minutes timeout
        poll_interval_ms: 2000, // Check every 2 seconds
        progress_callback: Some(Box::new(|state| match state {
            FlistState::FlistStateInProgress(info) => {
                println!(
                    "Progress: {:.1}% - {}",
                    info.in_progress.progress, info.in_progress.msg
                );
            }
            FlistState::FlistStateStarted(_) => {
                println!("FList creation started...");
            }
            FlistState::FlistStateAccepted(_) => {
                println!("FList creation request accepted...");
            }
            _ => println!("State: {:?}", state),
        })),
    };

    // Wait for the FList to be created
    println!("Waiting for FList creation to complete...");

    // Use ? operator to propagate errors properly
    let state = client
        .wait_for_flist_creation(&job_id, Some(options))
        .await
        .map_err(|e| -> Box<dyn std::error::Error> { Box::new(e) })?;

    println!("FList created successfully!");
    println!("Final state: {:?}", state);

    Ok(())
}

File diff suppressed because one or more lines are too long


@@ -0,0 +1,3 @@
/target/
**/*.rs.bk
Cargo.lock


@@ -0,0 +1,23 @@
# OpenAPI Generator Ignore
# Generated by openapi-generator https://github.com/openapitools/openapi-generator
# Use this file to prevent files from being overwritten by the generator.
# The patterns follow closely to .gitignore or .dockerignore.
# As an example, the C# client generator defines ApiClient.cs.
# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
#ApiClient.cs
# You can match any string of characters against a directory, file or extension with a single asterisk (*):
#foo/*/qux
# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
#foo/**/qux
# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
# You can also negate patterns with an exclamation (!).
# For example, you can ignore all files in a docs folder with the file extension .md:
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md


@@ -0,0 +1,125 @@
.gitignore
.travis.yml
Cargo.toml
README.md
docs/AuthenticationApi.md
docs/BlockDownloadsResponse.md
docs/BlockInfo.md
docs/BlockManagementApi.md
docs/BlockUploadedResponse.md
docs/BlocksResponse.md
docs/DirListTemplate.md
docs/DirLister.md
docs/ErrorTemplate.md
docs/FileDownloadRequest.md
docs/FileInfo.md
docs/FileManagementApi.md
docs/FileUploadResponse.md
docs/FlistBody.md
docs/FlistManagementApi.md
docs/FlistState.md
docs/FlistStateAccepted.md
docs/FlistStateCreated.md
docs/FlistStateInProgress.md
docs/FlistStateInfo.md
docs/FlistStateResponse.md
docs/FlistStateStarted.md
docs/HealthResponse.md
docs/Job.md
docs/ListBlocksParams.md
docs/ListBlocksResponse.md
docs/PreviewResponse.md
docs/ResponseError.md
docs/ResponseErrorBadRequest.md
docs/ResponseErrorConflict.md
docs/ResponseErrorForbidden.md
docs/ResponseErrorNotFound.md
docs/ResponseErrorTemplateError.md
docs/ResponseErrorUnauthorized.md
docs/ResponseResult.md
docs/ResponseResultBlockUploaded.md
docs/ResponseResultDirTemplate.md
docs/ResponseResultFileUploaded.md
docs/ResponseResultFlistCreated.md
docs/ResponseResultFlistState.md
docs/ResponseResultFlists.md
docs/ResponseResultPreviewFlist.md
docs/ResponseResultRes.md
docs/ResponseResultSignedIn.md
docs/SignInBody.md
docs/SignInResponse.md
docs/SystemApi.md
docs/TemplateErr.md
docs/TemplateErrBadRequest.md
docs/TemplateErrInternalServerError.md
docs/TemplateErrNotFound.md
docs/UploadBlockParams.md
docs/UserBlockInfo.md
docs/UserBlocksResponse.md
docs/VerifyBlock.md
docs/VerifyBlocksRequest.md
docs/VerifyBlocksResponse.md
docs/WebsiteServingApi.md
git_push.sh
src/apis/authentication_api.rs
src/apis/block_management_api.rs
src/apis/configuration.rs
src/apis/file_management_api.rs
src/apis/flist_management_api.rs
src/apis/mod.rs
src/apis/system_api.rs
src/apis/website_serving_api.rs
src/lib.rs
src/models/block_downloads_response.rs
src/models/block_info.rs
src/models/block_uploaded_response.rs
src/models/blocks_response.rs
src/models/dir_list_template.rs
src/models/dir_lister.rs
src/models/error_template.rs
src/models/file_download_request.rs
src/models/file_info.rs
src/models/file_upload_response.rs
src/models/flist_body.rs
src/models/flist_state.rs
src/models/flist_state_accepted.rs
src/models/flist_state_created.rs
src/models/flist_state_in_progress.rs
src/models/flist_state_info.rs
src/models/flist_state_response.rs
src/models/flist_state_started.rs
src/models/health_response.rs
src/models/job.rs
src/models/list_blocks_params.rs
src/models/list_blocks_response.rs
src/models/mod.rs
src/models/preview_response.rs
src/models/response_error.rs
src/models/response_error_bad_request.rs
src/models/response_error_conflict.rs
src/models/response_error_forbidden.rs
src/models/response_error_not_found.rs
src/models/response_error_template_error.rs
src/models/response_error_unauthorized.rs
src/models/response_result.rs
src/models/response_result_block_uploaded.rs
src/models/response_result_dir_template.rs
src/models/response_result_file_uploaded.rs
src/models/response_result_flist_created.rs
src/models/response_result_flist_state.rs
src/models/response_result_flists.rs
src/models/response_result_preview_flist.rs
src/models/response_result_res.rs
src/models/response_result_signed_in.rs
src/models/sign_in_body.rs
src/models/sign_in_response.rs
src/models/template_err.rs
src/models/template_err_bad_request.rs
src/models/template_err_internal_server_error.rs
src/models/template_err_not_found.rs
src/models/upload_block_params.rs
src/models/user_block_info.rs
src/models/user_blocks_response.rs
src/models/verify_block.rs
src/models/verify_blocks_request.rs
src/models/verify_blocks_response.rs


@@ -0,0 +1 @@
7.13.0

Some files were not shown because too many files have changed in this diff.