Compare commits
	
		
			55 Commits
		
	
	
		
			main-rfs-c
			...
			developmen
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 9865e601d7 | ||
|  | 7afa5ea1c0 | ||
|  | 6c2d96c9a5 | ||
|  | b2fc0976bd | ||
|  | e114404ca7 | ||
|  | 536779f521 | ||
|  | c2969621b1 | ||
|  | b39f24ca8f | ||
| f87a1d7f80 | |||
| 17e5924e0b | |||
|  | 768e3e176d | ||
|  | aa0248ef17 | ||
|  | aab2b6f128 | ||
|  | d735316b7f | ||
|  | d1c80863b8 | ||
|  | 169c62da47 | ||
|  | 33a5f24981 | ||
|  | d7562ce466 | ||
| ca736d62f3 | |||
|  | 078c6f723b | ||
|  | 9fdb8d8845 | ||
| 8203a3b1ff | |||
| 1770ac561e | |||
|  | eed6dbf8dc | ||
| 4cd4e04028 | |||
| 8cc828fc0e | |||
| 56af312aad | |||
| dfd6931c5b | |||
| 6e01f99958 | |||
| 0c02d0e99f | |||
| 7856fc0a4e | |||
|  | 758e59e921 | ||
| f1806eb788 | |||
|  | 6e5d9b35e8 | ||
| 61f5331804 | |||
|  | 423b7bfa7e | ||
| fc2830da31 | |||
|  | 6b12001ca2 | ||
|  | 99e121b0d8 | ||
|  | 502e345f91 | ||
|  | 352e846410 | ||
|  | b72c50bed9 | ||
|  | 95122dffee | ||
|  | a63cbe2bd9 | ||
|  | 1e4c0ac41a | ||
|  | 0e49be8d71 | ||
|  | 32339e6063 | ||
|  | 131d978450 | ||
|  | 46ad848e7e | ||
|  | b4e370b668 | ||
| ef8cc74d2b | |||
|  | 23db07b0bd | ||
| b4dfa7733d | |||
|  | e01b83f12a | ||
|  | 52f2f7e3c4 | 
							
								
								
									
										227
									
								
								.github/workflows/publish.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										227
									
								
								.github/workflows/publish.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,227 @@ | |||||||
|  | name: Publish SAL Crates | ||||||
|  |  | ||||||
|  | on: | ||||||
|  |   release: | ||||||
|  |     types: [published] | ||||||
|  |   workflow_dispatch: | ||||||
|  |     inputs: | ||||||
|  |       version: | ||||||
|  |         description: 'Version to publish (e.g., 0.1.0)' | ||||||
|  |         required: true | ||||||
|  |         type: string | ||||||
|  |       dry_run: | ||||||
|  |         description: 'Dry run (do not actually publish)' | ||||||
|  |         required: false | ||||||
|  |         type: boolean | ||||||
|  |         default: false | ||||||
|  |  | ||||||
|  | env: | ||||||
|  |   CARGO_TERM_COLOR: always | ||||||
|  |  | ||||||
|  | jobs: | ||||||
|  |   publish: | ||||||
|  |     name: Publish to crates.io | ||||||
|  |     runs-on: ubuntu-latest | ||||||
|  |      | ||||||
|  |     steps: | ||||||
|  |     - name: Checkout repository | ||||||
|  |       uses: actions/checkout@v4 | ||||||
|  |       with: | ||||||
|  |         fetch-depth: 0 | ||||||
|  |      | ||||||
|  |     - name: Install Rust toolchain | ||||||
|  |       uses: dtolnay/rust-toolchain@stable | ||||||
|  |       with: | ||||||
|  |         toolchain: stable | ||||||
|  |      | ||||||
|  |     - name: Cache Cargo dependencies | ||||||
|  |       uses: actions/cache@v4 | ||||||
|  |       with: | ||||||
|  |         path: | | ||||||
|  |           ~/.cargo/bin/ | ||||||
|  |           ~/.cargo/registry/index/ | ||||||
|  |           ~/.cargo/registry/cache/ | ||||||
|  |           ~/.cargo/git/db/ | ||||||
|  |           target/ | ||||||
|  |         key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} | ||||||
|  |         restore-keys: | | ||||||
|  |           ${{ runner.os }}-cargo- | ||||||
|  |      | ||||||
|  |     - name: Install cargo-edit for version management | ||||||
|  |       run: cargo install cargo-edit | ||||||
|  |      | ||||||
|  |     - name: Set version from release tag | ||||||
|  |       if: github.event_name == 'release' | ||||||
|  |       run: | | ||||||
|  |         VERSION=${GITHUB_REF#refs/tags/v} | ||||||
|  |         echo "PUBLISH_VERSION=$VERSION" >> $GITHUB_ENV | ||||||
|  |         echo "Publishing version: $VERSION" | ||||||
|  |      | ||||||
|  |     - name: Set version from workflow input | ||||||
|  |       if: github.event_name == 'workflow_dispatch' | ||||||
|  |       run: | | ||||||
|  |         echo "PUBLISH_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV | ||||||
|  |         echo "Publishing version: ${{ github.event.inputs.version }}" | ||||||
|  |      | ||||||
|  |     - name: Update version in all crates | ||||||
|  |       run: | | ||||||
|  |         echo "Updating version to $PUBLISH_VERSION" | ||||||
|  |          | ||||||
|  |         # Update root Cargo.toml | ||||||
|  |         cargo set-version $PUBLISH_VERSION | ||||||
|  |          | ||||||
|  |         # Update each crate | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           if [ -d "$crate" ]; then | ||||||
|  |             cd "$crate" | ||||||
|  |             cargo set-version $PUBLISH_VERSION | ||||||
|  |             cd .. | ||||||
|  |             echo "Updated $crate to version $PUBLISH_VERSION" | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Run tests | ||||||
|  |       run: cargo test --workspace --verbose | ||||||
|  |      | ||||||
|  |     - name: Check formatting | ||||||
|  |       run: cargo fmt --all -- --check | ||||||
|  |      | ||||||
|  |     - name: Run clippy | ||||||
|  |       run: cargo clippy --workspace --all-targets --all-features -- -D warnings | ||||||
|  |      | ||||||
|  |     - name: Dry run publish (check packages) | ||||||
|  |       run: | | ||||||
|  |         echo "Checking all packages can be published..." | ||||||
|  |          | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           if [ -d "$crate" ]; then | ||||||
|  |             echo "Checking $crate..." | ||||||
|  |             cd "$crate" | ||||||
|  |             cargo publish --dry-run | ||||||
|  |             cd .. | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |          | ||||||
|  |         echo "Checking main crate..." | ||||||
|  |         cargo publish --dry-run | ||||||
|  |      | ||||||
|  |     - name: Publish crates (dry run) | ||||||
|  |       if: github.event.inputs.dry_run == 'true' | ||||||
|  |       run: | | ||||||
|  |         echo "🔍 DRY RUN MODE - Would publish the following crates:" | ||||||
|  |         echo "Individual crates: sal-os, sal-process, sal-text, sal-net, sal-git, sal-vault, sal-kubernetes, sal-virt, sal-redisclient, sal-postgresclient, sal-zinit-client, sal-mycelium, sal-rhai" | ||||||
|  |         echo "Meta-crate: sal" | ||||||
|  |         echo "Version: $PUBLISH_VERSION" | ||||||
|  |      | ||||||
|  |     - name: Publish individual crates | ||||||
|  |       if: github.event.inputs.dry_run != 'true' | ||||||
|  |       env: | ||||||
|  |         CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} | ||||||
|  |       run: | | ||||||
|  |         echo "Publishing individual crates..." | ||||||
|  |          | ||||||
|  |         # Crates in dependency order | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||||
|  |          | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           if [ -d "$crate" ]; then | ||||||
|  |             echo "Publishing sal-$crate..." | ||||||
|  |             cd "$crate" | ||||||
|  |              | ||||||
|  |             # Retry logic for transient failures | ||||||
|  |             for attempt in 1 2 3; do | ||||||
|  |               if cargo publish --token $CARGO_REGISTRY_TOKEN; then | ||||||
|  |                 echo "✅ sal-$crate published successfully" | ||||||
|  |                 break | ||||||
|  |               else | ||||||
|  |                 if [ $attempt -eq 3 ]; then | ||||||
|  |                   echo "❌ Failed to publish sal-$crate after 3 attempts" | ||||||
|  |                   exit 1 | ||||||
|  |                 else | ||||||
|  |                   echo "⚠️ Attempt $attempt failed, retrying in 30 seconds..." | ||||||
|  |                   sleep 30 | ||||||
|  |                 fi | ||||||
|  |               fi | ||||||
|  |             done | ||||||
|  |              | ||||||
|  |             cd .. | ||||||
|  |              | ||||||
|  |             # Wait for crates.io to process | ||||||
|  |             if [ "$crate" != "rhai" ]; then | ||||||
|  |               echo "⏳ Waiting 30 seconds for crates.io to process..." | ||||||
|  |               sleep 30 | ||||||
|  |             fi | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Publish main crate | ||||||
|  |       if: github.event.inputs.dry_run != 'true' | ||||||
|  |       env: | ||||||
|  |         CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} | ||||||
|  |       run: | | ||||||
|  |         echo "Publishing main sal crate..." | ||||||
|  |          | ||||||
|  |         # Wait a bit longer before publishing the meta-crate | ||||||
|  |         echo "⏳ Waiting 60 seconds for all individual crates to be available..." | ||||||
|  |         sleep 60 | ||||||
|  |          | ||||||
|  |         # Retry logic for the main crate | ||||||
|  |         for attempt in 1 2 3; do | ||||||
|  |           if cargo publish --token $CARGO_REGISTRY_TOKEN; then | ||||||
|  |             echo "✅ Main sal crate published successfully" | ||||||
|  |             break | ||||||
|  |           else | ||||||
|  |             if [ $attempt -eq 3 ]; then | ||||||
|  |               echo "❌ Failed to publish main sal crate after 3 attempts" | ||||||
|  |               exit 1 | ||||||
|  |             else | ||||||
|  |               echo "⚠️ Attempt $attempt failed, retrying in 60 seconds..." | ||||||
|  |               sleep 60 | ||||||
|  |             fi | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Create summary | ||||||
|  |       if: always() | ||||||
|  |       run: | | ||||||
|  |         echo "## 📦 SAL Publishing Summary" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "**Version:** $PUBLISH_VERSION" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "**Trigger:** ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY | ||||||
|  |          | ||||||
|  |         if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then | ||||||
|  |           echo "**Mode:** Dry Run" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         else | ||||||
|  |           echo "**Mode:** Live Publishing" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         fi | ||||||
|  |          | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "### Published Crates" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-os" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-process" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-text" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-net" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-git" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-vault" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-kubernetes" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-virt" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-redisclient" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-postgresclient" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-zinit-client" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-mycelium" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal-rhai" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- sal (meta-crate)" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "### Usage" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo '```bash' >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "# Individual crates" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "cargo add sal-os sal-process sal-text" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "# Meta-crate with features" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "cargo add sal --features core" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "cargo add sal --features all" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo '```' >> $GITHUB_STEP_SUMMARY | ||||||
							
								
								
									
										233
									
								
								.github/workflows/test-publish.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										233
									
								
								.github/workflows/test-publish.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,233 @@ | |||||||
|  | name: Test Publishing Setup | ||||||
|  |  | ||||||
|  | on: | ||||||
|  |   push: | ||||||
|  |     branches: [ main, master ] | ||||||
|  |     paths: | ||||||
|  |       - '**/Cargo.toml' | ||||||
|  |       - 'scripts/publish-all.sh' | ||||||
|  |       - '.github/workflows/publish.yml' | ||||||
|  |   pull_request: | ||||||
|  |     branches: [ main, master ] | ||||||
|  |     paths: | ||||||
|  |       - '**/Cargo.toml' | ||||||
|  |       - 'scripts/publish-all.sh' | ||||||
|  |       - '.github/workflows/publish.yml' | ||||||
|  |   workflow_dispatch: | ||||||
|  |  | ||||||
|  | env: | ||||||
|  |   CARGO_TERM_COLOR: always | ||||||
|  |  | ||||||
|  | jobs: | ||||||
|  |   test-publish-setup: | ||||||
|  |     name: Test Publishing Setup | ||||||
|  |     runs-on: ubuntu-latest | ||||||
|  |      | ||||||
|  |     steps: | ||||||
|  |     - name: Checkout repository | ||||||
|  |       uses: actions/checkout@v4 | ||||||
|  |      | ||||||
|  |     - name: Install Rust toolchain | ||||||
|  |       uses: dtolnay/rust-toolchain@stable | ||||||
|  |       with: | ||||||
|  |         toolchain: stable | ||||||
|  |      | ||||||
|  |     - name: Cache Cargo dependencies | ||||||
|  |       uses: actions/cache@v4 | ||||||
|  |       with: | ||||||
|  |         path: | | ||||||
|  |           ~/.cargo/bin/ | ||||||
|  |           ~/.cargo/registry/index/ | ||||||
|  |           ~/.cargo/registry/cache/ | ||||||
|  |           ~/.cargo/git/db/ | ||||||
|  |           target/ | ||||||
|  |         key: ${{ runner.os }}-cargo-publish-test-${{ hashFiles('**/Cargo.lock') }} | ||||||
|  |         restore-keys: | | ||||||
|  |           ${{ runner.os }}-cargo-publish-test- | ||||||
|  |           ${{ runner.os }}-cargo- | ||||||
|  |      | ||||||
|  |     - name: Install cargo-edit | ||||||
|  |       run: cargo install cargo-edit | ||||||
|  |      | ||||||
|  |     - name: Test workspace structure | ||||||
|  |       run: | | ||||||
|  |         echo "Testing workspace structure..." | ||||||
|  |          | ||||||
|  |         # Check that all expected crates exist | ||||||
|  |         EXPECTED_CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo) | ||||||
|  |          | ||||||
|  |         for crate in "${EXPECTED_CRATES[@]}"; do | ||||||
|  |           if [ -d "$crate" ] && [ -f "$crate/Cargo.toml" ]; then | ||||||
|  |             echo "✅ $crate exists" | ||||||
|  |           else | ||||||
|  |             echo "❌ $crate missing or invalid" | ||||||
|  |             exit 1 | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Test feature configuration | ||||||
|  |       run: | | ||||||
|  |         echo "Testing feature configuration..." | ||||||
|  |          | ||||||
|  |         # Test that features work correctly | ||||||
|  |         cargo check --features os | ||||||
|  |         cargo check --features process | ||||||
|  |         cargo check --features text | ||||||
|  |         cargo check --features net | ||||||
|  |         cargo check --features git | ||||||
|  |         cargo check --features vault | ||||||
|  |         cargo check --features kubernetes | ||||||
|  |         cargo check --features virt | ||||||
|  |         cargo check --features redisclient | ||||||
|  |         cargo check --features postgresclient | ||||||
|  |         cargo check --features zinit_client | ||||||
|  |         cargo check --features mycelium | ||||||
|  |         cargo check --features rhai | ||||||
|  |          | ||||||
|  |         echo "✅ All individual features work" | ||||||
|  |          | ||||||
|  |         # Test feature groups | ||||||
|  |         cargo check --features core | ||||||
|  |         cargo check --features clients | ||||||
|  |         cargo check --features infrastructure | ||||||
|  |         cargo check --features scripting | ||||||
|  |          | ||||||
|  |         echo "✅ All feature groups work" | ||||||
|  |          | ||||||
|  |         # Test all features | ||||||
|  |         cargo check --features all | ||||||
|  |          | ||||||
|  |         echo "✅ All features together work" | ||||||
|  |      | ||||||
|  |     - name: Test dry-run publishing | ||||||
|  |       run: | | ||||||
|  |         echo "Testing dry-run publishing..." | ||||||
|  |          | ||||||
|  |         # Test each individual crate can be packaged | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||||
|  |          | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           echo "Testing sal-$crate..." | ||||||
|  |           cd "$crate" | ||||||
|  |           cargo publish --dry-run | ||||||
|  |           cd .. | ||||||
|  |           echo "✅ sal-$crate can be published" | ||||||
|  |         done | ||||||
|  |          | ||||||
|  |         # Test main crate | ||||||
|  |         echo "Testing main sal crate..." | ||||||
|  |         cargo publish --dry-run | ||||||
|  |         echo "✅ Main sal crate can be published" | ||||||
|  |      | ||||||
|  |     - name: Test publishing script | ||||||
|  |       run: | | ||||||
|  |         echo "Testing publishing script..." | ||||||
|  |          | ||||||
|  |         # Make script executable | ||||||
|  |         chmod +x scripts/publish-all.sh | ||||||
|  |          | ||||||
|  |         # Test dry run | ||||||
|  |         ./scripts/publish-all.sh --dry-run --version 0.1.0-test | ||||||
|  |          | ||||||
|  |         echo "✅ Publishing script works" | ||||||
|  |      | ||||||
|  |     - name: Test version consistency | ||||||
|  |       run: | | ||||||
|  |         echo "Testing version consistency..." | ||||||
|  |          | ||||||
|  |         # Get version from root Cargo.toml | ||||||
|  |         ROOT_VERSION=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/') | ||||||
|  |         echo "Root version: $ROOT_VERSION" | ||||||
|  |          | ||||||
|  |         # Check all crates have the same version | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo) | ||||||
|  |          | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           if [ -f "$crate/Cargo.toml" ]; then | ||||||
|  |             CRATE_VERSION=$(grep '^version = ' "$crate/Cargo.toml" | head -1 | sed 's/version = "\(.*\)"/\1/') | ||||||
|  |             if [ "$CRATE_VERSION" = "$ROOT_VERSION" ]; then | ||||||
|  |               echo "✅ $crate version matches: $CRATE_VERSION" | ||||||
|  |             else | ||||||
|  |               echo "❌ $crate version mismatch: $CRATE_VERSION (expected $ROOT_VERSION)" | ||||||
|  |               exit 1 | ||||||
|  |             fi | ||||||
|  |           fi | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Test metadata completeness | ||||||
|  |       run: | | ||||||
|  |         echo "Testing metadata completeness..." | ||||||
|  |          | ||||||
|  |         # Check that all crates have required metadata | ||||||
|  |         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||||
|  |          | ||||||
|  |         for crate in "${CRATES[@]}"; do | ||||||
|  |           echo "Checking sal-$crate metadata..." | ||||||
|  |           cd "$crate" | ||||||
|  |            | ||||||
|  |           # Check required fields exist | ||||||
|  |           if ! grep -q '^name = "sal-' Cargo.toml; then | ||||||
|  |             echo "❌ $crate missing or incorrect name" | ||||||
|  |             exit 1 | ||||||
|  |           fi | ||||||
|  |            | ||||||
|  |           if ! grep -q '^description = ' Cargo.toml; then | ||||||
|  |             echo "❌ $crate missing description" | ||||||
|  |             exit 1 | ||||||
|  |           fi | ||||||
|  |            | ||||||
|  |           if ! grep -q '^repository = ' Cargo.toml; then | ||||||
|  |             echo "❌ $crate missing repository" | ||||||
|  |             exit 1 | ||||||
|  |           fi | ||||||
|  |            | ||||||
|  |           if ! grep -q '^license = ' Cargo.toml; then | ||||||
|  |             echo "❌ $crate missing license" | ||||||
|  |             exit 1 | ||||||
|  |           fi | ||||||
|  |            | ||||||
|  |           echo "✅ sal-$crate metadata complete" | ||||||
|  |           cd .. | ||||||
|  |         done | ||||||
|  |      | ||||||
|  |     - name: Test dependency resolution | ||||||
|  |       run: | | ||||||
|  |         echo "Testing dependency resolution..." | ||||||
|  |          | ||||||
|  |         # Test that all workspace dependencies resolve correctly | ||||||
|  |         cargo tree --workspace > /dev/null | ||||||
|  |         echo "✅ All dependencies resolve correctly" | ||||||
|  |          | ||||||
|  |         # Test that there are no dependency conflicts | ||||||
|  |         cargo check --workspace | ||||||
|  |         echo "✅ No dependency conflicts" | ||||||
|  |      | ||||||
|  |     - name: Generate publishing report | ||||||
|  |       if: always() | ||||||
|  |       run: | | ||||||
|  |         echo "## 🧪 Publishing Setup Test Report" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "### ✅ Tests Passed" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Workspace structure validation" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Feature configuration testing" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Dry-run publishing simulation" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Publishing script validation" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Version consistency check" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Metadata completeness verification" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "- Dependency resolution testing" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "### 📦 Ready for Publishing" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "All SAL crates are ready for publishing to crates.io!" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "**Individual Crates:** 13 modules" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "**Meta-crate:** sal with optional features" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "**Binary:** herodo script executor" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "### 🚀 Next Steps" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "1. Create a release tag (e.g., v0.1.0)" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "2. The publish workflow will automatically trigger" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "3. All crates will be published to crates.io" >> $GITHUB_STEP_SUMMARY | ||||||
|  |         echo "4. Users can install with: \`cargo add sal-os\` or \`cargo add sal --features all\`" >> $GITHUB_STEP_SUMMARY | ||||||
							
								
								
									
										5
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										5
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -62,3 +62,8 @@ docusaurus.config.ts | |||||||
| sidebars.ts | sidebars.ts | ||||||
|  |  | ||||||
| tsconfig.json | tsconfig.json | ||||||
|  | Cargo.toml.bak | ||||||
|  | for_augment | ||||||
|  |  | ||||||
|  | myenv.sh | ||||||
|  |  | ||||||
|   | |||||||
							
								
								
									
										156
									
								
								Cargo.toml
									
									
									
									
									
								
							
							
						
						
									
										156
									
								
								Cargo.toml
									
									
									
									
									
								
							| @@ -11,18 +11,40 @@ categories = ["os", "filesystem", "api-bindings"] | |||||||
| readme = "README.md" | readme = "README.md" | ||||||
|  |  | ||||||
| [workspace] | [workspace] | ||||||
| members = [".", "vault", "git", "redisclient", "mycelium", "text", "os", "net", "zinit_client", "process", "virt", "postgresclient", "rhai", "herodo", "rfs-client"] | members = [ | ||||||
|  |     "packages/clients/myceliumclient", | ||||||
|  |     "packages/clients/postgresclient", | ||||||
|  |     "packages/clients/redisclient", | ||||||
|  |     "packages/clients/zinitclient", | ||||||
|  |     "packages/clients/rfsclient", | ||||||
|  |     "packages/core/net", | ||||||
|  |     "packages/core/text", | ||||||
|  |     "packages/crypt/vault", | ||||||
|  |     "packages/data/ourdb", | ||||||
|  |     "packages/data/radixtree", | ||||||
|  |     "packages/data/tst", | ||||||
|  |     "packages/system/git", | ||||||
|  |     "packages/system/kubernetes", | ||||||
|  |     "packages/system/os", | ||||||
|  |     "packages/system/process", | ||||||
|  |     "packages/system/virt", | ||||||
|  |     "rhai", | ||||||
|  |     "rhailib", | ||||||
|  |     "herodo", | ||||||
|  |     "packages/clients/hetznerclient", | ||||||
|  |     "packages/ai/codemonkey", | ||||||
|  | ] | ||||||
| resolver = "2" | resolver = "2" | ||||||
|  |  | ||||||
| [workspace.metadata] | [workspace.metadata] | ||||||
| # Workspace-level metadata | # Workspace-level metadata | ||||||
| rust-version = "1.85.0" | rust-version = "1.70.0" | ||||||
|  |  | ||||||
| [workspace.dependencies] | [workspace.dependencies] | ||||||
| # Core shared dependencies with consistent versions | # Core shared dependencies with consistent versions | ||||||
| anyhow = "1.0.98" | anyhow = "1.0.98" | ||||||
| base64 = "0.22.1" | base64 = "0.22.1" | ||||||
| bytes = "1.4.0" | bytes = "1.7.1" | ||||||
| dirs = "6.0.0" | dirs = "6.0.0" | ||||||
| env_logger = "0.11.8" | env_logger = "0.11.8" | ||||||
| futures = "0.3.30" | futures = "0.3.30" | ||||||
| @@ -33,7 +55,7 @@ log = "0.4" | |||||||
| once_cell = "1.18.0" | once_cell = "1.18.0" | ||||||
| rand = "0.8.5" | rand = "0.8.5" | ||||||
| regex = "1.8.1" | regex = "1.8.1" | ||||||
| reqwest = { version = "0.12.15", features = ["json"] } | reqwest = { version = "0.12.15", features = ["json", "blocking"] } | ||||||
| rhai = { version = "1.12.0", features = ["sync"] } | rhai = { version = "1.12.0", features = ["sync"] } | ||||||
| serde = { version = "1.0", features = ["derive"] } | serde = { version = "1.0", features = ["derive"] } | ||||||
| serde_json = "1.0" | serde_json = "1.0" | ||||||
| @@ -54,6 +76,10 @@ chacha20poly1305 = "0.10.1" | |||||||
| k256 = { version = "0.13.4", features = ["ecdsa", "ecdh"] } | k256 = { version = "0.13.4", features = ["ecdsa", "ecdh"] } | ||||||
| sha2 = "0.10.7" | sha2 = "0.10.7" | ||||||
| hex = "0.4" | hex = "0.4" | ||||||
|  | bincode = { version = "2.0.1", features = ["serde"] } | ||||||
|  | pbkdf2 = "0.12.2" | ||||||
|  | getrandom = { version = "0.3.3", features = ["wasm_js"] } | ||||||
|  | tera = "1.19.0" | ||||||
|  |  | ||||||
| # Ethereum dependencies | # Ethereum dependencies | ||||||
| ethers = { version = "2.0.7", features = ["legacy"] } | ethers = { version = "2.0.7", features = ["legacy"] } | ||||||
| @@ -67,22 +93,114 @@ windows = { version = "0.61.1", features = [ | |||||||
| ] } | ] } | ||||||
|  |  | ||||||
| # Specialized dependencies | # Specialized dependencies | ||||||
| zinit-client = "0.3.0" | zinit-client = "0.4.0" | ||||||
| urlencoding = "2.1.3" | urlencoding = "2.1.3" | ||||||
| tokio-test = "0.4.4" | tokio-test = "0.4.4" | ||||||
|  | kube = { version = "0.95.0", features = ["client", "config", "derive"] } | ||||||
|  | k8s-openapi = { version = "0.23.0", features = ["latest"] } | ||||||
|  | tokio-retry = "0.3.0" | ||||||
|  | governor = "0.6.3" | ||||||
|  | tower = { version = "0.5.2", features = ["timeout", "limit"] } | ||||||
|  | serde_yaml = "0.9" | ||||||
|  | postgres-types = "0.2.5" | ||||||
|  | r2d2 = "0.8.10" | ||||||
|  |  | ||||||
|  | # SAL dependencies | ||||||
|  | sal-git = { path = "packages/system/git" } | ||||||
|  | sal-kubernetes = { path = "packages/system/kubernetes" } | ||||||
|  | sal-redisclient = { path = "packages/clients/redisclient" } | ||||||
|  | sal-mycelium = { path = "packages/clients/myceliumclient" } | ||||||
|  | sal-hetzner = { path = "packages/clients/hetznerclient" } | ||||||
|  | sal-rfs-client = { path = "packages/clients/rfsclient" } | ||||||
|  | sal-text = { path = "packages/core/text" } | ||||||
|  | sal-os = { path = "packages/system/os" } | ||||||
|  | sal-net = { path = "packages/core/net" } | ||||||
|  | sal-zinit-client = { path = "packages/clients/zinitclient" } | ||||||
|  | sal-process = { path = "packages/system/process" } | ||||||
|  | sal-virt = { path = "packages/system/virt" } | ||||||
|  | sal-postgresclient = { path = "packages/clients/postgresclient" } | ||||||
|  | sal-vault = { path = "packages/crypt/vault" } | ||||||
|  | sal-rhai = { path = "rhai" } | ||||||
|  | sal-service-manager = { path = "_archive/service_manager" } | ||||||
|  |  | ||||||
| [dependencies] | [dependencies] | ||||||
| thiserror = "2.0.12" # For error handling in the main Error enum | thiserror = { workspace = true } | ||||||
| sal-git = { path = "git" } | tokio = { workspace = true } | ||||||
| sal-redisclient = { path = "redisclient" } |  | ||||||
| sal-mycelium = { path = "mycelium" } | # Optional dependencies - users can choose which modules to include | ||||||
| sal-text = { path = "text" } | sal-git = { workspace = true, optional = true } | ||||||
| sal-os = { path = "os" } | sal-kubernetes = { workspace = true, optional = true } | ||||||
| sal-net = { path = "net" } | sal-redisclient = { workspace = true, optional = true } | ||||||
| sal-zinit-client = { path = "zinit_client" } | sal-mycelium = { workspace = true, optional = true } | ||||||
| sal-process = { path = "process" } | sal-hetzner = { workspace = true, optional = true } | ||||||
| sal-virt = { path = "virt" } | sal-rfs-client = { workspace = true, optional = true } | ||||||
| sal-postgresclient = { path = "postgresclient" } | sal-text = { workspace = true, optional = true } | ||||||
| sal-vault = { path = "vault" } | sal-os = { workspace = true, optional = true } | ||||||
| sal-rhai = { path = "rhai" } | sal-net = { workspace = true, optional = true } | ||||||
| sal-rfs-client = { path = "rfs-client" } | sal-zinit-client = { workspace = true, optional = true } | ||||||
|  | sal-process = { workspace = true, optional = true } | ||||||
|  | sal-virt = { workspace = true, optional = true } | ||||||
|  | sal-postgresclient = { workspace = true, optional = true } | ||||||
|  | sal-vault = { workspace = true, optional = true } | ||||||
|  | sal-rhai = { workspace = true, optional = true } | ||||||
|  | sal-service-manager = { workspace = true, optional = true } | ||||||
|  |  | ||||||
|  | [features] | ||||||
|  | default = [] | ||||||
|  |  | ||||||
|  | # Individual module features | ||||||
|  | git = ["dep:sal-git"] | ||||||
|  | kubernetes = ["dep:sal-kubernetes"] | ||||||
|  | redisclient = ["dep:sal-redisclient"] | ||||||
|  | mycelium = ["dep:sal-mycelium"] | ||||||
|  | hetzner = ["dep:sal-hetzner"] | ||||||
|  | rfsclient = ["dep:sal-rfs-client"] | ||||||
|  | text = ["dep:sal-text"] | ||||||
|  | os = ["dep:sal-os"] | ||||||
|  | net = ["dep:sal-net"] | ||||||
|  | zinit_client = ["dep:sal-zinit-client"] | ||||||
|  | process = ["dep:sal-process"] | ||||||
|  | virt = ["dep:sal-virt"] | ||||||
|  | postgresclient = ["dep:sal-postgresclient"] | ||||||
|  | vault = ["dep:sal-vault"] | ||||||
|  | rhai = ["dep:sal-rhai"] | ||||||
|  | # service_manager is removed as it's not a direct member anymore | ||||||
|  |  | ||||||
|  | # Convenience feature groups | ||||||
|  | core = ["os", "process", "text", "net"] | ||||||
|  | clients = ["redisclient", "postgresclient", "zinit_client", "mycelium", "hetzner", "rfsclient"] | ||||||
|  | infrastructure = ["git", "vault", "kubernetes", "virt"] | ||||||
|  | scripting = ["rhai"] | ||||||
|  | all = [ | ||||||
|  |     "git", | ||||||
|  |     "kubernetes", | ||||||
|  |     "redisclient", | ||||||
|  |     "mycelium", | ||||||
|  |     "hetzner", | ||||||
|  |     "rfsclient", | ||||||
|  |     "text", | ||||||
|  |     "os", | ||||||
|  |     "net", | ||||||
|  |     "zinit_client", | ||||||
|  |     "process", | ||||||
|  |     "virt", | ||||||
|  |     "postgresclient", | ||||||
|  |     "vault", | ||||||
|  |     "rhai", | ||||||
|  | ] | ||||||
|  |  | ||||||
|  | # Examples | ||||||
|  | [[example]] | ||||||
|  | name = "postgres_cluster" | ||||||
|  | path = "examples/kubernetes/clusters/postgres.rs" | ||||||
|  | required-features = ["kubernetes"] | ||||||
|  |  | ||||||
|  | [[example]] | ||||||
|  | name = "redis_cluster" | ||||||
|  | path = "examples/kubernetes/clusters/redis.rs" | ||||||
|  | required-features = ["kubernetes"] | ||||||
|  |  | ||||||
|  | [[example]] | ||||||
|  | name = "generic_cluster" | ||||||
|  | path = "examples/kubernetes/clusters/generic.rs" | ||||||
|  | required-features = ["kubernetes"] | ||||||
|   | |||||||
							
								
								
									
										239
									
								
								PUBLISHING.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										239
									
								
								PUBLISHING.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,239 @@ | |||||||
|  | # SAL Publishing Guide | ||||||
|  |  | ||||||
|  | This guide explains how to publish SAL crates to crates.io and how users can consume them. | ||||||
|  |  | ||||||
|  | ## 🎯 Publishing Strategy | ||||||
|  |  | ||||||
|  | SAL uses a **modular publishing approach** where each module is published as an individual crate. This allows users to install only the functionality they need, reducing compilation time and binary size. | ||||||
|  |  | ||||||
|  | ## 📦 Crate Structure | ||||||
|  |  | ||||||
|  | ### Individual Crates | ||||||
|  |  | ||||||
|  | Each SAL module is published as a separate crate: | ||||||
|  |  | ||||||
|  | | Crate Name | Description | Category | | ||||||
|  | |------------|-------------|----------| | ||||||
|  | | `sal-os` | Operating system operations | Core | | ||||||
|  | | `sal-process` | Process management | Core | | ||||||
|  | | `sal-text` | Text processing utilities | Core | | ||||||
|  | | `sal-net` | Network operations | Core | | ||||||
|  | | `sal-git` | Git repository management | Infrastructure | | ||||||
|  | | `sal-vault` | Cryptographic operations | Infrastructure | | ||||||
|  | | `sal-kubernetes` | Kubernetes cluster management | Infrastructure | | ||||||
|  | | `sal-virt` | Virtualization tools (Buildah, nerdctl) | Infrastructure | | ||||||
|  | | `sal-redisclient` | Redis database client | Clients | | ||||||
|  | | `sal-postgresclient` | PostgreSQL database client | Clients | | ||||||
|  | | `sal-zinit-client` | Zinit process supervisor client | Clients | | ||||||
|  | | `sal-mycelium` | Mycelium network client | Clients | | ||||||
|  | | `sal-rhai` | Rhai scripting integration | Scripting | | ||||||
|  |  | ||||||
|  | ### Meta-crate | ||||||
|  |  | ||||||
|  | The main `sal` crate serves as a meta-crate that re-exports all modules with optional features: | ||||||
|  |  | ||||||
|  | ```toml | ||||||
|  | [dependencies] | ||||||
|  | sal = { version = "0.1.0", features = ["os", "process", "text"] } | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ## 🚀 Publishing Process | ||||||
|  |  | ||||||
|  | ### Prerequisites | ||||||
|  |  | ||||||
|  | 1. **Crates.io Account**: Ensure you have a crates.io account and API token | ||||||
|  | 2. **Repository Access**: Ensure the repository URL is accessible | ||||||
|  | 3. **Version Consistency**: All crates should use the same version number | ||||||
|  |  | ||||||
|  | ### Publishing Individual Crates | ||||||
|  |  | ||||||
|  | Each crate can be published independently: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Publish core modules | ||||||
|  | cd os && cargo publish | ||||||
|  | cd ../process && cargo publish | ||||||
|  | cd ../text && cargo publish | ||||||
|  | cd ../net && cargo publish | ||||||
|  |  | ||||||
|  | # Publish infrastructure modules | ||||||
|  | cd ../git && cargo publish | ||||||
|  | cd ../vault && cargo publish | ||||||
|  | cd ../kubernetes && cargo publish | ||||||
|  | cd ../virt && cargo publish | ||||||
|  |  | ||||||
|  | # Publish client modules | ||||||
|  | cd ../redisclient && cargo publish | ||||||
|  | cd ../postgresclient && cargo publish | ||||||
|  | cd ../zinit_client && cargo publish | ||||||
|  | cd ../mycelium && cargo publish | ||||||
|  |  | ||||||
|  | # Publish scripting module | ||||||
|  | cd ../rhai && cargo publish | ||||||
|  |  | ||||||
|  | # Finally, publish the meta-crate | ||||||
|  | cd .. && cargo publish | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### Automated Publishing | ||||||
|  |  | ||||||
|  | Use the comprehensive publishing script: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Test the publishing process (safe) | ||||||
|  | ./scripts/publish-all.sh --dry-run --version 0.1.0 | ||||||
|  |  | ||||||
|  | # Actually publish to crates.io | ||||||
|  | ./scripts/publish-all.sh --version 0.1.0 | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | The script handles: | ||||||
|  | - ✅ **Dependency order** - Publishes crates in correct dependency order | ||||||
|  | - ✅ **Path dependencies** - Automatically updates path deps to version deps | ||||||
|  | - ✅ **Rate limiting** - Waits between publishes to avoid rate limits | ||||||
|  | - ✅ **Error handling** - Stops on failures with clear error messages | ||||||
|  | - ✅ **Dry run mode** - Test without actually publishing | ||||||
|  |  | ||||||
|  | ## 👥 User Consumption | ||||||
|  |  | ||||||
|  | ### Installation Options | ||||||
|  |  | ||||||
|  | #### Option 1: Individual Crates (Recommended) | ||||||
|  |  | ||||||
|  | Users install only what they need: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Core functionality | ||||||
|  | cargo add sal-os sal-process sal-text sal-net | ||||||
|  |  | ||||||
|  | # Database operations | ||||||
|  | cargo add sal-redisclient sal-postgresclient | ||||||
|  |  | ||||||
|  | # Infrastructure management | ||||||
|  | cargo add sal-git sal-vault sal-kubernetes | ||||||
|  |  | ||||||
|  | # Service integration | ||||||
|  | cargo add sal-zinit-client sal-mycelium | ||||||
|  |  | ||||||
|  | # Scripting | ||||||
|  | cargo add sal-rhai | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | **Usage:** | ||||||
|  | ```rust | ||||||
|  | use sal_os::fs; | ||||||
|  | use sal_process::run; | ||||||
|  | use sal_git::GitManager; | ||||||
|  |  | ||||||
|  | fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|  |     let files = fs::list_files(".")?; | ||||||
|  |     let result = run::command("echo hello")?; | ||||||
|  |     let git = GitManager::new(".")?; | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | #### Option 2: Meta-crate with Features | ||||||
|  |  | ||||||
|  | Users can use the main crate with selective features: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Specific modules | ||||||
|  | cargo add sal --features os,process,text | ||||||
|  |  | ||||||
|  | # Feature groups | ||||||
|  | cargo add sal --features core              # os, process, text, net | ||||||
|  | cargo add sal --features clients           # redisclient, postgresclient, zinit_client, mycelium | ||||||
|  | cargo add sal --features infrastructure    # git, vault, kubernetes, virt | ||||||
|  | cargo add sal --features scripting         # rhai | ||||||
|  |  | ||||||
|  | # Everything | ||||||
|  | cargo add sal --features all | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | **Usage:** | ||||||
|  | ```rust | ||||||
|  | // Cargo.toml: sal = { version = "0.1.0", features = ["os", "process", "git"] } | ||||||
|  | use sal::os::fs; | ||||||
|  | use sal::process::run; | ||||||
|  | use sal::git::GitManager; | ||||||
|  |  | ||||||
|  | fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|  |     let files = fs::list_files(".")?; | ||||||
|  |     let result = run::command("echo hello")?; | ||||||
|  |     let git = GitManager::new(".")?; | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### Feature Groups | ||||||
|  |  | ||||||
|  | The meta-crate provides convenient feature groups: | ||||||
|  |  | ||||||
|  | - **`core`**: Essential system operations (os, process, text, net) | ||||||
|  | - **`clients`**: Database and service clients (redisclient, postgresclient, zinit_client, mycelium) | ||||||
|  | - **`infrastructure`**: Infrastructure management tools (git, vault, kubernetes, virt) | ||||||
|  | - **`scripting`**: Rhai scripting support (rhai) | ||||||
|  | - **`all`**: Everything included | ||||||
|  |  | ||||||
|  | ## 📋 Version Management | ||||||
|  |  | ||||||
|  | ### Semantic Versioning | ||||||
|  |  | ||||||
|  | All SAL crates follow semantic versioning: | ||||||
|  |  | ||||||
|  | - **Major version**: Breaking API changes | ||||||
|  | - **Minor version**: New features, backward compatible | ||||||
|  | - **Patch version**: Bug fixes, backward compatible | ||||||
|  |  | ||||||
|  | ### Synchronized Releases | ||||||
|  |  | ||||||
|  | All crates are released with the same version number to ensure compatibility: | ||||||
|  |  | ||||||
|  | ```toml | ||||||
|  | # All crates use the same version | ||||||
|  | sal-os = "0.1.0" | ||||||
|  | sal-process = "0.1.0" | ||||||
|  | sal-git = "0.1.0" | ||||||
|  | # etc. | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ## 🔧 Maintenance | ||||||
|  |  | ||||||
|  | ### Updating Dependencies | ||||||
|  |  | ||||||
|  | When updating dependencies: | ||||||
|  |  | ||||||
|  | 1. Update `Cargo.toml` in the workspace root | ||||||
|  | 2. Update individual crate dependencies if needed | ||||||
|  | 3. Test all crates: `cargo test --workspace` | ||||||
|  | 4. Publish with incremented version numbers | ||||||
|  |  | ||||||
|  | ### Adding New Modules | ||||||
|  |  | ||||||
|  | To add a new SAL module: | ||||||
|  |  | ||||||
|  | 1. Create the new crate directory | ||||||
|  | 2. Add to workspace members in root `Cargo.toml` | ||||||
|  | 3. Add optional dependency in root `Cargo.toml` | ||||||
|  | 4. Add feature flag in root `Cargo.toml` | ||||||
|  | 5. Add conditional re-export in `src/lib.rs` | ||||||
|  | 6. Update documentation | ||||||
|  |  | ||||||
|  | ## 🎉 Benefits | ||||||
|  |  | ||||||
|  | ### For Users | ||||||
|  |  | ||||||
|  | - **Minimal Dependencies**: Install only what you need | ||||||
|  | - **Faster Builds**: Smaller dependency trees compile faster | ||||||
|  | - **Smaller Binaries**: Reduced binary size | ||||||
|  | - **Clear Dependencies**: Explicit about what functionality is used | ||||||
|  |  | ||||||
|  | ### For Maintainers | ||||||
|  |  | ||||||
|  | - **Independent Releases**: Can release individual crates as needed | ||||||
|  | - **Focused Testing**: Test individual modules in isolation | ||||||
|  | - **Clear Ownership**: Each crate has clear responsibility | ||||||
|  | - **Easier Maintenance**: Smaller, focused codebases | ||||||
|  |  | ||||||
|  | This publishing strategy provides the best of both worlds: modularity for users who want minimal dependencies, and convenience for users who prefer a single crate with features. | ||||||
							
								
								
									
										298
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										298
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,228 +1,136 @@ | |||||||
| # SAL (System Abstraction Layer) | # Herocode Herolib Rust Repository | ||||||
|  |  | ||||||
| **Version: 0.1.0** | ## Overview | ||||||
|  |  | ||||||
| SAL is a comprehensive Rust library designed to provide a unified and simplified interface for a wide array of system-level operations and interactions. It abstracts platform-specific details, enabling developers to write robust, cross-platform code with greater ease. SAL also includes `herodo`, a powerful command-line tool for executing Rhai scripts that leverage SAL's capabilities for automation and system management tasks. | This repository contains the **Herocode Herolib** Rust library and a collection of scripts, examples, and utilities for building, testing, and publishing the SAL (System Abstraction Layer) crates. The repository includes: | ||||||
|  |  | ||||||
| ## 🏗️ **Cargo Workspace Structure** | - **Rust crates** for various system components (e.g., `os`, `process`, `text`, `git`, `vault`, `kubernetes`, etc.). | ||||||
|  | - **Rhai scripts** and test suites for each crate. | ||||||
|  | - **Utility scripts** to automate common development tasks. | ||||||
|  |  | ||||||
| SAL is organized as a **Cargo workspace** with 16 specialized crates: | ## Scripts | ||||||
|  |  | ||||||
| - **Root Package**: `sal` - Umbrella crate that re-exports all modules | The repository provides three primary helper scripts located in the repository root: | ||||||
| - **13 Library Crates**: Specialized SAL modules (git, text, os, net, etc.) |  | ||||||
| - **1 Binary Crate**: `herodo` - Rhai script execution engine |  | ||||||
| - **1 Integration Crate**: `rhai` - Rhai scripting integration layer |  | ||||||
|  |  | ||||||
| This workspace structure provides excellent build performance, dependency management, and maintainability. | | Script | Description | Typical Usage | | ||||||
|  | |--------|-------------|--------------| | ||||||
|  | | `scripts/publish-all.sh` | Publishes all SAL crates to **crates.io** in the correct dependency order. Handles version bumping, dependency updates, dry‑run mode, and rate‑limiting. | `./scripts/publish-all.sh [--dry-run] [--wait <seconds>] [--version <ver>]` | | ||||||
|  | | `build_herodo.sh` | Builds the `herodo` binary from the `herodo` package and optionally runs a specified Rhai script. | `./build_herodo.sh [script_name]` | | ||||||
|  | | `run_rhai_tests.sh` | Executes all Rhai test suites across the repository, logging results and providing a summary. | `./run_rhai_tests.sh` | | ||||||
|  |  | ||||||
| ### **🚀 Workspace Benefits** | Below are detailed usage instructions for each script. | ||||||
| - **Unified Dependency Management**: Shared dependencies across all crates with consistent versions |  | ||||||
| - **Optimized Build Performance**: Parallel compilation and shared build artifacts |  | ||||||
| - **Simplified Testing**: Run tests across all modules with a single command |  | ||||||
| - **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure |  | ||||||
| - **Production Ready**: 100% test coverage with comprehensive Rhai integration tests |  | ||||||
|  |  | ||||||
| ## Core Features | --- | ||||||
|  |  | ||||||
| SAL offers a broad spectrum of functionalities, including: | ## 1. `scripts/publish-all.sh` | ||||||
|  |  | ||||||
| - **System Operations**: File and directory management, environment variable access, system information retrieval, and OS-specific commands. | ### Purpose | ||||||
| - **Process Management**: Create, monitor, control, and interact with system processes. |  | ||||||
| - **Containerization Tools**:  |  | ||||||
|     - Integration with **Buildah** for building OCI/Docker-compatible container images. |  | ||||||
|     - Integration with **nerdctl** for managing containers (run, stop, list, build, etc.). |  | ||||||
| - **Version Control**: Programmatic interaction with Git repositories (clone, commit, push, pull, status, etc.). |  | ||||||
| - **Database Clients**: |  | ||||||
|     - **Redis**: Robust client for interacting with Redis servers. |  | ||||||
|     - **PostgreSQL**: Client for executing queries and managing PostgreSQL databases. |  | ||||||
| - **Scripting Engine**: In-built support for the **Rhai** scripting language, allowing SAL functionalities to be scripted and automated, primarily through the `herodo` tool. |  | ||||||
| - **Networking & Services**: |  | ||||||
|     - **Mycelium**: Tools for Mycelium network peer management and message passing. |  | ||||||
|     - **Zinit**: Client for interacting with the Zinit process supervision system. |  | ||||||
|     - **RFS (Remote/Virtual Filesystem)**: Mount, manage, pack, and unpack various types of filesystems (local, SSH, S3, WebDAV). |  | ||||||
| - **Text Processing**: A suite of utilities for text manipulation, formatting, and regular expressions. |  | ||||||
| - **Cryptography (`vault`)**: Functions for common cryptographic operations. |  | ||||||
|  |  | ||||||
| ## `herodo`: The SAL Scripting Tool | - Publishes each SAL crate in the correct dependency order. | ||||||
|  | - Updates crate versions (if `--version` is supplied). | ||||||
|  | - Updates path dependencies to version dependencies before publishing. | ||||||
|  | - Supports **dry‑run** mode to preview actions without publishing. | ||||||
|  | - Handles rate‑limiting between crate publishes. | ||||||
|  |  | ||||||
| `herodo` is a command-line utility bundled with SAL that executes Rhai scripts. It empowers users to automate tasks and orchestrate complex workflows by leveraging SAL's diverse modules directly from scripts. | ### Options | ||||||
|  |  | ||||||
|  | | Option | Description | | ||||||
|  | |--------|-------------| | ||||||
|  | | `--dry-run` | Shows what would be published without actually publishing. | | ||||||
|  | | `--wait <seconds>` | Wait time between publishes (default: 15 s). | | ||||||
|  | | `--version <ver>` | Set a new version for all crates (updates `Cargo.toml` files). | | ||||||
|  | | `-h, --help` | Show help message. | | ||||||
|  |  | ||||||
|  | ### Example Usage | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Dry run – no crates will be published | ||||||
|  | ./scripts/publish-all.sh --dry-run | ||||||
|  |  | ||||||
|  | # Publish with a custom wait time and version bump | ||||||
|  | ./scripts/publish-all.sh --wait 30 --version 1.2.3 | ||||||
|  |  | ||||||
|  | # Normal publish (no dry‑run) | ||||||
|  | ./scripts/publish-all.sh | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### Notes | ||||||
|  |  | ||||||
|  | - Must be run from the repository root (where `Cargo.toml` lives). | ||||||
|  | - Requires `cargo` and a logged‑in `cargo` session (`cargo login`). | ||||||
|  | - The script automatically updates dependencies in each crate’s `Cargo.toml` to use the new version before publishing. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 2. `build_herodo.sh` | ||||||
|  |  | ||||||
|  | ### Purpose | ||||||
|  |  | ||||||
|  | - Builds the `herodo` binary from the `herodo` package. | ||||||
|  | - Copies the binary to a system‑wide location (`/usr/local/bin`) if run as root, otherwise to `~/hero/bin`. | ||||||
|  | - Optionally runs a specified Rhai script after building. | ||||||
|  |  | ||||||
| ### Usage | ### Usage | ||||||
|  |  | ||||||
| ```bash | ```bash | ||||||
| # Execute a single Rhai script | # Build only | ||||||
| herodo script.rhai | ./build_herodo.sh | ||||||
|  |  | ||||||
| # Execute a script with arguments | # Build and run a specific Rhai script (e.g., `example`): | ||||||
| herodo script.rhai arg1 arg2 | ./build_herodo.sh example | ||||||
|  |  | ||||||
| # Execute all .rhai scripts in a directory |  | ||||||
| herodo /path/to/scripts/ |  | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
| If a directory is provided, `herodo` will execute all `.rhai` scripts within that directory (and its subdirectories) in alphabetical order. | ### Details | ||||||
|  |  | ||||||
| ### Scriptable SAL Modules via `herodo` | - The script changes to its own directory, builds the `herodo` crate (`cargo build`), and copies the binary. | ||||||
|  | - If a script name is provided, it looks for the script in: | ||||||
|  |   - `src/rhaiexamples/<name>.rhai` | ||||||
|  |   - `src/herodo/scripts/<name>.rhai` | ||||||
|  | - If the script is not found, the script exits with an error. | ||||||
|  |  | ||||||
| The following SAL modules and functionalities are exposed to the Rhai scripting environment through `herodo`: | --- | ||||||
|  |  | ||||||
| - **OS (`os`)**: Comprehensive file system operations, file downloading & installation, and system package management. [Documentation](os/README.md) | ## 3. `run_rhai_tests.sh` | ||||||
| - **Process (`process`)**: Robust command and script execution, plus process management (listing, finding, killing, checking command existence). [Documentation](process/README.md) |  | ||||||
| - **Text (`text`)**: String manipulation, prefixing, path/name fixing, text replacement, and templating. [Documentation](text/README.md) |  | ||||||
| - **Net (`net`)**: Network operations, HTTP requests, and connectivity utilities. [Documentation](net/README.md) |  | ||||||
| - **Git (`git`)**: High-level repository management and generic Git command execution with Redis-backed authentication (clone, pull, push, commit, etc.). [Documentation](git/README.md) |  | ||||||
| - **Vault (`vault`)**: Cryptographic operations, keypair management, encryption, decryption, hashing, etc. [Documentation](vault/README.md) |  | ||||||
| - **Redis Client (`redisclient`)**: Execute Redis commands (`redis_get`, `redis_set`, `redis_execute`, etc.). [Documentation](redisclient/README.md) |  | ||||||
| - **PostgreSQL Client (`postgresclient`)**: Execute SQL queries against PostgreSQL databases. [Documentation](postgresclient/README.md) |  | ||||||
| - **Zinit (`zinit_client`)**: Client for Zinit process supervisor (service management, logs). [Documentation](zinit_client/README.md) |  | ||||||
| - **Mycelium (`mycelium`)**: Client for Mycelium decentralized networking API (node info, peer management, messaging). [Documentation](mycelium/README.md) |  | ||||||
| - **Virtualization (`virt`)**: |  | ||||||
|   - **Buildah**: OCI/Docker image building functions. [Documentation](virt/README.md) |  | ||||||
|   - **nerdctl**: Container lifecycle management (`nerdctl_run`, `nerdctl_stop`, `nerdctl_images`, `nerdctl_image_build`, etc.) |  | ||||||
|   - **RFS**: Mount various filesystems (local, SSH, S3, etc.), pack/unpack filesystem layers. |  | ||||||
|  |  | ||||||
| ### Example `herodo` Rhai Script | ### Purpose | ||||||
|  |  | ||||||
| ```rhai | - Runs **all** Rhai test suites across the repository. | ||||||
| // file: /opt/scripts/example_task.rhai | - Supports both the legacy `rhai_tests` directory and the newer `*/tests/rhai` layout. | ||||||
|  | - Logs output to `run_rhai_tests.log` and prints a summary. | ||||||
|  |  | ||||||
| // OS operations | ### Usage | ||||||
| println("Checking for /tmp/my_app_data..."); |  | ||||||
| if !exist("/tmp/my_app_data") { |  | ||||||
|     mkdir("/tmp/my_app_data"); |  | ||||||
|     println("Created directory /tmp/my_app_data"); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // Redis operations |  | ||||||
| println("Setting Redis key 'app_status' to 'running'"); |  | ||||||
| redis_set("app_status", "running"); |  | ||||||
| let status = redis_get("app_status"); |  | ||||||
| println("Current app_status from Redis: " + status); |  | ||||||
|  |  | ||||||
| // Process execution |  | ||||||
| println("Listing files in /tmp:"); |  | ||||||
| let output = run("ls -la /tmp"); |  | ||||||
| println(output.stdout); |  | ||||||
|  |  | ||||||
| println("Script finished."); |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| Run with: `herodo /opt/scripts/example_task.rhai` |  | ||||||
|  |  | ||||||
| For more examples, check the individual module test directories (e.g., `text/tests/rhai/`, `os/tests/rhai/`, etc.) in this repository. |  | ||||||
|  |  | ||||||
| ## Using SAL as a Rust Library |  | ||||||
|  |  | ||||||
| Add SAL as a dependency to your `Cargo.toml`: |  | ||||||
|  |  | ||||||
| ```toml |  | ||||||
| [dependencies] |  | ||||||
| sal = "0.1.0" # Or the latest version |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| ### Rust Example: Using Redis Client |  | ||||||
|  |  | ||||||
| ```rust |  | ||||||
| use sal::redisclient::{get_global_client, execute_cmd_with_args}; |  | ||||||
| use redis::RedisResult; |  | ||||||
|  |  | ||||||
| async fn example_redis_interaction() -> RedisResult<()> { |  | ||||||
|     // Get a connection from the global pool |  | ||||||
|     let mut conn = get_global_client().await?.get_async_connection().await?; |  | ||||||
|  |  | ||||||
|     // Set a value |  | ||||||
|     execute_cmd_with_args(&mut conn, "SET", vec!["my_key", "my_value"]).await?; |  | ||||||
|     println!("Set 'my_key' to 'my_value'"); |  | ||||||
|  |  | ||||||
|     // Get a value |  | ||||||
|     let value: String = execute_cmd_with_args(&mut conn, "GET", vec!["my_key"]).await?; |  | ||||||
|     println!("Retrieved value for 'my_key': {}", value); |  | ||||||
|  |  | ||||||
|     Ok(()) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| #[tokio::main] |  | ||||||
| async fn main() { |  | ||||||
|     if let Err(e) = example_redis_interaction().await { |  | ||||||
|         eprintln!("Redis Error: {}", e); |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| ``` |  | ||||||
| *(Note: The Redis client API might have evolved; please refer to `src/redisclient/mod.rs` and its documentation for the most current usage.)* |  | ||||||
|  |  | ||||||
| ## 📦 **Workspace Modules Overview** |  | ||||||
|  |  | ||||||
| SAL is organized as a Cargo workspace with the following crates: |  | ||||||
|  |  | ||||||
| ### **Core Library Modules** |  | ||||||
| - **`sal-os`**: Core OS interactions, file system operations, environment access |  | ||||||
| - **`sal-process`**: Process creation, management, and control |  | ||||||
| - **`sal-text`**: Utilities for text processing and manipulation |  | ||||||
| - **`sal-net`**: Network operations, HTTP requests, and connectivity utilities |  | ||||||
|  |  | ||||||
| ### **Integration Modules** |  | ||||||
| - **`sal-git`**: Git repository management and operations |  | ||||||
| - **`sal-vault`**: Cryptographic functions and keypair management |  | ||||||
| - **`sal-rhai`**: Integration layer for the Rhai scripting engine, used by `herodo` |  | ||||||
|  |  | ||||||
| ### **Client Modules** |  | ||||||
| - **`sal-redisclient`**: Client for Redis database interactions |  | ||||||
| - **`sal-postgresclient`**: Client for PostgreSQL database interactions |  | ||||||
| - **`sal-zinit-client`**: Client for Zinit process supervisor |  | ||||||
| - **`sal-mycelium`**: Client for Mycelium network operations |  | ||||||
|  |  | ||||||
| ### **Specialized Modules** |  | ||||||
| - **`sal-virt`**: Virtualization-related utilities (buildah, nerdctl, rfs) |  | ||||||
|  |  | ||||||
| ### **Root Package & Binary** |  | ||||||
| - **`sal`**: Root umbrella crate that re-exports all modules |  | ||||||
| - **`herodo`**: Command-line binary for executing Rhai scripts |  | ||||||
|  |  | ||||||
| ## 🔨 **Building SAL** |  | ||||||
|  |  | ||||||
| Build the entire workspace (all crates) using Cargo: |  | ||||||
|  |  | ||||||
| ```bash | ```bash | ||||||
| # Build all workspace members | # Run all tests | ||||||
| cargo build --workspace |  | ||||||
|  |  | ||||||
| # Build for release |  | ||||||
| cargo build --workspace --release |  | ||||||
|  |  | ||||||
| # Build specific crate |  | ||||||
| cargo build -p sal-text |  | ||||||
| cargo build -p herodo |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| The `herodo` executable will be located at `target/debug/herodo` or `target/release/herodo`. |  | ||||||
|  |  | ||||||
| ## 🧪 **Running Tests** |  | ||||||
|  |  | ||||||
| ### **Rust Unit Tests** |  | ||||||
| ```bash |  | ||||||
| # Run all workspace tests |  | ||||||
| cargo test --workspace |  | ||||||
|  |  | ||||||
| # Run tests for specific crate |  | ||||||
| cargo test -p sal-text |  | ||||||
| cargo test -p sal-os |  | ||||||
|  |  | ||||||
| # Run only library tests (faster) |  | ||||||
| cargo test --workspace --lib |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| ### **Rhai Integration Tests** |  | ||||||
| Run comprehensive Rhai script tests that exercise `herodo` and SAL's scripted functionalities: |  | ||||||
|  |  | ||||||
| ```bash |  | ||||||
| # Run all Rhai integration tests (16 modules) |  | ||||||
| ./run_rhai_tests.sh | ./run_rhai_tests.sh | ||||||
|  |  | ||||||
| # Results: 16/16 modules pass with 100% success rate |  | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
| The Rhai tests validate real-world functionality across all SAL modules and provide comprehensive integration testing. | ### Output | ||||||
|  |  | ||||||
|  | - Colored console output for readability. | ||||||
|  | - Log file (`run_rhai_tests.log`) contains full output for later review. | ||||||
|  | - Summary includes total modules, passed, and failed counts. | ||||||
|  | - Exit code `0` if all tests pass, `1` otherwise. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## General Development Workflow | ||||||
|  |  | ||||||
|  | 1. **Build**: Use `build_herodo.sh` to compile the `herodo` binary. | ||||||
|  | 2. **Test**: Run `run_rhai_tests.sh` to ensure all Rhai scripts pass. | ||||||
|  | 3. **Publish**: When ready to release, use `scripts/publish-all.sh` (with `--dry-run` first to verify). | ||||||
|  |  | ||||||
|  | ## Prerequisites | ||||||
|  |  | ||||||
|  | - **Rust toolchain** (`cargo`, `rustc`) installed. | ||||||
|  | - **Rhai** interpreter (`herodo`) built and available. | ||||||
|  | - **Git** for version control. | ||||||
|  | - **Cargo login** for publishing to crates.io. | ||||||
|  |  | ||||||
| ## License | ## License | ||||||
|  |  | ||||||
| SAL is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details. | See `LICENSE` for details. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | **Happy coding!** | ||||||
|   | |||||||
							
								
								
									
										0
									
								
								cargo_instructions.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								cargo_instructions.md
									
									
									
									
									
										Normal file
									
								
							
							
								
								
									
										14
									
								
								config/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										14
									
								
								config/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,14 @@ | |||||||
|  | # Environment Configuration | ||||||
|  |  | ||||||
|  | To set up your environment variables: | ||||||
|  |  | ||||||
|  | 1. Copy the template file to `env.sh`: | ||||||
|  |  | ||||||
|  |     ```bash | ||||||
|  |     cp config/myenv_templ.sh config/env.sh | ||||||
|  |     ``` | ||||||
|  |  | ||||||
|  | 2. Edit `config/env.sh` and fill in your specific values for the variables. | ||||||
|  |  | ||||||
|  | 3. This file (`config/env.sh`) is excluded from version control by the project's `.gitignore` configuration, ensuring your sensitive information remains local and is never committed to the repository. | ||||||
|  |  | ||||||
							
								
								
									
										6
									
								
								config/myenv_templ.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								config/myenv_templ.sh
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,6 @@ | |||||||
# Template for local environment configuration.
# Copy this file to config/env.sh (which is git-ignored) and fill in real values:
#   cp config/myenv_templ.sh config/env.sh

export OPENROUTER_API_KEY=""
export GROQ_API_KEY=""
export CEREBRAS_API_KEY=""
# Placeholder shows the expected "sk-..." format — replace with your real key.
export OPENAI_API_KEY="sk-xxxxxxx"
| @@ -1,64 +1,76 @@ | |||||||
| # Hero Vault Cryptography Examples | # SAL Vault Examples | ||||||
|  |  | ||||||
| This directory contains examples demonstrating the Hero Vault cryptography functionality integrated into the SAL project. | This directory contains examples demonstrating the SAL Vault functionality. | ||||||
|  |  | ||||||
| ## Overview | ## Overview | ||||||
|  |  | ||||||
| Hero Vault provides cryptographic operations including: | SAL Vault provides secure key management and cryptographic operations including: | ||||||
|  |  | ||||||
| - Key space management (creation, loading, encryption, decryption) | - Vault creation and management | ||||||
| - Keypair management (creation, selection, listing) | - KeySpace operations (encrypted key-value stores) | ||||||
| - Digital signatures (signing and verification) | - Symmetric key generation and operations | ||||||
| - Symmetric encryption (key generation, encryption, decryption) | - Asymmetric key operations (signing and verification) | ||||||
| - Ethereum wallet functionality | - Secure key derivation from passwords | ||||||
| - Smart contract interactions |  | ||||||
| - Key-value store with encryption |  | ||||||
|  |  | ||||||
| ## Example Files | ## Current Status | ||||||
|  |  | ||||||
| - `example.rhai` - Basic example demonstrating key management, signing, and encryption | ⚠️ **Note**: The vault module is currently being updated to use Lee's implementation. | ||||||
| - `advanced_example.rhai` - Advanced example with error handling, conditional logic, and more complex operations | The Rhai scripting integration is temporarily disabled while we adapt the examples | ||||||
| - `key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk | to work with the new vault API. | ||||||
| - `load_existing_space.rhai` - Shows how to load a previously created key space and use its keypairs |  | ||||||
| - `contract_example.rhai` - Demonstrates loading a contract ABI and interacting with smart contracts |  | ||||||
| - `agung_send_transaction.rhai` - Demonstrates sending native tokens on the Agung network |  | ||||||
| - `agung_contract_with_args.rhai` - Shows how to interact with contracts with arguments on Agung |  | ||||||
|  |  | ||||||
| ## Running the Examples | ## Available Operations | ||||||
|  |  | ||||||
| You can run the examples using the `herodo` tool that comes with the SAL project: | - **Vault Management**: Create and manage vault instances | ||||||
|  | - **KeySpace Operations**: Open encrypted key-value stores within vaults | ||||||
|  | - **Symmetric Encryption**: Generate keys and encrypt/decrypt data | ||||||
|  | - **Asymmetric Operations**: Create keypairs, sign messages, verify signatures | ||||||
|  |  | ||||||
| ```bash | ## Example Files (Legacy - Sameh's Implementation) | ||||||
| # Run a single example |  | ||||||
| herodo --path example.rhai |  | ||||||
|  |  | ||||||
| # Run all examples using the provided script | ⚠️ **These examples are currently archived and use the previous vault implementation**: | ||||||
| ./run_examples.sh |  | ||||||
|  | - `_archive/example.rhai` - Basic example demonstrating key management, signing, and encryption | ||||||
|  | - `_archive/advanced_example.rhai` - Advanced example with error handling and complex operations | ||||||
|  | - `_archive/key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk | ||||||
|  | - `_archive/load_existing_space.rhai` - Shows how to load a previously created key space | ||||||
|  | - `_archive/contract_example.rhai` - Demonstrates smart contract interactions (Ethereum) | ||||||
|  | - `_archive/agung_send_transaction.rhai` - Demonstrates Ethereum transactions on Agung network | ||||||
|  | - `_archive/agung_contract_with_args.rhai` - Shows contract interactions with arguments | ||||||
|  |  | ||||||
|  | ## Current Implementation (Lee's Vault) | ||||||
|  |  | ||||||
|  | The current vault implementation provides: | ||||||
|  |  | ||||||
|  | ```rust | ||||||
|  | // Create a new vault | ||||||
|  | let vault = Vault::new(&path).await?; | ||||||
|  |  | ||||||
|  | // Open an encrypted keyspace | ||||||
|  | let keyspace = vault.open_keyspace("my_space", "password").await?; | ||||||
|  |  | ||||||
|  | // Perform cryptographic operations | ||||||
|  | // (API documentation coming soon) | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
| ## Key Space Storage | ## Migration Status | ||||||
|  |  | ||||||
| Key spaces are stored in the `~/.hero-vault/key-spaces/` directory by default. Each key space is stored in a separate JSON file named after the key space (e.g., `my_space.json`). | - ✅ **Vault Core**: Lee's implementation is active | ||||||
|  | - ✅ **Archive**: Sameh's implementation preserved in `vault/_archive/` | ||||||
| ## Ethereum Functionality | - ⏳ **Rhai Integration**: Being developed for Lee's implementation | ||||||
|  | - ⏳ **Examples**: Will be updated to use Lee's API | ||||||
| The Hero Vault module provides comprehensive Ethereum wallet functionality: | - ❌ **Ethereum Features**: Not available in Lee's implementation | ||||||
|  |  | ||||||
| - Creating and managing wallets for different networks |  | ||||||
| - Sending ETH transactions |  | ||||||
| - Checking balances |  | ||||||
| - Interacting with smart contracts (read and write functions) |  | ||||||
| - Support for multiple networks (Ethereum, Gnosis, Peaq, Agung, etc.) |  | ||||||
|  |  | ||||||
| ## Security | ## Security | ||||||
|  |  | ||||||
| Key spaces are encrypted with ChaCha20Poly1305 using a key derived from the provided password. The encryption ensures that the key material is secure at rest. | The vault uses: | ||||||
|  |  | ||||||
| ## Best Practices | - **ChaCha20Poly1305** for symmetric encryption | ||||||
|  | - **Password-based key derivation** for keyspace encryption | ||||||
|  | - **Secure key storage** with proper isolation | ||||||
|  |  | ||||||
| 1. **Use Strong Passwords**: Since the security of your key spaces depends on the strength of your passwords, use strong, unique passwords. | ## Next Steps | ||||||
| 2. **Backup Key Spaces**: Regularly backup your key spaces directory to prevent data loss. |  | ||||||
| 3. **Script Organization**: Split your scripts into logical units, with separate scripts for key creation and key usage. | 1. **Rhai Integration**: Implement Rhai bindings for Lee's vault | ||||||
| 4. **Error Handling**: Always check the return values of functions to ensure operations succeeded before proceeding. | 2. **New Examples**: Create examples using Lee's simpler API | ||||||
| 5. **Network Selection**: When working with Ethereum functionality, be explicit about which network you're targeting to avoid confusion. | 3. **Documentation**: Complete API documentation for Lee's implementation | ||||||
| 6. **Gas Management**: For Ethereum transactions, consider gas costs and set appropriate gas limits. | 4. **Migration Guide**: Provide guidance for users migrating from Sameh's implementation | ||||||
|   | |||||||
							
								
								
									
										72
									
								
								examples/kubernetes/basic_operations.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								examples/kubernetes/basic_operations.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,72 @@ | |||||||
//! Basic Kubernetes operations example
//!
//! Walks through the read-only SAL Kubernetes API: listing pods, services,
//! deployments and namespaces, counting resources, and probing for
//! namespace existence.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//!   herodo examples/kubernetes/basic_operations.rhai

print("=== SAL Kubernetes Basic Operations Example ===");

// Manager scoped to the 'default' namespace; namespace() echoes it back.
print("Creating KubernetesManager for 'default' namespace...");
let k8s = kubernetes_manager_new("default");
print("✓ KubernetesManager created for namespace: " + namespace(k8s));

// Pods in the namespace.
print("\n--- Listing Pods ---");
let pod_names = pods_list(k8s);
print("Found " + pod_names.len() + " pods in the namespace:");
for pod_name in pod_names {
    print("  - " + pod_name);
}

// Services in the namespace.
print("\n--- Listing Services ---");
let service_names = services_list(k8s);
print("Found " + service_names.len() + " services in the namespace:");
for service_name in service_names {
    print("  - " + service_name);
}

// Deployments in the namespace.
print("\n--- Listing Deployments ---");
let deployment_names = deployments_list(k8s);
print("Found " + deployment_names.len() + " deployments in the namespace:");
for deployment_name in deployment_names {
    print("  - " + deployment_name);
}

// Per-type resource totals for the namespace.
print("\n--- Resource Counts ---");
let resource_totals = resource_counts(k8s);
print("Resource counts in namespace '" + namespace(k8s) + "':");
for kind in resource_totals.keys() {
    print("  " + kind + ": " + resource_totals[kind]);
}

// Namespaces are cluster-wide, not scoped to the manager's namespace.
print("\n--- Listing All Namespaces ---");
let all_namespaces = namespaces_list(k8s);
print("Found " + all_namespaces.len() + " namespaces in the cluster:");
for ns in all_namespaces {
    print("  - " + ns);
}

// Existence probe: two namespaces that should exist and one that should not.
print("\n--- Checking Namespace Existence ---");
let namespaces_to_check = ["default", "kube-system", "non-existent-namespace"];
for ns in namespaces_to_check {
    let found = namespace_exists(k8s, ns);
    if found {
        print("✓ Namespace '" + ns + "' exists");
    } else {
        print("✗ Namespace '" + ns + "' does not exist");
    }
}

print("\n=== Example completed successfully! ===");
							
								
								
									
										134
									
								
								examples/kubernetes/clusters/generic.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										134
									
								
								examples/kubernetes/clusters/generic.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,134 @@ | |||||||
|  | //! Generic Application Deployment Example | ||||||
|  | //! | ||||||
|  | //! This example shows how to deploy any containerized application using the | ||||||
|  | //! KubernetesManager convenience methods. This works for any Docker image. | ||||||
|  |  | ||||||
|  | use sal_kubernetes::KubernetesManager; | ||||||
|  | use std::collections::HashMap; | ||||||
|  |  | ||||||
|  | #[tokio::main] | ||||||
|  | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|  |     // Create Kubernetes manager | ||||||
|  |     let km = KubernetesManager::new("default").await?; | ||||||
|  |  | ||||||
|  |     // Clean up any existing resources first | ||||||
|  |     println!("=== Cleaning up existing resources ==="); | ||||||
|  |     let apps_to_clean = ["web-server", "node-app", "mongodb"]; | ||||||
|  |  | ||||||
|  |     for app in &apps_to_clean { | ||||||
|  |         match km.deployment_delete(app).await { | ||||||
|  |             Ok(_) => println!("✓ Deleted existing deployment: {}", app), | ||||||
|  |             Err(_) => println!("✓ No existing deployment to delete: {}", app), | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         match km.service_delete(app).await { | ||||||
|  |             Ok(_) => println!("✓ Deleted existing service: {}", app), | ||||||
|  |             Err(_) => println!("✓ No existing service to delete: {}", app), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Example 1: Simple web server deployment | ||||||
|  |     println!("\n=== Example 1: Simple Nginx Web Server ==="); | ||||||
|  |  | ||||||
|  |     km.deploy_application("web-server", "nginx:latest", 2, 80, None, None) | ||||||
|  |         .await?; | ||||||
|  |     println!("✅ Nginx web server deployed!"); | ||||||
|  |  | ||||||
|  |     // Example 2: Node.js application with labels | ||||||
|  |     println!("\n=== Example 2: Node.js Application ==="); | ||||||
|  |  | ||||||
|  |     let mut node_labels = HashMap::new(); | ||||||
|  |     node_labels.insert("app".to_string(), "node-app".to_string()); | ||||||
|  |     node_labels.insert("tier".to_string(), "backend".to_string()); | ||||||
|  |     node_labels.insert("environment".to_string(), "production".to_string()); | ||||||
|  |  | ||||||
|  |     // Configure Node.js environment variables | ||||||
|  |     let mut node_env_vars = HashMap::new(); | ||||||
|  |     node_env_vars.insert("NODE_ENV".to_string(), "production".to_string()); | ||||||
|  |     node_env_vars.insert("PORT".to_string(), "3000".to_string()); | ||||||
|  |     node_env_vars.insert("LOG_LEVEL".to_string(), "info".to_string()); | ||||||
|  |     node_env_vars.insert("MAX_CONNECTIONS".to_string(), "1000".to_string()); | ||||||
|  |  | ||||||
|  |     km.deploy_application( | ||||||
|  |         "node-app",          // name | ||||||
|  |         "node:18-alpine",    // image | ||||||
|  |         3,                   // replicas - scale to 3 instances | ||||||
|  |         3000,                // port | ||||||
|  |         Some(node_labels),   // labels | ||||||
|  |         Some(node_env_vars), // environment variables | ||||||
|  |     ) | ||||||
|  |     .await?; | ||||||
|  |  | ||||||
|  |     println!("✅ Node.js application deployed!"); | ||||||
|  |  | ||||||
|  |     // Example 3: Database deployment (any database) | ||||||
|  |     println!("\n=== Example 3: MongoDB Database ==="); | ||||||
|  |  | ||||||
|  |     let mut mongo_labels = HashMap::new(); | ||||||
|  |     mongo_labels.insert("app".to_string(), "mongodb".to_string()); | ||||||
|  |     mongo_labels.insert("type".to_string(), "database".to_string()); | ||||||
|  |     mongo_labels.insert("engine".to_string(), "mongodb".to_string()); | ||||||
|  |  | ||||||
|  |     // Configure MongoDB environment variables | ||||||
|  |     let mut mongo_env_vars = HashMap::new(); | ||||||
|  |     mongo_env_vars.insert( | ||||||
|  |         "MONGO_INITDB_ROOT_USERNAME".to_string(), | ||||||
|  |         "admin".to_string(), | ||||||
|  |     ); | ||||||
|  |     mongo_env_vars.insert( | ||||||
|  |         "MONGO_INITDB_ROOT_PASSWORD".to_string(), | ||||||
|  |         "mongopassword".to_string(), | ||||||
|  |     ); | ||||||
|  |     mongo_env_vars.insert("MONGO_INITDB_DATABASE".to_string(), "myapp".to_string()); | ||||||
|  |  | ||||||
|  |     km.deploy_application( | ||||||
|  |         "mongodb",            // name | ||||||
|  |         "mongo:6.0",          // image | ||||||
|  |         1,                    // replicas - single instance for simplicity | ||||||
|  |         27017,                // port | ||||||
|  |         Some(mongo_labels),   // labels | ||||||
|  |         Some(mongo_env_vars), // environment variables | ||||||
|  |     ) | ||||||
|  |     .await?; | ||||||
|  |  | ||||||
|  |     println!("✅ MongoDB deployed!"); | ||||||
|  |  | ||||||
|  |     // Check status of all deployments | ||||||
|  |     println!("\n=== Checking Deployment Status ==="); | ||||||
|  |  | ||||||
|  |     let deployments = km.deployments_list().await?; | ||||||
|  |  | ||||||
|  |     for deployment in &deployments { | ||||||
|  |         if let Some(name) = &deployment.metadata.name { | ||||||
|  |             let total_replicas = deployment | ||||||
|  |                 .spec | ||||||
|  |                 .as_ref() | ||||||
|  |                 .and_then(|s| s.replicas) | ||||||
|  |                 .unwrap_or(0); | ||||||
|  |             let ready_replicas = deployment | ||||||
|  |                 .status | ||||||
|  |                 .as_ref() | ||||||
|  |                 .and_then(|s| s.ready_replicas) | ||||||
|  |                 .unwrap_or(0); | ||||||
|  |  | ||||||
|  |             println!( | ||||||
|  |                 "{}: {}/{} replicas ready", | ||||||
|  |                 name, ready_replicas, total_replicas | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n🎉 All deployments completed!"); | ||||||
|  |     println!("\n💡 Key Points:"); | ||||||
|  |     println!("  • Any Docker image can be deployed using this simple interface"); | ||||||
|  |     println!("  • Use labels to organize and identify your applications"); | ||||||
|  |     println!( | ||||||
|  |         "  • The same method works for databases, web servers, APIs, and any containerized app" | ||||||
|  |     ); | ||||||
|  |     println!("  • For advanced configuration, use the individual KubernetesManager methods"); | ||||||
|  |     println!( | ||||||
|  |         "  • Environment variables and resource limits can be added via direct Kubernetes API" | ||||||
|  |     ); | ||||||
|  |  | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
							
								
								
									
										79
									
								
								examples/kubernetes/clusters/postgres.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										79
									
								
								examples/kubernetes/clusters/postgres.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,79 @@ | |||||||
//! PostgreSQL Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a PostgreSQL cluster using Rhai scripting
//! with the KubernetesManager convenience methods.

print("=== PostgreSQL Cluster Deployment ===");

// Manager scoped to the 'database' namespace.
print("Creating Kubernetes manager for 'database' namespace...");
let km = kubernetes_manager_new("database");
print("✓ Kubernetes manager created");

// Create the namespace if it doesn't exist; "already exists" is not an error.
print("Creating namespace 'database' if it doesn't exist...");
try {
    create_namespace(km, "database");
    print("✓ Namespace 'database' created");
} catch(e) {
    if e.to_string().contains("already exists") {
        print("✓ Namespace 'database' already exists");
    } else {
        print("⚠️ Warning: " + e);
    }
}

// Remove leftovers from earlier runs so the deploy below starts clean.
print("\nCleaning up any existing PostgreSQL resources...");
try {
    delete_deployment(km, "postgres-cluster");
    print("✓ Deleted existing deployment");
} catch(e) {
    print("✓ No existing deployment to delete");
}

try {
    delete_service(km, "postgres-cluster");
    print("✓ Deleted existing service");
} catch(e) {
    print("✓ No existing service to delete");
}

// Deploy PostgreSQL via the convenience method. The env map uses the
// variables understood by the official `postgres` image.
print("\nDeploying PostgreSQL cluster...");

try {
    let result = deploy_application(km, "postgres-cluster", "postgres:15", 2, 5432, #{
        "app": "postgres-cluster",
        "type": "database",
        "engine": "postgresql"
    }, #{
        "POSTGRES_DB": "myapp",
        "POSTGRES_USER": "postgres",
        "POSTGRES_PASSWORD": "secretpassword",
        "PGDATA": "/var/lib/postgresql/data/pgdata"
    });
    print("✓ " + result);

    print("\n✅ PostgreSQL cluster deployed successfully!");

    // Connection info matches POSTGRES_DB / POSTGRES_USER configured above.
    print("\n📋 Connection Information:");
    print("  Host: postgres-cluster.database.svc.cluster.local");
    print("  Port: 5432");
    print("  Database: myapp");
    print("  Username: postgres");

    print("\n🔧 To connect from another pod:");
    print("  psql -h postgres-cluster.database.svc.cluster.local -U postgres -d myapp");

    print("\n💡 Next steps:");
    print("  • Replace the hard-coded POSTGRES_PASSWORD with a secret");
    print("  • Configure persistent storage");
    print("  • Set up backup and monitoring");

} catch(e) {
    print("❌ Failed to deploy PostgreSQL cluster: " + e);
}

print("\n=== Deployment Complete ===");
							
								
								
									
										112
									
								
								examples/kubernetes/clusters/postgres.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										112
									
								
								examples/kubernetes/clusters/postgres.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,112 @@ | |||||||
|  | //! PostgreSQL Cluster Deployment Example | ||||||
|  | //! | ||||||
|  | //! This example shows how to deploy a PostgreSQL cluster using the | ||||||
|  | //! KubernetesManager convenience methods. | ||||||
|  |  | ||||||
|  | use sal_kubernetes::KubernetesManager; | ||||||
|  | use std::collections::HashMap; | ||||||
|  |  | ||||||
|  | #[tokio::main] | ||||||
|  | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|  |     // Create Kubernetes manager for the database namespace | ||||||
|  |     let km = KubernetesManager::new("database").await?; | ||||||
|  |  | ||||||
|  |     // Create the namespace if it doesn't exist | ||||||
|  |     println!("Creating namespace 'database' if it doesn't exist..."); | ||||||
|  |     match km.namespace_create("database").await { | ||||||
|  |         Ok(_) => println!("✓ Namespace 'database' created"), | ||||||
|  |         Err(e) => { | ||||||
|  |             if e.to_string().contains("already exists") { | ||||||
|  |                 println!("✓ Namespace 'database' already exists"); | ||||||
|  |             } else { | ||||||
|  |                 return Err(e.into()); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Clean up any existing resources first | ||||||
|  |     println!("Cleaning up any existing PostgreSQL resources..."); | ||||||
|  |     match km.deployment_delete("postgres-cluster").await { | ||||||
|  |         Ok(_) => println!("✓ Deleted existing deployment"), | ||||||
|  |         Err(_) => println!("✓ No existing deployment to delete"), | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     match km.service_delete("postgres-cluster").await { | ||||||
|  |         Ok(_) => println!("✓ Deleted existing service"), | ||||||
|  |         Err(_) => println!("✓ No existing service to delete"), | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Configure PostgreSQL-specific labels | ||||||
|  |     let mut labels = HashMap::new(); | ||||||
|  |     labels.insert("app".to_string(), "postgres-cluster".to_string()); | ||||||
|  |     labels.insert("type".to_string(), "database".to_string()); | ||||||
|  |     labels.insert("engine".to_string(), "postgresql".to_string()); | ||||||
|  |  | ||||||
|  |     // Configure PostgreSQL environment variables | ||||||
|  |     let mut env_vars = HashMap::new(); | ||||||
|  |     env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string()); | ||||||
|  |     env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string()); | ||||||
|  |     env_vars.insert( | ||||||
|  |         "POSTGRES_PASSWORD".to_string(), | ||||||
|  |         "secretpassword".to_string(), | ||||||
|  |     ); | ||||||
|  |     env_vars.insert( | ||||||
|  |         "PGDATA".to_string(), | ||||||
|  |         "/var/lib/postgresql/data/pgdata".to_string(), | ||||||
|  |     ); | ||||||
|  |  | ||||||
|  |     // Deploy the PostgreSQL cluster using the convenience method | ||||||
|  |     println!("Deploying PostgreSQL cluster..."); | ||||||
|  |     km.deploy_application( | ||||||
|  |         "postgres-cluster", // name | ||||||
|  |         "postgres:15",      // image | ||||||
|  |         2,                  // replicas (1 master + 1 replica) | ||||||
|  |         5432,               // port | ||||||
|  |         Some(labels),       // labels | ||||||
|  |         Some(env_vars),     // environment variables | ||||||
|  |     ) | ||||||
|  |     .await?; | ||||||
|  |  | ||||||
|  |     println!("✅ PostgreSQL cluster deployed successfully!"); | ||||||
|  |  | ||||||
|  |     // Check deployment status | ||||||
|  |     let deployments = km.deployments_list().await?; | ||||||
|  |     let postgres_deployment = deployments | ||||||
|  |         .iter() | ||||||
|  |         .find(|d| d.metadata.name.as_ref() == Some(&"postgres-cluster".to_string())); | ||||||
|  |  | ||||||
|  |     if let Some(deployment) = postgres_deployment { | ||||||
|  |         let total_replicas = deployment | ||||||
|  |             .spec | ||||||
|  |             .as_ref() | ||||||
|  |             .and_then(|s| s.replicas) | ||||||
|  |             .unwrap_or(0); | ||||||
|  |         let ready_replicas = deployment | ||||||
|  |             .status | ||||||
|  |             .as_ref() | ||||||
|  |             .and_then(|s| s.ready_replicas) | ||||||
|  |             .unwrap_or(0); | ||||||
|  |  | ||||||
|  |         println!( | ||||||
|  |             "Deployment status: {}/{} replicas ready", | ||||||
|  |             ready_replicas, total_replicas | ||||||
|  |         ); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n📋 Connection Information:"); | ||||||
|  |     println!("  Host: postgres-cluster.database.svc.cluster.local"); | ||||||
|  |     println!("  Port: 5432"); | ||||||
|  |     println!("  Database: postgres (default)"); | ||||||
|  |     println!("  Username: postgres (default)"); | ||||||
|  |     println!("  Password: Set POSTGRES_PASSWORD environment variable"); | ||||||
|  |  | ||||||
|  |     println!("\n🔧 To connect from another pod:"); | ||||||
|  |     println!("  psql -h postgres-cluster.database.svc.cluster.local -U postgres"); | ||||||
|  |  | ||||||
|  |     println!("\n💡 Next steps:"); | ||||||
|  |     println!("  • Set environment variables for database credentials"); | ||||||
|  |     println!("  • Add persistent volume claims for data storage"); | ||||||
|  |     println!("  • Configure backup and monitoring"); | ||||||
|  |  | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
							
								
								
									
										79
									
								
								examples/kubernetes/clusters/redis.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										79
									
								
								examples/kubernetes/clusters/redis.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,79 @@ | |||||||
//! Redis Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a Redis cluster using Rhai scripting
//! with the KubernetesManager convenience methods.

print("=== Redis Cluster Deployment ===");

// Manager scoped to the 'cache' namespace.
print("Creating Kubernetes manager for 'cache' namespace...");
let k8s = kubernetes_manager_new("cache");
print("✓ Kubernetes manager created");

// Make sure the namespace exists; "already exists" is not an error.
print("Creating namespace 'cache' if it doesn't exist...");
try {
    create_namespace(k8s, "cache");
    print("✓ Namespace 'cache' created");
} catch(err) {
    if err.to_string().contains("already exists") {
        print("✓ Namespace 'cache' already exists");
    } else {
        print("⚠️ Warning: " + err);
    }
}

// Remove leftovers from earlier runs so the deploy below starts clean.
print("\nCleaning up any existing Redis resources...");
try {
    delete_deployment(k8s, "redis-cluster");
    print("✓ Deleted existing deployment");
} catch(err) {
    print("✓ No existing deployment to delete");
}

try {
    delete_service(k8s, "redis-cluster");
    print("✓ Deleted existing service");
} catch(err) {
    print("✓ No existing service to delete");
}

// Deploy Redis via the convenience method.
// NOTE(review): the stock `redis` Docker image does not read REDIS_PASSWORD /
// REDIS_MAXMEMORY style env vars (that is a bitnami/redis convention) —
// confirm these take effect, or configure via command args / a config file.
print("\nDeploying Redis cluster...");

try {
    let result = deploy_application(k8s, "redis-cluster", "redis:7-alpine", 3, 6379, #{
        "app": "redis-cluster",
        "type": "cache",
        "engine": "redis"
    }, #{
        "REDIS_PASSWORD": "redispassword",
        "REDIS_PORT": "6379",
        "REDIS_DATABASES": "16",
        "REDIS_MAXMEMORY": "256mb",
        "REDIS_MAXMEMORY_POLICY": "allkeys-lru"
    });
    print("✓ " + result);

    print("\n✅ Redis cluster deployed successfully!");

    print("\n📋 Connection Information:");
    print("  Host: redis-cluster.cache.svc.cluster.local");
    print("  Port: 6379");

    print("\n🔧 To connect from another pod:");
    print("  redis-cli -h redis-cluster.cache.svc.cluster.local");

    print("\n💡 Next steps:");
    print("  • Configure Redis authentication");
    print("  • Set up Redis clustering configuration");
    print("  • Add persistent storage");
    print("  • Configure memory policies");

} catch(err) {
    print("❌ Failed to deploy Redis cluster: " + err);
}

print("\n=== Deployment Complete ===");
							
								
								
									
										109
									
								
								examples/kubernetes/clusters/redis.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								examples/kubernetes/clusters/redis.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | |||||||
|  | //! Redis Cluster Deployment Example | ||||||
|  | //! | ||||||
|  | //! This example shows how to deploy a Redis cluster using the | ||||||
|  | //! KubernetesManager convenience methods. | ||||||
|  |  | ||||||
|  | use sal_kubernetes::KubernetesManager; | ||||||
|  | use std::collections::HashMap; | ||||||
|  |  | ||||||
|  | #[tokio::main] | ||||||
|  | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|  |     // Create Kubernetes manager for the cache namespace | ||||||
|  |     let km = KubernetesManager::new("cache").await?; | ||||||
|  |  | ||||||
|  |     // Create the namespace if it doesn't exist | ||||||
|  |     println!("Creating namespace 'cache' if it doesn't exist..."); | ||||||
|  |     match km.namespace_create("cache").await { | ||||||
|  |         Ok(_) => println!("✓ Namespace 'cache' created"), | ||||||
|  |         Err(e) => { | ||||||
|  |             if e.to_string().contains("already exists") { | ||||||
|  |                 println!("✓ Namespace 'cache' already exists"); | ||||||
|  |             } else { | ||||||
|  |                 return Err(e.into()); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Clean up any existing resources first | ||||||
|  |     println!("Cleaning up any existing Redis resources..."); | ||||||
|  |     match km.deployment_delete("redis-cluster").await { | ||||||
|  |         Ok(_) => println!("✓ Deleted existing deployment"), | ||||||
|  |         Err(_) => println!("✓ No existing deployment to delete"), | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     match km.service_delete("redis-cluster").await { | ||||||
|  |         Ok(_) => println!("✓ Deleted existing service"), | ||||||
|  |         Err(_) => println!("✓ No existing service to delete"), | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Configure Redis-specific labels | ||||||
|  |     let mut labels = HashMap::new(); | ||||||
|  |     labels.insert("app".to_string(), "redis-cluster".to_string()); | ||||||
|  |     labels.insert("type".to_string(), "cache".to_string()); | ||||||
|  |     labels.insert("engine".to_string(), "redis".to_string()); | ||||||
|  |  | ||||||
|  |     // Configure Redis environment variables | ||||||
|  |     let mut env_vars = HashMap::new(); | ||||||
|  |     env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string()); | ||||||
|  |     env_vars.insert("REDIS_PORT".to_string(), "6379".to_string()); | ||||||
|  |     env_vars.insert("REDIS_DATABASES".to_string(), "16".to_string()); | ||||||
|  |     env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string()); | ||||||
|  |     env_vars.insert( | ||||||
|  |         "REDIS_MAXMEMORY_POLICY".to_string(), | ||||||
|  |         "allkeys-lru".to_string(), | ||||||
|  |     ); | ||||||
|  |  | ||||||
|  |     // Deploy the Redis cluster using the convenience method | ||||||
|  |     println!("Deploying Redis cluster..."); | ||||||
|  |     km.deploy_application( | ||||||
|  |         "redis-cluster",  // name | ||||||
|  |         "redis:7-alpine", // image | ||||||
|  |         3,                // replicas (Redis cluster nodes) | ||||||
|  |         6379,             // port | ||||||
|  |         Some(labels),     // labels | ||||||
|  |         Some(env_vars),   // environment variables | ||||||
|  |     ) | ||||||
|  |     .await?; | ||||||
|  |  | ||||||
|  |     println!("✅ Redis cluster deployed successfully!"); | ||||||
|  |  | ||||||
|  |     // Check deployment status | ||||||
|  |     let deployments = km.deployments_list().await?; | ||||||
|  |     let redis_deployment = deployments | ||||||
|  |         .iter() | ||||||
|  |         .find(|d| d.metadata.name.as_ref() == Some(&"redis-cluster".to_string())); | ||||||
|  |  | ||||||
|  |     if let Some(deployment) = redis_deployment { | ||||||
|  |         let total_replicas = deployment | ||||||
|  |             .spec | ||||||
|  |             .as_ref() | ||||||
|  |             .and_then(|s| s.replicas) | ||||||
|  |             .unwrap_or(0); | ||||||
|  |         let ready_replicas = deployment | ||||||
|  |             .status | ||||||
|  |             .as_ref() | ||||||
|  |             .and_then(|s| s.ready_replicas) | ||||||
|  |             .unwrap_or(0); | ||||||
|  |  | ||||||
|  |         println!( | ||||||
|  |             "Deployment status: {}/{} replicas ready", | ||||||
|  |             ready_replicas, total_replicas | ||||||
|  |         ); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n📋 Connection Information:"); | ||||||
|  |     println!("  Host: redis-cluster.cache.svc.cluster.local"); | ||||||
|  |     println!("  Port: 6379"); | ||||||
|  |     println!("  Password: Configure REDIS_PASSWORD environment variable"); | ||||||
|  |  | ||||||
|  |     println!("\n🔧 To connect from another pod:"); | ||||||
|  |     println!("  redis-cli -h redis-cluster.cache.svc.cluster.local"); | ||||||
|  |  | ||||||
|  |     println!("\n💡 Next steps:"); | ||||||
|  |     println!("  • Configure Redis authentication with environment variables"); | ||||||
|  |     println!("  • Set up Redis clustering configuration"); | ||||||
|  |     println!("  • Add persistent volume claims for data persistence"); | ||||||
|  |     println!("  • Configure memory limits and eviction policies"); | ||||||
|  |  | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
							
								
								
									
										208
									
								
								examples/kubernetes/multi_namespace_operations.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										208
									
								
								examples/kubernetes/multi_namespace_operations.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,208 @@ | |||||||
//! Multi-namespace Kubernetes operations example
//!
//! This script demonstrates working with multiple namespaces and comparing resources across them.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//!   herodo examples/kubernetes/multi_namespace_operations.rhai

print("=== SAL Kubernetes Multi-Namespace Operations Example ===");

// Define namespaces to work with
let target_namespaces = ["default", "kube-system"];
let managers = #{};

print("Creating managers for multiple namespaces...");

// Create managers for each namespace; failures are reported but do not
// abort the script (the namespace is simply skipped later).
for ns in target_namespaces {
    try {
        let km = kubernetes_manager_new(ns);
        managers[ns] = km;
        print("✓ Created manager for namespace: " + ns);
    } catch(e) {
        print("✗ Failed to create manager for " + ns + ": " + e);
    }
}

// Function to safely get resource counts (empty map on failure)
fn get_safe_counts(km) {
    try {
        return resource_counts(km);
    } catch(e) {
        print("  Warning: Could not get resource counts - " + e);
        return #{};
    }
}

// Function to safely get pod list (empty array on failure)
fn get_safe_pods(km) {
    try {
        return pods_list(km);
    } catch(e) {
        print("  Warning: Could not list pods - " + e);
        return [];
    }
}

// Compare resource counts across namespaces
print("\n--- Resource Comparison Across Namespaces ---");
let total_resources = #{};

for ns in target_namespaces {
    if ns in managers {
        let km = managers[ns];
        print("\nNamespace: " + ns);
        let counts = get_safe_counts(km);

        for resource_type in counts.keys() {
            let count = counts[resource_type];
            print("  " + resource_type + ": " + count);

            // Accumulate totals
            if resource_type in total_resources {
                total_resources[resource_type] = total_resources[resource_type] + count;
            } else {
                total_resources[resource_type] = count;
            }
        }
    }
}

print("\n--- Total Resources Across All Namespaces ---");
for resource_type in total_resources.keys() {
    print("Total " + resource_type + ": " + total_resources[resource_type]);
}

// Find namespaces with the most resources
print("\n--- Namespace Resource Analysis ---");
let namespace_totals = #{};

for ns in target_namespaces {
    if ns in managers {
        let km = managers[ns];
        let counts = get_safe_counts(km);
        let total = 0;

        for resource_type in counts.keys() {
            total = total + counts[resource_type];
        }

        namespace_totals[ns] = total;
        print("Namespace '" + ns + "' has " + total + " total resources");
    }
}

// Find the busiest namespace
let busiest_ns = "";
let max_resources = 0;
for ns in namespace_totals.keys() {
    if namespace_totals[ns] > max_resources {
        max_resources = namespace_totals[ns];
        busiest_ns = ns;
    }
}

if busiest_ns != "" {
    print("🏆 Busiest namespace: '" + busiest_ns + "' with " + max_resources + " resources");
}

// Detailed pod analysis
print("\n--- Pod Analysis Across Namespaces ---");
let all_pods = [];

for ns in target_namespaces {
    if ns in managers {
        let km = managers[ns];
        let pods = get_safe_pods(km);

        print("\nNamespace '" + ns + "' pods:");
        if pods.len() == 0 {
            print("  (no pods)");
        } else {
            for pod in pods {
                print("  - " + pod);
                all_pods.push(ns + "/" + pod);
            }
        }
    }
}

print("\n--- All Pods Summary ---");
print("Total pods across all namespaces: " + all_pods.len());

// Look for common pod name patterns
print("\n--- Pod Name Pattern Analysis ---");
let patterns = #{
    "system": 0,
    "kube": 0,
    "coredns": 0,
    "proxy": 0,
    "controller": 0
};

for pod_full_name in all_pods {
    let pod_name = pod_full_name.to_lower();

    for pattern in patterns.keys() {
        if pod_name.contains(pattern) {
            patterns[pattern] = patterns[pattern] + 1;
        }
    }
}

print("Common pod name patterns found:");
for pattern in patterns.keys() {
    if patterns[pattern] > 0 {
        print("  '" + pattern + "': " + patterns[pattern] + " pods");
    }
}

// Namespace health check
print("\n--- Namespace Health Check ---");
for ns in target_namespaces {
    if ns in managers {
        let km = managers[ns];
        print("\nChecking namespace: " + ns);

        // Check if namespace exists (should always be true for our managers)
        let exists = namespace_exists(km, ns);
        if exists {
            print("  ✓ Namespace exists and is accessible");
        } else {
            print("  ✗ Namespace existence check failed");
        }

        // Try to get resource counts as a health indicator
        let counts = get_safe_counts(km);
        if counts.len() > 0 {
            print("  ✓ Can access resources (" + counts.len() + " resource types)");
        } else {
            print("  ⚠ No resources found or access limited");
        }
    }
}

// Create a summary report
print("\n--- Summary Report ---");
print("Namespaces analyzed: " + target_namespaces.len());
print("Total unique resource types: " + total_resources.len());

let grand_total = 0;
for resource_type in total_resources.keys() {
    grand_total = grand_total + total_resources[resource_type];
}
print("Grand total resources: " + grand_total);

print("\nResource breakdown:");
for resource_type in total_resources.keys() {
    let count = total_resources[resource_type];
    // Guard against division by zero: if no managers could be created (or
    // every namespace is empty) grand_total is 0 and the original
    // `(count * 100) / grand_total` would raise a runtime error.
    let percentage = if grand_total > 0 { (count * 100) / grand_total } else { 0 };
    print("  " + resource_type + ": " + count + " (" + percentage + "%)");
}

print("\n=== Multi-namespace operations example completed! ===");
							
								
								
									
										95
									
								
								examples/kubernetes/namespace_management.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										95
									
								
								examples/kubernetes/namespace_management.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,95 @@ | |||||||
//! Kubernetes namespace management example
//!
//! This script demonstrates namespace creation and management operations.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to create and manage namespaces
//!
//! Usage:
//!   herodo examples/kubernetes/namespace_management.rhai
//!
//! NOTE(review): the helper functions used here (kubernetes_manager_new,
//! namespace, namespace_create, namespace_exists, namespaces_list,
//! resource_counts) are assumed to be registered by the SAL host — confirm
//! against the SAL Kubernetes Rhai module.

print("=== SAL Kubernetes Namespace Management Example ===");

// Create a KubernetesManager bound to the default namespace.
let km = kubernetes_manager_new("default");
print("Created KubernetesManager for namespace: " + namespace(km));

// Define test namespace names (all prefixed "sal-" so they are easy to
// identify and clean up afterwards).
let test_namespaces = [
    "sal-test-namespace-1",
    "sal-test-namespace-2", 
    "sal-example-app"
];

print("\n--- Creating Test Namespaces ---");
for ns in test_namespaces {
    print("Creating namespace: " + ns);
    try {
        namespace_create(km, ns);
        print("✓ Successfully created namespace: " + ns);
    } catch(e) {
        print("✗ Failed to create namespace " + ns + ": " + e);
    }
}

// Wait a moment for namespaces to be created
print("\nWaiting for namespaces to be ready...");

// Verify namespaces were created
print("\n--- Verifying Namespace Creation ---");
for ns in test_namespaces {
    let exists = namespace_exists(km, ns);
    if exists {
        print("✓ Namespace '" + ns + "' exists");
    } else {
        print("✗ Namespace '" + ns + "' was not found");
    }
}

// List all namespaces to see our new ones
print("\n--- Current Namespaces ---");
let all_namespaces = namespaces_list(km);
print("Total namespaces in cluster: " + all_namespaces.len());
for ns in all_namespaces {
    // Namespaces created by this example are highlighted by their prefix.
    if ns.starts_with("sal-") {
        print("  🔹 " + ns + " (created by this example)");
    } else {
        print("  - " + ns);
    }
}

// Test idempotent creation (creating the same namespace again).
// namespace_create is expected not to error on an existing namespace.
print("\n--- Testing Idempotent Creation ---");
let test_ns = test_namespaces[0];
print("Attempting to create existing namespace: " + test_ns);
try {
    namespace_create(km, test_ns);
    print("✓ Idempotent creation successful (no error for existing namespace)");
} catch(e) {
    print("✗ Unexpected error during idempotent creation: " + e);
}

// Create managers for the new namespaces and check their properties
print("\n--- Creating Managers for New Namespaces ---");
for ns in test_namespaces {
    try {
        let ns_km = kubernetes_manager_new(ns);
        print("✓ Created manager for namespace: " + namespace(ns_km));

        // Get resource counts for the new namespace (should be mostly empty)
        let counts = resource_counts(ns_km);
        print("  Resource counts: " + counts);
    } catch(e) {
        print("✗ Failed to create manager for " + ns + ": " + e);
    }
}

// The example does not delete the namespaces itself; cleanup is manual.
print("\n--- Cleanup Instructions ---");
print("To clean up the test namespaces created by this example, run:");
for ns in test_namespaces {
    print("  kubectl delete namespace " + ns);
}

print("\n=== Namespace management example completed! ===");
							
								
								
									
										157
									
								
								examples/kubernetes/pattern_deletion.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										157
									
								
								examples/kubernetes/pattern_deletion.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,157 @@ | |||||||
//! Kubernetes pattern-based deletion example
//!
//! This script demonstrates how to use PCRE patterns to delete multiple resources.
//!
//! ⚠️  WARNING: This example includes actual deletion operations!
//! ⚠️  Only run this in a test environment!
//!
//! Prerequisites:
//! - A running Kubernetes cluster (preferably a test cluster)
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to delete resources
//!
//! Usage:
//!   herodo examples/kubernetes/pattern_deletion.rhai

print("=== SAL Kubernetes Pattern Deletion Example ===");
print("⚠️  WARNING: This example will delete resources matching patterns!");
print("⚠️  Only run this in a test environment!");

// Create a KubernetesManager for a test namespace.
// The initial manager is bound to "default" only so we can create the test
// namespace; all destructive operations below use the test namespace manager.
let test_namespace = "sal-pattern-test";
let km = kubernetes_manager_new("default");

print("\nCreating test namespace: " + test_namespace);
try {
    namespace_create(km, test_namespace);
    print("✓ Test namespace created");
} catch(e) {
    // Most likely "already exists" — not fatal for this example.
    print("Note: " + e);
}

// Switch to the test namespace
let test_km = kubernetes_manager_new(test_namespace);
print("Switched to namespace: " + namespace(test_km));

// Show current resources before any operations
print("\n--- Current Resources in Test Namespace ---");
let counts = resource_counts(test_km);
print("Resource counts before operations:");
for resource_type in counts.keys() {
    print("  " + resource_type + ": " + counts[resource_type]);
}

// List current pods to see what we're working with
let current_pods = pods_list(test_km);
print("\nCurrent pods in namespace:");
if current_pods.len() == 0 {
    print("  (no pods found)");
} else {
    for pod in current_pods {
        print("  - " + pod);
    }
}

// Demonstrate pattern matching without deletion first.
// This section does NOT delete anything — it only simulates which pods
// would match each pattern.
print("\n--- Pattern Matching Demo (Dry Run) ---");
let test_patterns = [
    "test-.*",           // Match anything starting with "test-"
    ".*-temp$",          // Match anything ending with "-temp"
    "demo-pod-.*",       // Match demo pods
    "nginx-.*",          // Match nginx pods
    "app-[0-9]+",        // Match app-1, app-2, etc.
];

for pattern in test_patterns {
    print("Testing pattern: '" + pattern + "'");

    // Check which pods would match this pattern.
    // NOTE(review): this is a substring-based approximation, not real regex
    // matching — e.g. the "app-[0-9]+" pattern is never simulated, and
    // "test-.*" matches any pod whose name merely contains "test".
    let matching_pods = [];
    for pod in current_pods {
        // Simple pattern matching simulation (Rhai doesn't have regex, so this is illustrative)
        if pod.contains("test") && pattern == "test-.*" {
            matching_pods.push(pod);
        } else if pod.contains("temp") && pattern == ".*-temp$" {
            matching_pods.push(pod);
        } else if pod.contains("demo") && pattern == "demo-pod-.*" {
            matching_pods.push(pod);
        } else if pod.contains("nginx") && pattern == "nginx-.*" {
            matching_pods.push(pod);
        }
    }

    print("  Would match " + matching_pods.len() + " pods: " + matching_pods);
}

// Example of safe deletion patterns
print("\n--- Safe Deletion Examples ---");
print("These patterns are designed to be safe for testing:");

let safe_patterns = [
    "test-example-.*",      // Very specific test resources
    "sal-demo-.*",          // SAL demo resources
    "temp-resource-.*",     // Temporary resources
];

for pattern in safe_patterns {
    print("\nTesting safe pattern: '" + pattern + "'");

    try {
        // This will actually attempt deletion, but should be safe in a test environment.
        // NOTE(review): delete(km, pattern) presumably matches resource NAMES
        // across resource types via PCRE and returns the number deleted —
        // confirm against the SAL Kubernetes module before relying on it.
        let deleted_count = delete(test_km, pattern);
        print("✓ Pattern '" + pattern + "' matched and deleted " + deleted_count + " resources");
    } catch(e) {
        print("Note: Pattern '" + pattern + "' - " + e);
    }
}

// Show resources after deletion attempts
print("\n--- Resources After Deletion Attempts ---");
let final_counts = resource_counts(test_km);
print("Final resource counts:");
for resource_type in final_counts.keys() {
    print("  " + resource_type + ": " + final_counts[resource_type]);
}

// Example of individual resource deletion
print("\n--- Individual Resource Deletion Examples ---");
print("These functions delete specific resources by name:");

// These are examples - they will fail if the resources don't exist, which is expected
let example_deletions = [
    ["pod", "test-pod-example"],
    ["service", "test-service-example"],
    ["deployment", "test-deployment-example"],
];

for deletion in example_deletions {
    let resource_type = deletion[0];
    let resource_name = deletion[1];

    print("Attempting to delete " + resource_type + ": " + resource_name);
    try {
        // Dispatch to the type-specific deletion helper.
        if resource_type == "pod" {
            pod_delete(test_km, resource_name);
        } else if resource_type == "service" {
            service_delete(test_km, resource_name);
        } else if resource_type == "deployment" {
            deployment_delete(test_km, resource_name);
        }
        print("✓ Successfully deleted " + resource_type + ": " + resource_name);
    } catch(e) {
        print("Note: " + resource_type + " '" + resource_name + "' - " + e);
    }
}

print("\n--- Best Practices for Pattern Deletion ---");
print("1. Always test patterns in a safe environment first");
print("2. Use specific patterns rather than broad ones");
print("3. Consider using dry-run approaches when possible");
print("4. Have backups or be able to recreate resources");
print("5. Use descriptive naming conventions for easier pattern matching");

print("\n--- Cleanup ---");
print("To clean up the test namespace:");
print("  kubectl delete namespace " + test_namespace);

print("\n=== Pattern deletion example completed! ===");
							
								
								
									
										33
									
								
								examples/kubernetes/test_registration.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								examples/kubernetes/test_registration.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | |||||||
//! Test Kubernetes module registration
//!
//! This script tests that the Kubernetes module is properly registered
//! and available in the Rhai environment.

print("=== Testing Kubernetes Module Registration ===");

// Test that we can reference the kubernetes functions
print("Testing function registration...");

// Names of every function the Kubernetes module is expected to register.
// Merely iterating them does not call anything, so no cluster is needed.
let registered_functions = [
    "kubernetes_manager_new",
    "pods_list",
    "services_list", 
    "deployments_list",
    "delete",
    "namespace_create",
    "namespace_exists",
    "resource_counts",
    "pod_delete",
    "service_delete",
    "deployment_delete",
    "namespace"
];

for fn_name in registered_functions {
    print(`✓ Function '${fn_name}' is available`);
}

print("\n=== All Kubernetes functions are properly registered! ===");
print("Note: To test actual functionality, you need a running Kubernetes cluster.");
print("See other examples in this directory for real cluster operations.");
| @@ -1,6 +1,7 @@ | |||||||
| // Example of using the network modules in SAL through Rhai | // Example of using the network modules in SAL through Rhai | ||||||
| // Shows TCP port checking, HTTP URL validation, and SSH command execution | // Shows TCP port checking, HTTP URL validation, and SSH command execution | ||||||
|  |  | ||||||
|  |  | ||||||
| // Function to print section header | // Function to print section header | ||||||
| fn section(title) { | fn section(title) { | ||||||
|     print("\n"); |     print("\n"); | ||||||
| @@ -19,14 +20,14 @@ let host = "localhost"; | |||||||
| let port = 22; | let port = 22; | ||||||
| print(`Checking if port ${port} is open on ${host}...`); | print(`Checking if port ${port} is open on ${host}...`); | ||||||
| let is_open = tcp.check_port(host, port); | let is_open = tcp.check_port(host, port); | ||||||
| print(`Port ${port} is ${is_open ? "open" : "closed"}`); | print(`Port ${port} is ${if is_open { "open" } else { "closed" }}`); | ||||||
|  |  | ||||||
| // Check multiple ports | // Check multiple ports | ||||||
| let ports = [22, 80, 443]; | let ports = [22, 80, 443]; | ||||||
| print(`Checking multiple ports on ${host}...`); | print(`Checking multiple ports on ${host}...`); | ||||||
| let port_results = tcp.check_ports(host, ports); | let port_results = tcp.check_ports(host, ports); | ||||||
| for result in port_results { | for result in port_results { | ||||||
|     print(`Port ${result.port} is ${result.is_open ? "open" : "closed"}`); |     print(`Port ${result.port} is ${if result.is_open { "open" } else { "closed" }}`); | ||||||
| } | } | ||||||
|  |  | ||||||
| // HTTP connectivity checks | // HTTP connectivity checks | ||||||
| @@ -39,7 +40,7 @@ let http = net::new_http_connector(); | |||||||
| let url = "https://www.example.com"; | let url = "https://www.example.com"; | ||||||
| print(`Checking if ${url} is reachable...`); | print(`Checking if ${url} is reachable...`); | ||||||
| let is_reachable = http.check_url(url); | let is_reachable = http.check_url(url); | ||||||
| print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`); | print(`${url} is ${if is_reachable { "reachable" } else { "unreachable" }}`); | ||||||
|  |  | ||||||
| // Check the status code of a URL | // Check the status code of a URL | ||||||
| print(`Checking status code of ${url}...`); | print(`Checking status code of ${url}...`); | ||||||
| @@ -68,7 +69,7 @@ if is_open { | |||||||
|     let ssh = net::new_ssh_builder() |     let ssh = net::new_ssh_builder() | ||||||
|         .host("localhost") |         .host("localhost") | ||||||
|         .port(22) |         .port(22) | ||||||
|         .user(os::get_env("USER") || "root") |         .user(if os::get_env("USER") != () { os::get_env("USER") } else { "root" }) | ||||||
|         .timeout(10) |         .timeout(10) | ||||||
|         .build(); |         .build(); | ||||||
|      |      | ||||||
|   | |||||||
| @@ -1,7 +1,7 @@ | |||||||
| print("Running a basic command using run().do()..."); | print("Running a basic command using run().execute()..."); | ||||||
|  |  | ||||||
| // Execute a simple command | // Execute a simple command | ||||||
| let result = run("echo Hello from run_basic!").do(); | let result = run("echo Hello from run_basic!").execute(); | ||||||
|  |  | ||||||
| // Print the command result | // Print the command result | ||||||
| print(`Command: echo Hello from run_basic!`); | print(`Command: echo Hello from run_basic!`); | ||||||
| @@ -13,6 +13,6 @@ print(`Stderr:\n${result.stderr}`); | |||||||
| // Example of a command that might fail (if 'nonexistent_command' doesn't exist) | // Example of a command that might fail (if 'nonexistent_command' doesn't exist) | ||||||
| // This will halt execution by default because ignore_error() is not used. | // This will halt execution by default because ignore_error() is not used. | ||||||
| // print("Running a command that will fail (and should halt)..."); | // print("Running a command that will fail (and should halt)..."); | ||||||
| // let fail_result = run("nonexistent_command").do(); // This line will cause the script to halt if the command doesn't exist | // let fail_result = run("nonexistent_command").execute(); // This line will cause the script to halt if the command doesn't exist | ||||||
|  |  | ||||||
| print("Basic run() example finished."); | print("Basic run() example finished."); | ||||||
| @@ -2,7 +2,7 @@ print("Running a command that will fail, but ignoring the error..."); | |||||||
|  |  | ||||||
| // Run a command that exits with a non-zero code (will fail) | // Run a command that exits with a non-zero code (will fail) | ||||||
| // Using .ignore_error() prevents the script from halting | // Using .ignore_error() prevents the script from halting | ||||||
| let result = run("exit 1").ignore_error().do(); | let result = run("exit 1").ignore_error().execute(); | ||||||
|  |  | ||||||
| print(`Command finished.`); | print(`Command finished.`); | ||||||
| print(`Success: ${result.success}`); // This should be false | print(`Success: ${result.success}`); // This should be false | ||||||
| @@ -22,7 +22,7 @@ print("\nScript continued execution after the potentially failing command."); | |||||||
| // Example of a command that might fail due to OS error (e.g., command not found) | // Example of a command that might fail due to OS error (e.g., command not found) | ||||||
| // This *might* still halt depending on how the underlying Rust function handles it, | // This *might* still halt depending on how the underlying Rust function handles it, | ||||||
| // as ignore_error() primarily prevents halting on *command* non-zero exit codes. | // as ignore_error() primarily prevents halting on *command* non-zero exit codes. | ||||||
| // let os_error_result = run("nonexistent_command_123").ignore_error().do(); | // let os_error_result = run("nonexistent_command_123").ignore_error().execute(); | ||||||
| // print(`OS Error Command Success: ${os_error_result.success}`); | // print(`OS Error Command Success: ${os_error_result.success}`); | ||||||
| // print(`OS Error Command Exit Code: ${os_error_result.code}`); | // print(`OS Error Command Exit Code: ${os_error_result.code}`); | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| print("Running a command using run().log().do()..."); | print("Running a command using run().log().execute()..."); | ||||||
|  |  | ||||||
| // The .log() method will print the command string to the console before execution. | // The .log() method will print the command string to the console before execution. | ||||||
| // This is useful for debugging or tracing which commands are being run. | // This is useful for debugging or tracing which commands are being run. | ||||||
|   | |||||||
| @@ -1,8 +1,8 @@ | |||||||
| print("Running a command using run().silent().do()...\n"); | print("Running a command using run().silent().execute()...\n"); | ||||||
|  |  | ||||||
| // This command will print to standard output and standard error | // This command will print to standard output and standard error | ||||||
| // However, because .silent() is used, the output will not appear in the console directly | // However, because .silent() is used, the output will not appear in the console directly | ||||||
| let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().do(); | let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().execute(); | ||||||
|  |  | ||||||
| // The output is still captured in the CommandResult | // The output is still captured in the CommandResult | ||||||
| print(`Command finished.`); | print(`Command finished.`); | ||||||
| @@ -12,7 +12,7 @@ print(`Captured Stdout:\\n${result.stdout}`); | |||||||
| print(`Captured Stderr:\\n${result.stderr}`); | print(`Captured Stderr:\\n${result.stderr}`); | ||||||
|  |  | ||||||
| // Example of a silent command that fails (but won't halt because we only suppress output) | // Example of a silent command that fails (but won't halt because we only suppress output) | ||||||
| // let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().do(); | // let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().execute(); | ||||||
| // print(`Failed command finished (silent):`); | // print(`Failed command finished (silent):`); | ||||||
| // print(`Success: ${fail_result.success}`); | // print(`Success: ${fail_result.success}`); | ||||||
| // print(`Exit Code: ${fail_result.code}`); | // print(`Exit Code: ${fail_result.code}`); | ||||||
|   | |||||||
							
								
								
									
										43
									
								
								examples/rfsclient/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										43
									
								
								examples/rfsclient/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,43 @@ | |||||||
|  | # RFS Client Rhai Examples | ||||||
|  |  | ||||||
|  | This folder contains Rhai examples that use the SAL RFS client wrappers registered by `sal::rhai::register(&mut engine)` and executed by the `herodo` binary. | ||||||
|  |  | ||||||
|  | ## Quick start | ||||||
|  |  | ||||||
|  | Run the auth + upload + download example (uses hardcoded credentials and `/etc/hosts` as input): | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | cargo run -p herodo -- examples/rfsclient/auth_and_upload.rhai | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | By default, the script: | ||||||
|  |  | ||||||
|  | - Uses base URL `http://127.0.0.1:8080` | ||||||
|  | - Uses credentials `user` / `password` | ||||||
|  | - Uploads the file `/etc/hosts` | ||||||
|  | - Downloads to `/tmp/rfs_example_out.txt` | ||||||
|  |  | ||||||
|  | To customize, edit `examples/rfsclient/auth_and_upload.rhai` near the top and change `BASE_URL`, `USER`, `PASS`, and file paths. | ||||||
|  |  | ||||||
|  | ## What the example does | ||||||
|  |  | ||||||
|  | - Creates the RFS client: `rfs_create_client(BASE_URL, USER, PASS, TIMEOUT)` | ||||||
|  | - Health check: `rfs_health_check()` | ||||||
|  | - Authenticates: `rfs_authenticate()` | ||||||
|  | - Uploads a file: `rfs_upload_file(local_path, chunk_size, verify)` → returns file hash | ||||||
|  | - Downloads it back: `rfs_download_file(file_id_or_hash, dest_path, verify)` → returns unit (throws on error) | ||||||
|  |  | ||||||
|  | See `examples/rfsclient/auth_and_upload.rhai` for details. | ||||||
|  |  | ||||||
|  | ## Using the Rust client directly (optional) | ||||||
|  |  | ||||||
|  | If you want to use the Rust API (without Rhai), depend on `sal-rfs-client` and see: | ||||||
|  |  | ||||||
|  | - `packages/clients/rfsclient/src/client.rs` (`RfsClient`) | ||||||
|  | - `packages/clients/rfsclient/src/types.rs` (config and option types) | ||||||
|  | - `packages/clients/rfsclient/examples/` (example usage) | ||||||
|  |  | ||||||
|  | ## Troubleshooting | ||||||
|  |  | ||||||
|  | - Auth failures: verify the credentials and confirm the server actually accepts them. | ||||||
|  | - Connection errors: verify the base URL is reachable from your machine. | ||||||
							
								
								
									
										41
									
								
								examples/rfsclient/auth_and_upload.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										41
									
								
								examples/rfsclient/auth_and_upload.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,41 @@ | |||||||
// RFS Client example: authenticate, upload a file, download it back.
//
// Requirements:
// - An RFS server reachable at BASE_URL
// - Working credentials (USER / PASS)
// - Execution via herodo so the SAL Rhai modules are registered

// env_get is not available in this runtime, so settings are hardcoded;
// edit them here (or wire in your own env loader) as needed.
let BASE_URL = "http://127.0.0.1:8080";
let USER = "user";
let PASS = "password";
let TIMEOUT = 30; // seconds

if BASE_URL == "" { throw "Set BASE_URL in the script"; }

// Build the client; rfs_create_client reports failure via its return value.
let created = rfs_create_client(BASE_URL, USER, PASS, TIMEOUT);
if !created { throw "Failed to create RFS client"; }

// Optional connectivity probe.
let health = rfs_health_check();
print(`RFS health: ${health}`);

// Some server operations require an authenticated session.
let authenticated = rfs_authenticate();
if !authenticated { throw "Authentication failed"; }

// Upload an existing readable file so no os_write_file module is needed.
// Signature: rfs_upload_file(file_path, chunk_size, verify) -> file hash
let src_path = "/etc/hosts";
let hash = rfs_upload_file(src_path, 0, false);
print(`Uploaded file hash: ${hash}`);

// Fetch the same content back by hash.
// Signature: rfs_download_file(file_id, output_path, verify); throws on error.
let out_path = "/tmp/rfs_example_out.txt";
rfs_download_file(hash, out_path, false);

print(`Downloaded to: ${out_path}`);

true
							
								
								
									
										116
									
								
								examples/service_manager/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										116
									
								
								examples/service_manager/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,116 @@ | |||||||
|  | # Service Manager Examples | ||||||
|  |  | ||||||
|  | This directory contains examples demonstrating the SAL service manager functionality for dynamically launching and managing services across platforms. | ||||||
|  |  | ||||||
|  | ## Overview | ||||||
|  |  | ||||||
|  | The service manager provides a unified interface for managing system services: | ||||||
|  | - **macOS**: Uses `launchctl` for service management | ||||||
|  | - **Linux**: Uses `zinit` for service management (systemd also available as alternative) | ||||||
|  |  | ||||||
|  | ## Examples | ||||||
|  |  | ||||||
|  | ### 1. Circle Worker Manager (`circle_worker_manager.rhai`) | ||||||
|  |  | ||||||
|  | **Primary Use Case**: Demonstrates dynamic circle worker management for freezone residents. | ||||||
|  |  | ||||||
|  | This example shows: | ||||||
|  | - Creating service configurations for circle workers | ||||||
|  | - Complete service lifecycle management (start, stop, restart, remove) | ||||||
|  | - Status monitoring and log retrieval | ||||||
|  | - Error handling and cleanup | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Run the circle worker management example | ||||||
|  | herodo examples/service_manager/circle_worker_manager.rhai | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### 2. Basic Usage (`basic_usage.rhai`) | ||||||
|  |  | ||||||
|  | **Learning Example**: Simple demonstration of the core service manager API. | ||||||
|  |  | ||||||
|  | This example covers: | ||||||
|  | - Creating and configuring services | ||||||
|  | - Starting and stopping services | ||||||
|  | - Checking service status | ||||||
|  | - Listing managed services | ||||||
|  | - Retrieving service logs | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Run the basic usage example | ||||||
|  | herodo examples/service_manager/basic_usage.rhai | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ## Prerequisites | ||||||
|  |  | ||||||
|  | ### Linux (zinit) | ||||||
|  |  | ||||||
|  | Make sure zinit is installed and running: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Start zinit with default socket | ||||||
|  | zinit -s /tmp/zinit.sock init | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### macOS (launchctl) | ||||||
|  |  | ||||||
|  | No additional setup required - uses the built-in launchctl system. | ||||||
|  |  | ||||||
|  | ## Service Manager API | ||||||
|  |  | ||||||
|  | The service manager provides these key functions: | ||||||
|  |  | ||||||
|  | - `create_service_manager()` - Create platform-appropriate service manager | ||||||
|  | - `start(manager, config)` - Start a new service | ||||||
|  | - `stop(manager, service_name)` - Stop a running service | ||||||
|  | - `restart(manager, service_name)` - Restart a service | ||||||
|  | - `status(manager, service_name)` - Get service status | ||||||
|  | - `logs(manager, service_name, lines)` - Retrieve service logs | ||||||
|  | - `list(manager)` - List all managed services | ||||||
|  | - `remove(manager, service_name)` - Remove a service | ||||||
|  | - `exists(manager, service_name)` - Check if service exists | ||||||
|  | - `start_and_confirm(manager, config, timeout)` - Start with confirmation | ||||||
|  |  | ||||||
|  | ## Service Configuration | ||||||
|  |  | ||||||
|  | Services are configured using a map with these fields: | ||||||
|  |  | ||||||
|  | ```rhai | ||||||
|  | let config = #{ | ||||||
|  |     name: "my-service",                    // Service name | ||||||
|  |     binary_path: "/usr/bin/my-app",        // Executable path | ||||||
|  |     args: ["--config", "/etc/my-app.conf"], // Command arguments | ||||||
|  |     working_directory: "/var/lib/my-app",   // Working directory (optional) | ||||||
|  |     environment: #{                         // Environment variables | ||||||
|  |         "VAR1": "value1", | ||||||
|  |         "VAR2": "value2" | ||||||
|  |     }, | ||||||
|  |     auto_restart: true                      // Auto-restart on failure | ||||||
|  | }; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ## Real-World Usage | ||||||
|  |  | ||||||
|  | The circle worker example demonstrates the exact use case requested by the team: | ||||||
|  |  | ||||||
|  | > "We want to be able to launch circle workers dynamically. For instance when someone registers to the freezone, we need to be able to launch a circle worker for the new resident." | ||||||
|  |  | ||||||
|  | The service manager enables: | ||||||
|  | 1. **Dynamic service creation** - Create services on-demand for new residents | ||||||
|  | 2. **Cross-platform support** - Works on both macOS and Linux | ||||||
|  | 3. **Lifecycle management** - Full control over service lifecycle | ||||||
|  | 4. **Monitoring and logging** - Track service status and retrieve logs | ||||||
|  | 5. **Cleanup** - Proper service removal when no longer needed | ||||||
|  |  | ||||||
|  | ## Error Handling | ||||||
|  |  | ||||||
|  | All service manager functions can throw errors. Use try-catch blocks for robust error handling: | ||||||
|  |  | ||||||
|  | ```rhai | ||||||
|  | try { | ||||||
|  |     start(manager, config); | ||||||
|  |     print("✅ Service started successfully"); | ||||||
|  | } catch (error) { | ||||||
|  |     print(`❌ Failed to start service: ${error}`); | ||||||
|  | } | ||||||
|  | ``` | ||||||
							
								
								
									
										85
									
								
								examples/service_manager/basic_usage.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										85
									
								
								examples/service_manager/basic_usage.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,85 @@ | |||||||
// Basic Service Manager Usage Example
//
// Demonstrates the core service-manager API via Rhai. The same script
// runs on macOS (launchctl) and Linux (zinit, with systemd as fallback).
//
// Prerequisites:
//
// Linux: running zinit servers are discovered automatically, otherwise
//        systemd is used. To run zinit yourself:
//   zinit -s /tmp/zinit.sock init
//
//   A custom socket path can be supplied via:
//   export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
//
// macOS: nothing extra is needed (launchctl is built in).
//
// Usage:
//   herodo examples/service_manager/basic_usage.rhai

print("🚀 Basic Service Manager Usage Example");
print("======================================");

// Platform-appropriate backend (launchctl or zinit/systemd).
let manager = create_service_manager();

print("🍎 Using service manager for current platform");

// Minimal one-shot service definition.
let config = #{
    name: "example-service",
    binary_path: "/bin/echo",
    args: ["Hello from service manager!"],
    working_directory: "/tmp",
    environment: #{
        "EXAMPLE_VAR": "hello_world"
    },
    auto_restart: false
};

print("\n📝 Service Configuration:");
print(`   Name: ${config.name}`);
print(`   Binary: ${config.binary_path}`);
print(`   Args: ${config.args}`);

// Launch the service.
print("\n🚀 Starting service...");
start(manager, config);
print("✅ Service started successfully");

// Query its current state.
print("\n📊 Checking service status...");
let status = status(manager, "example-service");
print(`Status: ${status}`);

// Enumerate everything this manager controls.
print("\n📋 Listing all managed services...");
let services = list(manager);
print(`Found ${services.len()} services:`);
for service in services {
    print(`  - ${service}`);
}

// Fetch the last few log lines, if any exist yet.
print("\n📄 Getting service logs...");
let logs = logs(manager, "example-service", 5);
if logs.trim() != "" {
    print(`Logs:\n${logs}`);
} else {
    print("No logs available");
}

// Tear down: stop the service, then unregister it.
print("\n🛑 Stopping service...");
stop(manager, "example-service");
print("✅ Service stopped");

print("\n🗑️  Removing service...");
remove(manager, "example-service");
print("✅ Service removed");

print("\n🎉 Example completed successfully!");
							
								
								
									
										141
									
								
								examples/service_manager/circle_worker_manager.rhai
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										141
									
								
								examples/service_manager/circle_worker_manager.rhai
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,141 @@ | |||||||
// Circle Worker Manager Example
//
// Shows how the service manager can launch circle workers on demand —
// the flow used when a new freezone resident registers and needs a
// dedicated worker process.
//
// Usage:
//
// On macOS (uses launchctl):
//   herodo examples/service_manager/circle_worker_manager.rhai
//
// On Linux (uses zinit - requires zinit to be running):
//   First start zinit: zinit -s /tmp/zinit.sock init
//   herodo examples/service_manager/circle_worker_manager.rhai

print("🚀 Circle Worker Manager Example");
print("=================================");

// Pick the right backend for this platform.
let service_manager = create_service_manager();
print("✅ Created service manager for current platform");

// Pretend a resident just registered; derive a per-resident worker name.
let resident_id = "resident_12345";
let worker_name = `circle-worker-${resident_id}`;

print(`\n📝 New freezone resident registered: ${resident_id}`);
print(`🔧 Creating circle worker service: ${worker_name}`);

// Worker definition: a shell one-liner stands in for the real worker binary.
let config = #{
    name: worker_name,
    binary_path: "/bin/sh",
    args: [
        "-c",
        `echo 'Circle worker for ${resident_id} starting...'; sleep 30; echo 'Circle worker for ${resident_id} completed'`
    ],
    working_directory: "/tmp",
    environment: #{
        "RESIDENT_ID": resident_id,
        "WORKER_TYPE": "circle",
        "LOG_LEVEL": "info"
    },
    auto_restart: true
};

print("📋 Service configuration created:");
print(`   Name: ${config.name}`);
print(`   Binary: ${config.binary_path}`);
print(`   Args: ${config.args}`);
print(`   Auto-restart: ${config.auto_restart}`);

print(`\n🔄 Demonstrating service lifecycle for: ${worker_name}`);

// Step 1: start from a clean slate if a stale service is present.
print("\n1️⃣ Checking if service exists...");
if exists(service_manager, worker_name) {
    print("⚠️  Service already exists, removing it first...");
    remove(service_manager, worker_name);
    print("🗑️  Existing service removed");
} else {
    print("✅ Service doesn't exist, ready to create");
}

// Step 2: launch the worker.
print("\n2️⃣ Starting the circle worker service...");
start(service_manager, config);
print("✅ Service started successfully");

// Step 3: immediate status query.
print("\n3️⃣ Checking service status...");
let status = status(service_manager, worker_name);
print(`📊 Service status: ${status}`);

// Step 4: confirm the worker shows up among managed services.
print("\n4️⃣ Listing all managed services...");
let services = list(service_manager);
print(`📋 Managed services (${services.len()}):`);
for service in services {
    let marker = if service == worker_name { "👉" } else { "  " };
    print(`   ${marker} ${service}`);
}

// Step 5: re-check status after letting the worker run briefly.
print("\n5️⃣ Waiting 3 seconds and checking status again...");
sleep(3000); // milliseconds
let status = status(service_manager, worker_name);
print(`📊 Service status after 3s: ${status}`);

// Step 6: show up to five recent log lines, if any exist yet.
print("\n6️⃣ Retrieving service logs...");
let logs = logs(service_manager, worker_name, 10);
if logs.trim() == "" {
    print("📄 No logs available yet (this is normal for new services)");
} else {
    print("📄 Recent logs:");
    let log_lines = logs.split('\n');
    for i in 0..5 {
        if i >= log_lines.len() { break; }
        print(`   ${log_lines[i]}`);
    }
}

// Step 7: start_and_confirm waits until the service is confirmed up.
print("\n7️⃣ Testing start_and_confirm (should succeed quickly since already running)...");
start_and_confirm(service_manager, config, 5);
print("✅ Service confirmed running within timeout");

// Step 8: stop the worker.
print("\n8️⃣ Stopping the service...");
stop(service_manager, worker_name);
print("🛑 Service stopped");

// Step 9: status should now reflect the stopped state.
print("\n9️⃣ Checking status after stop...");
let status = status(service_manager, worker_name);
print(`📊 Service status after stop: ${status}`);

// Step 10: bring it back with restart.
print("\n🔟 Restarting the service...");
restart(service_manager, worker_name);
print("🔄 Service restarted successfully");

// Step 11: unregister the worker entirely.
print("\n🧹 Cleaning up - removing the service...");
remove(service_manager, worker_name);
print("🗑️  Service removed successfully");

// Step 12: double-check that it is gone.
print("\n✅ Verifying service removal...");
if !exists(service_manager, worker_name) {
    print("✅ Service successfully removed");
} else {
    print("⚠️  Service still exists after removal");
}

print("\n🎉 Circle worker management demonstration complete!");
							
								
								
									
										15
									
								
								examples_rust/ai/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								examples_rust/ai/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | |||||||
# Standalone example crate showing the codemonkey OpenRouter provider.
[package]
name = "openrouter_example"
version = "0.1.0"
edition = "2021"

# Empty [workspace] table keeps this crate out of any parent workspace.
[workspace]

# Build openrouter_example.rs (in this directory) as the binary.
[[bin]]
name = "openrouter_example"
path = "openrouter_example.rs"

[dependencies]
# Local AI-provider package; path is relative to this example directory.
codemonkey = { path = "../../packages/ai/codemonkey" }
openai-api-rs = "6.0.8"
# Async runtime required by the #[tokio::main] entry point.
tokio = { version = "1.0", features = ["full"] }
							
								
								
									
										47
									
								
								examples_rust/ai/openrouter_example.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								examples_rust/ai/openrouter_example.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,47 @@ | |||||||
|  | use codemonkey::{create_ai_provider, AIProviderType, CompletionRequestBuilder, Message, MessageRole, Content}; | ||||||
|  | use std::error::Error; | ||||||
|  |  | ||||||
|  | #[tokio::main] | ||||||
|  | async fn main() -> Result<(), Box<dyn Error>> { | ||||||
|  |  | ||||||
|  |     let (mut provider, provider_type) = create_ai_provider(AIProviderType::OpenRouter)?; | ||||||
|  |  | ||||||
|  |     let messages = vec![Message { | ||||||
|  |         role: MessageRole::user, | ||||||
|  |         content: Content::Text("Explain the concept of a factory design pattern in Rust.".to_string()), | ||||||
|  |         name: None, | ||||||
|  |         tool_calls: None, | ||||||
|  |         tool_call_id: None, | ||||||
|  |     }]; | ||||||
|  |  | ||||||
|  |     println!("Sending request to OpenRouter..."); | ||||||
|  |     let response = CompletionRequestBuilder::new( | ||||||
|  |         &mut *provider, | ||||||
|  |         "openai/gpt-oss-120b".to_string(), // Model name as specified by the user | ||||||
|  |         messages, | ||||||
|  |         provider_type, // Pass the provider_type | ||||||
|  |     ) | ||||||
|  |     .temperature(1.0) | ||||||
|  |     .max_tokens(8192) | ||||||
|  |     .top_p(1.0) | ||||||
|  |     .reasoning_effort("medium") | ||||||
|  |     .stream(false) | ||||||
|  |     .openrouter_options(|builder| { | ||||||
|  |         builder.provider( | ||||||
|  |             codemonkey::OpenRouterProviderOptionsBuilder::new() | ||||||
|  |                 .order(vec!["cerebras"]) | ||||||
|  |                 .build(), | ||||||
|  |         ) | ||||||
|  |     }) | ||||||
|  |     .completion() | ||||||
|  |     .await?; | ||||||
|  |  | ||||||
|  |     for choice in response.choices { | ||||||
|  |         if let Some(content) = choice.message.content { | ||||||
|  |             print!("{}", content); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     println!(); | ||||||
|  |  | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
							
								
								
									
										13
									
								
								examples_rust/ai/run.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										13
									
								
								examples_rust/ai/run.sh
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,13 @@ | |||||||
|  | #!/bin/bash | ||||||
|  | set -e | ||||||
|  |  | ||||||
|  | # Change to directory where this script is located | ||||||
|  | cd "$(dirname "${BASH_SOURCE[0]}")" | ||||||
|  |  | ||||||
|  | source ../../config/myenv.sh | ||||||
|  |  | ||||||
|  | # Build the example | ||||||
|  | cargo build | ||||||
|  |  | ||||||
|  | # Run the example | ||||||
|  | cargo run --bin openrouter_example | ||||||
| @@ -18,8 +18,8 @@ path = "src/main.rs" | |||||||
| env_logger = { workspace = true } | env_logger = { workspace = true } | ||||||
| rhai = { workspace = true } | rhai = { workspace = true } | ||||||
|  |  | ||||||
| # SAL library for Rhai module registration | # SAL library for Rhai module registration (with all features for herodo) | ||||||
| sal = { path = ".." } | sal = { path = "..", features = ["all"] } | ||||||
|  |  | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| tempfile = { workspace = true } | tempfile = { workspace = true } | ||||||
|   | |||||||
| @@ -15,14 +15,32 @@ Herodo is a command-line utility that executes Rhai scripts with full access to | |||||||
|  |  | ||||||
| ## Installation | ## Installation | ||||||
|  |  | ||||||
| Build the herodo binary: | ### Build and Install | ||||||
|  |  | ||||||
| ```bash | ```bash | ||||||
| cd herodo | git clone https://github.com/PlanetFirst/sal.git | ||||||
| cargo build --release | cd sal | ||||||
|  | ./build_herodo.sh | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
| The executable will be available at `target/release/herodo`. | This script will: | ||||||
|  | - Build herodo in debug mode | ||||||
|  | - Install it to `~/hero/bin/herodo` (non-root) or `/usr/local/bin/herodo` (root) | ||||||
|  | - Make it available in your PATH | ||||||
|  |  | ||||||
|  | **Note**: If using the non-root installation, make sure `~/hero/bin` is in your PATH: | ||||||
|  | ```bash | ||||||
|  | export PATH="$HOME/hero/bin:$PATH" | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### Install from crates.io (Coming Soon) | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # This will be available once herodo is published to crates.io | ||||||
|  | cargo install herodo | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | **Note**: `herodo` is not yet published to crates.io due to publishing rate limits. It will be available soon. | ||||||
|  |  | ||||||
| ## Usage | ## Usage | ||||||
|  |  | ||||||
|   | |||||||
| @@ -3,7 +3,7 @@ | |||||||
| //! This library loads the Rhai engine, registers all SAL modules, | //! This library loads the Rhai engine, registers all SAL modules, | ||||||
| //! and executes Rhai scripts from a specified directory in sorted order. | //! and executes Rhai scripts from a specified directory in sorted order. | ||||||
|  |  | ||||||
| use rhai::Engine; | use rhai::{Engine, Scope}; | ||||||
| use std::error::Error; | use std::error::Error; | ||||||
| use std::fs; | use std::fs; | ||||||
| use std::path::{Path, PathBuf}; | use std::path::{Path, PathBuf}; | ||||||
| @@ -30,6 +30,19 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> { | |||||||
|     // Create a new Rhai engine |     // Create a new Rhai engine | ||||||
|     let mut engine = Engine::new(); |     let mut engine = Engine::new(); | ||||||
|      |      | ||||||
|  |     // TODO: if we create a scope here we could clean up all the different functionsand types regsitered wit the engine | ||||||
|  |     // We should generalize the way we add things to the scope for each module sepeartely | ||||||
|  |     let mut scope = Scope::new(); | ||||||
|  |     // Conditionally add Hetzner client only when env config is present | ||||||
|  |     if let Ok(cfg) = sal::hetzner::config::Config::from_env() { | ||||||
|  |         let hetzner_client = sal::hetzner::api::Client::new(cfg); | ||||||
|  |         scope.push("hetzner", hetzner_client); | ||||||
|  |     } | ||||||
|  |     // This makes it easy to call e.g. `hetzner.get_server()` or `mycelium.get_connected_peers()` | ||||||
|  |     // --> without the need of manually created a client for each one first | ||||||
|  |     // --> could be conditionally compiled to only use those who we need (we only push the things to the scope that we actually need to run the script) | ||||||
|  |  | ||||||
|  |  | ||||||
|     // Register println function for output |     // Register println function for output | ||||||
|     engine.register_fn("println", |s: &str| println!("{}", s)); |     engine.register_fn("println", |s: &str| println!("{}", s)); | ||||||
|  |  | ||||||
| @@ -78,19 +91,20 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> { | |||||||
|         let script = fs::read_to_string(&script_file)?; |         let script = fs::read_to_string(&script_file)?; | ||||||
|  |  | ||||||
|         // Execute the script |         // Execute the script | ||||||
|         match engine.eval::<rhai::Dynamic>(&script) { |         // match engine.eval::<rhai::Dynamic>(&script) { | ||||||
|             Ok(result) => { |         //     Ok(result) => { | ||||||
|                 println!("Script executed successfully"); |         //         println!("Script executed successfully"); | ||||||
|                 if !result.is_unit() { |         //         if !result.is_unit() { | ||||||
|                     println!("Result: {}", result); |         //             println!("Result: {}", result); | ||||||
|                 } |         //         } | ||||||
|             } |         //     } | ||||||
|             Err(err) => { |         //     Err(err) => { | ||||||
|                 eprintln!("Error executing script: {}", err); |         //         eprintln!("Error executing script: {}", err); | ||||||
|                 // Exit with error code when a script fails |         //         // Exit with error code when a script fails | ||||||
|                 process::exit(1); |         //         process::exit(1); | ||||||
|             } |         //     } | ||||||
|         } |         // } | ||||||
|  |         engine.run_with_scope(&mut scope, &script)?; | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     println!("\nAll scripts executed successfully!"); |     println!("\nAll scripts executed successfully!"); | ||||||
|   | |||||||
							
								
								
									
										10
									
								
								packages/ai/codemonkey/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								packages/ai/codemonkey/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | |||||||
|  | [package] | ||||||
|  | name = "codemonkey" | ||||||
|  | version = "0.1.0" | ||||||
|  | edition = "2021" | ||||||
|  |  | ||||||
|  | [dependencies] | ||||||
|  | tokio = { version = "1", features = ["full"] } | ||||||
|  | async-trait = "0.1.80" | ||||||
|  | openrouter-rs = "0.4.5" | ||||||
|  | serde = { version = "1.0", features = ["derive"] } | ||||||
							
								
								
									
										216
									
								
								packages/ai/codemonkey/src/lib.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										216
									
								
								packages/ai/codemonkey/src/lib.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,216 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | use openrouter_rs::{OpenRouterClient, api::chat::{ChatCompletionRequest, Message}, types::completion::CompletionsResponse}; | ||||||
|  | use std::env; | ||||||
|  | use std::error::Error; | ||||||
|  |  | ||||||
|  | // Re-export MessageRole for easier use in client code | ||||||
|  | pub use openrouter_rs::types::Role as MessageRole;  | ||||||
|  |  | ||||||
|  | #[async_trait] | ||||||
|  | pub trait AIProvider { | ||||||
|  |     async fn completion( | ||||||
|  |         &mut self, | ||||||
|  |         request: CompletionRequest, | ||||||
|  |     ) -> Result<CompletionsResponse, Box<dyn Error>>; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct CompletionRequest { | ||||||
|  |     pub model: String, | ||||||
|  |     pub messages: Vec<Message>, | ||||||
|  |     pub temperature: Option<f64>, | ||||||
|  |     pub max_tokens: Option<i64>, | ||||||
|  |     pub top_p: Option<f64>, | ||||||
|  |     pub stream: Option<bool>, | ||||||
|  |     pub stop: Option<Vec<String>>, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct CompletionRequestBuilder<'a> { | ||||||
|  |     provider: &'a mut dyn AIProvider, | ||||||
|  |     model: String, | ||||||
|  |     messages: Vec<Message>, | ||||||
|  |     temperature: Option<f64>, | ||||||
|  |     max_tokens: Option<i64>, | ||||||
|  |     top_p: Option<f64>, | ||||||
|  |     stream: Option<bool>, | ||||||
|  |     stop: Option<Vec<String>>, | ||||||
|  |     provider_type: AIProviderType, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl<'a> CompletionRequestBuilder<'a> { | ||||||
|  |     pub fn new(provider: &'a mut dyn AIProvider, model: String, messages: Vec<Message>, provider_type: AIProviderType) -> Self { | ||||||
|  |         Self { | ||||||
|  |             provider, | ||||||
|  |             model, | ||||||
|  |             messages, | ||||||
|  |             temperature: None, | ||||||
|  |             max_tokens: None, | ||||||
|  |             top_p: None, | ||||||
|  |             stream: None, | ||||||
|  |             stop: None, | ||||||
|  |             provider_type, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn temperature(mut self, temperature: f64) -> Self { | ||||||
|  |         self.temperature = Some(temperature); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn max_tokens(mut self, max_tokens: i64) -> Self { | ||||||
|  |         self.max_tokens = Some(max_tokens); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn top_p(mut self, top_p: f64) -> Self { | ||||||
|  |         self.top_p = Some(top_p); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn stream(mut self, stream: bool) -> Self { | ||||||
|  |         self.stream = Some(stream); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn stop(mut self, stop: Vec<String>) -> Self { | ||||||
|  |         self.stop = Some(stop); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub async fn completion(self) -> Result<CompletionsResponse, Box<dyn Error>> { | ||||||
|  |         let request = CompletionRequest { | ||||||
|  |             model: self.model, | ||||||
|  |             messages: self.messages, | ||||||
|  |             temperature: self.temperature, | ||||||
|  |             max_tokens: self.max_tokens, | ||||||
|  |             top_p: self.top_p, | ||||||
|  |             stream: self.stream, | ||||||
|  |             stop: self.stop, | ||||||
|  |         }; | ||||||
|  |         self.provider.completion(request).await | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct GroqAIProvider { | ||||||
|  |     client: OpenRouterClient, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[async_trait] | ||||||
|  | impl AIProvider for GroqAIProvider { | ||||||
|  |     async fn completion( | ||||||
|  |         &mut self, | ||||||
|  |         request: CompletionRequest, | ||||||
|  |     ) -> Result<CompletionsResponse, Box<dyn Error>> { | ||||||
|  |         let chat_request = ChatCompletionRequest::builder() | ||||||
|  |             .model(request.model) | ||||||
|  |             .messages(request.messages) | ||||||
|  |             .temperature(request.temperature.unwrap_or(1.0)) | ||||||
|  |             .max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048)) | ||||||
|  |             .top_p(request.top_p.unwrap_or(1.0)) | ||||||
|  |             .build()?; | ||||||
|  |  | ||||||
|  |         let result = self.client.send_chat_completion(&chat_request).await?; | ||||||
|  |         Ok(result) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct OpenAIProvider { | ||||||
|  |     client: OpenRouterClient, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[async_trait] | ||||||
|  | impl AIProvider for OpenAIProvider { | ||||||
|  |     async fn completion( | ||||||
|  |         &mut self, | ||||||
|  |         request: CompletionRequest, | ||||||
|  |     ) -> Result<CompletionsResponse, Box<dyn Error>> { | ||||||
|  |         let chat_request = ChatCompletionRequest::builder() | ||||||
|  |             .model(request.model) | ||||||
|  |             .messages(request.messages) | ||||||
|  |             .temperature(request.temperature.unwrap_or(1.0)) | ||||||
|  |             .max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048)) | ||||||
|  |             .top_p(request.top_p.unwrap_or(1.0)) | ||||||
|  |             .build()?; | ||||||
|  |  | ||||||
|  |         let result = self.client.send_chat_completion(&chat_request).await?; | ||||||
|  |         Ok(result) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct OpenRouterAIProvider { | ||||||
|  |     client: OpenRouterClient, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[async_trait] | ||||||
|  | impl AIProvider for OpenRouterAIProvider { | ||||||
|  |     async fn completion( | ||||||
|  |         &mut self, | ||||||
|  |         request: CompletionRequest, | ||||||
|  |     ) -> Result<CompletionsResponse, Box<dyn Error>> { | ||||||
|  |         let chat_request = ChatCompletionRequest::builder() | ||||||
|  |             .model(request.model) | ||||||
|  |             .messages(request.messages) | ||||||
|  |             .temperature(request.temperature.unwrap_or(1.0)) | ||||||
|  |             .max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048)) | ||||||
|  |             .top_p(request.top_p.unwrap_or(1.0)) | ||||||
|  |             .build()?; | ||||||
|  |  | ||||||
|  |         let result = self.client.send_chat_completion(&chat_request).await?; | ||||||
|  |         Ok(result) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct CerebrasAIProvider { | ||||||
|  |     client: OpenRouterClient, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[async_trait] | ||||||
|  | impl AIProvider for CerebrasAIProvider { | ||||||
|  |     async fn completion( | ||||||
|  |         &mut self, | ||||||
|  |         request: CompletionRequest, | ||||||
|  |     ) -> Result<CompletionsResponse, Box<dyn Error>> { | ||||||
|  |         let chat_request = ChatCompletionRequest::builder() | ||||||
|  |             .model(request.model) | ||||||
|  |             .messages(request.messages) | ||||||
|  |             .temperature(request.temperature.unwrap_or(1.0)) | ||||||
|  |             .max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048)) | ||||||
|  |             .top_p(request.top_p.unwrap_or(1.0)) | ||||||
|  |             .build()?; | ||||||
|  |  | ||||||
|  |         let result = self.client.send_chat_completion(&chat_request).await?; | ||||||
|  |         Ok(result) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[derive(PartialEq)] | ||||||
|  | pub enum AIProviderType { | ||||||
|  |     Groq, | ||||||
|  |     OpenAI, | ||||||
|  |     OpenRouter, | ||||||
|  |     Cerebras, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub fn create_ai_provider(provider_type: AIProviderType) -> Result<(Box<dyn AIProvider>, AIProviderType), Box<dyn Error>> { | ||||||
|  |     match provider_type { | ||||||
|  |         AIProviderType::Groq => { | ||||||
|  |             let api_key = env::var("GROQ_API_KEY")?; | ||||||
|  |             let client = OpenRouterClient::builder().api_key(api_key).build()?; | ||||||
|  |             Ok((Box::new(GroqAIProvider { client }), AIProviderType::Groq)) | ||||||
|  |         } | ||||||
|  |         AIProviderType::OpenAI => { | ||||||
|  |             let api_key = env::var("OPENAI_API_KEY")?; | ||||||
|  |             let client = OpenRouterClient::builder().api_key(api_key).build()?; | ||||||
|  |             Ok((Box::new(OpenAIProvider { client }), AIProviderType::OpenAI)) | ||||||
|  |         } | ||||||
|  |         AIProviderType::OpenRouter => { | ||||||
|  |             let api_key = env::var("OPENROUTER_API_KEY")?; | ||||||
|  |             let client = OpenRouterClient::builder().api_key(api_key).build()?; | ||||||
|  |             Ok((Box::new(OpenRouterAIProvider { client }), AIProviderType::OpenRouter)) | ||||||
|  |         } | ||||||
|  |         AIProviderType::Cerebras => { | ||||||
|  |             let api_key = env::var("CEREBRAS_API_KEY")?; | ||||||
|  |             let client = OpenRouterClient::builder().api_key(api_key).build()?; | ||||||
|  |             Ok((Box::new(CerebrasAIProvider { client }), AIProviderType::Cerebras)) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										12
									
								
								packages/clients/hetznerclient/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								packages/clients/hetznerclient/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,12 @@ | |||||||
|  | [package] | ||||||
|  | name = "sal-hetzner" | ||||||
|  | version = "0.1.0" | ||||||
|  | edition = "2024" | ||||||
|  |  | ||||||
|  | [dependencies] | ||||||
|  | prettytable = "0.10.0" | ||||||
|  | reqwest.workspace = true | ||||||
|  | rhai = { workspace = true, features = ["serde"] } | ||||||
|  | serde = { workspace = true, features = ["derive"] } | ||||||
|  | serde_json.workspace = true | ||||||
|  | thiserror.workspace = true | ||||||
							
								
								
									
										54
									
								
								packages/clients/hetznerclient/src/api/error.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										54
									
								
								packages/clients/hetznerclient/src/api/error.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,54 @@ | |||||||
|  | use std::fmt; | ||||||
|  |  | ||||||
|  | use serde::Deserialize; | ||||||
|  | use thiserror::Error; | ||||||
|  |  | ||||||
|  | #[derive(Debug, Error)] | ||||||
|  | pub enum AppError { | ||||||
|  |     #[error("Request failed: {0}")] | ||||||
|  |     RequestError(#[from] reqwest::Error), | ||||||
|  |     #[error("API error: {0}")] | ||||||
|  |     ApiError(ApiError), | ||||||
|  |     #[error("Deserialization Error: {0:?}")] | ||||||
|  |     SerdeJsonError(#[from] serde_json::Error), | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[derive(Debug, Deserialize)] | ||||||
|  | pub struct ApiError { | ||||||
|  |     pub status: u16, | ||||||
|  |     pub message: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl From<reqwest::blocking::Response> for ApiError { | ||||||
|  |     fn from(value: reqwest::blocking::Response) -> Self { | ||||||
|  |         ApiError { | ||||||
|  |             status: value.status().into(), | ||||||
|  |             message: value.text().unwrap_or("The API call returned an error.".to_string()), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl fmt::Display for ApiError { | ||||||
|  |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||||
|  |         #[derive(Deserialize)] | ||||||
|  |         struct HetznerApiError { | ||||||
|  |             code: String, | ||||||
|  |             message: String, | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         #[derive(Deserialize)] | ||||||
|  |         struct HetznerApiErrorWrapper { | ||||||
|  |             error: HetznerApiError, | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         if let Ok(wrapper) = serde_json::from_str::<HetznerApiErrorWrapper>(&self.message) { | ||||||
|  |             write!( | ||||||
|  |                 f, | ||||||
|  |                 "Status: {}, Code: {}, Message: {}", | ||||||
|  |                 self.status, wrapper.error.code, wrapper.error.message | ||||||
|  |             ) | ||||||
|  |         } else { | ||||||
|  |             write!(f, "Status: {}: {}", self.status, self.message) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										513
									
								
								packages/clients/hetznerclient/src/api/mod.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										513
									
								
								packages/clients/hetznerclient/src/api/mod.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,513 @@ | |||||||
|  | pub mod error; | ||||||
|  | pub mod models; | ||||||
|  |  | ||||||
|  | use self::models::{ | ||||||
|  |     Boot, Rescue, Server, SshKey, ServerAddonProduct, ServerAddonProductWrapper, | ||||||
|  |     AuctionServerProduct, AuctionServerProductWrapper, AuctionTransaction, | ||||||
|  |     AuctionTransactionWrapper, BootWrapper, Cancellation, CancellationWrapper, | ||||||
|  |     OrderServerBuilder, OrderServerProduct, OrderServerProductWrapper, RescueWrapped, | ||||||
|  |     ServerWrapper, SshKeyWrapper, Transaction, TransactionWrapper, | ||||||
|  |     ServerAddonTransaction, ServerAddonTransactionWrapper, | ||||||
|  |     OrderServerAddonBuilder, | ||||||
|  | }; | ||||||
|  | use crate::api::error::ApiError; | ||||||
|  | use crate::config::Config; | ||||||
|  | use error::AppError; | ||||||
|  | use reqwest::blocking::Client as HttpClient; | ||||||
|  | use serde_json::json; | ||||||
|  |  | ||||||
|  | #[derive(Clone)] | ||||||
|  | pub struct Client { | ||||||
|  |     http_client: HttpClient, | ||||||
|  |     config: Config, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl Client { | ||||||
|  |     pub fn new(config: Config) -> Self { | ||||||
|  |         Self { | ||||||
|  |             http_client: HttpClient::new(), | ||||||
|  |             config, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     fn handle_response<T>(&self, response: reqwest::blocking::Response) -> Result<T, AppError> | ||||||
|  |     where | ||||||
|  |         T: serde::de::DeserializeOwned, | ||||||
|  |     { | ||||||
|  |         let status = response.status(); | ||||||
|  |         let body = response.text()?; | ||||||
|  |  | ||||||
|  |         if status.is_success() { | ||||||
|  |             serde_json::from_str::<T>(&body).map_err(Into::into) | ||||||
|  |         } else { | ||||||
|  |             Err(AppError::ApiError(ApiError { | ||||||
|  |                 status: status.as_u16(), | ||||||
|  |                 message: body, | ||||||
|  |             })) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server(&self, server_number: i32) -> Result<Server, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/server/{}", self.config.api_url, server_number)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: ServerWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.server) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_servers(&self) -> Result<Vec<Server>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/server", self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<ServerWrapper> = self.handle_response(response)?; | ||||||
|  |         let servers = wrapped.into_iter().map(|sw| sw.server).collect(); | ||||||
|  |         Ok(servers) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn update_server_name(&self, server_number: i32, name: &str) -> Result<Server, AppError> { | ||||||
|  |         let params = [("server_name", name)]; | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/server/{}", self.config.api_url, server_number)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: ServerWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.server) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_cancellation_data(&self, server_number: i32) -> Result<Cancellation, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/server/{}/cancellation", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: CancellationWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.cancellation) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn cancel_server( | ||||||
|  |         &self, | ||||||
|  |         server_number: i32, | ||||||
|  |         cancellation_date: &str, | ||||||
|  |     ) -> Result<Cancellation, AppError> { | ||||||
|  |         let params = [("cancellation_date", cancellation_date)]; | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!( | ||||||
|  |                 "{}/server/{}/cancellation", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: CancellationWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.cancellation) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn withdraw_cancellation(&self, server_number: i32) -> Result<(), AppError> { | ||||||
|  |         self.http_client | ||||||
|  |             .delete(format!( | ||||||
|  |                 "{}/server/{}/cancellation", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_ssh_keys(&self) -> Result<Vec<SshKey>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/key", self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<SshKeyWrapper> = self.handle_response(response)?; | ||||||
|  |         let keys = wrapped.into_iter().map(|sk| sk.key).collect(); | ||||||
|  |         Ok(keys) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_ssh_key(&self, fingerprint: &str) -> Result<SshKey, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/key/{}", self.config.api_url, fingerprint)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: SshKeyWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.key) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn add_ssh_key(&self, name: &str, data: &str) -> Result<SshKey, AppError> { | ||||||
|  |         let params = [("name", name), ("data", data)]; | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/key", self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: SshKeyWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.key) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn update_ssh_key_name(&self, fingerprint: &str, name: &str) -> Result<SshKey, AppError> { | ||||||
|  |         let params = [("name", name)]; | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/key/{}", self.config.api_url, fingerprint)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: SshKeyWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.key) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn delete_ssh_key(&self, fingerprint: &str) -> Result<(), AppError> { | ||||||
|  |         self.http_client | ||||||
|  |             .delete(format!("{}/key/{}", self.config.api_url, fingerprint)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  |     pub fn get_boot_configuration(&self, server_number: i32) -> Result<Boot, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/boot/{}", self.config.api_url, server_number)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: BootWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.boot) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_rescue_boot_configuration(&self, server_number: i32) -> Result<Rescue, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/boot/{}/rescue", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: RescueWrapped = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.rescue) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn enable_rescue_mode( | ||||||
|  |         &self, | ||||||
|  |         server_number: i32, | ||||||
|  |         os: &str, | ||||||
|  |         authorized_keys: Option<&[String]>, | ||||||
|  |     ) -> Result<Rescue, AppError> { | ||||||
|  |         let mut params = vec![("os", os)]; | ||||||
|  |         if let Some(keys) = authorized_keys { | ||||||
|  |             for key in keys { | ||||||
|  |                 params.push(("authorized_key[]", key)); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!( | ||||||
|  |                 "{}/boot/{}/rescue", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: RescueWrapped = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.rescue) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn disable_rescue_mode(&self, server_number: i32) -> Result<Rescue, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .delete(format!( | ||||||
|  |                 "{}/boot/{}/rescue", | ||||||
|  |                 self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: RescueWrapped = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.rescue) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server_products( | ||||||
|  |         &self, | ||||||
|  |     ) -> Result<Vec<OrderServerProduct>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server/product", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<OrderServerProductWrapper> = self.handle_response(response)?; | ||||||
|  |         let products = wrapped.into_iter().map(|sop| sop.product).collect(); | ||||||
|  |         Ok(products) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server_product_by_id( | ||||||
|  |         &self, | ||||||
|  |         product_id: &str, | ||||||
|  |     ) -> Result<OrderServerProduct, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/order/server/product/{}", | ||||||
|  |                 &self.config.api_url, product_id | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: OrderServerProductWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.product) | ||||||
|  |     } | ||||||
|  |     pub fn order_server(&self, order: OrderServerBuilder) -> Result<Transaction, AppError> { | ||||||
|  |         let mut params = json!({ | ||||||
|  |             "product_id": order.product_id, | ||||||
|  |             "dist": order.dist, | ||||||
|  |             "location": order.location, | ||||||
|  |             "authorized_key": order.authorized_keys.unwrap_or_default(), | ||||||
|  |         }); | ||||||
|  |  | ||||||
|  |         if let Some(addons) = order.addons { | ||||||
|  |             params["addon"] = json!(addons); | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         if let Some(test) = order.test { | ||||||
|  |             if test { | ||||||
|  |                 params["test"] = json!(test); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/order/server/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .json(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: TransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_transaction_by_id(&self, transaction_id: &str) -> Result<Transaction, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/order/server/transaction/{}", | ||||||
|  |                 &self.config.api_url, transaction_id | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: TransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  |     pub fn get_transactions(&self) -> Result<Vec<Transaction>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<TransactionWrapper> = self.handle_response(response)?; | ||||||
|  |         let transactions = wrapped.into_iter().map(|t| t.transaction).collect(); | ||||||
|  |         Ok(transactions) | ||||||
|  |     } | ||||||
|  |     pub fn get_auction_server_products(&self) -> Result<Vec<AuctionServerProduct>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/order/server_market/product", | ||||||
|  |                 &self.config.api_url | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<AuctionServerProductWrapper> = self.handle_response(response)?; | ||||||
|  |         let products = wrapped.into_iter().map(|asp| asp.product).collect(); | ||||||
|  |         Ok(products) | ||||||
|  |     } | ||||||
|  |     pub fn get_auction_server_product_by_id(&self, product_id: &str) -> Result<AuctionServerProduct, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server_market/product/{}", &self.config.api_url, product_id)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: AuctionServerProductWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.product) | ||||||
|  |     } | ||||||
|  |     pub fn get_auction_transactions(&self) -> Result<Vec<AuctionTransaction>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server_market/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<AuctionTransactionWrapper> = self.handle_response(response)?; | ||||||
|  |         let transactions = wrapped.into_iter().map(|t| t.transaction).collect(); | ||||||
|  |         Ok(transactions) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_auction_transaction_by_id(&self, transaction_id: &str) -> Result<AuctionTransaction, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server_market/transaction/{}", &self.config.api_url, transaction_id)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: AuctionTransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server_addon_products( | ||||||
|  |         &self, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Vec<ServerAddonProduct>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/order/server_addon/{}/product", | ||||||
|  |                 &self.config.api_url, server_number | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<ServerAddonProductWrapper> = self.handle_response(response)?; | ||||||
|  |         let products = wrapped.into_iter().map(|sap| sap.product).collect(); | ||||||
|  |         Ok(products) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn order_auction_server( | ||||||
|  |         &self, | ||||||
|  |         product_id: i64, | ||||||
|  |         authorized_keys: Vec<String>, | ||||||
|  |         dist: Option<String>, | ||||||
|  |         arch: Option<String>, | ||||||
|  |         lang: Option<String>, | ||||||
|  |         comment: Option<String>, | ||||||
|  |         addons: Option<Vec<String>>, | ||||||
|  |         test: Option<bool>, | ||||||
|  |     ) -> Result<AuctionTransaction, AppError> { | ||||||
|  |         let mut params: Vec<(&str, String)> = Vec::new(); | ||||||
|  |  | ||||||
|  |         params.push(("product_id", product_id.to_string())); | ||||||
|  |  | ||||||
|  |         for key in &authorized_keys { | ||||||
|  |             params.push(("authorized_key[]", key.clone())); | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         if let Some(dist) = dist { | ||||||
|  |             params.push(("dist", dist)); | ||||||
|  |         } | ||||||
|  |         if let Some(arch) = arch { | ||||||
|  |             params.push(("@deprecated arch", arch)); | ||||||
|  |         } | ||||||
|  |         if let Some(lang) = lang { | ||||||
|  |             params.push(("lang", lang)); | ||||||
|  |         } | ||||||
|  |         if let Some(comment) = comment { | ||||||
|  |             params.push(("comment", comment)); | ||||||
|  |         } | ||||||
|  |         if let Some(addons) = addons { | ||||||
|  |             for addon in addons { | ||||||
|  |                 params.push(("addon[]", addon)); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         if let Some(test) = test { | ||||||
|  |             params.push(("test", test.to_string())); | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/order/server_market/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: AuctionTransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server_addon_transactions(&self) -> Result<Vec<ServerAddonTransaction>, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!("{}/order/server_addon/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: Vec<ServerAddonTransactionWrapper> = self.handle_response(response)?; | ||||||
|  |         let transactions = wrapped.into_iter().map(|satw| satw.transaction).collect(); | ||||||
|  |         Ok(transactions) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn get_server_addon_transaction_by_id( | ||||||
|  |         &self, | ||||||
|  |         transaction_id: &str, | ||||||
|  |     ) -> Result<ServerAddonTransaction, AppError> { | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .get(format!( | ||||||
|  |                 "{}/order/server_addon/transaction/{}", | ||||||
|  |                 &self.config.api_url, transaction_id | ||||||
|  |             )) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn order_server_addon( | ||||||
|  |         &self, | ||||||
|  |         order: OrderServerAddonBuilder, | ||||||
|  |     ) -> Result<ServerAddonTransaction, AppError> { | ||||||
|  |         let mut params = json!({ | ||||||
|  |             "server_number": order.server_number, | ||||||
|  |             "product_id": order.product_id, | ||||||
|  |         }); | ||||||
|  |  | ||||||
|  |         if let Some(reason) = order.reason { | ||||||
|  |             params["reason"] = json!(reason); | ||||||
|  |         } | ||||||
|  |         if let Some(gateway) = order.gateway { | ||||||
|  |             params["gateway"] = json!(gateway); | ||||||
|  |         } | ||||||
|  |         if let Some(test) = order.test { | ||||||
|  |             if test { | ||||||
|  |                 params["test"] = json!(test); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         let response = self | ||||||
|  |             .http_client | ||||||
|  |             .post(format!("{}/order/server_addon/transaction", &self.config.api_url)) | ||||||
|  |             .basic_auth(&self.config.username, Some(&self.config.password)) | ||||||
|  |             .form(¶ms) | ||||||
|  |             .send()?; | ||||||
|  |  | ||||||
|  |         let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?; | ||||||
|  |         Ok(wrapped.transaction) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										1894
									
								
								packages/clients/hetznerclient/src/api/models.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1894
									
								
								packages/clients/hetznerclient/src/api/models.rs
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										25
									
								
								packages/clients/hetznerclient/src/config.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								packages/clients/hetznerclient/src/config.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | |||||||
|  | use std::env; | ||||||
|  |  | ||||||
|  | #[derive(Clone)] | ||||||
|  | pub struct Config { | ||||||
|  |     pub username: String, | ||||||
|  |     pub password: String, | ||||||
|  |     pub api_url: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl Config { | ||||||
|  |     pub fn from_env() -> Result<Self, String> { | ||||||
|  |         let username = env::var("HETZNER_USERNAME") | ||||||
|  |             .map_err(|_| "HETZNER_USERNAME environment variable not set".to_string())?; | ||||||
|  |         let password = env::var("HETZNER_PASSWORD") | ||||||
|  |             .map_err(|_| "HETZNER_PASSWORD environment variable not set".to_string())?; | ||||||
|  |         let api_url = env::var("HETZNER_API_URL") | ||||||
|  |             .unwrap_or_else(|_| "https://robot-ws.your-server.de".to_string()); | ||||||
|  |  | ||||||
|  |         Ok(Config { | ||||||
|  |             username, | ||||||
|  |             password, | ||||||
|  |             api_url, | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										3
									
								
								packages/clients/hetznerclient/src/lib.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								packages/clients/hetznerclient/src/lib.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | |||||||
/// HTTP client and data models for the Hetzner Robot webservice.
pub mod api;
/// Environment-driven configuration (credentials and API endpoint).
pub mod config;
/// Rhai scripting bindings for the API client.
pub mod rhai;
							
								
								
									
										63
									
								
								packages/clients/hetznerclient/src/rhai/boot.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										63
									
								
								packages/clients/hetznerclient/src/rhai/boot.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,63 @@ | |||||||
|  | use crate::api::{ | ||||||
|  |     models::{Boot, Rescue}, | ||||||
|  |     Client, | ||||||
|  | }; | ||||||
|  | use rhai::{plugin::*, Engine}; | ||||||
|  |  | ||||||
|  | pub fn register(engine: &mut Engine) { | ||||||
|  |     let boot_module = exported_module!(boot_api); | ||||||
|  |     engine.register_global_module(boot_module.into()); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[export_module] | ||||||
|  | pub mod boot_api { | ||||||
|  |     use super::*; | ||||||
|  |     use rhai::EvalAltResult; | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_boot_configuration", return_raw)] | ||||||
|  |     pub fn get_boot_configuration( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Boot, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .get_boot_configuration(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_rescue_boot_configuration", return_raw)] | ||||||
|  |     pub fn get_rescue_boot_configuration( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Rescue, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .get_rescue_boot_configuration(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "enable_rescue_mode", return_raw)] | ||||||
|  |     pub fn enable_rescue_mode( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |         os: &str, | ||||||
|  |         authorized_keys: rhai::Array, | ||||||
|  |     ) -> Result<Rescue, Box<EvalAltResult>> { | ||||||
|  |         let keys: Vec<String> = authorized_keys | ||||||
|  |             .into_iter() | ||||||
|  |             .map(|k| k.into_string().unwrap()) | ||||||
|  |             .collect(); | ||||||
|  |  | ||||||
|  |         client | ||||||
|  |             .enable_rescue_mode(server_number as i32, os, Some(&keys)) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "disable_rescue_mode", return_raw)] | ||||||
|  |     pub fn disable_rescue_mode( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Rescue, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .disable_rescue_mode(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										54
									
								
								packages/clients/hetznerclient/src/rhai/mod.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										54
									
								
								packages/clients/hetznerclient/src/rhai/mod.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,54 @@ | |||||||
|  | use rhai::{Engine, EvalAltResult}; | ||||||
|  |  | ||||||
|  | use crate::api::models::{ | ||||||
|  |     AuctionServerProduct, AuctionTransaction, AuctionTransactionProduct, AuthorizedKey, Boot, | ||||||
|  |     Cancellation, Cpanel, HostKey, Linux, OrderAuctionServerBuilder, OrderServerAddonBuilder, | ||||||
|  |     OrderServerBuilder, OrderServerProduct, Plesk, Rescue, Server, ServerAddonProduct, | ||||||
|  |     ServerAddonResource, ServerAddonTransaction, SshKey, Transaction, TransactionProduct, Vnc, | ||||||
|  |     Windows, | ||||||
|  | }; | ||||||
|  |  | ||||||
/// Boot / rescue-mode script functions.
pub mod boot;
/// Pretty-printing of API model arrays for script output.
pub mod printing;
/// Server management script functions.
pub mod server;
/// Server and add-on ordering script functions.
pub mod server_ordering;
/// SSH key management script functions.
pub mod ssh_keys;
|  |  | ||||||
|  | // here just register the hetzner module | ||||||
|  | pub fn register_hetzner_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> { | ||||||
|  |     // TODO:register types | ||||||
|  |     engine.build_type::<Server>(); | ||||||
|  |     engine.build_type::<SshKey>(); | ||||||
|  |     engine.build_type::<Boot>(); | ||||||
|  |     engine.build_type::<Rescue>(); | ||||||
|  |     engine.build_type::<Linux>(); | ||||||
|  |     engine.build_type::<Vnc>(); | ||||||
|  |     engine.build_type::<Windows>(); | ||||||
|  |     engine.build_type::<Plesk>(); | ||||||
|  |     engine.build_type::<Cpanel>(); | ||||||
|  |     engine.build_type::<Cancellation>(); | ||||||
|  |     engine.build_type::<OrderServerProduct>(); | ||||||
|  |     engine.build_type::<Transaction>(); | ||||||
|  |     engine.build_type::<AuthorizedKey>(); | ||||||
|  |     engine.build_type::<TransactionProduct>(); | ||||||
|  |     engine.build_type::<HostKey>(); | ||||||
|  |     engine.build_type::<AuctionServerProduct>(); | ||||||
|  |     engine.build_type::<AuctionTransaction>(); | ||||||
|  |     engine.build_type::<AuctionTransactionProduct>(); | ||||||
|  |     engine.build_type::<OrderAuctionServerBuilder>(); | ||||||
|  |     engine.build_type::<OrderServerBuilder>(); | ||||||
|  |     engine.build_type::<ServerAddonProduct>(); | ||||||
|  |     engine.build_type::<ServerAddonTransaction>(); | ||||||
|  |     engine.build_type::<ServerAddonResource>(); | ||||||
|  |     engine.build_type::<OrderServerAddonBuilder>(); | ||||||
|  |  | ||||||
|  |     server::register(engine); | ||||||
|  |     ssh_keys::register(engine); | ||||||
|  |     boot::register(engine); | ||||||
|  |     server_ordering::register(engine); | ||||||
|  |  | ||||||
|  |     // TODO: push hetzner to scope as value client: | ||||||
|  |     // scope.push("hetzner", client); | ||||||
|  |  | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
							
								
								
									
										43
									
								
								packages/clients/hetznerclient/src/rhai/printing/mod.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										43
									
								
								packages/clients/hetznerclient/src/rhai/printing/mod.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,43 @@ | |||||||
|  | use rhai::{Array, Engine}; | ||||||
|  | use crate::{api::models::{OrderServerProduct, AuctionServerProduct, AuctionTransaction, ServerAddonProduct, ServerAddonTransaction, Server, SshKey}}; | ||||||
|  |  | ||||||
|  | mod servers_table; | ||||||
|  | mod ssh_keys_table; | ||||||
|  | mod server_ordering_table; | ||||||
|  |  | ||||||
|  | // This will be called when we print(...) or pretty_print() an Array (with Dynamic values) | ||||||
|  | pub fn pretty_print_dispatch(array: Array) { | ||||||
|  |     if array.is_empty() { | ||||||
|  |         println!("<empty table>"); | ||||||
|  |         return; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     let first = &array[0]; | ||||||
|  |  | ||||||
|  |     if first.is::<Server>() { | ||||||
|  |         println!("Yeah first is server!"); | ||||||
|  |         servers_table::pretty_print_servers(array); | ||||||
|  |     } else if first.is::<SshKey>() { | ||||||
|  |         ssh_keys_table::pretty_print_ssh_keys(array); | ||||||
|  |     } | ||||||
|  |     else if first.is::<OrderServerProduct>() { | ||||||
|  |         server_ordering_table::pretty_print_server_products(array); | ||||||
|  |     } else if first.is::<AuctionServerProduct>() { | ||||||
|  |         server_ordering_table::pretty_print_auction_server_products(array); | ||||||
|  |     } else if first.is::<AuctionTransaction>() { | ||||||
|  |         server_ordering_table::pretty_print_auction_transactions(array); | ||||||
|  |     } else if first.is::<ServerAddonProduct>() { | ||||||
|  |         server_ordering_table::pretty_print_server_addon_products(array); | ||||||
|  |     } else if first.is::<ServerAddonTransaction>() { | ||||||
|  |         server_ordering_table::pretty_print_server_addon_transactions(array); | ||||||
|  |     } else { | ||||||
|  |         // Generic fallback for other types | ||||||
|  |         for item in array { | ||||||
|  |             println!("{}", item.to_string()); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
/// Registers `pretty_print` so scripts can render arrays of API models.
pub fn register(engine: &mut Engine) {
    engine.register_fn("pretty_print", pretty_print_dispatch);
}
| @@ -0,0 +1,293 @@ | |||||||
|  | use prettytable::{row, Table}; | ||||||
|  | use crate::api::models::{OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, ServerAddonResource}; | ||||||
|  |  | ||||||
|  | pub fn pretty_print_server_products(products: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "ID", | ||||||
|  |         "Name", | ||||||
|  |         "Description", | ||||||
|  |         "Traffic", | ||||||
|  |         "Location", | ||||||
|  |         "Price (Net)", | ||||||
|  |         "Price (Gross)", | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for product_dyn in products { | ||||||
|  |         if let Some(product) = product_dyn.try_cast::<OrderServerProduct>() { | ||||||
|  |             let mut price_net = "N/A".to_string(); | ||||||
|  |             let mut price_gross = "N/A".to_string(); | ||||||
|  |  | ||||||
|  |             if let Some(first_price) = product.prices.first() { | ||||||
|  |                 price_net = first_price.price.net.clone(); | ||||||
|  |                 price_gross = first_price.price.gross.clone(); | ||||||
|  |             } | ||||||
|  |  | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 product.id, | ||||||
|  |                 product.name, | ||||||
|  |                 product.description.join(", "), | ||||||
|  |                 product.traffic, | ||||||
|  |                 product.location.join(", "), | ||||||
|  |                 price_net, | ||||||
|  |                 price_gross, | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub fn pretty_print_auction_server_products(products: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "ID", | ||||||
|  |         "Name", | ||||||
|  |         "Description", | ||||||
|  |         "Traffic", | ||||||
|  |         "Distributions", | ||||||
|  |         "Architectures", | ||||||
|  |         "Languages", | ||||||
|  |         "CPU", | ||||||
|  |         "CPU Benchmark", | ||||||
|  |         "Memory Size (GB)", | ||||||
|  |         "HDD Size (GB)", | ||||||
|  |         "HDD Text", | ||||||
|  |         "HDD Count", | ||||||
|  |         "Datacenter", | ||||||
|  |         "Network Speed", | ||||||
|  |         "Price (Net)", | ||||||
|  |         "Price (Hourly Net)", | ||||||
|  |         "Price (Setup Net)", | ||||||
|  |         "Price (VAT)", | ||||||
|  |         "Price (Hourly VAT)", | ||||||
|  |         "Price (Setup VAT)", | ||||||
|  |         "Fixed Price", | ||||||
|  |         "Next Reduce (seconds)", | ||||||
|  |         "Next Reduce Date", | ||||||
|  |         "Orderable Addons", | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for product_dyn in products { | ||||||
|  |         if let Some(product) = product_dyn.try_cast::<crate::api::models::AuctionServerProduct>() { | ||||||
|  |             let mut addons_table = Table::new(); | ||||||
|  |             addons_table.add_row(row![b => "ID", "Name", "Min", "Max", "Prices"]); | ||||||
|  |             for addon in &product.orderable_addons { | ||||||
|  |                 let mut addon_prices_table = Table::new(); | ||||||
|  |                 addon_prices_table.add_row(row![b => "Location", "Net", "Gross", "Hourly Net", "Hourly Gross", "Setup Net", "Setup Gross"]); | ||||||
|  |                 for price in &addon.prices { | ||||||
|  |                     addon_prices_table.add_row(row![ | ||||||
|  |                         price.location, | ||||||
|  |                         price.price.net, | ||||||
|  |                         price.price.gross, | ||||||
|  |                         price.price.hourly_net, | ||||||
|  |                         price.price.hourly_gross, | ||||||
|  |                         price.price_setup.net, | ||||||
|  |                         price.price_setup.gross | ||||||
|  |                     ]); | ||||||
|  |                 } | ||||||
|  |                 addons_table.add_row(row![ | ||||||
|  |                     addon.id, | ||||||
|  |                     addon.name, | ||||||
|  |                     addon.min, | ||||||
|  |                     addon.max, | ||||||
|  |                     addon_prices_table | ||||||
|  |                 ]); | ||||||
|  |             } | ||||||
|  |  | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 product.id, | ||||||
|  |                 product.name, | ||||||
|  |                 product.description.join(", "), | ||||||
|  |                 product.traffic, | ||||||
|  |                 product.dist.join(", "), | ||||||
|  |                 product.dist.join(", "), | ||||||
|  |                 product.lang.join(", "), | ||||||
|  |                 product.cpu, | ||||||
|  |                 product.cpu_benchmark, | ||||||
|  |                 product.memory_size, | ||||||
|  |                 product.hdd_size, | ||||||
|  |                 product.hdd_text, | ||||||
|  |                 product.hdd_count, | ||||||
|  |                 product.datacenter, | ||||||
|  |                 product.network_speed, | ||||||
|  |                 product.price, | ||||||
|  |                 product.price_hourly.as_deref().unwrap_or("N/A"), | ||||||
|  |                 product.price_setup, | ||||||
|  |                 product.price_with_vat, | ||||||
|  |                 product.price_hourly_with_vat.as_deref().unwrap_or("N/A"), | ||||||
|  |                 product.price_setup_with_vat, | ||||||
|  |                 product.fixed_price, | ||||||
|  |                 product.next_reduce, | ||||||
|  |                 product.next_reduce_date, | ||||||
|  |                 addons_table, | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub fn pretty_print_server_addon_products(products: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "ID", | ||||||
|  |         "Name", | ||||||
|  |         "Type", | ||||||
|  |         "Location", | ||||||
|  |         "Price (Net)", | ||||||
|  |         "Price (Gross)", | ||||||
|  |         "Hourly Net", | ||||||
|  |         "Hourly Gross", | ||||||
|  |         "Setup Net", | ||||||
|  |         "Setup Gross", | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for product_dyn in products { | ||||||
|  |         if let Some(product) = product_dyn.try_cast::<ServerAddonProduct>() { | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 product.id, | ||||||
|  |                 product.name, | ||||||
|  |                 product.product_type, | ||||||
|  |                 product.price.location, | ||||||
|  |                 product.price.price.net, | ||||||
|  |                 product.price.price.gross, | ||||||
|  |                 product.price.price.hourly_net, | ||||||
|  |                 product.price.price.hourly_gross, | ||||||
|  |                 product.price.price_setup.net, | ||||||
|  |                 product.price.price_setup.gross, | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub fn pretty_print_auction_transactions(transactions: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "ID", | ||||||
|  |         "Date", | ||||||
|  |         "Status", | ||||||
|  |         "Server Number", | ||||||
|  |         "Server IP", | ||||||
|  |         "Comment", | ||||||
|  |         "Product ID", | ||||||
|  |         "Product Name", | ||||||
|  |         "Product Traffic", | ||||||
|  |         "Product Distributions", | ||||||
|  |         "Product Architectures", | ||||||
|  |         "Product Languages", | ||||||
|  |         "Product CPU", | ||||||
|  |         "Product CPU Benchmark", | ||||||
|  |         "Product Memory Size (GB)", | ||||||
|  |         "Product HDD Size (GB)", | ||||||
|  |         "Product HDD Text", | ||||||
|  |         "Product HDD Count", | ||||||
|  |         "Product Datacenter", | ||||||
|  |         "Product Network Speed", | ||||||
|  |         "Product Fixed Price", | ||||||
|  |         "Product Next Reduce (seconds)", | ||||||
|  |         "Product Next Reduce Date", | ||||||
|  |         "Addons", | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for transaction_dyn in transactions { | ||||||
|  |         if let Some(transaction) = transaction_dyn.try_cast::<crate::api::models::AuctionTransaction>() { | ||||||
|  |             let _authorized_keys_table = { | ||||||
|  |                 let mut table = Table::new(); | ||||||
|  |                 table.add_row(row![b => "Name", "Fingerprint", "Type", "Size"]); | ||||||
|  |                 for key in &transaction.authorized_key { | ||||||
|  |                     table.add_row(row![ | ||||||
|  |                         key.key.name.as_deref().unwrap_or("N/A"), | ||||||
|  |                         key.key.fingerprint.as_deref().unwrap_or("N/A"), | ||||||
|  |                         key.key.key_type.as_deref().unwrap_or("N/A"), | ||||||
|  |                         key.key.size.map_or("N/A".to_string(), |s| s.to_string()) | ||||||
|  |                     ]); | ||||||
|  |                 } | ||||||
|  |                 table | ||||||
|  |             }; | ||||||
|  |  | ||||||
|  |             let _host_keys_table = { | ||||||
|  |                 let mut table = Table::new(); | ||||||
|  |                 table.add_row(row![b => "Fingerprint", "Type", "Size"]); | ||||||
|  |                 for key in &transaction.host_key { | ||||||
|  |                     table.add_row(row![ | ||||||
|  |                         key.key.fingerprint.as_deref().unwrap_or("N/A"), | ||||||
|  |                         key.key.key_type.as_deref().unwrap_or("N/A"), | ||||||
|  |                         key.key.size.map_or("N/A".to_string(), |s| s.to_string()) | ||||||
|  |                     ]); | ||||||
|  |                 } | ||||||
|  |                 table | ||||||
|  |             }; | ||||||
|  |  | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 transaction.id, | ||||||
|  |                 transaction.date, | ||||||
|  |                 transaction.status, | ||||||
|  |                 transaction.server_number.map_or("N/A".to_string(), |id| id.to_string()), | ||||||
|  |                 transaction.server_ip.as_deref().unwrap_or("N/A"), | ||||||
|  |                 transaction.comment.as_deref().unwrap_or("N/A"), | ||||||
|  |                 transaction.product.id, | ||||||
|  |                 transaction.product.name, | ||||||
|  |                 transaction.product.traffic, | ||||||
|  |                 transaction.product.dist, | ||||||
|  |                 transaction.product.arch.as_deref().unwrap_or("N/A"), | ||||||
|  |                 transaction.product.lang, | ||||||
|  |                 transaction.product.cpu, | ||||||
|  |                 transaction.product.cpu_benchmark, | ||||||
|  |                 transaction.product.memory_size, | ||||||
|  |                 transaction.product.hdd_size, | ||||||
|  |                 transaction.product.hdd_text, | ||||||
|  |                 transaction.product.hdd_count, | ||||||
|  |                 transaction.product.datacenter, | ||||||
|  |                 transaction.product.network_speed, | ||||||
|  |                 transaction.product.fixed_price.unwrap_or_default().to_string(), | ||||||
|  |                 transaction | ||||||
|  |                     .product | ||||||
|  |                     .next_reduce | ||||||
|  |                     .map_or("N/A".to_string(), |r| r.to_string()), | ||||||
|  |                 transaction | ||||||
|  |                     .product | ||||||
|  |                     .next_reduce_date | ||||||
|  |                     .as_deref() | ||||||
|  |                     .unwrap_or("N/A"), | ||||||
|  |                 transaction.addons.join(", "), | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub fn pretty_print_server_addon_transactions(transactions: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "ID", | ||||||
|  |         "Date", | ||||||
|  |         "Status", | ||||||
|  |         "Server Number", | ||||||
|  |         "Product ID", | ||||||
|  |         "Product Name", | ||||||
|  |         "Product Price", | ||||||
|  |         "Resources", | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for transaction_dyn in transactions { | ||||||
|  |         if let Some(transaction) = transaction_dyn.try_cast::<ServerAddonTransaction>() { | ||||||
|  |             let mut resources_table = Table::new(); | ||||||
|  |             resources_table.add_row(row![b => "Type", "ID"]); | ||||||
|  |             for resource in &transaction.resources { | ||||||
|  |                 resources_table.add_row(row![resource.resource_type, resource.id]); | ||||||
|  |             } | ||||||
|  |  | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 transaction.id, | ||||||
|  |                 transaction.date, | ||||||
|  |                 transaction.status, | ||||||
|  |                 transaction.server_number, | ||||||
|  |                 transaction.product.id, | ||||||
|  |                 transaction.product.name, | ||||||
|  |                 transaction.product.price.to_string(), | ||||||
|  |                 resources_table, | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
| @@ -0,0 +1,30 @@ | |||||||
|  | use prettytable::{row, Table}; | ||||||
|  | use rhai::Array; | ||||||
|  |  | ||||||
|  | use super::Server; | ||||||
|  |  | ||||||
|  | pub fn pretty_print_servers(servers: Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "Number", | ||||||
|  |         "Name", | ||||||
|  |         "IP", | ||||||
|  |         "Product", | ||||||
|  |         "DC", | ||||||
|  |         "Status" | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for server_dyn in servers { | ||||||
|  |         if let Some(server) = server_dyn.try_cast::<Server>() { | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 server.server_number.to_string(), | ||||||
|  |                 server.server_name, | ||||||
|  |                 server.server_ip.unwrap_or("N/A".to_string()), | ||||||
|  |                 server.product, | ||||||
|  |                 server.dc, | ||||||
|  |                 server.status | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
| @@ -0,0 +1,26 @@ | |||||||
|  | use prettytable::{row, Table}; | ||||||
|  | use super::SshKey; | ||||||
|  |  | ||||||
|  | pub fn pretty_print_ssh_keys(keys: rhai::Array) { | ||||||
|  |     let mut table = Table::new(); | ||||||
|  |     table.add_row(row![b => | ||||||
|  |         "Name", | ||||||
|  |         "Fingerprint", | ||||||
|  |         "Type", | ||||||
|  |         "Size", | ||||||
|  |         "Created At" | ||||||
|  |     ]); | ||||||
|  |  | ||||||
|  |     for key_dyn in keys { | ||||||
|  |         if let Some(key) = key_dyn.try_cast::<SshKey>() { | ||||||
|  |             table.add_row(row![ | ||||||
|  |                 key.name, | ||||||
|  |                 key.fingerprint, | ||||||
|  |                 key.key_type, | ||||||
|  |                 key.size.to_string(), | ||||||
|  |                 key.created_at | ||||||
|  |             ]); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     table.printstd(); | ||||||
|  | } | ||||||
							
								
								
									
										76
									
								
								packages/clients/hetznerclient/src/rhai/server.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										76
									
								
								packages/clients/hetznerclient/src/rhai/server.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,76 @@ | |||||||
|  | use crate::api::{Client, models::Server}; | ||||||
|  | use rhai::{Array, Dynamic, plugin::*}; | ||||||
|  |  | ||||||
|  | pub fn register(engine: &mut Engine) { | ||||||
|  |     let server_module = exported_module!(server_api); | ||||||
|  |     engine.register_global_module(server_module.into()); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[export_module] | ||||||
|  | pub mod server_api { | ||||||
|  |     use crate::api::models::Cancellation; | ||||||
|  |  | ||||||
|  |     use super::*; | ||||||
|  |     use rhai::EvalAltResult; | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server", return_raw)] | ||||||
|  |     pub fn get_server( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Server, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .get_server(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_servers", return_raw)] | ||||||
|  |     pub fn get_servers(client: &mut Client) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let servers = client | ||||||
|  |             .get_servers() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         println!("number of SERVERS we got: {:#?}", servers.len()); | ||||||
|  |         Ok(servers.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "update_server_name", return_raw)] | ||||||
|  |     pub fn update_server_name( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |         name: &str, | ||||||
|  |     ) -> Result<Server, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .update_server_name(server_number as i32, name) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_cancellation_data", return_raw)] | ||||||
|  |     pub fn get_cancellation_data( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Cancellation, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .get_cancellation_data(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "cancel_server", return_raw)] | ||||||
|  |     pub fn cancel_server( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |         cancellation_date: &str, | ||||||
|  |     ) -> Result<Cancellation, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .cancel_server(server_number as i32, cancellation_date) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "withdraw_cancellation", return_raw)] | ||||||
|  |     pub fn withdraw_cancellation( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<(), Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .withdraw_cancellation(server_number as i32) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										170
									
								
								packages/clients/hetznerclient/src/rhai/server_ordering.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										170
									
								
								packages/clients/hetznerclient/src/rhai/server_ordering.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,170 @@ | |||||||
|  | use crate::api::{ | ||||||
|  |     Client, | ||||||
|  |     models::{ | ||||||
|  |         AuctionServerProduct, AuctionTransaction, OrderAuctionServerBuilder, OrderServerBuilder, | ||||||
|  |         OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, Transaction, | ||||||
|  |     }, | ||||||
|  | }; | ||||||
|  | use rhai::{Array, Dynamic, plugin::*}; | ||||||
|  |  | ||||||
|  | pub fn register(engine: &mut Engine) { | ||||||
|  |     let server_order_module = exported_module!(server_order_api); | ||||||
|  |     engine.register_global_module(server_order_module.into()); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[export_module] | ||||||
|  | pub mod server_order_api { | ||||||
|  |     use crate::api::models::OrderServerAddonBuilder; | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server_products", return_raw)] | ||||||
|  |     pub fn get_server_ordering_product_overview( | ||||||
|  |         client: &mut Client, | ||||||
|  |     ) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let overview_servers = client | ||||||
|  |             .get_server_products() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(overview_servers.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server_product_by_id", return_raw)] | ||||||
|  |     pub fn get_server_ordering_product_by_id( | ||||||
|  |         client: &mut Client, | ||||||
|  |         product_id: &str, | ||||||
|  |     ) -> Result<OrderServerProduct, Box<EvalAltResult>> { | ||||||
|  |         let product = client | ||||||
|  |             .get_server_product_by_id(product_id) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(product) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "order_server", return_raw)] | ||||||
|  |     pub fn order_server( | ||||||
|  |         client: &mut Client, | ||||||
|  |         order: OrderServerBuilder, | ||||||
|  |     ) -> Result<Transaction, Box<EvalAltResult>> { | ||||||
|  |         let transaction = client | ||||||
|  |             .order_server(order) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_transaction_by_id", return_raw)] | ||||||
|  |     pub fn get_transaction_by_id( | ||||||
|  |         client: &mut Client, | ||||||
|  |         transaction_id: &str, | ||||||
|  |     ) -> Result<Transaction, Box<EvalAltResult>> { | ||||||
|  |         let transaction = client | ||||||
|  |             .get_transaction_by_id(transaction_id) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_transactions", return_raw)] | ||||||
|  |     pub fn get_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let transactions = client | ||||||
|  |             .get_transactions() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transactions.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_auction_server_products", return_raw)] | ||||||
|  |     pub fn get_auction_server_products(client: &mut Client) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let products = client | ||||||
|  |             .get_auction_server_products() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(products.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_auction_server_product_by_id", return_raw)] | ||||||
|  |     pub fn get_auction_server_product_by_id( | ||||||
|  |         client: &mut Client, | ||||||
|  |         product_id: &str, | ||||||
|  |     ) -> Result<AuctionServerProduct, Box<EvalAltResult>> { | ||||||
|  |         let product = client | ||||||
|  |             .get_auction_server_product_by_id(product_id) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(product) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_auction_transactions", return_raw)] | ||||||
|  |     pub fn get_auction_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let transactions = client | ||||||
|  |             .get_auction_transactions() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transactions.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_auction_transaction_by_id", return_raw)] | ||||||
|  |     pub fn get_auction_transaction_by_id( | ||||||
|  |         client: &mut Client, | ||||||
|  |         transaction_id: &str, | ||||||
|  |     ) -> Result<AuctionTransaction, Box<EvalAltResult>> { | ||||||
|  |         let transaction = client | ||||||
|  |             .get_auction_transaction_by_id(transaction_id) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server_addon_products", return_raw)] | ||||||
|  |     pub fn get_server_addon_products( | ||||||
|  |         client: &mut Client, | ||||||
|  |         server_number: i64, | ||||||
|  |     ) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let products = client | ||||||
|  |             .get_server_addon_products(server_number) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(products.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server_addon_transactions", return_raw)] | ||||||
|  |     pub fn get_server_addon_transactions( | ||||||
|  |         client: &mut Client, | ||||||
|  |     ) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let transactions = client | ||||||
|  |             .get_server_addon_transactions() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transactions.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_server_addon_transaction_by_id", return_raw)] | ||||||
|  |     pub fn get_server_addon_transaction_by_id( | ||||||
|  |         client: &mut Client, | ||||||
|  |         transaction_id: &str, | ||||||
|  |     ) -> Result<ServerAddonTransaction, Box<EvalAltResult>> { | ||||||
|  |         let transaction = client | ||||||
|  |             .get_server_addon_transaction_by_id(transaction_id) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "order_auction_server", return_raw)] | ||||||
|  |     pub fn order_auction_server( | ||||||
|  |         client: &mut Client, | ||||||
|  |         order: OrderAuctionServerBuilder, | ||||||
|  |     ) -> Result<AuctionTransaction, Box<EvalAltResult>> { | ||||||
|  |         println!("Builder struct being used to order server: {:#?}", order); | ||||||
|  |         let transaction = client.order_auction_server( | ||||||
|  |             order.product_id, | ||||||
|  |             order.authorized_keys.unwrap_or(vec![]), | ||||||
|  |             order.dist, | ||||||
|  |             None, | ||||||
|  |             order.lang, | ||||||
|  |             order.comment, | ||||||
|  |             order.addon, | ||||||
|  |             order.test, | ||||||
|  |         ).map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "order_server_addon", return_raw)] | ||||||
|  |     pub fn order_server_addon( | ||||||
|  |         client: &mut Client, | ||||||
|  |         order: OrderServerAddonBuilder, | ||||||
|  |     ) -> Result<ServerAddonTransaction, Box<EvalAltResult>> { | ||||||
|  |         println!("Builder struct being used to order server addon: {:#?}", order); | ||||||
|  |         let transaction = client | ||||||
|  |             .order_server_addon(order) | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(transaction) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
										89
									
								
								packages/clients/hetznerclient/src/rhai/ssh_keys.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										89
									
								
								packages/clients/hetznerclient/src/rhai/ssh_keys.rs
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,89 @@ | |||||||
|  | use crate::api::{Client, models::SshKey}; | ||||||
|  | use prettytable::{Table, row}; | ||||||
|  | use rhai::{Array, Dynamic, Engine, plugin::*}; | ||||||
|  |  | ||||||
|  | pub fn register(engine: &mut Engine) { | ||||||
|  |     let ssh_keys_module = exported_module!(ssh_keys_api); | ||||||
|  |     engine.register_global_module(ssh_keys_module.into()); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[export_module] | ||||||
|  | pub mod ssh_keys_api { | ||||||
|  |     use super::*; | ||||||
|  |     use rhai::EvalAltResult; | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_ssh_keys", return_raw)] | ||||||
|  |     pub fn get_ssh_keys(client: &mut Client) -> Result<Array, Box<EvalAltResult>> { | ||||||
|  |         let ssh_keys = client | ||||||
|  |             .get_ssh_keys() | ||||||
|  |             .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?; | ||||||
|  |         Ok(ssh_keys.into_iter().map(Dynamic::from).collect()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "get_ssh_key", return_raw)] | ||||||
|  |     pub fn get_ssh_key( | ||||||
|  |         client: &mut Client, | ||||||
|  |         fingerprint: &str, | ||||||
|  |     ) -> Result<SshKey, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .get_ssh_key(fingerprint) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "add_ssh_key", return_raw)] | ||||||
|  |     pub fn add_ssh_key( | ||||||
|  |         client: &mut Client, | ||||||
|  |         name: &str, | ||||||
|  |         data: &str, | ||||||
|  |     ) -> Result<SshKey, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .add_ssh_key(name, data) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "update_ssh_key_name", return_raw)] | ||||||
|  |     pub fn update_ssh_key_name( | ||||||
|  |         client: &mut Client, | ||||||
|  |         fingerprint: &str, | ||||||
|  |         name: &str, | ||||||
|  |     ) -> Result<SshKey, Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .update_ssh_key_name(fingerprint, name) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "delete_ssh_key", return_raw)] | ||||||
|  |     pub fn delete_ssh_key( | ||||||
|  |         client: &mut Client, | ||||||
|  |         fingerprint: &str, | ||||||
|  |     ) -> Result<(), Box<EvalAltResult>> { | ||||||
|  |         client | ||||||
|  |             .delete_ssh_key(fingerprint) | ||||||
|  |             .map_err(|e| e.to_string().into()) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     #[rhai_fn(name = "pretty_print")] | ||||||
|  |     pub fn pretty_print_ssh_keys(keys: Array) { | ||||||
|  |         let mut table = Table::new(); | ||||||
|  |         table.add_row(row![b => | ||||||
|  |             "Name", | ||||||
|  |             "Fingerprint", | ||||||
|  |             "Type", | ||||||
|  |             "Size", | ||||||
|  |             "Created At" | ||||||
|  |         ]); | ||||||
|  |  | ||||||
|  |         for key_dyn in keys { | ||||||
|  |             if let Some(key) = key_dyn.try_cast::<SshKey>() { | ||||||
|  |                 table.add_row(row![ | ||||||
|  |                     key.name, | ||||||
|  |                     key.fingerprint, | ||||||
|  |                     key.key_type, | ||||||
|  |                     key.size.to_string(), | ||||||
|  |                     key.created_at | ||||||
|  |                 ]); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         table.printstd(); | ||||||
|  |     } | ||||||
|  | } | ||||||
| @@ -9,22 +9,22 @@ license = "Apache-2.0" | |||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| # HTTP client for async requests | # HTTP client for async requests | ||||||
| reqwest = { version = "0.12.15", features = ["json"] } | reqwest = { workspace = true } | ||||||
| # JSON handling | # JSON handling | ||||||
| serde_json = "1.0" | serde_json = { workspace = true } | ||||||
| # Base64 encoding/decoding for message payloads | # Base64 encoding/decoding for message payloads | ||||||
| base64 = "0.22.1" | base64 = { workspace = true } | ||||||
| # Async runtime | # Async runtime | ||||||
| tokio = { version = "1.45.0", features = ["full"] } | tokio = { workspace = true } | ||||||
| # Rhai scripting support | # Rhai scripting support | ||||||
| rhai = { version = "1.12.0", features = ["sync"] } | rhai = { workspace = true } | ||||||
| # Logging | # Logging | ||||||
| log = "0.4" | log = { workspace = true } | ||||||
| # URL encoding for API parameters | # URL encoding for API parameters | ||||||
| urlencoding = "2.1.3" | urlencoding = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| # For async testing | # For async testing | ||||||
| tokio-test = "0.4.4" | tokio-test = { workspace = true } | ||||||
| # For temporary files in tests | # For temporary files in tests | ||||||
| tempfile = "3.5" | tempfile = { workspace = true } | ||||||
| @@ -1,7 +1,16 @@ | |||||||
| # SAL Mycelium | # SAL Mycelium (`sal-mycelium`) | ||||||
| 
 | 
 | ||||||
A Rust client library for interacting with a Mycelium node's HTTP API, with Rhai scripting support. | A Rust client library for interacting with a Mycelium node's HTTP API, with Rhai scripting support. | ||||||
| 
 | 
 | ||||||
|  | ## Installation | ||||||
|  | 
 | ||||||
|  | Add this to your `Cargo.toml`: | ||||||
|  | 
 | ||||||
|  | ```toml | ||||||
|  | [dependencies] | ||||||
|  | sal-mycelium = "0.1.0" | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
| ## Overview | ## Overview | ||||||
| 
 | 
 | ||||||
| SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including: | SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including: | ||||||
| @@ -11,24 +11,24 @@ categories = ["database", "api-bindings"] | |||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| # PostgreSQL client dependencies | # PostgreSQL client dependencies | ||||||
| postgres = "0.19.4" | postgres = { workspace = true } | ||||||
| postgres-types = "0.2.5" | postgres-types = { workspace = true } | ||||||
| tokio-postgres = "0.7.8" | tokio-postgres = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # Connection pooling | # Connection pooling | ||||||
| r2d2 = "0.8.10" | r2d2 = { workspace = true } | ||||||
| r2d2_postgres = "0.18.2" | r2d2_postgres = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # Utility dependencies | # Utility dependencies | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| thiserror = "2.0.12" | thiserror = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # Rhai scripting support | # Rhai scripting support | ||||||
| rhai = { version = "1.12.0", features = ["sync"] } | rhai = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # SAL dependencies | # SAL dependencies | ||||||
| sal-virt = { path = "../virt" } | sal-virt = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| tempfile = "3.5" | tempfile = { workspace = true } | ||||||
| tokio-test = "0.4.4" | tokio-test = { workspace = true } | ||||||
| @@ -1,7 +1,16 @@ | |||||||
| # SAL PostgreSQL Client | # SAL PostgreSQL Client (`sal-postgresclient`) | ||||||
| 
 | 
 | ||||||
| The SAL PostgreSQL Client (`sal-postgresclient`) is an independent package that provides a simple and efficient way to interact with PostgreSQL databases in Rust. It offers connection management, query execution, a builder pattern for flexible configuration, and PostgreSQL installer functionality using nerdctl. | The SAL PostgreSQL Client (`sal-postgresclient`) is an independent package that provides a simple and efficient way to interact with PostgreSQL databases in Rust. It offers connection management, query execution, a builder pattern for flexible configuration, and PostgreSQL installer functionality using nerdctl. | ||||||
| 
 | 
 | ||||||
|  | ## Installation | ||||||
|  | 
 | ||||||
|  | Add this to your `Cargo.toml`: | ||||||
|  | 
 | ||||||
|  | ```toml | ||||||
|  | [dependencies] | ||||||
|  | sal-postgresclient = "0.1.0" | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
| ## Features | ## Features | ||||||
| 
 | 
 | ||||||
| - **Connection Management**: Automatic connection handling and reconnection | - **Connection Management**: Automatic connection handling and reconnection | ||||||
| @@ -11,11 +11,11 @@ categories = ["database", "caching", "api-bindings"] | |||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| # Core Redis functionality | # Core Redis functionality | ||||||
| redis = "0.31.0" | redis = { workspace = true } | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # Rhai integration (optional) | # Rhai integration (optional) | ||||||
| rhai = { version = "1.12.0", features = ["sync"], optional = true } | rhai = { workspace = true, optional = true } | ||||||
| 
 | 
 | ||||||
| [features] | [features] | ||||||
| default = ["rhai"] | default = ["rhai"] | ||||||
| @@ -23,4 +23,4 @@ rhai = ["dep:rhai"] | |||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| # For testing | # For testing | ||||||
| tempfile = "3.5" | tempfile = { workspace = true } | ||||||
| @@ -1,7 +1,16 @@ | |||||||
| # Redis Client Module | # SAL Redis Client (`sal-redisclient`) | ||||||
| 
 | 
 | ||||||
| A robust Redis client wrapper for Rust applications that provides connection management, automatic reconnection, and a simple interface for executing Redis commands. | A robust Redis client wrapper for Rust applications that provides connection management, automatic reconnection, and a simple interface for executing Redis commands. | ||||||
| 
 | 
 | ||||||
|  | ## Installation | ||||||
|  | 
 | ||||||
|  | Add this to your `Cargo.toml`: | ||||||
|  | 
 | ||||||
|  | ```toml | ||||||
|  | [dependencies] | ||||||
|  | sal-redisclient = "0.1.0" | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
| ## Features | ## Features | ||||||
| 
 | 
 | ||||||
| - **Singleton Pattern**: Maintains a global Redis client instance, so we don't re-int all the time. | - **Singleton Pattern**: Maintains a global Redis client instance, so we don't re-int all the time. | ||||||
| @@ -187,7 +187,7 @@ cargo build | |||||||
| To run tests: | To run tests: | ||||||
| 
 | 
 | ||||||
| ```bash | ```bash | ||||||
| cargo test | cargo test -- --test-threads=1 | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
| ## License | ## License | ||||||
| @@ -1,5 +1,5 @@ | |||||||
| use sal_rfs_client::RfsClient; |  | ||||||
| use sal_rfs_client::types::{ClientConfig, Credentials}; | use sal_rfs_client::types::{ClientConfig, Credentials}; | ||||||
|  | use sal_rfs_client::RfsClient; | ||||||
| 
 | 
 | ||||||
| #[tokio::main] | #[tokio::main] | ||||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
| @@ -1,6 +1,6 @@ | |||||||
| use sal_rfs_client::RfsClient; |  | ||||||
| use sal_rfs_client::types::{ClientConfig, Credentials}; |  | ||||||
| use openapi::models::{VerifyBlock, VerifyBlocksRequest}; | use openapi::models::{VerifyBlock, VerifyBlocksRequest}; | ||||||
|  | use sal_rfs_client::types::{ClientConfig, Credentials}; | ||||||
|  | use sal_rfs_client::RfsClient; | ||||||
| 
 | 
 | ||||||
| #[tokio::main] | #[tokio::main] | ||||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
| @@ -38,20 +38,27 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
| 
 | 
 | ||||||
|     // Print block information
 |     // Print block information
 | ||||||
|     for (i, block_data) in blocks.blocks.iter().enumerate() { |     for (i, block_data) in blocks.blocks.iter().enumerate() { | ||||||
|         println!("Block {}: Hash={}, Index={}", i, block_data.hash, block_data.index); |         println!( | ||||||
|  |             "Block {}: Hash={}, Index={}", | ||||||
|  |             i, block_data.hash, block_data.index | ||||||
|  |         ); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // Verify blocks with complete information
 |     // Verify blocks with complete information
 | ||||||
|     println!("Verifying blocks..."); |     println!("Verifying blocks..."); | ||||||
| 
 | 
 | ||||||
|     // Create a list of VerifyBlock objects with complete information
 |     // Create a list of VerifyBlock objects with complete information
 | ||||||
|     let verify_blocks = blocks.blocks.iter().map(|block| { |     let verify_blocks = blocks | ||||||
|  |         .blocks | ||||||
|  |         .iter() | ||||||
|  |         .map(|block| { | ||||||
|             VerifyBlock { |             VerifyBlock { | ||||||
|                 block_hash: block.hash.clone(), |                 block_hash: block.hash.clone(), | ||||||
|                 block_index: block.index, |                 block_index: block.index, | ||||||
|                 file_hash: file_hash.clone(), // Using the actual file hash
 |                 file_hash: file_hash.clone(), // Using the actual file hash
 | ||||||
|             } |             } | ||||||
|     }).collect::<Vec<_>>(); |         }) | ||||||
|  |         .collect::<Vec<_>>(); | ||||||
| 
 | 
 | ||||||
|     // Create the request with the complete block information
 |     // Create the request with the complete block information
 | ||||||
|     for block in verify_blocks.iter() { |     for block in verify_blocks.iter() { | ||||||
| @@ -59,11 +66,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|         println!("Block index: {}", block.block_index); |         println!("Block index: {}", block.block_index); | ||||||
|         println!("File hash: {}", block.file_hash); |         println!("File hash: {}", block.file_hash); | ||||||
|     } |     } | ||||||
|     let request = VerifyBlocksRequest { blocks: verify_blocks }; |     let request = VerifyBlocksRequest { | ||||||
|  |         blocks: verify_blocks, | ||||||
|  |     }; | ||||||
| 
 | 
 | ||||||
|     // Send the verification request
 |     // Send the verification request
 | ||||||
|     let verify_result = client.verify_blocks(request).await?; |     let verify_result = client.verify_blocks(request).await?; | ||||||
|     println!("Verification result: {} missing blocks", verify_result.missing.len()); |     println!( | ||||||
|  |         "Verification result: {} missing blocks", | ||||||
|  |         verify_result.missing.len() | ||||||
|  |     ); | ||||||
|     for block in verify_result.missing.iter() { |     for block in verify_result.missing.iter() { | ||||||
|         println!("Missing block: {}", block); |         println!("Missing block: {}", block); | ||||||
|     } |     } | ||||||
| @@ -73,7 +85,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|     let blocks_list = client.list_blocks(None).await?; |     let blocks_list = client.list_blocks(None).await?; | ||||||
|     println!("Server has {} blocks in total", blocks_list.len()); |     println!("Server has {} blocks in total", blocks_list.len()); | ||||||
|     if !blocks_list.is_empty() { |     if !blocks_list.is_empty() { | ||||||
|         let first_few = blocks_list.iter().take(3) |         let first_few = blocks_list | ||||||
|  |             .iter() | ||||||
|  |             .take(3) | ||||||
|             .map(|s| s.as_str()) |             .map(|s| s.as_str()) | ||||||
|             .collect::<Vec<_>>() |             .collect::<Vec<_>>() | ||||||
|             .join(", "); |             .join(", "); | ||||||
| @@ -91,9 +105,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|     // Get block downloads statistics (get_block_downloads_handler)
 |     // Get block downloads statistics (get_block_downloads_handler)
 | ||||||
|     if !blocks.blocks.is_empty() { |     if !blocks.blocks.is_empty() { | ||||||
|         let block_to_check = &blocks.blocks[0].hash; |         let block_to_check = &blocks.blocks[0].hash; | ||||||
|         println!("\n3. Getting download statistics for block: {}", block_to_check); |         println!( | ||||||
|  |             "\n3. Getting download statistics for block: {}", | ||||||
|  |             block_to_check | ||||||
|  |         ); | ||||||
|         let downloads = client.get_block_downloads(block_to_check).await?; |         let downloads = client.get_block_downloads(block_to_check).await?; | ||||||
|         println!("Block has been downloaded {} times", downloads.downloads_count); |         println!( | ||||||
|  |             "Block has been downloaded {} times", | ||||||
|  |             downloads.downloads_count | ||||||
|  |         ); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // Get a specific block content (get_block_handler)
 |     // Get a specific block content (get_block_handler)
 | ||||||
| @@ -107,7 +127,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|     // Get user blocks (get_user_blocks_handler)
 |     // Get user blocks (get_user_blocks_handler)
 | ||||||
|     println!("\n6. Listing user blocks..."); |     println!("\n6. Listing user blocks..."); | ||||||
|     let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?; |     let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?; | ||||||
|     println!("User has {} blocks (showing page 1 with 10 per page)", user_blocks.total); |     println!( | ||||||
|  |         "User has {} blocks (showing page 1 with 10 per page)", | ||||||
|  |         user_blocks.total | ||||||
|  |     ); | ||||||
|     for block in user_blocks.blocks.iter().take(3) { |     for block in user_blocks.blocks.iter().take(3) { | ||||||
|         println!("  - Block: {}, Size: {}", block.hash, block.size); |         println!("  - Block: {}, Size: {}", block.hash, block.size); | ||||||
|     } |     } | ||||||
| @@ -117,7 +140,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|     let test_block_data = b"This is test block data for direct block upload"; |     let test_block_data = b"This is test block data for direct block upload"; | ||||||
|     let new_file_hash = "test_file_hash_for_block_upload"; |     let new_file_hash = "test_file_hash_for_block_upload"; | ||||||
|     let block_index = 0; |     let block_index = 0; | ||||||
|     let block_hash = client.upload_block(new_file_hash, block_index, test_block_data.to_vec()).await?; |     let block_hash = client | ||||||
|  |         .upload_block(new_file_hash, block_index, test_block_data.to_vec()) | ||||||
|  |         .await?; | ||||||
|     println!("Uploaded block with hash: {}", block_hash); |     println!("Uploaded block with hash: {}", block_hash); | ||||||
| 
 | 
 | ||||||
|     // Clean up
 |     // Clean up
 | ||||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user