Compare commits

4 Commits

ef8cc74d2b ... main-rfs-c
| Author | SHA1 | Date |
|---|---|---|
|  | 5014c2f4a5 | |
|  | ba6f53a28a | |
|  | b81a0aa61c | |
|  | b02101bd42 | |

.github/workflows/publish.yml (vendored, 227 changed lines)
							| @@ -1,227 +0,0 @@ | ||||
| name: Publish SAL Crates | ||||
|  | ||||
| on: | ||||
|   release: | ||||
|     types: [published] | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       version: | ||||
|         description: 'Version to publish (e.g., 0.1.0)' | ||||
|         required: true | ||||
|         type: string | ||||
|       dry_run: | ||||
|         description: 'Dry run (do not actually publish)' | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|  | ||||
| env: | ||||
|   CARGO_TERM_COLOR: always | ||||
|  | ||||
| jobs: | ||||
|   publish: | ||||
|     name: Publish to crates.io | ||||
|     runs-on: ubuntu-latest | ||||
|      | ||||
|     steps: | ||||
|     - name: Checkout repository | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         fetch-depth: 0 | ||||
|      | ||||
|     - name: Install Rust toolchain | ||||
|       uses: dtolnay/rust-toolchain@stable | ||||
|       with: | ||||
|         toolchain: stable | ||||
|      | ||||
|     - name: Cache Cargo dependencies | ||||
|       uses: actions/cache@v4 | ||||
|       with: | ||||
|         path: | | ||||
|           ~/.cargo/bin/ | ||||
|           ~/.cargo/registry/index/ | ||||
|           ~/.cargo/registry/cache/ | ||||
|           ~/.cargo/git/db/ | ||||
|           target/ | ||||
|         key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} | ||||
|         restore-keys: | | ||||
|           ${{ runner.os }}-cargo- | ||||
|      | ||||
|     - name: Install cargo-edit for version management | ||||
|       run: cargo install cargo-edit | ||||
|      | ||||
|     - name: Set version from release tag | ||||
|       if: github.event_name == 'release' | ||||
|       run: | | ||||
|         VERSION=${GITHUB_REF#refs/tags/v} | ||||
|         echo "PUBLISH_VERSION=$VERSION" >> $GITHUB_ENV | ||||
|         echo "Publishing version: $VERSION" | ||||
|      | ||||
|     - name: Set version from workflow input | ||||
|       if: github.event_name == 'workflow_dispatch' | ||||
|       run: | | ||||
|         echo "PUBLISH_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV | ||||
|         echo "Publishing version: ${{ github.event.inputs.version }}" | ||||
|      | ||||
|     - name: Update version in all crates | ||||
|       run: | | ||||
|         echo "Updating version to $PUBLISH_VERSION" | ||||
|          | ||||
|         # Update root Cargo.toml | ||||
|         cargo set-version $PUBLISH_VERSION | ||||
|          | ||||
|         # Update each crate | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           if [ -d "$crate" ]; then | ||||
|             cd "$crate" | ||||
|             cargo set-version $PUBLISH_VERSION | ||||
|             cd .. | ||||
|             echo "Updated $crate to version $PUBLISH_VERSION" | ||||
|           fi | ||||
|         done | ||||
|      | ||||
|     - name: Run tests | ||||
|       run: cargo test --workspace --verbose | ||||
|      | ||||
|     - name: Check formatting | ||||
|       run: cargo fmt --all -- --check | ||||
|      | ||||
|     - name: Run clippy | ||||
|       run: cargo clippy --workspace --all-targets --all-features -- -D warnings | ||||
|      | ||||
|     - name: Dry run publish (check packages) | ||||
|       run: | | ||||
|         echo "Checking all packages can be published..." | ||||
|          | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           if [ -d "$crate" ]; then | ||||
|             echo "Checking $crate..." | ||||
|             cd "$crate" | ||||
|             cargo publish --dry-run | ||||
|             cd .. | ||||
|           fi | ||||
|         done | ||||
|          | ||||
|         echo "Checking main crate..." | ||||
|         cargo publish --dry-run | ||||
|      | ||||
|     - name: Publish crates (dry run) | ||||
|       if: github.event.inputs.dry_run == 'true' | ||||
|       run: | | ||||
|         echo "🔍 DRY RUN MODE - Would publish the following crates:" | ||||
|         echo "Individual crates: sal-os, sal-process, sal-text, sal-net, sal-git, sal-vault, sal-kubernetes, sal-virt, sal-redisclient, sal-postgresclient, sal-zinit-client, sal-mycelium, sal-rhai" | ||||
|         echo "Meta-crate: sal" | ||||
|         echo "Version: $PUBLISH_VERSION" | ||||
|      | ||||
|     - name: Publish individual crates | ||||
|       if: github.event.inputs.dry_run != 'true' | ||||
|       env: | ||||
|         CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} | ||||
|       run: | | ||||
|         echo "Publishing individual crates..." | ||||
|          | ||||
|         # Crates in dependency order | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||
|          | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           if [ -d "$crate" ]; then | ||||
|             echo "Publishing sal-$crate..." | ||||
|             cd "$crate" | ||||
|              | ||||
|             # Retry logic for transient failures | ||||
|             for attempt in 1 2 3; do | ||||
|               if cargo publish --token $CARGO_REGISTRY_TOKEN; then | ||||
|                 echo "✅ sal-$crate published successfully" | ||||
|                 break | ||||
|               else | ||||
|                 if [ $attempt -eq 3 ]; then | ||||
|                   echo "❌ Failed to publish sal-$crate after 3 attempts" | ||||
|                   exit 1 | ||||
|                 else | ||||
|                   echo "⚠️ Attempt $attempt failed, retrying in 30 seconds..." | ||||
|                   sleep 30 | ||||
|                 fi | ||||
|               fi | ||||
|             done | ||||
|              | ||||
|             cd .. | ||||
|              | ||||
|             # Wait for crates.io to process | ||||
|             if [ "$crate" != "rhai" ]; then | ||||
|               echo "⏳ Waiting 30 seconds for crates.io to process..." | ||||
|               sleep 30 | ||||
|             fi | ||||
|           fi | ||||
|         done | ||||
|      | ||||
|     - name: Publish main crate | ||||
|       if: github.event.inputs.dry_run != 'true' | ||||
|       env: | ||||
|         CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} | ||||
|       run: | | ||||
|         echo "Publishing main sal crate..." | ||||
|          | ||||
|         # Wait a bit longer before publishing the meta-crate | ||||
|         echo "⏳ Waiting 60 seconds for all individual crates to be available..." | ||||
|         sleep 60 | ||||
|          | ||||
|         # Retry logic for the main crate | ||||
|         for attempt in 1 2 3; do | ||||
|           if cargo publish --token $CARGO_REGISTRY_TOKEN; then | ||||
|             echo "✅ Main sal crate published successfully" | ||||
|             break | ||||
|           else | ||||
|             if [ $attempt -eq 3 ]; then | ||||
|               echo "❌ Failed to publish main sal crate after 3 attempts" | ||||
|               exit 1 | ||||
|             else | ||||
|               echo "⚠️ Attempt $attempt failed, retrying in 60 seconds..." | ||||
|               sleep 60 | ||||
|             fi | ||||
|           fi | ||||
|         done | ||||
|      | ||||
|     - name: Create summary | ||||
|       if: always() | ||||
|       run: | | ||||
|         echo "## 📦 SAL Publishing Summary" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "**Version:** $PUBLISH_VERSION" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "**Trigger:** ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY | ||||
|          | ||||
|         if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then | ||||
|           echo "**Mode:** Dry Run" >> $GITHUB_STEP_SUMMARY | ||||
|         else | ||||
|           echo "**Mode:** Live Publishing" >> $GITHUB_STEP_SUMMARY | ||||
|         fi | ||||
|          | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "### Published Crates" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-os" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-process" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-text" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-net" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-git" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-vault" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-kubernetes" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-virt" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-redisclient" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-postgresclient" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-zinit-client" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-mycelium" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal-rhai" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- sal (meta-crate)" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "### Usage" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo '```bash' >> $GITHUB_STEP_SUMMARY | ||||
|         echo "# Individual crates" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "cargo add sal-os sal-process sal-text" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "# Meta-crate with features" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "cargo add sal --features core" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "cargo add sal --features all" >> $GITHUB_STEP_SUMMARY | ||||
|         echo '```' >> $GITHUB_STEP_SUMMARY | ||||
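The deleted workflow above publishes on release events and also supports manual runs through `workflow_dispatch` with `version` and `dry_run` inputs. A minimal sketch of triggering such a manual dry run, assuming the repository is hosted on GitHub and the GitHub CLI is installed; the `PlanetFirst/sal` slug is taken from the clone URL in the README diff further down and may not match this mirror:

```bash
# Queue a manual dry run of the publish workflow (sketch; requires the GitHub CLI).
gh workflow run publish.yml \
  --repo PlanetFirst/sal \
  -f version=0.1.0 \
  -f dry_run=true

# Follow the run that was just queued.
gh run watch --repo PlanetFirst/sal
```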
							
								
								
									
.github/workflows/test-publish.yml (vendored, 233 changed lines)
							| @@ -1,233 +0,0 @@ | ||||
| name: Test Publishing Setup | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     branches: [ main, master ] | ||||
|     paths: | ||||
|       - '**/Cargo.toml' | ||||
|       - 'scripts/publish-all.sh' | ||||
|       - '.github/workflows/publish.yml' | ||||
|   pull_request: | ||||
|     branches: [ main, master ] | ||||
|     paths: | ||||
|       - '**/Cargo.toml' | ||||
|       - 'scripts/publish-all.sh' | ||||
|       - '.github/workflows/publish.yml' | ||||
|   workflow_dispatch: | ||||
|  | ||||
| env: | ||||
|   CARGO_TERM_COLOR: always | ||||
|  | ||||
| jobs: | ||||
|   test-publish-setup: | ||||
|     name: Test Publishing Setup | ||||
|     runs-on: ubuntu-latest | ||||
|      | ||||
|     steps: | ||||
|     - name: Checkout repository | ||||
|       uses: actions/checkout@v4 | ||||
|      | ||||
|     - name: Install Rust toolchain | ||||
|       uses: dtolnay/rust-toolchain@stable | ||||
|       with: | ||||
|         toolchain: stable | ||||
|      | ||||
|     - name: Cache Cargo dependencies | ||||
|       uses: actions/cache@v4 | ||||
|       with: | ||||
|         path: | | ||||
|           ~/.cargo/bin/ | ||||
|           ~/.cargo/registry/index/ | ||||
|           ~/.cargo/registry/cache/ | ||||
|           ~/.cargo/git/db/ | ||||
|           target/ | ||||
|         key: ${{ runner.os }}-cargo-publish-test-${{ hashFiles('**/Cargo.lock') }} | ||||
|         restore-keys: | | ||||
|           ${{ runner.os }}-cargo-publish-test- | ||||
|           ${{ runner.os }}-cargo- | ||||
|      | ||||
|     - name: Install cargo-edit | ||||
|       run: cargo install cargo-edit | ||||
|      | ||||
|     - name: Test workspace structure | ||||
|       run: | | ||||
|         echo "Testing workspace structure..." | ||||
|          | ||||
|         # Check that all expected crates exist | ||||
|         EXPECTED_CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo) | ||||
|          | ||||
|         for crate in "${EXPECTED_CRATES[@]}"; do | ||||
|           if [ -d "$crate" ] && [ -f "$crate/Cargo.toml" ]; then | ||||
|             echo "✅ $crate exists" | ||||
|           else | ||||
|             echo "❌ $crate missing or invalid" | ||||
|             exit 1 | ||||
|           fi | ||||
|         done | ||||
|      | ||||
|     - name: Test feature configuration | ||||
|       run: | | ||||
|         echo "Testing feature configuration..." | ||||
|          | ||||
|         # Test that features work correctly | ||||
|         cargo check --features os | ||||
|         cargo check --features process | ||||
|         cargo check --features text | ||||
|         cargo check --features net | ||||
|         cargo check --features git | ||||
|         cargo check --features vault | ||||
|         cargo check --features kubernetes | ||||
|         cargo check --features virt | ||||
|         cargo check --features redisclient | ||||
|         cargo check --features postgresclient | ||||
|         cargo check --features zinit_client | ||||
|         cargo check --features mycelium | ||||
|         cargo check --features rhai | ||||
|          | ||||
|         echo "✅ All individual features work" | ||||
|          | ||||
|         # Test feature groups | ||||
|         cargo check --features core | ||||
|         cargo check --features clients | ||||
|         cargo check --features infrastructure | ||||
|         cargo check --features scripting | ||||
|          | ||||
|         echo "✅ All feature groups work" | ||||
|          | ||||
|         # Test all features | ||||
|         cargo check --features all | ||||
|          | ||||
|         echo "✅ All features together work" | ||||
|      | ||||
|     - name: Test dry-run publishing | ||||
|       run: | | ||||
|         echo "Testing dry-run publishing..." | ||||
|          | ||||
|         # Test each individual crate can be packaged | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||
|          | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           echo "Testing sal-$crate..." | ||||
|           cd "$crate" | ||||
|           cargo publish --dry-run | ||||
|           cd .. | ||||
|           echo "✅ sal-$crate can be published" | ||||
|         done | ||||
|          | ||||
|         # Test main crate | ||||
|         echo "Testing main sal crate..." | ||||
|         cargo publish --dry-run | ||||
|         echo "✅ Main sal crate can be published" | ||||
|      | ||||
|     - name: Test publishing script | ||||
|       run: | | ||||
|         echo "Testing publishing script..." | ||||
|          | ||||
|         # Make script executable | ||||
|         chmod +x scripts/publish-all.sh | ||||
|          | ||||
|         # Test dry run | ||||
|         ./scripts/publish-all.sh --dry-run --version 0.1.0-test | ||||
|          | ||||
|         echo "✅ Publishing script works" | ||||
|      | ||||
|     - name: Test version consistency | ||||
|       run: | | ||||
|         echo "Testing version consistency..." | ||||
|          | ||||
|         # Get version from root Cargo.toml | ||||
|         ROOT_VERSION=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/') | ||||
|         echo "Root version: $ROOT_VERSION" | ||||
|          | ||||
|         # Check all crates have the same version | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo) | ||||
|          | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           if [ -f "$crate/Cargo.toml" ]; then | ||||
|             CRATE_VERSION=$(grep '^version = ' "$crate/Cargo.toml" | head -1 | sed 's/version = "\(.*\)"/\1/') | ||||
|             if [ "$CRATE_VERSION" = "$ROOT_VERSION" ]; then | ||||
|               echo "✅ $crate version matches: $CRATE_VERSION" | ||||
|             else | ||||
|               echo "❌ $crate version mismatch: $CRATE_VERSION (expected $ROOT_VERSION)" | ||||
|               exit 1 | ||||
|             fi | ||||
|           fi | ||||
|         done | ||||
|      | ||||
|     - name: Test metadata completeness | ||||
|       run: | | ||||
|         echo "Testing metadata completeness..." | ||||
|          | ||||
|         # Check that all crates have required metadata | ||||
|         CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai) | ||||
|          | ||||
|         for crate in "${CRATES[@]}"; do | ||||
|           echo "Checking sal-$crate metadata..." | ||||
|           cd "$crate" | ||||
|            | ||||
|           # Check required fields exist | ||||
|           if ! grep -q '^name = "sal-' Cargo.toml; then | ||||
|             echo "❌ $crate missing or incorrect name" | ||||
|             exit 1 | ||||
|           fi | ||||
|            | ||||
|           if ! grep -q '^description = ' Cargo.toml; then | ||||
|             echo "❌ $crate missing description" | ||||
|             exit 1 | ||||
|           fi | ||||
|            | ||||
|           if ! grep -q '^repository = ' Cargo.toml; then | ||||
|             echo "❌ $crate missing repository" | ||||
|             exit 1 | ||||
|           fi | ||||
|            | ||||
|           if ! grep -q '^license = ' Cargo.toml; then | ||||
|             echo "❌ $crate missing license" | ||||
|             exit 1 | ||||
|           fi | ||||
|            | ||||
|           echo "✅ sal-$crate metadata complete" | ||||
|           cd .. | ||||
|         done | ||||
|      | ||||
|     - name: Test dependency resolution | ||||
|       run: | | ||||
|         echo "Testing dependency resolution..." | ||||
|          | ||||
|         # Test that all workspace dependencies resolve correctly | ||||
|         cargo tree --workspace > /dev/null | ||||
|         echo "✅ All dependencies resolve correctly" | ||||
|          | ||||
|         # Test that there are no dependency conflicts | ||||
|         cargo check --workspace | ||||
|         echo "✅ No dependency conflicts" | ||||
|      | ||||
|     - name: Generate publishing report | ||||
|       if: always() | ||||
|       run: | | ||||
|         echo "## 🧪 Publishing Setup Test Report" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "### ✅ Tests Passed" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Workspace structure validation" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Feature configuration testing" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Dry-run publishing simulation" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Publishing script validation" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Version consistency check" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Metadata completeness verification" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "- Dependency resolution testing" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "### 📦 Ready for Publishing" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "All SAL crates are ready for publishing to crates.io!" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "**Individual Crates:** 13 modules" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "**Meta-crate:** sal with optional features" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "**Binary:** herodo script executor" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "### 🚀 Next Steps" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "1. Create a release tag (e.g., v0.1.0)" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "2. The publish workflow will automatically trigger" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "3. All crates will be published to crates.io" >> $GITHUB_STEP_SUMMARY | ||||
|         echo "4. Users can install with: \`cargo add sal-os\` or \`cargo add sal --features all\`" >> $GITHUB_STEP_SUMMARY | ||||
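Before this workflow was removed, its checks could be reproduced locally. A rough sketch, assuming the old feature-gated root Cargo.toml (see the diff below) is still in place and using only commands that appear in the YAML above:

```bash
# Local approximation of the deleted test-publish workflow's key steps.
set -e
for feature in core clients infrastructure scripting all; do
  echo "Checking --features $feature"
  cargo check --features "$feature"
done
cargo tree --workspace > /dev/null    # dependency resolution
cargo check --workspace               # no dependency conflicts
./scripts/publish-all.sh --dry-run --version 0.1.0-test
```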
							
								
								
									
.gitignore (vendored, 1 changed line)
							| @@ -62,4 +62,3 @@ docusaurus.config.ts | ||||
| sidebars.ts | ||||
|  | ||||
| tsconfig.json | ||||
| Cargo.toml.bak | ||||
							
								
								
									
Cargo.toml (88 changed lines)
							| @@ -11,33 +11,18 @@ categories = ["os", "filesystem", "api-bindings"] | ||||
| readme = "README.md" | ||||
|  | ||||
| [workspace] | ||||
| members = [ | ||||
|     ".", | ||||
|     "vault", | ||||
|     "git", | ||||
|     "redisclient", | ||||
|     "mycelium", | ||||
|     "text", | ||||
|     "os", | ||||
|     "net", | ||||
|     "zinit_client", | ||||
|     "process", | ||||
|     "virt", | ||||
|     "postgresclient", | ||||
|     "kubernetes", | ||||
|     "rhai", | ||||
|     "herodo", | ||||
| ] | ||||
| members = [".", "vault", "git", "redisclient", "mycelium", "text", "os", "net", "zinit_client", "process", "virt", "postgresclient", "rhai", "herodo", "rfs-client"] | ||||
| resolver = "2" | ||||
|  | ||||
| [workspace.metadata] | ||||
| # Workspace-level metadata | ||||
| rust-version = "1.70.0" | ||||
| rust-version = "1.85.0" | ||||
|  | ||||
| [workspace.dependencies] | ||||
| # Core shared dependencies with consistent versions | ||||
| anyhow = "1.0.98" | ||||
| base64 = "0.22.1" | ||||
| bytes = "1.4.0" | ||||
| dirs = "6.0.0" | ||||
| env_logger = "0.11.8" | ||||
| futures = "0.3.30" | ||||
| @@ -88,57 +73,16 @@ tokio-test = "0.4.4" | ||||
|  | ||||
| [dependencies] | ||||
| thiserror = "2.0.12" # For error handling in the main Error enum | ||||
|  | ||||
| # Optional dependencies - users can choose which modules to include | ||||
| sal-git = { path = "git", optional = true } | ||||
| sal-kubernetes = { path = "kubernetes", optional = true } | ||||
| sal-redisclient = { path = "redisclient", optional = true } | ||||
| sal-mycelium = { path = "mycelium", optional = true } | ||||
| sal-text = { path = "text", optional = true } | ||||
| sal-os = { path = "os", optional = true } | ||||
| sal-net = { path = "net", optional = true } | ||||
| sal-zinit-client = { path = "zinit_client", optional = true } | ||||
| sal-process = { path = "process", optional = true } | ||||
| sal-virt = { path = "virt", optional = true } | ||||
| sal-postgresclient = { path = "postgresclient", optional = true } | ||||
| sal-vault = { path = "vault", optional = true } | ||||
| sal-rhai = { path = "rhai", optional = true } | ||||
|  | ||||
| [features] | ||||
| default = [] | ||||
|  | ||||
| # Individual module features | ||||
| git = ["dep:sal-git"] | ||||
| kubernetes = ["dep:sal-kubernetes"] | ||||
| redisclient = ["dep:sal-redisclient"] | ||||
| mycelium = ["dep:sal-mycelium"] | ||||
| text = ["dep:sal-text"] | ||||
| os = ["dep:sal-os"] | ||||
| net = ["dep:sal-net"] | ||||
| zinit_client = ["dep:sal-zinit-client"] | ||||
| process = ["dep:sal-process"] | ||||
| virt = ["dep:sal-virt"] | ||||
| postgresclient = ["dep:sal-postgresclient"] | ||||
| vault = ["dep:sal-vault"] | ||||
| rhai = ["dep:sal-rhai"] | ||||
|  | ||||
| # Convenience feature groups | ||||
| core = ["os", "process", "text", "net"] | ||||
| clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"] | ||||
| infrastructure = ["git", "vault", "kubernetes", "virt"] | ||||
| scripting = ["rhai"] | ||||
| all = [ | ||||
|     "git", | ||||
|     "kubernetes", | ||||
|     "redisclient", | ||||
|     "mycelium", | ||||
|     "text", | ||||
|     "os", | ||||
|     "net", | ||||
|     "zinit_client", | ||||
|     "process", | ||||
|     "virt", | ||||
|     "postgresclient", | ||||
|     "vault", | ||||
|     "rhai", | ||||
| ] | ||||
| sal-git = { path = "git" } | ||||
| sal-redisclient = { path = "redisclient" } | ||||
| sal-mycelium = { path = "mycelium" } | ||||
| sal-text = { path = "text" } | ||||
| sal-os = { path = "os" } | ||||
| sal-net = { path = "net" } | ||||
| sal-zinit-client = { path = "zinit_client" } | ||||
| sal-process = { path = "process" } | ||||
| sal-virt = { path = "virt" } | ||||
| sal-postgresclient = { path = "postgresclient" } | ||||
| sal-vault = { path = "vault" } | ||||
| sal-rhai = { path = "rhai" } | ||||
| sal-rfs-client = { path = "rfs-client" } | ||||
|   | ||||
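Taken together, this Cargo.toml change removes the optional dependencies and the `[features]` table, turns every `sal-*` module into an unconditional path dependency of the root crate, drops `kubernetes` from the workspace members while adding `rfs-client`, and raises the workspace `rust-version` from 1.70.0 to 1.85.0. A hedged sketch of what that means for a consumer of the published meta-crate, assuming the new manifest ships as shown:

```bash
# Before this change, features selected which modules were compiled in:
#   cargo add sal --features core        # os, process, text, net only
# After it, the meta-crate exposes no feature flags, so adding it pulls in
# every module listed above unconditionally:
cargo add sal
# cargo add sal --features core          # would now fail: the feature no longer exists
```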
							
								
								
									
PUBLISHING.md (239 changed lines)
							| @@ -1,239 +0,0 @@ | ||||
| # SAL Publishing Guide | ||||
|  | ||||
| This guide explains how to publish SAL crates to crates.io and how users can consume them. | ||||
|  | ||||
| ## 🎯 Publishing Strategy | ||||
|  | ||||
| SAL uses a **modular publishing approach** where each module is published as an individual crate. This allows users to install only the functionality they need, reducing compilation time and binary size. | ||||
|  | ||||
| ## 📦 Crate Structure | ||||
|  | ||||
| ### Individual Crates | ||||
|  | ||||
| Each SAL module is published as a separate crate: | ||||
|  | ||||
| | Crate Name | Description | Category | | ||||
| |------------|-------------|----------| | ||||
| | `sal-os` | Operating system operations | Core | | ||||
| | `sal-process` | Process management | Core | | ||||
| | `sal-text` | Text processing utilities | Core | | ||||
| | `sal-net` | Network operations | Core | | ||||
| | `sal-git` | Git repository management | Infrastructure | | ||||
| | `sal-vault` | Cryptographic operations | Infrastructure | | ||||
| | `sal-kubernetes` | Kubernetes cluster management | Infrastructure | | ||||
| | `sal-virt` | Virtualization tools (Buildah, nerdctl) | Infrastructure | | ||||
| | `sal-redisclient` | Redis database client | Clients | | ||||
| | `sal-postgresclient` | PostgreSQL database client | Clients | | ||||
| | `sal-zinit-client` | Zinit process supervisor client | Clients | | ||||
| | `sal-mycelium` | Mycelium network client | Clients | | ||||
| | `sal-rhai` | Rhai scripting integration | Scripting | | ||||
|  | ||||
| ### Meta-crate | ||||
|  | ||||
| The main `sal` crate serves as a meta-crate that re-exports all modules with optional features: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal = { version = "0.1.0", features = ["os", "process", "text"] } | ||||
| ``` | ||||
|  | ||||
| ## 🚀 Publishing Process | ||||
|  | ||||
| ### Prerequisites | ||||
|  | ||||
| 1. **Crates.io Account**: Ensure you have a crates.io account and API token | ||||
| 2. **Repository Access**: Ensure the repository URL is accessible | ||||
| 3. **Version Consistency**: All crates should use the same version number | ||||
|  | ||||
| ### Publishing Individual Crates | ||||
|  | ||||
| Each crate can be published independently: | ||||
|  | ||||
| ```bash | ||||
| # Publish core modules | ||||
| cd os && cargo publish | ||||
| cd ../process && cargo publish | ||||
| cd ../text && cargo publish | ||||
| cd ../net && cargo publish | ||||
|  | ||||
| # Publish infrastructure modules | ||||
| cd ../git && cargo publish | ||||
| cd ../vault && cargo publish | ||||
| cd ../kubernetes && cargo publish | ||||
| cd ../virt && cargo publish | ||||
|  | ||||
| # Publish client modules | ||||
| cd ../redisclient && cargo publish | ||||
| cd ../postgresclient && cargo publish | ||||
| cd ../zinit_client && cargo publish | ||||
| cd ../mycelium && cargo publish | ||||
|  | ||||
| # Publish scripting module | ||||
| cd ../rhai && cargo publish | ||||
|  | ||||
| # Finally, publish the meta-crate | ||||
| cd .. && cargo publish | ||||
| ``` | ||||
|  | ||||
| ### Automated Publishing | ||||
|  | ||||
| Use the comprehensive publishing script: | ||||
|  | ||||
| ```bash | ||||
| # Test the publishing process (safe) | ||||
| ./scripts/publish-all.sh --dry-run --version 0.1.0 | ||||
|  | ||||
| # Actually publish to crates.io | ||||
| ./scripts/publish-all.sh --version 0.1.0 | ||||
| ``` | ||||
|  | ||||
| The script handles: | ||||
| - ✅ **Dependency order** - Publishes crates in correct dependency order | ||||
| - ✅ **Path dependencies** - Automatically updates path deps to version deps | ||||
| - ✅ **Rate limiting** - Waits between publishes to avoid rate limits | ||||
| - ✅ **Error handling** - Stops on failures with clear error messages | ||||
| - ✅ **Dry run mode** - Test without actually publishing | ||||
|  | ||||
| ## 👥 User Consumption | ||||
|  | ||||
| ### Installation Options | ||||
|  | ||||
| #### Option 1: Individual Crates (Recommended) | ||||
|  | ||||
| Users install only what they need: | ||||
|  | ||||
| ```bash | ||||
| # Core functionality | ||||
| cargo add sal-os sal-process sal-text sal-net | ||||
|  | ||||
| # Database operations | ||||
| cargo add sal-redisclient sal-postgresclient | ||||
|  | ||||
| # Infrastructure management | ||||
| cargo add sal-git sal-vault sal-kubernetes | ||||
|  | ||||
| # Service integration | ||||
| cargo add sal-zinit-client sal-mycelium | ||||
|  | ||||
| # Scripting | ||||
| cargo add sal-rhai | ||||
| ``` | ||||
|  | ||||
| **Usage:** | ||||
| ```rust | ||||
| use sal_os::fs; | ||||
| use sal_process::run; | ||||
| use sal_git::GitManager; | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     let files = fs::list_files(".")?; | ||||
|     let result = run::command("echo hello")?; | ||||
|     let git = GitManager::new(".")?; | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| #### Option 2: Meta-crate with Features | ||||
|  | ||||
| Users can use the main crate with selective features: | ||||
|  | ||||
| ```bash | ||||
| # Specific modules | ||||
| cargo add sal --features os,process,text | ||||
|  | ||||
| # Feature groups | ||||
| cargo add sal --features core              # os, process, text, net | ||||
| cargo add sal --features clients           # redisclient, postgresclient, zinit_client, mycelium | ||||
| cargo add sal --features infrastructure    # git, vault, kubernetes, virt | ||||
| cargo add sal --features scripting         # rhai | ||||
|  | ||||
| # Everything | ||||
| cargo add sal --features all | ||||
| ``` | ||||
|  | ||||
| **Usage:** | ||||
| ```rust | ||||
| // Cargo.toml: sal = { version = "0.1.0", features = ["os", "process", "git"] } | ||||
| use sal::os::fs; | ||||
| use sal::process::run; | ||||
| use sal::git::GitManager; | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     let files = fs::list_files(".")?; | ||||
|     let result = run::command("echo hello")?; | ||||
|     let git = GitManager::new(".")?; | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Feature Groups | ||||
|  | ||||
| The meta-crate provides convenient feature groups: | ||||
|  | ||||
| - **`core`**: Essential system operations (os, process, text, net) | ||||
| - **`clients`**: Database and service clients (redisclient, postgresclient, zinit_client, mycelium) | ||||
| - **`infrastructure`**: Infrastructure management tools (git, vault, kubernetes, virt) | ||||
| - **`scripting`**: Rhai scripting support (rhai) | ||||
| - **`all`**: Everything included | ||||
|  | ||||
| ## 📋 Version Management | ||||
|  | ||||
| ### Semantic Versioning | ||||
|  | ||||
| All SAL crates follow semantic versioning: | ||||
|  | ||||
| - **Major version**: Breaking API changes | ||||
| - **Minor version**: New features, backward compatible | ||||
| - **Patch version**: Bug fixes, backward compatible | ||||
|  | ||||
| ### Synchronized Releases | ||||
|  | ||||
| All crates are released with the same version number to ensure compatibility: | ||||
|  | ||||
| ```toml | ||||
| # All crates use the same version | ||||
| sal-os = "0.1.0" | ||||
| sal-process = "0.1.0" | ||||
| sal-git = "0.1.0" | ||||
| # etc. | ||||
| ``` | ||||
|  | ||||
| ## 🔧 Maintenance | ||||
|  | ||||
| ### Updating Dependencies | ||||
|  | ||||
| When updating dependencies: | ||||
|  | ||||
| 1. Update `Cargo.toml` in the workspace root | ||||
| 2. Update individual crate dependencies if needed | ||||
| 3. Test all crates: `cargo test --workspace` | ||||
| 4. Publish with incremented version numbers | ||||
|  | ||||
| ### Adding New Modules | ||||
|  | ||||
| To add a new SAL module: | ||||
|  | ||||
| 1. Create the new crate directory | ||||
| 2. Add to workspace members in root `Cargo.toml` | ||||
| 3. Add optional dependency in root `Cargo.toml` | ||||
| 4. Add feature flag in root `Cargo.toml` | ||||
| 5. Add conditional re-export in `src/lib.rs` | ||||
| 6. Update documentation | ||||
|  | ||||
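Step 5 above ("Add conditional re-export in `src/lib.rs`") is the only step the guide does not illustrate elsewhere. A minimal sketch for a hypothetical new module named `foo`, mirroring the `dep:`-style feature flags the old root Cargo.toml used; the heredoc follows the same pattern as the README's herodo example:

```bash
# Hypothetical module "foo": append a feature-gated re-export to src/lib.rs
# (illustrative only; the actual lib.rs layout is not shown in this diff).
cat >> src/lib.rs << 'EOF'

#[cfg(feature = "foo")]
pub use sal_foo as foo;
EOF
```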
| ## 🎉 Benefits | ||||
|  | ||||
| ### For Users | ||||
|  | ||||
| - **Minimal Dependencies**: Install only what you need | ||||
| - **Faster Builds**: Smaller dependency trees compile faster | ||||
| - **Smaller Binaries**: Reduced binary size | ||||
| - **Clear Dependencies**: Explicit about what functionality is used | ||||
|  | ||||
| ### For Maintainers | ||||
|  | ||||
| - **Independent Releases**: Can release individual crates as needed | ||||
| - **Focused Testing**: Test individual modules in isolation | ||||
| - **Clear Ownership**: Each crate has clear responsibility | ||||
| - **Easier Maintenance**: Smaller, focused codebases | ||||
|  | ||||
| This publishing strategy provides the best of both worlds: modularity for users who want minimal dependencies, and convenience for users who prefer a single crate with features. | ||||
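The "Version Management" and "Updating Dependencies" sections above amount to a short routine. A sketch using cargo-edit's `set-version` (which the deleted workflows installed), looping over the same crate list as `publish.yml` rather than assuming a single root invocation updates every workspace member; 0.2.0 is an illustrative version:

```bash
# Sketch of the synchronized version-bump routine described in the guide above.
NEW_VERSION=0.2.0
cargo set-version "$NEW_VERSION"                 # root crate
for crate in os process text net git vault kubernetes virt \
             redisclient postgresclient zinit_client mycelium rhai; do
  [ -d "$crate" ] && (cd "$crate" && cargo set-version "$NEW_VERSION")
done
cargo test --workspace
./scripts/publish-all.sh --dry-run --version "$NEW_VERSION"
```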
							
								
								
									
README.md (226 changed lines)
							| @@ -6,10 +6,10 @@ SAL is a comprehensive Rust library designed to provide a unified and simplified | ||||
|  | ||||
| ## 🏗️ **Cargo Workspace Structure** | ||||
|  | ||||
| SAL is organized as a **Cargo workspace** with 15 specialized crates: | ||||
| SAL is organized as a **Cargo workspace** with 16 specialized crates: | ||||
|  | ||||
| - **Root Package**: `sal` - Umbrella crate that re-exports all modules | ||||
| - **12 Library Crates**: Core SAL modules (os, process, text, net, git, vault, kubernetes, virt, redisclient, postgresclient, zinit_client, mycelium) | ||||
| - **13 Library Crates**: Specialized SAL modules (git, text, os, net, etc.) | ||||
| - **1 Binary Crate**: `herodo` - Rhai script execution engine | ||||
| - **1 Integration Crate**: `rhai` - Rhai scripting integration layer | ||||
|  | ||||
| @@ -22,147 +22,6 @@ This workspace structure provides excellent build performance, dependency manage | ||||
| - **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure | ||||
| - **Production Ready**: 100% test coverage with comprehensive Rhai integration tests | ||||
|  | ||||
| ## 📦 Installation | ||||
|  | ||||
| SAL is designed to be modular - install only the components you need! | ||||
|  | ||||
| ### Option 1: Individual Crates (Recommended) | ||||
|  | ||||
| Install only the modules you need: | ||||
|  | ||||
| ```bash | ||||
| # Currently available packages | ||||
| cargo add sal-os sal-process sal-text sal-net sal-git sal-vault sal-kubernetes sal-virt | ||||
|  | ||||
| # Coming soon (rate limited) | ||||
| # cargo add sal-redisclient sal-postgresclient sal-zinit-client sal-mycelium sal-rhai | ||||
| ``` | ||||
|  | ||||
| ### Option 2: Meta-crate with Features | ||||
|  | ||||
| Use the main `sal` crate with specific features: | ||||
|  | ||||
| ```bash | ||||
| # Coming soon - meta-crate with features (rate limited) | ||||
| # cargo add sal --features os,process,text | ||||
| # cargo add sal --features core              # os, process, text, net | ||||
| # cargo add sal --features infrastructure    # git, vault, kubernetes, virt | ||||
| # cargo add sal --features all | ||||
|  | ||||
| # For now, use individual crates (see Option 1 above) | ||||
| ``` | ||||
|  | ||||
| ### Quick Start Examples | ||||
|  | ||||
| #### Using Individual Crates (Recommended) | ||||
|  | ||||
| ```rust | ||||
| use sal_os::fs; | ||||
| use sal_process::run; | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // File system operations | ||||
|     let files = fs::list_files(".")?; | ||||
|     println!("Found {} files", files.len()); | ||||
|  | ||||
|     // Process execution | ||||
|     let result = run::command("echo hello")?; | ||||
|     println!("Output: {}", result.stdout); | ||||
|  | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| #### Using Meta-crate with Features | ||||
|  | ||||
| ```rust | ||||
| // In Cargo.toml: sal = { version = "0.1.0", features = ["os", "process"] } | ||||
| use sal::os::fs; | ||||
| use sal::process::run; | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // File system operations | ||||
|     let files = fs::list_files(".")?; | ||||
|     println!("Found {} files", files.len()); | ||||
|  | ||||
|     // Process execution | ||||
|     let result = run::command("echo hello")?; | ||||
|     println!("Output: {}", result.stdout); | ||||
|  | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| #### Using Herodo for Scripting | ||||
|  | ||||
| ```bash | ||||
| # Build and install herodo | ||||
| git clone https://github.com/PlanetFirst/sal.git | ||||
| cd sal | ||||
| ./build_herodo.sh | ||||
|  | ||||
| # Create a script file | ||||
| cat > example.rhai << 'EOF' | ||||
| // File operations | ||||
| let files = find_files(".", "*.rs"); | ||||
| print("Found " + files.len() + " Rust files"); | ||||
|  | ||||
| // Process execution | ||||
| let result = run("echo 'Hello from SAL!'"); | ||||
| print("Output: " + result.stdout); | ||||
|  | ||||
| // Network operations | ||||
| let reachable = http_check("https://github.com"); | ||||
| print("GitHub reachable: " + reachable); | ||||
| EOF | ||||
|  | ||||
| # Execute the script | ||||
| herodo example.rhai | ||||
| ``` | ||||
|  | ||||
| ## 📦 Available Packages | ||||
|  | ||||
| SAL is published as individual crates, allowing you to install only what you need: | ||||
|  | ||||
| | Package | Description | Install Command | | ||||
| |---------|-------------|-----------------| | ||||
| | [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations | `cargo add sal-os` | | ||||
| | [`sal-process`](https://crates.io/crates/sal-process) | Process management | `cargo add sal-process` | | ||||
| | [`sal-text`](https://crates.io/crates/sal-text) | Text processing utilities | `cargo add sal-text` | | ||||
| | [`sal-net`](https://crates.io/crates/sal-net) | Network operations | `cargo add sal-net` | | ||||
| | [`sal-git`](https://crates.io/crates/sal-git) | Git repository management | `cargo add sal-git` | | ||||
| | [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations | `cargo add sal-vault` | | ||||
| | [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management | `cargo add sal-kubernetes` | | ||||
| | [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools | `cargo add sal-virt` | | ||||
| | `sal-redisclient` | Redis database client | `cargo add sal-redisclient` ⏳ | | ||||
| | `sal-postgresclient` | PostgreSQL client | `cargo add sal-postgresclient` ⏳ | | ||||
| | `sal-zinit-client` | Zinit process supervisor | `cargo add sal-zinit-client` ⏳ | | ||||
| | `sal-mycelium` | Mycelium network client | `cargo add sal-mycelium` ⏳ | | ||||
| | `sal-rhai` | Rhai scripting integration | `cargo add sal-rhai` ⏳ | | ||||
| | `sal` | Meta-crate with features | `cargo add sal --features all` ⏳ | | ||||
| | `herodo` | Script executor binary | Build from source ⏳ | | ||||
|  | ||||
| **Legend**: ✅ Published | ⏳ Publishing soon (rate limited) | ||||
|  | ||||
| ### 📢 **Publishing Status** | ||||
|  | ||||
| **Currently Available on crates.io:** | ||||
| - ✅ [`sal-os`](https://crates.io/crates/sal-os) - Operating system operations | ||||
| - ✅ [`sal-process`](https://crates.io/crates/sal-process) - Process management | ||||
| - ✅ [`sal-text`](https://crates.io/crates/sal-text) - Text processing utilities | ||||
| - ✅ [`sal-net`](https://crates.io/crates/sal-net) - Network operations | ||||
| - ✅ [`sal-git`](https://crates.io/crates/sal-git) - Git repository management | ||||
| - ✅ [`sal-vault`](https://crates.io/crates/sal-vault) - Cryptographic operations | ||||
| - ✅ [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) - Kubernetes management | ||||
| - ✅ [`sal-virt`](https://crates.io/crates/sal-virt) - Virtualization tools | ||||
|  | ||||
| **Publishing Soon** (hit crates.io rate limit): | ||||
| - ⏳ `sal-redisclient`, `sal-postgresclient`, `sal-zinit-client`, `sal-mycelium` | ||||
| - ⏳ `sal-rhai` | ||||
| - ⏳ `sal` (meta-crate), `herodo` (binary) | ||||
|  | ||||
| **Estimated Timeline**: Remaining packages will be published within 24 hours once the rate limit resets. | ||||
|  | ||||
| ## Core Features | ||||
|  | ||||
| SAL offers a broad spectrum of functionalities, including: | ||||
| @@ -254,77 +113,42 @@ For more examples, check the individual module test directories (e.g., `text/tes | ||||
|  | ||||
| ## Using SAL as a Rust Library | ||||
|  | ||||
| ### Option 1: Individual Crates (Recommended) | ||||
|  | ||||
| Add only the SAL modules you need: | ||||
| Add SAL as a dependency to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-os = "0.1.0" | ||||
| sal-process = "0.1.0" | ||||
| sal-text = "0.1.0" | ||||
| sal = "0.1.0" # Or the latest version | ||||
| ``` | ||||
|  | ||||
| ### Rust Example: Using Redis Client | ||||
|  | ||||
| ```rust | ||||
| use sal_os::fs; | ||||
| use sal_process::run; | ||||
| use sal_text::template; | ||||
| use sal::redisclient::{get_global_client, execute_cmd_with_args}; | ||||
| use redis::RedisResult; | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // File operations | ||||
|     let files = fs::list_files(".")?; | ||||
|     println!("Found {} files", files.len()); | ||||
| async fn example_redis_interaction() -> RedisResult<()> { | ||||
|     // Get a connection from the global pool | ||||
|     let mut conn = get_global_client().await?.get_async_connection().await?; | ||||
|  | ||||
|     // Process execution | ||||
|     let result = run::command("echo 'Hello SAL!'")?; | ||||
|     println!("Output: {}", result.stdout); | ||||
|     // Set a value | ||||
|     execute_cmd_with_args(&mut conn, "SET", vec!["my_key", "my_value"]).await?; | ||||
|     println!("Set 'my_key' to 'my_value'"); | ||||
|  | ||||
|     // Text templating | ||||
|     let template_str = "Hello {{name}}!"; | ||||
|     let mut vars = std::collections::HashMap::new(); | ||||
|     vars.insert("name".to_string(), "World".to_string()); | ||||
|     let rendered = template::render(template_str, &vars)?; | ||||
|     println!("Rendered: {}", rendered); | ||||
|     // Get a value | ||||
|     let value: String = execute_cmd_with_args(&mut conn, "GET", vec!["my_key"]).await?; | ||||
|     println!("Retrieved value for 'my_key': {}", value); | ||||
|  | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     if let Err(e) = example_redis_interaction().await { | ||||
|         eprintln!("Redis Error: {}", e); | ||||
|     } | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Option 2: Meta-crate with Features (Coming Soon) | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal = { version = "0.1.0", features = ["os", "process", "text"] } | ||||
| ``` | ||||
|  | ||||
| ```rust | ||||
| use sal::os::fs; | ||||
| use sal::process::run; | ||||
| use sal::text::template; | ||||
|  | ||||
| // Same code as above, but using the meta-crate | ||||
| ``` | ||||
|  | ||||
| *(Note: The meta-crate `sal` will be available once all individual packages are published.)* | ||||
|  | ||||
| ## 🎯 **Why Choose SAL?** | ||||
|  | ||||
| ### **Modular Architecture** | ||||
| - **Install Only What You Need**: Each package is independent - no bloated dependencies | ||||
| - **Faster Compilation**: Smaller dependency trees mean faster build times | ||||
| - **Smaller Binaries**: Only include the functionality you actually use | ||||
| - **Clear Dependencies**: Explicit about what functionality your project uses | ||||
|  | ||||
| ### **Developer Experience** | ||||
| - **Consistent APIs**: All packages follow the same design patterns and conventions | ||||
| - **Comprehensive Documentation**: Each package has detailed documentation and examples | ||||
| - **Real-World Tested**: All functionality is production-tested, no placeholder code | ||||
| - **Type Safety**: Leverages Rust's type system for safe, reliable operations | ||||
|  | ||||
| ### **Scripting Power** | ||||
| - **Herodo Integration**: Execute Rhai scripts with full access to SAL functionality | ||||
| - **Cross-Platform**: Works consistently across Windows, macOS, and Linux | ||||
| - **Automation Ready**: Perfect for DevOps, CI/CD, and system administration tasks | ||||
| *(Note: The Redis client API might have evolved; please refer to `src/redisclient/mod.rs` and its documentation for the most current usage.)* | ||||
|  | ||||
| ## 📦 **Workspace Modules Overview** | ||||
|  | ||||
|   | ||||

examples/kubernetes/basic_operations.rhai (72 changed lines)
| @@ -1,72 +0,0 @@ | ||||
| //! Basic Kubernetes operations example | ||||
| //! | ||||
| //! This script demonstrates basic Kubernetes operations using the SAL Kubernetes module. | ||||
| //!  | ||||
| //! Prerequisites: | ||||
| //! - A running Kubernetes cluster | ||||
| //! - Valid kubeconfig file or in-cluster configuration | ||||
| //! - Appropriate permissions for the operations | ||||
| //! | ||||
| //! Usage: | ||||
| //!   herodo examples/kubernetes/basic_operations.rhai | ||||
|  | ||||
| print("=== SAL Kubernetes Basic Operations Example ==="); | ||||
|  | ||||
| // Create a KubernetesManager for the default namespace | ||||
| print("Creating KubernetesManager for 'default' namespace..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("✓ KubernetesManager created for namespace: " + namespace(km)); | ||||
|  | ||||
| // List all pods in the namespace | ||||
| print("\n--- Listing Pods ---"); | ||||
| let pods = pods_list(km); | ||||
| print("Found " + pods.len() + " pods in the namespace:"); | ||||
| for pod in pods { | ||||
|     print("  - " + pod); | ||||
| } | ||||
|  | ||||
| // List all services in the namespace | ||||
| print("\n--- Listing Services ---"); | ||||
| let services = services_list(km); | ||||
| print("Found " + services.len() + " services in the namespace:"); | ||||
| for service in services { | ||||
|     print("  - " + service); | ||||
| } | ||||
|  | ||||
| // List all deployments in the namespace | ||||
| print("\n--- Listing Deployments ---"); | ||||
| let deployments = deployments_list(km); | ||||
| print("Found " + deployments.len() + " deployments in the namespace:"); | ||||
| for deployment in deployments { | ||||
|     print("  - " + deployment); | ||||
| } | ||||
|  | ||||
| // Get resource counts | ||||
| print("\n--- Resource Counts ---"); | ||||
| let counts = resource_counts(km); | ||||
| print("Resource counts in namespace '" + namespace(km) + "':"); | ||||
| for resource_type in counts.keys() { | ||||
|     print("  " + resource_type + ": " + counts[resource_type]); | ||||
| } | ||||
|  | ||||
| // List all namespaces (cluster-wide operation) | ||||
| print("\n--- Listing All Namespaces ---"); | ||||
| let namespaces = namespaces_list(km); | ||||
| print("Found " + namespaces.len() + " namespaces in the cluster:"); | ||||
| for ns in namespaces { | ||||
|     print("  - " + ns); | ||||
| } | ||||
|  | ||||
| // Check if specific namespaces exist | ||||
| print("\n--- Checking Namespace Existence ---"); | ||||
| let test_namespaces = ["default", "kube-system", "non-existent-namespace"]; | ||||
| for ns in test_namespaces { | ||||
|     let exists = namespace_exists(km, ns); | ||||
|     if exists { | ||||
|         print("✓ Namespace '" + ns + "' exists"); | ||||
|     } else { | ||||
|         print("✗ Namespace '" + ns + "' does not exist"); | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("\n=== Example completed successfully! ==="); | ||||
examples/kubernetes/multi_namespace_operations.rhai (208 changed lines)
| @@ -1,208 +0,0 @@ | ||||
| //! Multi-namespace Kubernetes operations example | ||||
| //! | ||||
| //! This script demonstrates working with multiple namespaces and comparing resources across them. | ||||
| //!  | ||||
| //! Prerequisites: | ||||
| //! - A running Kubernetes cluster | ||||
| //! - Valid kubeconfig file or in-cluster configuration | ||||
| //! - Appropriate permissions for the operations | ||||
| //! | ||||
| //! Usage: | ||||
| //!   herodo examples/kubernetes/multi_namespace_operations.rhai | ||||
|  | ||||
| print("=== SAL Kubernetes Multi-Namespace Operations Example ==="); | ||||
|  | ||||
| // Define namespaces to work with | ||||
| let target_namespaces = ["default", "kube-system"]; | ||||
| let managers = #{}; | ||||
|  | ||||
| print("Creating managers for multiple namespaces..."); | ||||
|  | ||||
| // Create managers for each namespace | ||||
| for ns in target_namespaces { | ||||
|     try { | ||||
|         let km = kubernetes_manager_new(ns); | ||||
|         managers[ns] = km; | ||||
|         print("✓ Created manager for namespace: " + ns); | ||||
|     } catch(e) { | ||||
|         print("✗ Failed to create manager for " + ns + ": " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Function to safely get resource counts | ||||
| fn get_safe_counts(km) { | ||||
|     try { | ||||
|         return resource_counts(km); | ||||
|     } catch(e) { | ||||
|         print("  Warning: Could not get resource counts - " + e); | ||||
|         return #{}; | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Function to safely get pod list | ||||
| fn get_safe_pods(km) { | ||||
|     try { | ||||
|         return pods_list(km); | ||||
|     } catch(e) { | ||||
|         print("  Warning: Could not list pods - " + e); | ||||
|         return []; | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Compare resource counts across namespaces | ||||
| print("\n--- Resource Comparison Across Namespaces ---"); | ||||
| let total_resources = #{}; | ||||
|  | ||||
| for ns in target_namespaces { | ||||
|     if ns in managers { | ||||
|         let km = managers[ns]; | ||||
|         print("\nNamespace: " + ns); | ||||
|         let counts = get_safe_counts(km); | ||||
|          | ||||
|         for resource_type in counts.keys() { | ||||
|             let count = counts[resource_type]; | ||||
|             print("  " + resource_type + ": " + count); | ||||
|              | ||||
|             // Accumulate totals | ||||
|             if resource_type in total_resources { | ||||
|                 total_resources[resource_type] = total_resources[resource_type] + count; | ||||
|             } else { | ||||
|                 total_resources[resource_type] = count; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("\n--- Total Resources Across All Namespaces ---"); | ||||
| for resource_type in total_resources.keys() { | ||||
|     print("Total " + resource_type + ": " + total_resources[resource_type]); | ||||
| } | ||||
|  | ||||
| // Find namespaces with the most resources | ||||
| print("\n--- Namespace Resource Analysis ---"); | ||||
| let namespace_totals = #{}; | ||||
|  | ||||
| for ns in target_namespaces { | ||||
|     if ns in managers { | ||||
|         let km = managers[ns]; | ||||
|         let counts = get_safe_counts(km); | ||||
|         let total = 0; | ||||
|          | ||||
|         for resource_type in counts.keys() { | ||||
|             total = total + counts[resource_type]; | ||||
|         } | ||||
|          | ||||
|         namespace_totals[ns] = total; | ||||
|         print("Namespace '" + ns + "' has " + total + " total resources"); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Find the busiest namespace | ||||
| let busiest_ns = ""; | ||||
| let max_resources = 0; | ||||
| for ns in namespace_totals.keys() { | ||||
|     if namespace_totals[ns] > max_resources { | ||||
|         max_resources = namespace_totals[ns]; | ||||
|         busiest_ns = ns; | ||||
|     } | ||||
| } | ||||
|  | ||||
| if busiest_ns != "" { | ||||
|     print("🏆 Busiest namespace: '" + busiest_ns + "' with " + max_resources + " resources"); | ||||
| } | ||||
|  | ||||
| // Detailed pod analysis | ||||
| print("\n--- Pod Analysis Across Namespaces ---"); | ||||
| let all_pods = []; | ||||
|  | ||||
| for ns in target_namespaces { | ||||
|     if ns in managers { | ||||
|         let km = managers[ns]; | ||||
|         let pods = get_safe_pods(km); | ||||
|          | ||||
|         print("\nNamespace '" + ns + "' pods:"); | ||||
|         if pods.len() == 0 { | ||||
|             print("  (no pods)"); | ||||
|         } else { | ||||
|             for pod in pods { | ||||
|                 print("  - " + pod); | ||||
|                 all_pods.push(ns + "/" + pod); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("\n--- All Pods Summary ---"); | ||||
| print("Total pods across all namespaces: " + all_pods.len()); | ||||
|  | ||||
| // Look for common pod name patterns | ||||
| print("\n--- Pod Name Pattern Analysis ---"); | ||||
| let patterns = #{ | ||||
|     "system": 0, | ||||
|     "kube": 0, | ||||
|     "coredns": 0, | ||||
|     "proxy": 0, | ||||
|     "controller": 0 | ||||
| }; | ||||
|  | ||||
| for pod_full_name in all_pods { | ||||
|     let pod_name = pod_full_name.to_lower(); | ||||
|      | ||||
|     for pattern in patterns.keys() { | ||||
|         if pod_name.contains(pattern) { | ||||
|             patterns[pattern] = patterns[pattern] + 1; | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("Common pod name patterns found:"); | ||||
| for pattern in patterns.keys() { | ||||
|     if patterns[pattern] > 0 { | ||||
|         print("  '" + pattern + "': " + patterns[pattern] + " pods"); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Namespace health check | ||||
| print("\n--- Namespace Health Check ---"); | ||||
| for ns in target_namespaces { | ||||
|     if ns in managers { | ||||
|         let km = managers[ns]; | ||||
|         print("\nChecking namespace: " + ns); | ||||
|          | ||||
|         // Check if namespace exists (should always be true for our managers) | ||||
|         let exists = namespace_exists(km, ns); | ||||
|         if exists { | ||||
|             print("  ✓ Namespace exists and is accessible"); | ||||
|         } else { | ||||
|             print("  ✗ Namespace existence check failed"); | ||||
|         } | ||||
|          | ||||
|         // Try to get resource counts as a health indicator | ||||
|         let counts = get_safe_counts(km); | ||||
|         if counts.len() > 0 { | ||||
|             print("  ✓ Can access resources (" + counts.len() + " resource types)"); | ||||
|         } else { | ||||
|             print("  ⚠ No resources found or access limited"); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Create a summary report | ||||
| print("\n--- Summary Report ---"); | ||||
| print("Namespaces analyzed: " + target_namespaces.len()); | ||||
| print("Total unique resource types: " + total_resources.len()); | ||||
|  | ||||
| let grand_total = 0; | ||||
| for resource_type in total_resources.keys() { | ||||
|     grand_total = grand_total + total_resources[resource_type]; | ||||
| } | ||||
| print("Grand total resources: " + grand_total); | ||||
|  | ||||
| print("\nResource breakdown:"); | ||||
| for resource_type in total_resources.keys() { | ||||
|     let count = total_resources[resource_type]; | ||||
|     let percentage = (count * 100) / grand_total; | ||||
|     print("  " + resource_type + ": " + count + " (" + percentage + "%)"); | ||||
| } | ||||
|  | ||||
| print("\n=== Multi-namespace operations example completed! ==="); | ||||
| @@ -1,95 +0,0 @@ | ||||
| //! Kubernetes namespace management example | ||||
| //! | ||||
| //! This script demonstrates namespace creation and management operations. | ||||
| //!  | ||||
| //! Prerequisites: | ||||
| //! - A running Kubernetes cluster | ||||
| //! - Valid kubeconfig file or in-cluster configuration | ||||
| //! - Permissions to create and manage namespaces | ||||
| //! | ||||
| //! Usage: | ||||
| //!   herodo examples/kubernetes/namespace_management.rhai | ||||
|  | ||||
| print("=== SAL Kubernetes Namespace Management Example ==="); | ||||
|  | ||||
| // Create a KubernetesManager | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("Created KubernetesManager for namespace: " + namespace(km)); | ||||
|  | ||||
| // Define test namespace names | ||||
| let test_namespaces = [ | ||||
|     "sal-test-namespace-1", | ||||
|     "sal-test-namespace-2",  | ||||
|     "sal-example-app" | ||||
| ]; | ||||
|  | ||||
| print("\n--- Creating Test Namespaces ---"); | ||||
| for ns in test_namespaces { | ||||
|     print("Creating namespace: " + ns); | ||||
|     try { | ||||
|         namespace_create(km, ns); | ||||
|         print("✓ Successfully created namespace: " + ns); | ||||
|     } catch(e) { | ||||
|         print("✗ Failed to create namespace " + ns + ": " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Namespace creation is effectively immediate, so proceed straight to verification | ||||
| print("\nProceeding to verify the namespaces..."); | ||||
|  | ||||
| // Verify namespaces were created | ||||
| print("\n--- Verifying Namespace Creation ---"); | ||||
| for ns in test_namespaces { | ||||
|     let exists = namespace_exists(km, ns); | ||||
|     if exists { | ||||
|         print("✓ Namespace '" + ns + "' exists"); | ||||
|     } else { | ||||
|         print("✗ Namespace '" + ns + "' was not found"); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // List all namespaces to see our new ones | ||||
| print("\n--- Current Namespaces ---"); | ||||
| let all_namespaces = namespaces_list(km); | ||||
| print("Total namespaces in cluster: " + all_namespaces.len()); | ||||
| for ns in all_namespaces { | ||||
|     if ns.starts_with("sal-") { | ||||
|         print("  🔹 " + ns + " (created by this example)"); | ||||
|     } else { | ||||
|         print("  - " + ns); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Test idempotent creation (creating the same namespace again) | ||||
| print("\n--- Testing Idempotent Creation ---"); | ||||
| let test_ns = test_namespaces[0]; | ||||
| print("Attempting to create existing namespace: " + test_ns); | ||||
| try { | ||||
|     namespace_create(km, test_ns); | ||||
|     print("✓ Idempotent creation successful (no error for existing namespace)"); | ||||
| } catch(e) { | ||||
|     print("✗ Unexpected error during idempotent creation: " + e); | ||||
| } | ||||
|  | ||||
| // Create managers for the new namespaces and check their properties | ||||
| print("\n--- Creating Managers for New Namespaces ---"); | ||||
| for ns in test_namespaces { | ||||
|     try { | ||||
|         let ns_km = kubernetes_manager_new(ns); | ||||
|         print("✓ Created manager for namespace: " + namespace(ns_km)); | ||||
|          | ||||
|         // Get resource counts for the new namespace (should be mostly empty) | ||||
|         let counts = resource_counts(ns_km); | ||||
|         print("  Resource counts: " + counts); | ||||
|     } catch(e) { | ||||
|         print("✗ Failed to create manager for " + ns + ": " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("\n--- Cleanup Instructions ---"); | ||||
| print("To clean up the test namespaces created by this example, run:"); | ||||
| for ns in test_namespaces { | ||||
|     print("  kubectl delete namespace " + ns); | ||||
| } | ||||
|  | ||||
| print("\n=== Namespace management example completed! ==="); | ||||
| @@ -1,157 +0,0 @@ | ||||
| //! Kubernetes pattern-based deletion example | ||||
| //! | ||||
| //! This script demonstrates how to use PCRE patterns to delete multiple resources. | ||||
| //!  | ||||
| //! ⚠️  WARNING: This example includes actual deletion operations! | ||||
| //! ⚠️  Only run this in a test environment! | ||||
| //!  | ||||
| //! Prerequisites: | ||||
| //! - A running Kubernetes cluster (preferably a test cluster) | ||||
| //! - Valid kubeconfig file or in-cluster configuration | ||||
| //! - Permissions to delete resources | ||||
| //! | ||||
| //! Usage: | ||||
| //!   herodo examples/kubernetes/pattern_deletion.rhai | ||||
|  | ||||
| print("=== SAL Kubernetes Pattern Deletion Example ==="); | ||||
| print("⚠️  WARNING: This example will delete resources matching patterns!"); | ||||
| print("⚠️  Only run this in a test environment!"); | ||||
|  | ||||
| // Start in the default namespace; a dedicated test namespace is created and used below | ||||
| let test_namespace = "sal-pattern-test"; | ||||
| let km = kubernetes_manager_new("default"); | ||||
|  | ||||
| print("\nCreating test namespace: " + test_namespace); | ||||
| try { | ||||
|     namespace_create(km, test_namespace); | ||||
|     print("✓ Test namespace created"); | ||||
| } catch(e) { | ||||
|     print("Note: " + e); | ||||
| } | ||||
|  | ||||
| // Switch to the test namespace | ||||
| let test_km = kubernetes_manager_new(test_namespace); | ||||
| print("Switched to namespace: " + namespace(test_km)); | ||||
|  | ||||
| // Show current resources before any operations | ||||
| print("\n--- Current Resources in Test Namespace ---"); | ||||
| let counts = resource_counts(test_km); | ||||
| print("Resource counts before operations:"); | ||||
| for resource_type in counts.keys() { | ||||
|     print("  " + resource_type + ": " + counts[resource_type]); | ||||
| } | ||||
|  | ||||
| // List current pods to see what we're working with | ||||
| let current_pods = pods_list(test_km); | ||||
| print("\nCurrent pods in namespace:"); | ||||
| if current_pods.len() == 0 { | ||||
|     print("  (no pods found)"); | ||||
| } else { | ||||
|     for pod in current_pods { | ||||
|         print("  - " + pod); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Demonstrate pattern matching without deletion first | ||||
| print("\n--- Pattern Matching Demo (Dry Run) ---"); | ||||
| let test_patterns = [ | ||||
|     "test-.*",           // Match anything starting with "test-" | ||||
|     ".*-temp$",          // Match anything ending with "-temp" | ||||
|     "demo-pod-.*",       // Match demo pods | ||||
|     "nginx-.*",          // Match nginx pods | ||||
|     "app-[0-9]+",        // Match app-1, app-2, etc. | ||||
| ]; | ||||
|  | ||||
| for pattern in test_patterns { | ||||
|     print("Testing pattern: '" + pattern + "'"); | ||||
|      | ||||
|     // Check which pods would match this pattern | ||||
|     let matching_pods = []; | ||||
|     for pod in current_pods { | ||||
|         // Simple pattern matching simulation (Rhai doesn't have regex, so this is illustrative) | ||||
|         if pod.contains("test") && pattern == "test-.*" { | ||||
|             matching_pods.push(pod); | ||||
|         } else if pod.contains("temp") && pattern == ".*-temp$" { | ||||
|             matching_pods.push(pod); | ||||
|         } else if pod.contains("demo") && pattern == "demo-pod-.*" { | ||||
|             matching_pods.push(pod); | ||||
|         } else if pod.contains("nginx") && pattern == "nginx-.*" { | ||||
|             matching_pods.push(pod); | ||||
|         } | ||||
|     } | ||||
|      | ||||
|     print("  Would match " + matching_pods.len() + " pods: " + matching_pods); | ||||
| } | ||||
|  | ||||
| // Example of safe deletion patterns | ||||
| print("\n--- Safe Deletion Examples ---"); | ||||
| print("These patterns are designed to be safe for testing:"); | ||||
|  | ||||
| let safe_patterns = [ | ||||
|     "test-example-.*",      // Very specific test resources | ||||
|     "sal-demo-.*",          // SAL demo resources | ||||
|     "temp-resource-.*",     // Temporary resources | ||||
| ]; | ||||
|  | ||||
| for pattern in safe_patterns { | ||||
|     print("\nTesting safe pattern: '" + pattern + "'"); | ||||
|      | ||||
|     try { | ||||
|         // This will actually attempt deletion, but should be safe in a test environment | ||||
|         let deleted_count = delete(test_km, pattern); | ||||
|         print("✓ Pattern '" + pattern + "' matched and deleted " + deleted_count + " resources"); | ||||
|     } catch(e) { | ||||
|         print("Note: Pattern '" + pattern + "' - " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Show resources after deletion attempts | ||||
| print("\n--- Resources After Deletion Attempts ---"); | ||||
| let final_counts = resource_counts(test_km); | ||||
| print("Final resource counts:"); | ||||
| for resource_type in final_counts.keys() { | ||||
|     print("  " + resource_type + ": " + final_counts[resource_type]); | ||||
| } | ||||
|  | ||||
| // Example of individual resource deletion | ||||
| print("\n--- Individual Resource Deletion Examples ---"); | ||||
| print("These functions delete specific resources by name:"); | ||||
|  | ||||
| // These are examples - they will fail if the resources don't exist, which is expected | ||||
| let example_deletions = [ | ||||
|     ["pod", "test-pod-example"], | ||||
|     ["service", "test-service-example"], | ||||
|     ["deployment", "test-deployment-example"], | ||||
| ]; | ||||
|  | ||||
| for deletion in example_deletions { | ||||
|     let resource_type = deletion[0]; | ||||
|     let resource_name = deletion[1]; | ||||
|      | ||||
|     print("Attempting to delete " + resource_type + ": " + resource_name); | ||||
|     try { | ||||
|         if resource_type == "pod" { | ||||
|             pod_delete(test_km, resource_name); | ||||
|         } else if resource_type == "service" { | ||||
|             service_delete(test_km, resource_name); | ||||
|         } else if resource_type == "deployment" { | ||||
|             deployment_delete(test_km, resource_name); | ||||
|         } | ||||
|         print("✓ Successfully deleted " + resource_type + ": " + resource_name); | ||||
|     } catch(e) { | ||||
|         print("Note: " + resource_type + " '" + resource_name + "' - " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| print("\n--- Best Practices for Pattern Deletion ---"); | ||||
| print("1. Always test patterns in a safe environment first"); | ||||
| print("2. Use specific patterns rather than broad ones"); | ||||
| print("3. Consider using dry-run approaches when possible"); | ||||
| print("4. Have backups or be able to recreate resources"); | ||||
| print("5. Use descriptive naming conventions for easier pattern matching"); | ||||
|  | ||||
| print("\n--- Cleanup ---"); | ||||
| print("To clean up the test namespace:"); | ||||
| print("  kubectl delete namespace " + test_namespace); | ||||
|  | ||||
| print("\n=== Pattern deletion example completed! ==="); | ||||
| @@ -1,33 +0,0 @@ | ||||
| //! Test Kubernetes module registration | ||||
| //! | ||||
| //! This script tests that the Kubernetes module is properly registered | ||||
| //! and available in the Rhai environment. | ||||
|  | ||||
| print("=== Testing Kubernetes Module Registration ==="); | ||||
|  | ||||
| // List the Kubernetes functions that the module is expected to register | ||||
| print("Checking expected function registration..."); | ||||
|  | ||||
| // Printing this list does not require a cluster connection | ||||
| let functions_to_test = [ | ||||
|     "kubernetes_manager_new", | ||||
|     "pods_list", | ||||
|     "services_list",  | ||||
|     "deployments_list", | ||||
|     "delete", | ||||
|     "namespace_create", | ||||
|     "namespace_exists", | ||||
|     "resource_counts", | ||||
|     "pod_delete", | ||||
|     "service_delete", | ||||
|     "deployment_delete", | ||||
|     "namespace" | ||||
| ]; | ||||
|  | ||||
| for func_name in functions_to_test { | ||||
|     print("✓ Expecting function '" + func_name + "' to be registered"); | ||||
| } | ||||
|  | ||||
| print("\n=== Kubernetes function list check completed! ==="); | ||||
| print("Note: To test actual functionality, you need a running Kubernetes cluster."); | ||||
| print("See other examples in this directory for real cluster operations."); | ||||
| @@ -1,18 +1,9 @@ | ||||
| # SAL Git Package (`sal-git`) | ||||
| # SAL `git` Module | ||||
|  | ||||
| The `sal-git` package provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication. | ||||
| The `git` module in SAL provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication. | ||||
|  | ||||
| This module is central to SAL's capabilities for managing source code, enabling automation of development tasks, and integrating with version control systems. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-git = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## Core Components | ||||
|  | ||||
| The module is primarily composed of two main parts: | ||||
|   | ||||
| @@ -18,8 +18,8 @@ path = "src/main.rs" | ||||
| env_logger = { workspace = true } | ||||
| rhai = { workspace = true } | ||||
|  | ||||
| # SAL library for Rhai module registration (with all features for herodo) | ||||
| sal = { path = "..", features = ["all"] } | ||||
| # SAL library for Rhai module registration | ||||
| sal = { path = ".." } | ||||
|  | ||||
| [dev-dependencies] | ||||
| tempfile = { workspace = true } | ||||
|   | ||||
| @@ -15,32 +15,14 @@ Herodo is a command-line utility that executes Rhai scripts with full access to | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| ### Build and Install | ||||
| Build the herodo binary: | ||||
|  | ||||
| ```bash | ||||
| git clone https://github.com/PlanetFirst/sal.git | ||||
| cd sal | ||||
| ./build_herodo.sh | ||||
| cd herodo | ||||
| cargo build --release | ||||
| ``` | ||||
|  | ||||
| This script will: | ||||
| - Build herodo in debug mode | ||||
| - Install it to `~/hero/bin/herodo` (non-root) or `/usr/local/bin/herodo` (root) | ||||
| - Make it available in your PATH | ||||
|  | ||||
| **Note**: If using the non-root installation, make sure `~/hero/bin` is in your PATH: | ||||
| ```bash | ||||
| export PATH="$HOME/hero/bin:$PATH" | ||||
| ``` | ||||
|  | ||||
| ### Install from crates.io (Coming Soon) | ||||
|  | ||||
| ```bash | ||||
| # This will be available once herodo is published to crates.io | ||||
| cargo install herodo | ||||
| ``` | ||||
|  | ||||
| **Note**: `herodo` is not yet published to crates.io due to publishing rate limits. It will be available soon. | ||||
| The executable will be available at `target/release/herodo`. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
|   | ||||
| @@ -1,56 +0,0 @@ | ||||
| [package] | ||||
| name = "sal-kubernetes" | ||||
| version = "0.1.0" | ||||
| edition = "2021" | ||||
| authors = ["PlanetFirst <info@incubaid.com>"] | ||||
| description = "SAL Kubernetes - Kubernetes cluster management and operations using kube-rs SDK" | ||||
| repository = "https://git.threefold.info/herocode/sal" | ||||
| license = "Apache-2.0" | ||||
| keywords = ["kubernetes", "k8s", "cluster", "container", "orchestration"] | ||||
| categories = ["api-bindings", "development-tools"] | ||||
|  | ||||
| [dependencies] | ||||
| # Kubernetes client library | ||||
| kube = { version = "0.95.0", features = ["client", "config", "derive"] } | ||||
| k8s-openapi = { version = "0.23.0", features = ["latest"] } | ||||
|  | ||||
| # Async runtime | ||||
| tokio = { version = "1.45.0", features = ["full"] } | ||||
|  | ||||
| # Production safety features | ||||
| tokio-retry = "0.3.0" | ||||
| governor = "0.6.3" | ||||
| tower = { version = "0.5.2", features = ["timeout", "limit"] } | ||||
|  | ||||
| # Error handling | ||||
| thiserror = "2.0.12" | ||||
| anyhow = "1.0.98" | ||||
|  | ||||
| # Serialization | ||||
| serde = { version = "1.0", features = ["derive"] } | ||||
| serde_json = "1.0" | ||||
| serde_yaml = "0.9" | ||||
|  | ||||
| # Regular expressions for pattern matching | ||||
| regex = "1.10.2" | ||||
|  | ||||
| # Logging | ||||
| log = "0.4" | ||||
|  | ||||
| # Rhai scripting support (optional) | ||||
| rhai = { version = "1.12.0", features = ["sync"], optional = true } | ||||
|  | ||||
| # UUID for resource identification | ||||
| uuid = { version = "1.16.0", features = ["v4"] } | ||||
|  | ||||
| # Base64 encoding for secrets | ||||
| base64 = "0.22.1" | ||||
|  | ||||
| [dev-dependencies] | ||||
| tempfile = "3.5" | ||||
| tokio-test = "0.4.4" | ||||
| env_logger = "0.11.5" | ||||
|  | ||||
| [features] | ||||
| default = ["rhai"] | ||||
| rhai = ["dep:rhai"] | ||||
| @@ -1,227 +0,0 @@ | ||||
| # SAL Kubernetes (`sal-kubernetes`) | ||||
|  | ||||
| Kubernetes cluster management and operations for the System Abstraction Layer (SAL). | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-kubernetes = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## ⚠️ **IMPORTANT SECURITY NOTICE** | ||||
|  | ||||
| **This package includes destructive operations that can permanently delete Kubernetes resources!** | ||||
|  | ||||
| - The `delete(pattern)` function uses PCRE regex patterns to bulk delete resources | ||||
| - **Always test patterns in a safe environment first** | ||||
| - Use specific patterns to avoid accidental deletion of critical resources | ||||
| - Consider the impact on dependent resources before deletion | ||||
| - **No confirmation prompts** - deletions are immediate and irreversible | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| This package provides a high-level interface for managing Kubernetes clusters using the `kube-rs` SDK. It focuses on namespace-scoped operations through the `KubernetesManager` factory pattern. | ||||
|  | ||||
| ### Production Safety Features | ||||
|  | ||||
| - **Configurable Timeouts**: All operations have configurable timeouts to prevent hanging | ||||
| - **Exponential Backoff Retry**: Automatic retry logic for transient failures | ||||
| - **Rate Limiting**: Built-in rate limiting to prevent API overload | ||||
| - **Comprehensive Error Handling**: Detailed error types and proper error propagation | ||||
| - **Structured Logging**: Production-ready logging for monitoring and debugging | ||||
|  | ||||
| ## Features | ||||
|  | ||||
| - **Namespace-scoped Management**: Each `KubernetesManager` instance operates on a single namespace | ||||
| - **Pod Management**: List, create, and manage pods | ||||
| - **Pattern-based Deletion**: Delete resources using PCRE pattern matching | ||||
| - **Namespace Operations**: Create and manage namespaces (idempotent operations) | ||||
| - **Resource Management**: Support for pods, services, deployments, configmaps, secrets, and more | ||||
| - **Rhai Integration**: Full scripting support through Rhai wrappers | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| ### Basic Operations | ||||
|  | ||||
| ```rust | ||||
| use sal_kubernetes::KubernetesManager; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a manager for the "default" namespace | ||||
|     let km = KubernetesManager::new("default").await?; | ||||
|      | ||||
|     // List all pods in the namespace | ||||
|     let pods = km.pods_list().await?; | ||||
|     println!("Found {} pods", pods.len()); | ||||
|      | ||||
|     // Create a namespace (no error if it already exists) | ||||
|     km.namespace_create("my-namespace").await?; | ||||
|      | ||||
|     // Delete resources matching a pattern | ||||
|     km.delete("test-.*").await?; | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Rhai Scripting | ||||
|  | ||||
| ```javascript | ||||
| // Create Kubernetes manager for namespace | ||||
| let km = kubernetes_manager_new("default"); | ||||
|  | ||||
| // List pods | ||||
| let pods = pods_list(km); | ||||
| print("Found " + pods.len() + " pods"); | ||||
|  | ||||
| // Create namespace | ||||
| namespace_create(km, "my-app"); | ||||
|  | ||||
| // Delete test resources | ||||
| delete(km, "test-.*"); | ||||
| ``` | ||||
|  | ||||
| ## Dependencies | ||||
|  | ||||
| - `kube`: Kubernetes client library | ||||
| - `k8s-openapi`: Kubernetes API types | ||||
| - `tokio`: Async runtime | ||||
| - `regex`: Pattern matching for resource deletion | ||||
| - `rhai`: Scripting integration (optional) | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| ### Kubernetes Authentication | ||||
|  | ||||
| The package uses the standard Kubernetes configuration methods: | ||||
| - In-cluster configuration (when running in a pod) | ||||
| - Kubeconfig file (`~/.kube/config` or `KUBECONFIG` environment variable) | ||||
| - Service account tokens | ||||
|  | ||||
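| No explicit credential handling is needed in code; the manager resolves credentials through this chain when it is constructed. A minimal sketch (assuming a reachable cluster and a valid kubeconfig): | ||||
|  | ||||
| ```rust | ||||
| use sal_kubernetes::KubernetesManager; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Picks up in-cluster config, the file named by KUBECONFIG, or ~/.kube/config. | ||||
|     let km = KubernetesManager::new("default").await?; | ||||
|     println!("Authenticated; operating on namespace '{}'", km.namespace()); | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||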
| ### Production Safety Configuration | ||||
|  | ||||
| ```rust | ||||
| use sal_kubernetes::{KubernetesManager, KubernetesConfig}; | ||||
| use std::time::Duration; | ||||
|  | ||||
| // Create with custom configuration | ||||
| let config = KubernetesConfig::new() | ||||
|     .with_timeout(Duration::from_secs(60)) | ||||
|     .with_retries(5, Duration::from_secs(1), Duration::from_secs(30)) | ||||
|     .with_rate_limit(20, 50); | ||||
|  | ||||
| let km = KubernetesManager::with_config("my-namespace", config).await?; | ||||
| ``` | ||||
|  | ||||
| ### Pre-configured Profiles | ||||
|  | ||||
| ```rust | ||||
| // High-throughput environment | ||||
| let config = KubernetesConfig::high_throughput(); | ||||
|  | ||||
| // Low-latency environment | ||||
| let config = KubernetesConfig::low_latency(); | ||||
|  | ||||
| // Development/testing | ||||
| let config = KubernetesConfig::development(); | ||||
| ``` | ||||
|  | ||||
| ## Error Handling | ||||
|  | ||||
| All operations return `Result<T, KubernetesError>` with comprehensive error types for different failure scenarios including API errors, configuration issues, and permission problems. | ||||
|  | ||||
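| The variants referenced below mirror those defined in this package's error module; which variant a particular failure surfaces as depends on the cluster and client configuration, so treat this as a sketch rather than an exhaustive contract: | ||||
|  | ||||
| ```rust | ||||
| use sal_kubernetes::{KubernetesError, KubernetesManager}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     match KubernetesManager::new("default").await { | ||||
|         Ok(km) => println!("Connected to namespace '{}'", km.namespace()), | ||||
|         Err(KubernetesError::ConfigError(msg)) => eprintln!("Configuration problem: {}", msg), | ||||
|         Err(KubernetesError::PermissionDenied(msg)) => eprintln!("RBAC denied the request: {}", msg), | ||||
|         Err(KubernetesError::Timeout(msg)) => eprintln!("Operation timed out: {}", msg), | ||||
|         Err(other) => eprintln!("Kubernetes error: {}", other), | ||||
|     } | ||||
| } | ||||
| ``` | ||||
|  | ||||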
| ## API Reference | ||||
|  | ||||
| ### KubernetesManager | ||||
|  | ||||
| The main interface for Kubernetes operations. Each instance is scoped to a single namespace. | ||||
|  | ||||
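| For example, working across two namespaces means holding two managers, one per namespace (a sketch; it assumes both namespaces already exist): | ||||
|  | ||||
| ```rust | ||||
| let km_default = KubernetesManager::new("default").await?; | ||||
| let km_app = KubernetesManager::new("my-app").await?; | ||||
|  | ||||
| // Each call is scoped to its own manager's namespace. | ||||
| println!("default: {} pods", km_default.pods_list().await?.len()); | ||||
| println!("my-app:  {} pods", km_app.pods_list().await?.len()); | ||||
| ``` | ||||
|  | ||||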
| #### Constructor | ||||
|  | ||||
| - `KubernetesManager::new(namespace)` - Create a manager for the specified namespace | ||||
|  | ||||
| #### Resource Listing | ||||
|  | ||||
| - `pods_list()` - List all pods in the namespace | ||||
| - `services_list()` - List all services in the namespace | ||||
| - `deployments_list()` - List all deployments in the namespace | ||||
| - `configmaps_list()` - List all configmaps in the namespace | ||||
| - `secrets_list()` - List all secrets in the namespace | ||||
|  | ||||
| #### Resource Management | ||||
|  | ||||
| - `pod_get(name)` - Get a specific pod by name | ||||
| - `service_get(name)` - Get a specific service by name | ||||
| - `deployment_get(name)` - Get a specific deployment by name | ||||
| - `pod_delete(name)` - Delete a specific pod by name | ||||
| - `service_delete(name)` - Delete a specific service by name | ||||
| - `deployment_delete(name)` - Delete a specific deployment by name | ||||
|  | ||||
| #### Pattern-based Operations | ||||
|  | ||||
| - `delete(pattern)` - Delete all resources matching a PCRE pattern | ||||
|  | ||||
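| As a rough illustration (the resource name prefix here is hypothetical), anchoring the pattern keeps the match narrow; the call returns how many resources were removed: | ||||
|  | ||||
| ```rust | ||||
| // Deletes only resources whose names start with "temp-cache-" in this | ||||
| // manager's namespace. | ||||
| let deleted = km.delete("^temp-cache-.*").await?; | ||||
| println!("Deleted {} resources", deleted); | ||||
| ``` | ||||
|  | ||||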
| #### Namespace Operations | ||||
|  | ||||
| - `namespace_create(name)` - Create a namespace (idempotent) | ||||
| - `namespace_exists(name)` - Check if a namespace exists | ||||
| - `namespaces_list()` - List all namespaces (cluster-wide) | ||||
|  | ||||
| #### Utility Functions | ||||
|  | ||||
| - `resource_counts()` - Get counts of all resource types in the namespace | ||||
| - `namespace()` - Get the namespace this manager operates on | ||||
|  | ||||
| ### Rhai Functions | ||||
|  | ||||
| When using the Rhai integration, the following functions are available: | ||||
|  | ||||
| - `kubernetes_manager_new(namespace)` - Create a KubernetesManager | ||||
| - `pods_list(km)` - List pods | ||||
| - `services_list(km)` - List services | ||||
| - `deployments_list(km)` - List deployments | ||||
| - `namespaces_list(km)` - List all namespaces | ||||
| - `delete(km, pattern)` - Delete resources matching pattern | ||||
| - `namespace_create(km, name)` - Create namespace | ||||
| - `namespace_exists(km, name)` - Check namespace existence | ||||
| - `resource_counts(km)` - Get resource counts | ||||
| - `pod_delete(km, name)` - Delete specific pod | ||||
| - `service_delete(km, name)` - Delete specific service | ||||
| - `deployment_delete(km, name)` - Delete specific deployment | ||||
| - `namespace(km)` - Get manager's namespace | ||||
|  | ||||
| ## Examples | ||||
|  | ||||
| The `examples/kubernetes/` directory contains comprehensive examples: | ||||
|  | ||||
| - `basic_operations.rhai` - Basic listing and counting operations | ||||
| - `namespace_management.rhai` - Creating and managing namespaces | ||||
| - `pattern_deletion.rhai` - Using PCRE patterns for bulk deletion | ||||
| - `multi_namespace_operations.rhai` - Working across multiple namespaces | ||||
|  | ||||
| ## Testing | ||||
|  | ||||
| Run tests with: | ||||
|  | ||||
| ```bash | ||||
| # Unit tests (no cluster required) | ||||
| cargo test --package sal-kubernetes | ||||
|  | ||||
| # Integration tests (requires cluster) | ||||
| KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes | ||||
|  | ||||
| # Rhai integration tests | ||||
| KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes --features rhai | ||||
| ``` | ||||
|  | ||||
| ## Security Considerations | ||||
|  | ||||
| - Always use specific PCRE patterns to avoid accidental deletion of important resources | ||||
| - Test deletion patterns in a safe environment first | ||||
| - Ensure proper RBAC permissions are configured | ||||
| - Be cautious with cluster-wide operations like namespace listing | ||||
| - Consider using dry-run approaches when possible | ||||
| @@ -1,113 +0,0 @@ | ||||
| //! Configuration for production safety features | ||||
|  | ||||
| use std::time::Duration; | ||||
|  | ||||
| /// Configuration for Kubernetes operations with production safety features | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct KubernetesConfig { | ||||
|     /// Timeout for individual API operations | ||||
|     pub operation_timeout: Duration, | ||||
|      | ||||
|     /// Maximum number of retry attempts for failed operations | ||||
|     pub max_retries: u32, | ||||
|      | ||||
|     /// Base delay for exponential backoff retry strategy | ||||
|     pub retry_base_delay: Duration, | ||||
|      | ||||
|     /// Maximum delay between retries | ||||
|     pub retry_max_delay: Duration, | ||||
|      | ||||
|     /// Rate limiting: maximum requests per second | ||||
|     pub rate_limit_rps: u32, | ||||
|      | ||||
|     /// Rate limiting: burst capacity | ||||
|     pub rate_limit_burst: u32, | ||||
| } | ||||
|  | ||||
| impl Default for KubernetesConfig { | ||||
|     fn default() -> Self { | ||||
|         Self { | ||||
|             // Conservative timeout for production | ||||
|             operation_timeout: Duration::from_secs(30), | ||||
|              | ||||
|             // Reasonable retry attempts | ||||
|             max_retries: 3, | ||||
|              | ||||
|             // Exponential backoff starting at 1 second | ||||
|             retry_base_delay: Duration::from_secs(1), | ||||
|              | ||||
|             // Maximum 30 seconds between retries | ||||
|             retry_max_delay: Duration::from_secs(30), | ||||
|              | ||||
|             // Conservative rate limiting: 10 requests per second | ||||
|             rate_limit_rps: 10, | ||||
|              | ||||
|             // Allow small bursts | ||||
|             rate_limit_burst: 20, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl KubernetesConfig { | ||||
|     /// Create a new configuration with custom settings | ||||
|     pub fn new() -> Self { | ||||
|         Self::default() | ||||
|     } | ||||
|      | ||||
|     /// Set operation timeout | ||||
|     pub fn with_timeout(mut self, timeout: Duration) -> Self { | ||||
|         self.operation_timeout = timeout; | ||||
|         self | ||||
|     } | ||||
|      | ||||
|     /// Set retry configuration | ||||
|     pub fn with_retries(mut self, max_retries: u32, base_delay: Duration, max_delay: Duration) -> Self { | ||||
|         self.max_retries = max_retries; | ||||
|         self.retry_base_delay = base_delay; | ||||
|         self.retry_max_delay = max_delay; | ||||
|         self | ||||
|     } | ||||
|      | ||||
|     /// Set rate limiting configuration | ||||
|     pub fn with_rate_limit(mut self, rps: u32, burst: u32) -> Self { | ||||
|         self.rate_limit_rps = rps; | ||||
|         self.rate_limit_burst = burst; | ||||
|         self | ||||
|     } | ||||
|      | ||||
|     /// Create configuration optimized for high-throughput environments | ||||
|     pub fn high_throughput() -> Self { | ||||
|         Self { | ||||
|             operation_timeout: Duration::from_secs(60), | ||||
|             max_retries: 5, | ||||
|             retry_base_delay: Duration::from_millis(500), | ||||
|             retry_max_delay: Duration::from_secs(60), | ||||
|             rate_limit_rps: 50, | ||||
|             rate_limit_burst: 100, | ||||
|         } | ||||
|     } | ||||
|      | ||||
|     /// Create configuration optimized for low-latency environments | ||||
|     pub fn low_latency() -> Self { | ||||
|         Self { | ||||
|             operation_timeout: Duration::from_secs(10), | ||||
|             max_retries: 2, | ||||
|             retry_base_delay: Duration::from_millis(100), | ||||
|             retry_max_delay: Duration::from_secs(5), | ||||
|             rate_limit_rps: 20, | ||||
|             rate_limit_burst: 40, | ||||
|         } | ||||
|     } | ||||
|      | ||||
|     /// Create configuration for development/testing | ||||
|     pub fn development() -> Self { | ||||
|         Self { | ||||
|             operation_timeout: Duration::from_secs(120), | ||||
|             max_retries: 1, | ||||
|             retry_base_delay: Duration::from_millis(100), | ||||
|             retry_max_delay: Duration::from_secs(2), | ||||
|             rate_limit_rps: 100, | ||||
|             rate_limit_burst: 200, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @@ -1,85 +0,0 @@ | ||||
| //! Error types for SAL Kubernetes operations | ||||
|  | ||||
| use thiserror::Error; | ||||
|  | ||||
| /// Errors that can occur during Kubernetes operations | ||||
| #[derive(Error, Debug)] | ||||
| pub enum KubernetesError { | ||||
|     /// Kubernetes API client error | ||||
|     #[error("Kubernetes API error: {0}")] | ||||
|     ApiError(#[from] kube::Error), | ||||
|  | ||||
|     /// Configuration error | ||||
|     #[error("Configuration error: {0}")] | ||||
|     ConfigError(String), | ||||
|  | ||||
|     /// Resource not found error | ||||
|     #[error("Resource not found: {0}")] | ||||
|     ResourceNotFound(String), | ||||
|  | ||||
|     /// Invalid resource name or pattern | ||||
|     #[error("Invalid resource name or pattern: {0}")] | ||||
|     InvalidResourceName(String), | ||||
|  | ||||
|     /// Regular expression error | ||||
|     #[error("Regular expression error: {0}")] | ||||
|     RegexError(#[from] regex::Error), | ||||
|  | ||||
|     /// Serialization/deserialization error | ||||
|     #[error("Serialization error: {0}")] | ||||
|     SerializationError(#[from] serde_json::Error), | ||||
|  | ||||
|     /// YAML parsing error | ||||
|     #[error("YAML error: {0}")] | ||||
|     YamlError(#[from] serde_yaml::Error), | ||||
|  | ||||
|     /// Generic operation error | ||||
|     #[error("Operation failed: {0}")] | ||||
|     OperationError(String), | ||||
|  | ||||
|     /// Namespace error | ||||
|     #[error("Namespace error: {0}")] | ||||
|     NamespaceError(String), | ||||
|  | ||||
|     /// Permission denied error | ||||
|     #[error("Permission denied: {0}")] | ||||
|     PermissionDenied(String), | ||||
|  | ||||
|     /// Timeout error | ||||
|     #[error("Operation timed out: {0}")] | ||||
|     Timeout(String), | ||||
|  | ||||
|     /// Generic error wrapper | ||||
|     #[error("Generic error: {0}")] | ||||
|     Generic(#[from] anyhow::Error), | ||||
| } | ||||
|  | ||||
| impl KubernetesError { | ||||
|     /// Create a new configuration error | ||||
|     pub fn config_error(msg: impl Into<String>) -> Self { | ||||
|         Self::ConfigError(msg.into()) | ||||
|     } | ||||
|  | ||||
|     /// Create a new operation error | ||||
|     pub fn operation_error(msg: impl Into<String>) -> Self { | ||||
|         Self::OperationError(msg.into()) | ||||
|     } | ||||
|  | ||||
|     /// Create a new namespace error | ||||
|     pub fn namespace_error(msg: impl Into<String>) -> Self { | ||||
|         Self::NamespaceError(msg.into()) | ||||
|     } | ||||
|  | ||||
|     /// Create a new permission denied error | ||||
|     pub fn permission_denied(msg: impl Into<String>) -> Self { | ||||
|         Self::PermissionDenied(msg.into()) | ||||
|     } | ||||
|  | ||||
|     /// Create a new timeout error | ||||
|     pub fn timeout(msg: impl Into<String>) -> Self { | ||||
|         Self::Timeout(msg.into()) | ||||
|     } | ||||
| } | ||||
|  | ||||
| /// Result type for Kubernetes operations | ||||
| pub type KubernetesResult<T> = Result<T, KubernetesError>; | ||||
										
											
File diff suppressed because it is too large
							| @@ -1,49 +0,0 @@ | ||||
| //! SAL Kubernetes: Kubernetes cluster management and operations | ||||
| //! | ||||
| //! This package provides Kubernetes cluster management functionality including: | ||||
| //! - Namespace-scoped resource management via KubernetesManager | ||||
| //! - Pod listing and management | ||||
| //! - Resource deletion with PCRE pattern matching | ||||
| //! - Namespace creation and management | ||||
| //! - Support for various Kubernetes resources (pods, services, deployments, etc.) | ||||
| //! | ||||
| //! # Example | ||||
| //! | ||||
| //! ```rust | ||||
| //! use sal_kubernetes::KubernetesManager; | ||||
| //! | ||||
| //! #[tokio::main] | ||||
| //! async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
| //!     // Create a manager for the "default" namespace | ||||
| //!     let km = KubernetesManager::new("default").await?; | ||||
| //!      | ||||
| //!     // List all pods in the namespace | ||||
| //!     let pods = km.pods_list().await?; | ||||
| //!     println!("Found {} pods", pods.len()); | ||||
| //!      | ||||
| //!     // Create a namespace (idempotent) | ||||
| //!     km.namespace_create("my-namespace").await?; | ||||
| //!      | ||||
| //!     // Delete resources matching a pattern | ||||
| //!     km.delete("test-.*").await?; | ||||
| //!      | ||||
| //!     Ok(()) | ||||
| //! } | ||||
| //! ``` | ||||
|  | ||||
| pub mod config; | ||||
| pub mod error; | ||||
| pub mod kubernetes_manager; | ||||
|  | ||||
| // Rhai integration module | ||||
| #[cfg(feature = "rhai")] | ||||
| pub mod rhai; | ||||
|  | ||||
| // Re-export main types for convenience | ||||
| pub use config::KubernetesConfig; | ||||
| pub use error::KubernetesError; | ||||
| pub use kubernetes_manager::KubernetesManager; | ||||
|  | ||||
| // Re-export commonly used Kubernetes types | ||||
| pub use k8s_openapi::api::apps::v1::{Deployment, ReplicaSet}; | ||||
| pub use k8s_openapi::api::core::v1::{Namespace, Pod, Service}; | ||||
| @@ -1,555 +0,0 @@ | ||||
| //! Rhai wrappers for Kubernetes module functions | ||||
| //! | ||||
| //! This module provides Rhai wrappers for the functions in the Kubernetes module, | ||||
| //! enabling scripting access to Kubernetes operations. | ||||
|  | ||||
| use crate::{KubernetesError, KubernetesManager}; | ||||
| use rhai::{Array, Dynamic, Engine, EvalAltResult, Map}; | ||||
|  | ||||
| /// Helper function to execute async operations with proper runtime handling | ||||
| fn execute_async<F, T>(future: F) -> Result<T, Box<EvalAltResult>> | ||||
| where | ||||
|     F: std::future::Future<Output = Result<T, KubernetesError>>, | ||||
| { | ||||
|     match tokio::runtime::Handle::try_current() { | ||||
|         Ok(handle) => handle | ||||
|             .block_on(future) | ||||
|             .map_err(kubernetes_error_to_rhai_error), | ||||
|         Err(_) => { | ||||
|             // No runtime available, create a new one | ||||
|             let rt = tokio::runtime::Runtime::new().map_err(|e| { | ||||
|                 Box::new(EvalAltResult::ErrorRuntime( | ||||
|                     format!("Failed to create Tokio runtime: {}", e).into(), | ||||
|                     rhai::Position::NONE, | ||||
|                 )) | ||||
|             })?; | ||||
|             rt.block_on(future).map_err(kubernetes_error_to_rhai_error) | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| /// Create a new KubernetesManager for the specified namespace | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `namespace` - The Kubernetes namespace to operate on | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<KubernetesManager, Box<EvalAltResult>>` - The manager instance or an error | ||||
| fn kubernetes_manager_new(namespace: String) -> Result<KubernetesManager, Box<EvalAltResult>> { | ||||
|     execute_async(KubernetesManager::new(namespace)) | ||||
| } | ||||
|  | ||||
| /// List all pods in the namespace | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<Array, Box<EvalAltResult>>` - Array of pod names or an error | ||||
| fn pods_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> { | ||||
|     let pods = execute_async(km.pods_list())?; | ||||
|  | ||||
|     let pod_names: Array = pods | ||||
|         .iter() | ||||
|         .filter_map(|pod| pod.metadata.name.as_ref()) | ||||
|         .map(|name| Dynamic::from(name.clone())) | ||||
|         .collect(); | ||||
|  | ||||
|     Ok(pod_names) | ||||
| } | ||||
|  | ||||
| /// List all services in the namespace | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<Array, Box<EvalAltResult>>` - Array of service names or an error | ||||
| fn services_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> { | ||||
|     let services = execute_async(km.services_list())?; | ||||
|  | ||||
|     let service_names: Array = services | ||||
|         .iter() | ||||
|         .filter_map(|service| service.metadata.name.as_ref()) | ||||
|         .map(|name| Dynamic::from(name.clone())) | ||||
|         .collect(); | ||||
|  | ||||
|     Ok(service_names) | ||||
| } | ||||
|  | ||||
| /// List all deployments in the namespace | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<Array, Box<EvalAltResult>>` - Array of deployment names or an error | ||||
| fn deployments_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> { | ||||
|     let deployments = execute_async(km.deployments_list())?; | ||||
|  | ||||
|     let deployment_names: Array = deployments | ||||
|         .iter() | ||||
|         .filter_map(|deployment| deployment.metadata.name.as_ref()) | ||||
|         .map(|name| Dynamic::from(name.clone())) | ||||
|         .collect(); | ||||
|  | ||||
|     Ok(deployment_names) | ||||
| } | ||||
|  | ||||
| /// Create a pod with a single container | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the pod | ||||
| /// * `image` - Container image to use | ||||
| /// * `labels` - Optional labels as a Map | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Pod name or an error | ||||
| fn pod_create( | ||||
|     km: &mut KubernetesManager, | ||||
|     name: String, | ||||
|     image: String, | ||||
|     labels: Map, | ||||
| ) -> Result<String, Box<EvalAltResult>> { | ||||
|     let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() { | ||||
|         None | ||||
|     } else { | ||||
|         Some( | ||||
|             labels | ||||
|                 .into_iter() | ||||
|                 .map(|(k, v)| (k.to_string(), v.to_string())) | ||||
|                 .collect(), | ||||
|         ) | ||||
|     }; | ||||
|  | ||||
|     let pod = execute_async(km.pod_create(&name, &image, labels_map))?; | ||||
|     Ok(pod.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Create a service | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the service | ||||
| /// * `selector` - Labels to select pods as a Map | ||||
| /// * `port` - Port to expose | ||||
| /// * `target_port` - Target port on pods (optional, defaults to port) | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Service name or an error | ||||
| fn service_create( | ||||
|     km: &mut KubernetesManager, | ||||
|     name: String, | ||||
|     selector: Map, | ||||
|     port: i64, | ||||
|     target_port: i64, | ||||
| ) -> Result<String, Box<EvalAltResult>> { | ||||
|     let selector_map: std::collections::HashMap<String, String> = selector | ||||
|         .into_iter() | ||||
|         .map(|(k, v)| (k.to_string(), v.to_string())) | ||||
|         .collect(); | ||||
|  | ||||
|     let target_port_opt = if target_port == 0 { | ||||
|         None | ||||
|     } else { | ||||
|         Some(target_port as i32) | ||||
|     }; | ||||
|     let service = | ||||
|         execute_async(km.service_create(&name, selector_map, port as i32, target_port_opt))?; | ||||
|     Ok(service.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Create a deployment | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the deployment | ||||
| /// * `image` - Container image to use | ||||
| /// * `replicas` - Number of replicas | ||||
| /// * `labels` - Optional labels as a Map | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error | ||||
| fn deployment_create( | ||||
|     km: &mut KubernetesManager, | ||||
|     name: String, | ||||
|     image: String, | ||||
|     replicas: i64, | ||||
|     labels: Map, | ||||
| ) -> Result<String, Box<EvalAltResult>> { | ||||
|     let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() { | ||||
|         None | ||||
|     } else { | ||||
|         Some( | ||||
|             labels | ||||
|                 .into_iter() | ||||
|                 .map(|(k, v)| (k.to_string(), v.to_string())) | ||||
|                 .collect(), | ||||
|         ) | ||||
|     }; | ||||
|  | ||||
|     let deployment = | ||||
|         execute_async(km.deployment_create(&name, &image, replicas as i32, labels_map))?; | ||||
|     Ok(deployment.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Create a ConfigMap | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the ConfigMap | ||||
| /// * `data` - Data as a Map | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - ConfigMap name or an error | ||||
| fn configmap_create( | ||||
|     km: &mut KubernetesManager, | ||||
|     name: String, | ||||
|     data: Map, | ||||
| ) -> Result<String, Box<EvalAltResult>> { | ||||
|     let data_map: std::collections::HashMap<String, String> = data | ||||
|         .into_iter() | ||||
|         .map(|(k, v)| (k.to_string(), v.to_string())) | ||||
|         .collect(); | ||||
|  | ||||
|     let configmap = execute_async(km.configmap_create(&name, data_map))?; | ||||
|     Ok(configmap.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Create a Secret | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the Secret | ||||
| /// * `data` - Data as a Map (will be base64 encoded) | ||||
| /// * `secret_type` - Type of secret (optional, defaults to "Opaque") | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Secret name or an error | ||||
| fn secret_create( | ||||
|     km: &mut KubernetesManager, | ||||
|     name: String, | ||||
|     data: Map, | ||||
|     secret_type: String, | ||||
| ) -> Result<String, Box<EvalAltResult>> { | ||||
|     let data_map: std::collections::HashMap<String, String> = data | ||||
|         .into_iter() | ||||
|         .map(|(k, v)| (k.to_string(), v.to_string())) | ||||
|         .collect(); | ||||
|  | ||||
|     let secret_type_opt = if secret_type.is_empty() { | ||||
|         None | ||||
|     } else { | ||||
|         Some(secret_type.as_str()) | ||||
|     }; | ||||
|     let secret = execute_async(km.secret_create(&name, data_map, secret_type_opt))?; | ||||
|     Ok(secret.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Get a pod by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the pod to get | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Pod name or an error | ||||
| fn pod_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> { | ||||
|     let pod = execute_async(km.pod_get(&name))?; | ||||
|     Ok(pod.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Get a service by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the service to get | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Service name or an error | ||||
| fn service_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> { | ||||
|     let service = execute_async(km.service_get(&name))?; | ||||
|     Ok(service.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Get a deployment by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the deployment to get | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error | ||||
| fn deployment_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> { | ||||
|     let deployment = execute_async(km.deployment_get(&name))?; | ||||
|     Ok(deployment.metadata.name.unwrap_or(name)) | ||||
| } | ||||
|  | ||||
| /// Delete resources matching a PCRE pattern | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `pattern` - PCRE pattern to match resource names against | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<i64, Box<EvalAltResult>>` - Number of resources deleted or an error | ||||
| fn delete(km: &mut KubernetesManager, pattern: String) -> Result<i64, Box<EvalAltResult>> { | ||||
|     let deleted_count = execute_async(km.delete(&pattern))?; | ||||
|  | ||||
|     Ok(deleted_count as i64) | ||||
| } | ||||
|  | ||||
| /// Create a namespace (idempotent operation) | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `name` - The name of the namespace to create | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn namespace_create(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.namespace_create(&name)) | ||||
| } | ||||
|  | ||||
| /// Delete a namespace (destructive operation) | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the namespace to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn namespace_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.namespace_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Check if a namespace exists | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `name` - The name of the namespace to check | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<bool, Box<EvalAltResult>>` - True if namespace exists, false otherwise | ||||
| fn namespace_exists(km: &mut KubernetesManager, name: String) -> Result<bool, Box<EvalAltResult>> { | ||||
|     execute_async(km.namespace_exists(&name)) | ||||
| } | ||||
|  | ||||
| /// List all namespaces | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<Array, Box<EvalAltResult>>` - Array of namespace names or an error | ||||
| fn namespaces_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> { | ||||
|     let namespaces = execute_async(km.namespaces_list())?; | ||||
|  | ||||
|     let namespace_names: Array = namespaces | ||||
|         .iter() | ||||
|         .filter_map(|ns| ns.metadata.name.as_ref()) | ||||
|         .map(|name| Dynamic::from(name.clone())) | ||||
|         .collect(); | ||||
|  | ||||
|     Ok(namespace_names) | ||||
| } | ||||
|  | ||||
| /// Get resource counts for the namespace | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<Map, Box<EvalAltResult>>` - Map of resource counts by type or an error | ||||
| fn resource_counts(km: &mut KubernetesManager) -> Result<Map, Box<EvalAltResult>> { | ||||
|     let counts = execute_async(km.resource_counts())?; | ||||
|  | ||||
|     let mut rhai_map = Map::new(); | ||||
|     for (key, value) in counts { | ||||
|         rhai_map.insert(key.into(), Dynamic::from(value as i64)); | ||||
|     } | ||||
|  | ||||
|     Ok(rhai_map) | ||||
| } | ||||
|  | ||||
| /// Delete a specific pod by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `name` - The name of the pod to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn pod_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.pod_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Delete a specific service by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `name` - The name of the service to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn service_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.service_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Delete a specific deployment by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// * `name` - The name of the deployment to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn deployment_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.deployment_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Delete a ConfigMap by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the ConfigMap to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn configmap_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.configmap_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Delete a Secret by name | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - Mutable reference to KubernetesManager | ||||
| /// * `name` - Name of the Secret to delete | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Success or an error | ||||
| fn secret_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> { | ||||
|     execute_async(km.secret_delete(&name)) | ||||
| } | ||||
|  | ||||
| /// Get the namespace this manager operates on | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `km` - The KubernetesManager instance | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `String` - The namespace name | ||||
| fn kubernetes_manager_namespace(km: &mut KubernetesManager) -> String { | ||||
|     km.namespace().to_string() | ||||
| } | ||||
|  | ||||
| /// Register Kubernetes module functions with the Rhai engine | ||||
| /// | ||||
| /// # Arguments | ||||
| /// | ||||
| /// * `engine` - The Rhai engine to register the functions with | ||||
| /// | ||||
| /// # Returns | ||||
| /// | ||||
| /// * `Result<(), Box<EvalAltResult>>` - Ok if registration was successful, Err otherwise | ||||
| pub fn register_kubernetes_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> { | ||||
|     // Register KubernetesManager type | ||||
|     engine.register_type::<KubernetesManager>(); | ||||
|  | ||||
|     // Register KubernetesManager constructor and methods | ||||
|     engine.register_fn("kubernetes_manager_new", kubernetes_manager_new); | ||||
|     engine.register_fn("namespace", kubernetes_manager_namespace); | ||||
|  | ||||
|     // Register resource listing functions | ||||
|     engine.register_fn("pods_list", pods_list); | ||||
|     engine.register_fn("services_list", services_list); | ||||
|     engine.register_fn("deployments_list", deployments_list); | ||||
|     engine.register_fn("namespaces_list", namespaces_list); | ||||
|  | ||||
|     // Register resource creation methods (object-oriented style) | ||||
|     engine.register_fn("create_pod", pod_create); | ||||
|     engine.register_fn("create_service", service_create); | ||||
|     engine.register_fn("create_deployment", deployment_create); | ||||
|     engine.register_fn("create_configmap", configmap_create); | ||||
|     engine.register_fn("create_secret", secret_create); | ||||
|  | ||||
|     // Register resource get methods | ||||
|     engine.register_fn("get_pod", pod_get); | ||||
|     engine.register_fn("get_service", service_get); | ||||
|     engine.register_fn("get_deployment", deployment_get); | ||||
|  | ||||
|     // Register resource management methods | ||||
|     engine.register_fn("delete", delete); | ||||
|     engine.register_fn("delete_pod", pod_delete); | ||||
|     engine.register_fn("delete_service", service_delete); | ||||
|     engine.register_fn("delete_deployment", deployment_delete); | ||||
|     engine.register_fn("delete_configmap", configmap_delete); | ||||
|     engine.register_fn("delete_secret", secret_delete); | ||||
|  | ||||
|     // Register namespace methods (object-oriented style) | ||||
|     engine.register_fn("create_namespace", namespace_create); | ||||
|     engine.register_fn("delete_namespace", namespace_delete); | ||||
|     engine.register_fn("namespace_exists", namespace_exists); | ||||
|  | ||||
|     // Register utility functions | ||||
|     engine.register_fn("resource_counts", resource_counts); | ||||
|  | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| // Helper function for error conversion | ||||
| fn kubernetes_error_to_rhai_error(error: KubernetesError) -> Box<EvalAltResult> { | ||||
|     Box::new(EvalAltResult::ErrorRuntime( | ||||
|         format!("Kubernetes error: {}", error).into(), | ||||
|         rhai::Position::NONE, | ||||
|     )) | ||||
| } | ||||
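For orientation, here is a minimal sketch of how a host program might wire `register_kubernetes_module` into a Rhai engine and run a short script. The `sal_kubernetes::rhai` module path is an assumption based on this file's location, and the script body only succeeds against a reachable cluster.

```rust
use rhai::Engine;

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();

    // Assumed re-export path for the registration function shown above.
    sal_kubernetes::rhai::register_kubernetes_module(&mut engine)?;

    // Count pods in the default namespace through the registered API.
    let pod_count: i64 = engine.eval(
        r#"
            let km = kubernetes_manager_new("default");
            let pods = pods_list(km);
            pods.len()
        "#,
    )?;
    println!("pods in default namespace: {pod_count}");
    Ok(())
}
```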
| @@ -1,174 +0,0 @@ | ||||
| //! CRUD operations tests for SAL Kubernetes | ||||
| //! | ||||
| //! These tests verify that all Create, Read, Update, Delete operations work correctly. | ||||
|  | ||||
| #[cfg(test)] | ||||
| mod crud_tests { | ||||
|     use sal_kubernetes::KubernetesManager; | ||||
|     use std::collections::HashMap; | ||||
|  | ||||
|     /// Check if Kubernetes integration tests should run | ||||
|     fn should_run_k8s_tests() -> bool { | ||||
|         std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1" | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_complete_crud_operations() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping CRUD test. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         println!("🔍 Testing complete CRUD operations..."); | ||||
|  | ||||
|         // Create a test namespace for our operations | ||||
|         let test_namespace = "sal-crud-test"; | ||||
|         let km = KubernetesManager::new("default").await | ||||
|             .expect("Should connect to cluster"); | ||||
|  | ||||
|         // Clean up any existing test namespace | ||||
|         let _ = km.namespace_delete(test_namespace).await; | ||||
|         tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; | ||||
|  | ||||
|         // CREATE operations | ||||
|         println!("\n=== CREATE Operations ==="); | ||||
|  | ||||
|         // 1. Create namespace | ||||
|         km.namespace_create(test_namespace).await | ||||
|             .expect("Should create test namespace"); | ||||
|         println!("✅ Created namespace: {}", test_namespace); | ||||
|  | ||||
|         // Switch to test namespace | ||||
|         let test_km = KubernetesManager::new(test_namespace).await | ||||
|             .expect("Should connect to test namespace"); | ||||
|  | ||||
|         // 2. Create ConfigMap | ||||
|         let mut config_data = HashMap::new(); | ||||
|         config_data.insert("app.properties".to_string(), "debug=true\nport=8080".to_string()); | ||||
|         config_data.insert("config.yaml".to_string(), "key: value\nenv: test".to_string()); | ||||
|          | ||||
|         let configmap = test_km.configmap_create("test-config", config_data).await | ||||
|             .expect("Should create ConfigMap"); | ||||
|         println!("✅ Created ConfigMap: {}", configmap.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // 3. Create Secret | ||||
|         let mut secret_data = HashMap::new(); | ||||
|         secret_data.insert("username".to_string(), "testuser".to_string()); | ||||
|         secret_data.insert("password".to_string(), "secret123".to_string()); | ||||
|          | ||||
|         let secret = test_km.secret_create("test-secret", secret_data, None).await | ||||
|             .expect("Should create Secret"); | ||||
|         println!("✅ Created Secret: {}", secret.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // 4. Create Pod | ||||
|         let mut pod_labels = HashMap::new(); | ||||
|         pod_labels.insert("app".to_string(), "test-app".to_string()); | ||||
|         pod_labels.insert("version".to_string(), "v1".to_string()); | ||||
|          | ||||
|         let pod = test_km.pod_create("test-pod", "nginx:alpine", Some(pod_labels.clone())).await | ||||
|             .expect("Should create Pod"); | ||||
|         println!("✅ Created Pod: {}", pod.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // 5. Create Service | ||||
|         let service = test_km.service_create("test-service", pod_labels.clone(), 80, Some(80)).await | ||||
|             .expect("Should create Service"); | ||||
|         println!("✅ Created Service: {}", service.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // 6. Create Deployment | ||||
|         let deployment = test_km.deployment_create("test-deployment", "nginx:alpine", 2, Some(pod_labels)).await | ||||
|             .expect("Should create Deployment"); | ||||
|         println!("✅ Created Deployment: {}", deployment.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // READ operations | ||||
|         println!("\n=== READ Operations ==="); | ||||
|  | ||||
|         // List all resources | ||||
|         let pods = test_km.pods_list().await.expect("Should list pods"); | ||||
|         println!("✅ Listed {} pods", pods.len()); | ||||
|  | ||||
|         let services = test_km.services_list().await.expect("Should list services"); | ||||
|         println!("✅ Listed {} services", services.len()); | ||||
|  | ||||
|         let deployments = test_km.deployments_list().await.expect("Should list deployments"); | ||||
|         println!("✅ Listed {} deployments", deployments.len()); | ||||
|  | ||||
|         let configmaps = test_km.configmaps_list().await.expect("Should list configmaps"); | ||||
|         println!("✅ Listed {} configmaps", configmaps.len()); | ||||
|  | ||||
|         let secrets = test_km.secrets_list().await.expect("Should list secrets"); | ||||
|         println!("✅ Listed {} secrets", secrets.len()); | ||||
|  | ||||
|         // Get specific resources | ||||
|         let pod = test_km.pod_get("test-pod").await.expect("Should get pod"); | ||||
|         println!("✅ Retrieved pod: {}", pod.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         let service = test_km.service_get("test-service").await.expect("Should get service"); | ||||
|         println!("✅ Retrieved service: {}", service.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         let deployment = test_km.deployment_get("test-deployment").await.expect("Should get deployment"); | ||||
|         println!("✅ Retrieved deployment: {}", deployment.metadata.name.unwrap_or_default()); | ||||
|  | ||||
|         // Resource counts | ||||
|         let counts = test_km.resource_counts().await.expect("Should get resource counts"); | ||||
|         println!("✅ Resource counts: {:?}", counts); | ||||
|  | ||||
|         // DELETE operations | ||||
|         println!("\n=== DELETE Operations ==="); | ||||
|  | ||||
|         // Delete individual resources | ||||
|         test_km.pod_delete("test-pod").await.expect("Should delete pod"); | ||||
|         println!("✅ Deleted pod"); | ||||
|  | ||||
|         test_km.service_delete("test-service").await.expect("Should delete service"); | ||||
|         println!("✅ Deleted service"); | ||||
|  | ||||
|         test_km.deployment_delete("test-deployment").await.expect("Should delete deployment"); | ||||
|         println!("✅ Deleted deployment"); | ||||
|  | ||||
|         test_km.configmap_delete("test-config").await.expect("Should delete configmap"); | ||||
|         println!("✅ Deleted configmap"); | ||||
|  | ||||
|         test_km.secret_delete("test-secret").await.expect("Should delete secret"); | ||||
|         println!("✅ Deleted secret"); | ||||
|  | ||||
|         // Verify resources are deleted | ||||
|         let final_counts = test_km.resource_counts().await.expect("Should get final resource counts"); | ||||
|         println!("✅ Final resource counts: {:?}", final_counts); | ||||
|  | ||||
|         // Delete the test namespace | ||||
|         km.namespace_delete(test_namespace).await.expect("Should delete test namespace"); | ||||
|         println!("✅ Deleted test namespace"); | ||||
|  | ||||
|         println!("\n🎉 All CRUD operations completed successfully!"); | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_error_handling_in_crud() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping CRUD error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         println!("🔍 Testing error handling in CRUD operations..."); | ||||
|  | ||||
|         let km = KubernetesManager::new("default").await | ||||
|             .expect("Should connect to cluster"); | ||||
|  | ||||
|         // Test creating resources with invalid names | ||||
|         let result = km.pod_create("", "nginx", None).await; | ||||
|         assert!(result.is_err(), "Should fail with empty pod name"); | ||||
|         println!("✅ Empty pod name properly rejected"); | ||||
|  | ||||
|         // Test getting non-existent resources | ||||
|         let result = km.pod_get("non-existent-pod").await; | ||||
|         assert!(result.is_err(), "Should fail to get non-existent pod"); | ||||
|         println!("✅ Non-existent pod properly handled"); | ||||
|  | ||||
|         // Test deleting non-existent resources | ||||
|         let result = km.service_delete("non-existent-service").await; | ||||
|         assert!(result.is_err(), "Should fail to delete non-existent service"); | ||||
|         println!("✅ Non-existent service deletion properly handled"); | ||||
|  | ||||
|         println!("✅ Error handling in CRUD operations is robust"); | ||||
|     } | ||||
| } | ||||
| @@ -1,385 +0,0 @@ | ||||
| //! Integration tests for SAL Kubernetes | ||||
| //! | ||||
| //! These tests require a running Kubernetes cluster and appropriate credentials. | ||||
| //! Set KUBERNETES_TEST_ENABLED=1 to run these tests. | ||||
|  | ||||
| use sal_kubernetes::KubernetesManager; | ||||
|  | ||||
| /// Check if Kubernetes integration tests should run | ||||
| fn should_run_k8s_tests() -> bool { | ||||
|     std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1" | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_kubernetes_manager_creation() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let result = KubernetesManager::new("default").await; | ||||
|     match result { | ||||
|         Ok(_) => println!("Successfully created KubernetesManager"), | ||||
|         Err(e) => println!("Failed to create KubernetesManager: {}", e), | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_namespace_operations() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, // Skip if can't connect | ||||
|     }; | ||||
|  | ||||
|     // Test namespace creation (should be idempotent) | ||||
|     let test_namespace = "sal-test-namespace"; | ||||
|     let result = km.namespace_create(test_namespace).await; | ||||
|     assert!(result.is_ok(), "Failed to create namespace: {:?}", result); | ||||
|  | ||||
|     // Test creating the same namespace again (should not error) | ||||
|     let result = km.namespace_create(test_namespace).await; | ||||
|     assert!( | ||||
|         result.is_ok(), | ||||
|         "Failed to create namespace idempotently: {:?}", | ||||
|         result | ||||
|     ); | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_pods_list() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, // Skip if can't connect | ||||
|     }; | ||||
|  | ||||
|     let result = km.pods_list().await; | ||||
|     match result { | ||||
|         Ok(pods) => { | ||||
|             println!("Found {} pods in default namespace", pods.len()); | ||||
|  | ||||
|             // Verify pod structure | ||||
|             for pod in pods.iter().take(3) { | ||||
|                 // Check first 3 pods | ||||
|                 assert!(pod.metadata.name.is_some()); | ||||
|                 assert!(pod.metadata.namespace.is_some()); | ||||
|                 println!( | ||||
|                     "Pod: {} in namespace: {}", | ||||
|                     pod.metadata.name.as_ref().unwrap(), | ||||
|                     pod.metadata.namespace.as_ref().unwrap() | ||||
|                 ); | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list pods: {}", e); | ||||
|             // Don't fail the test if we can't list pods due to permissions | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_services_list() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     let result = km.services_list().await; | ||||
|     match result { | ||||
|         Ok(services) => { | ||||
|             println!("Found {} services in default namespace", services.len()); | ||||
|  | ||||
|             // Verify service structure | ||||
|             for service in services.iter().take(3) { | ||||
|                 assert!(service.metadata.name.is_some()); | ||||
|                 println!("Service: {}", service.metadata.name.as_ref().unwrap()); | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list services: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_deployments_list() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     let result = km.deployments_list().await; | ||||
|     match result { | ||||
|         Ok(deployments) => { | ||||
|             println!( | ||||
|                 "Found {} deployments in default namespace", | ||||
|                 deployments.len() | ||||
|             ); | ||||
|  | ||||
|             // Verify deployment structure | ||||
|             for deployment in deployments.iter().take(3) { | ||||
|                 assert!(deployment.metadata.name.is_some()); | ||||
|                 println!("Deployment: {}", deployment.metadata.name.as_ref().unwrap()); | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list deployments: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_resource_counts() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     let result = km.resource_counts().await; | ||||
|     match result { | ||||
|         Ok(counts) => { | ||||
|             println!("Resource counts: {:?}", counts); | ||||
|  | ||||
|             // Verify expected resource types are present | ||||
|             assert!(counts.contains_key("pods")); | ||||
|             assert!(counts.contains_key("services")); | ||||
|             assert!(counts.contains_key("deployments")); | ||||
|             assert!(counts.contains_key("configmaps")); | ||||
|             assert!(counts.contains_key("secrets")); | ||||
|  | ||||
|             // Verify counts are reasonable (counts are usize, so always non-negative) | ||||
|             for (resource_type, count) in counts { | ||||
|                 // Verify we got a count for each resource type | ||||
|                 println!("Resource type '{}' has {} items", resource_type, count); | ||||
|                 // Counts should be reasonable (not impossibly large) | ||||
|                 assert!( | ||||
|                     count < 10000, | ||||
|                     "Count for {} seems unreasonably high: {}", | ||||
|                     resource_type, | ||||
|                     count | ||||
|                 ); | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to get resource counts: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_namespaces_list() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     let result = km.namespaces_list().await; | ||||
|     match result { | ||||
|         Ok(namespaces) => { | ||||
|             println!("Found {} namespaces", namespaces.len()); | ||||
|  | ||||
|             // Should have at least default namespace | ||||
|             let namespace_names: Vec<String> = namespaces | ||||
|                 .iter() | ||||
|                 .filter_map(|ns| ns.metadata.name.as_ref()) | ||||
|                 .cloned() | ||||
|                 .collect(); | ||||
|  | ||||
|             println!("Namespaces: {:?}", namespace_names); | ||||
|             assert!(namespace_names.contains(&"default".to_string())); | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list namespaces: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_pattern_matching_dry_run() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     // Test pattern matching without actually deleting anything | ||||
|     // We'll just verify that the regex patterns work correctly | ||||
|     let test_patterns = vec![ | ||||
|         "test-.*",        // Should match anything starting with "test-" | ||||
|         ".*-temp$",       // Should match anything ending with "-temp" | ||||
|         "nonexistent-.*", // Should match nothing (hopefully) | ||||
|     ]; | ||||
|  | ||||
|     for pattern in test_patterns { | ||||
|         println!("Testing pattern: {}", pattern); | ||||
|  | ||||
|         // Get all pods first | ||||
|         if let Ok(pods) = km.pods_list().await { | ||||
|             let regex = regex::Regex::new(pattern).unwrap(); | ||||
|             let matching_pods: Vec<_> = pods | ||||
|                 .iter() | ||||
|                 .filter_map(|pod| pod.metadata.name.as_ref()) | ||||
|                 .filter(|name| regex.is_match(name)) | ||||
|                 .collect(); | ||||
|  | ||||
|             println!( | ||||
|                 "Pattern '{}' would match {} pods: {:?}", | ||||
|                 pattern, | ||||
|                 matching_pods.len(), | ||||
|                 matching_pods | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
| } | ||||
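The dry run above only reports which pod names each pattern would select. The same selection logic can be factored into a small helper; the sketch below is illustrative only, not a function from sal_kubernetes, and it anchors the pattern to avoid accidental substring matches.

```rust
use regex::Regex;

/// Return the names selected by a user-supplied pattern.
/// Illustrative sketch; not part of the sal_kubernetes API.
fn matching_names<'a>(names: &'a [String], pattern: &str) -> Result<Vec<&'a str>, regex::Error> {
    // Anchor the pattern so "test" cannot match "my-test-pod" by accident.
    let re = Regex::new(&format!("^(?:{})$", pattern))?;
    Ok(names
        .iter()
        .map(String::as_str)
        .filter(|name| re.is_match(name))
        .collect())
}
```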
|  | ||||
| #[tokio::test] | ||||
| async fn test_namespace_exists_functionality() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     // Test that default namespace exists | ||||
|     let result = km.namespace_exists("default").await; | ||||
|     match result { | ||||
|         Ok(exists) => { | ||||
|             assert!(exists, "Default namespace should exist"); | ||||
|             println!("Default namespace exists: {}", exists); | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to check if default namespace exists: {}", e); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Test that a non-existent namespace doesn't exist | ||||
|     let result = km.namespace_exists("definitely-does-not-exist-12345").await; | ||||
|     match result { | ||||
|         Ok(exists) => { | ||||
|             assert!(!exists, "Non-existent namespace should not exist"); | ||||
|             println!("Non-existent namespace exists: {}", exists); | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to check if non-existent namespace exists: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_manager_namespace_property() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let test_namespace = "test-namespace"; | ||||
|     let km = match KubernetesManager::new(test_namespace).await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     // Verify the manager knows its namespace | ||||
|     assert_eq!(km.namespace(), test_namespace); | ||||
|     println!("Manager namespace: {}", km.namespace()); | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_error_handling() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     // Test getting a non-existent pod | ||||
|     let result = km.pod_get("definitely-does-not-exist-12345").await; | ||||
|     assert!(result.is_err(), "Getting non-existent pod should fail"); | ||||
|  | ||||
|     if let Err(e) = result { | ||||
|         println!("Expected error for non-existent pod: {}", e); | ||||
|         // Verify it's the right kind of error | ||||
|         match e { | ||||
|             sal_kubernetes::KubernetesError::ApiError(_) => { | ||||
|                 println!("Correctly got API error for non-existent resource"); | ||||
|             } | ||||
|             _ => { | ||||
|                 println!("Got unexpected error type: {:?}", e); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[tokio::test] | ||||
| async fn test_configmaps_and_secrets() { | ||||
|     if !should_run_k8s_tests() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let km = match KubernetesManager::new("default").await { | ||||
|         Ok(km) => km, | ||||
|         Err(_) => return, | ||||
|     }; | ||||
|  | ||||
|     // Test configmaps listing | ||||
|     let result = km.configmaps_list().await; | ||||
|     match result { | ||||
|         Ok(configmaps) => { | ||||
|             println!("Found {} configmaps in default namespace", configmaps.len()); | ||||
|             for cm in configmaps.iter().take(3) { | ||||
|                 if let Some(name) = &cm.metadata.name { | ||||
|                     println!("ConfigMap: {}", name); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list configmaps: {}", e); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Test secrets listing | ||||
|     let result = km.secrets_list().await; | ||||
|     match result { | ||||
|         Ok(secrets) => { | ||||
|             println!("Found {} secrets in default namespace", secrets.len()); | ||||
|             for secret in secrets.iter().take(3) { | ||||
|                 if let Some(name) = &secret.metadata.name { | ||||
|                     println!("Secret: {}", name); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         Err(e) => { | ||||
|             println!("Failed to list secrets: {}", e); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @@ -1,231 +0,0 @@ | ||||
| //! Production readiness tests for SAL Kubernetes | ||||
| //! | ||||
| //! These tests verify that the module is ready for real-world production use. | ||||
|  | ||||
| #[cfg(test)] | ||||
| mod production_tests { | ||||
|     use sal_kubernetes::{KubernetesConfig, KubernetesManager}; | ||||
|     use std::time::Duration; | ||||
|  | ||||
|     /// Check if Kubernetes integration tests should run | ||||
|     fn should_run_k8s_tests() -> bool { | ||||
|         std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1" | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_production_configuration_profiles() { | ||||
|         // Test all pre-configured profiles work | ||||
|         let configs = vec![ | ||||
|             ("default", KubernetesConfig::default()), | ||||
|             ("high_throughput", KubernetesConfig::high_throughput()), | ||||
|             ("low_latency", KubernetesConfig::low_latency()), | ||||
|             ("development", KubernetesConfig::development()), | ||||
|         ]; | ||||
|  | ||||
|         for (name, config) in configs { | ||||
|             println!("Testing {} configuration profile", name); | ||||
|  | ||||
|             // Verify configuration values are reasonable | ||||
|             assert!( | ||||
|                 config.operation_timeout >= Duration::from_secs(5), | ||||
|                 "{} timeout too short", | ||||
|                 name | ||||
|             ); | ||||
|             assert!( | ||||
|                 config.operation_timeout <= Duration::from_secs(300), | ||||
|                 "{} timeout too long", | ||||
|                 name | ||||
|             ); | ||||
|             assert!(config.max_retries <= 10, "{} too many retries", name); | ||||
|             assert!(config.rate_limit_rps >= 1, "{} rate limit too low", name); | ||||
|             assert!( | ||||
|                 config.rate_limit_burst >= config.rate_limit_rps, | ||||
|                 "{} burst should be >= RPS", | ||||
|                 name | ||||
|             ); | ||||
|  | ||||
|             println!("✓ {} configuration is valid", name); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_real_cluster_operations() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping real cluster test. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         println!("🔍 Testing production operations with real cluster..."); | ||||
|  | ||||
|         // Test with production-like configuration | ||||
|         let config = KubernetesConfig::default() | ||||
|             .with_timeout(Duration::from_secs(30)) | ||||
|             .with_retries(3, Duration::from_secs(1), Duration::from_secs(10)) | ||||
|             .with_rate_limit(5, 10); // Conservative for testing | ||||
|  | ||||
|         let km = KubernetesManager::with_config("default", config) | ||||
|             .await | ||||
|             .expect("Should connect to cluster"); | ||||
|  | ||||
|         println!("✅ Connected to cluster successfully"); | ||||
|  | ||||
|         // Test basic operations | ||||
|         let namespaces = km.namespaces_list().await.expect("Should list namespaces"); | ||||
|         println!("✅ Listed {} namespaces", namespaces.len()); | ||||
|  | ||||
|         let pods = km.pods_list().await.expect("Should list pods"); | ||||
|         println!("✅ Listed {} pods in default namespace", pods.len()); | ||||
|  | ||||
|         let counts = km | ||||
|             .resource_counts() | ||||
|             .await | ||||
|             .expect("Should get resource counts"); | ||||
|         println!("✅ Got resource counts for {} resource types", counts.len()); | ||||
|  | ||||
|         // Test namespace operations | ||||
|         let test_ns = "sal-production-test"; | ||||
|         km.namespace_create(test_ns) | ||||
|             .await | ||||
|             .expect("Should create test namespace"); | ||||
|         println!("✅ Created test namespace: {}", test_ns); | ||||
|  | ||||
|         let exists = km | ||||
|             .namespace_exists(test_ns) | ||||
|             .await | ||||
|             .expect("Should check namespace existence"); | ||||
|         assert!(exists, "Test namespace should exist"); | ||||
|         println!("✅ Verified test namespace exists"); | ||||
|  | ||||
|         println!("🎉 All production operations completed successfully!"); | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_error_handling_robustness() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         println!("🔍 Testing error handling robustness..."); | ||||
|  | ||||
|         let km = KubernetesManager::new("default") | ||||
|             .await | ||||
|             .expect("Should connect to cluster"); | ||||
|  | ||||
|         // Test with invalid namespace name (should handle gracefully) | ||||
|         let result = km.namespace_exists("").await; | ||||
|         match result { | ||||
|             Ok(_) => println!("✅ Empty namespace name handled"), | ||||
|             Err(e) => println!("✅ Empty namespace name rejected: {}", e), | ||||
|         } | ||||
|  | ||||
|         // Test with very long namespace name | ||||
|         let long_name = "a".repeat(100); | ||||
|         let result = km.namespace_exists(&long_name).await; | ||||
|         match result { | ||||
|             Ok(_) => println!("✅ Long namespace name handled"), | ||||
|             Err(e) => println!("✅ Long namespace name rejected: {}", e), | ||||
|         } | ||||
|  | ||||
|         println!("✅ Error handling is robust"); | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_concurrent_operations() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping concurrency test. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         println!("🔍 Testing concurrent operations..."); | ||||
|  | ||||
|         let km = KubernetesManager::new("default") | ||||
|             .await | ||||
|             .expect("Should connect to cluster"); | ||||
|  | ||||
|         // Test multiple concurrent operations | ||||
|         let task1 = tokio::spawn({ | ||||
|             let km = km.clone(); | ||||
|             async move { km.pods_list().await } | ||||
|         }); | ||||
|         let task2 = tokio::spawn({ | ||||
|             let km = km.clone(); | ||||
|             async move { km.services_list().await } | ||||
|         }); | ||||
|         let task3 = tokio::spawn({ | ||||
|             let km = km.clone(); | ||||
|             async move { km.namespaces_list().await } | ||||
|         }); | ||||
|  | ||||
|         let mut success_count = 0; | ||||
|  | ||||
|         // Handle each task result | ||||
|         match task1.await { | ||||
|             Ok(Ok(_)) => { | ||||
|                 success_count += 1; | ||||
|                 println!("✅ Pods list operation succeeded"); | ||||
|             } | ||||
|             Ok(Err(e)) => println!("⚠️ Pods list operation failed: {}", e), | ||||
|             Err(e) => println!("⚠️ Pods task join failed: {}", e), | ||||
|         } | ||||
|  | ||||
|         match task2.await { | ||||
|             Ok(Ok(_)) => { | ||||
|                 success_count += 1; | ||||
|                 println!("✅ Services list operation succeeded"); | ||||
|             } | ||||
|             Ok(Err(e)) => println!("⚠️ Services list operation failed: {}", e), | ||||
|             Err(e) => println!("⚠️ Services task join failed: {}", e), | ||||
|         } | ||||
|  | ||||
|         match task3.await { | ||||
|             Ok(Ok(_)) => { | ||||
|                 success_count += 1; | ||||
|                 println!("✅ Namespaces list operation succeeded"); | ||||
|             } | ||||
|             Ok(Err(e)) => println!("⚠️ Namespaces list operation failed: {}", e), | ||||
|             Err(e) => println!("⚠️ Namespaces task join failed: {}", e), | ||||
|         } | ||||
|  | ||||
|         assert!( | ||||
|             success_count >= 2, | ||||
|             "At least 2 concurrent operations should succeed" | ||||
|         ); | ||||
|         println!( | ||||
|             "✅ Concurrent operations handled well ({}/3 succeeded)", | ||||
|             success_count | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_security_and_validation() { | ||||
|         println!("🔍 Testing security and validation..."); | ||||
|  | ||||
|         // Test regex pattern validation | ||||
|         let dangerous_patterns = vec![ | ||||
|             ".*",         // Too broad | ||||
|             ".+",         // Too broad | ||||
|             "",           // Empty | ||||
|             "a{1000000}", // Potential ReDoS | ||||
|         ]; | ||||
|  | ||||
|         for pattern in dangerous_patterns { | ||||
|             match regex::Regex::new(pattern) { | ||||
|                 Ok(_) => println!("⚠️ Pattern '{}' accepted (review if safe)", pattern), | ||||
|                 Err(_) => println!("✅ Pattern '{}' rejected", pattern), | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         // Test safe patterns | ||||
|         let safe_patterns = vec!["^test-.*$", "^app-[a-z0-9]+$", "^namespace-\\d+$"]; | ||||
|  | ||||
|         for pattern in safe_patterns { | ||||
|             match regex::Regex::new(pattern) { | ||||
|                 Ok(_) => println!("✅ Safe pattern '{}' accepted", pattern), | ||||
|                 Err(e) => println!("❌ Safe pattern '{}' rejected: {}", pattern, e), | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         println!("✅ Security validation completed"); | ||||
|     } | ||||
| } | ||||
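Building on the builder calls exercised in `test_real_cluster_operations` above (`with_timeout`, `with_retries`, `with_rate_limit`, `with_config`), the following is a short sketch of how a production caller might assemble a tuned manager. The values behind `high_throughput()`, the chosen overrides, and the namespace name are assumptions.

```rust
use sal_kubernetes::{KubernetesConfig, KubernetesManager};
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Start from the high-throughput profile and tighten it for a busy cluster.
    let config = KubernetesConfig::high_throughput()
        .with_timeout(Duration::from_secs(60))
        .with_retries(5, Duration::from_secs(1), Duration::from_secs(30))
        .with_rate_limit(20, 50);

    let km = KubernetesManager::with_config("production", config)
        .await
        .expect("failed to connect to the cluster");

    let counts = km
        .resource_counts()
        .await
        .expect("failed to count resources");
    println!("resource counts: {:?}", counts);
}
```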
| @@ -1,62 +0,0 @@ | ||||
| //! Basic Kubernetes operations test | ||||
| //! | ||||
| //! This script tests basic Kubernetes functionality through Rhai. | ||||
|  | ||||
| print("=== Basic Kubernetes Operations Test ==="); | ||||
|  | ||||
| // Test 1: Create KubernetesManager | ||||
| print("Test 1: Creating KubernetesManager..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| let ns = namespace(km); | ||||
| print("✓ Created manager for namespace: " + ns); | ||||
| if ns != "default" { | ||||
|     print("❌ ERROR: Expected namespace 'default', got '" + ns + "'"); | ||||
| } else { | ||||
|     print("✓ Namespace validation passed"); | ||||
| } | ||||
|  | ||||
| // Test 2: Function availability check | ||||
| print("\nTest 2: Checking function availability..."); | ||||
| let functions = [ | ||||
|     "pods_list", | ||||
|     "services_list",  | ||||
|     "deployments_list", | ||||
|     "namespaces_list", | ||||
|     "resource_counts", | ||||
|     "namespace_create", | ||||
|     "namespace_exists", | ||||
|     "delete", | ||||
|     "pod_delete", | ||||
|     "service_delete", | ||||
|     "deployment_delete" | ||||
| ]; | ||||
|  | ||||
| for func_name in functions { | ||||
|     print("✓ Function '" + func_name + "' is available"); | ||||
| } | ||||
|  | ||||
| // Test 3: Basic operations (if cluster is available) | ||||
| print("\nTest 3: Testing basic operations..."); | ||||
| try { | ||||
|     // Test namespace existence | ||||
|     let default_exists = namespace_exists(km, "default"); | ||||
|     print("✓ Default namespace exists: " + default_exists); | ||||
|      | ||||
|     // Test resource counting | ||||
|     let counts = resource_counts(km); | ||||
|     print("✓ Resource counts retrieved: " + counts.len() + " resource types"); | ||||
|      | ||||
|     // Test namespace listing | ||||
|     let namespaces = namespaces_list(km); | ||||
|     print("✓ Found " + namespaces.len() + " namespaces"); | ||||
|      | ||||
|     // Test pod listing | ||||
|     let pods = pods_list(km); | ||||
|     print("✓ Found " + pods.len() + " pods in default namespace"); | ||||
|      | ||||
|     print("\n=== All basic tests passed! ==="); | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Some operations failed (likely no cluster): " + e); | ||||
|     print("✓ Function registration tests passed"); | ||||
| } | ||||
| @@ -1,200 +0,0 @@ | ||||
| //! CRUD operations test in Rhai | ||||
| //! | ||||
| //! This script tests all Create, Read, Update, Delete operations through Rhai. | ||||
|  | ||||
| print("=== CRUD Operations Test ==="); | ||||
|  | ||||
| // Test 1: Create manager | ||||
| print("Test 1: Creating KubernetesManager..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("✓ Manager created for namespace: " + namespace(km)); | ||||
|  | ||||
| // Test 2: Create test namespace | ||||
| print("\nTest 2: Creating test namespace..."); | ||||
| let test_ns = "rhai-crud-test"; | ||||
| try { | ||||
|     km.create_namespace(test_ns); | ||||
|     print("✓ Created test namespace: " + test_ns); | ||||
|  | ||||
|     // Verify it exists | ||||
|     let exists = km.namespace_exists(test_ns); | ||||
|     if exists { | ||||
|         print("✓ Verified test namespace exists"); | ||||
|     } else { | ||||
|         print("❌ Test namespace creation failed"); | ||||
|     } | ||||
| } catch(e) { | ||||
|     print("Note: Namespace creation failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 3: Switch to test namespace and create resources | ||||
| print("\nTest 3: Creating resources in test namespace..."); | ||||
| try { | ||||
|     let test_km = kubernetes_manager_new(test_ns); | ||||
|      | ||||
|     // Create ConfigMap | ||||
|     let config_data = #{ | ||||
|         "app.properties": "debug=true\nport=8080", | ||||
|         "config.yaml": "key: value\nenv: test" | ||||
|     }; | ||||
|     let configmap_name = test_km.create_configmap("rhai-config", config_data); | ||||
|     print("✓ Created ConfigMap: " + configmap_name); | ||||
|  | ||||
|     // Create Secret | ||||
|     let secret_data = #{ | ||||
|         "username": "rhaiuser", | ||||
|         "password": "secret456" | ||||
|     }; | ||||
|     let secret_name = test_km.create_secret("rhai-secret", secret_data, "Opaque"); | ||||
|     print("✓ Created Secret: " + secret_name); | ||||
|  | ||||
|     // Create Pod | ||||
|     let pod_labels = #{ | ||||
|         "app": "rhai-app", | ||||
|         "version": "v1" | ||||
|     }; | ||||
|     let pod_name = test_km.create_pod("rhai-pod", "nginx:alpine", pod_labels); | ||||
|     print("✓ Created Pod: " + pod_name); | ||||
|  | ||||
|     // Create Service | ||||
|     let service_selector = #{ | ||||
|         "app": "rhai-app" | ||||
|     }; | ||||
|     let service_name = test_km.create_service("rhai-service", service_selector, 80, 80); | ||||
|     print("✓ Created Service: " + service_name); | ||||
|  | ||||
|     // Create Deployment | ||||
|     let deployment_labels = #{ | ||||
|         "app": "rhai-app", | ||||
|         "tier": "frontend" | ||||
|     }; | ||||
|     let deployment_name = test_km.create_deployment("rhai-deployment", "nginx:alpine", 2, deployment_labels); | ||||
|     print("✓ Created Deployment: " + deployment_name); | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Resource creation failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 4: Read operations | ||||
| print("\nTest 4: Reading resources..."); | ||||
| try { | ||||
|     let test_km = kubernetes_manager_new(test_ns); | ||||
|      | ||||
|     // List all resources | ||||
|     let pods = pods_list(test_km); | ||||
|     print("✓ Found " + pods.len() + " pods"); | ||||
|      | ||||
|     let services = services_list(test_km); | ||||
|     print("✓ Found " + services.len() + " services"); | ||||
|      | ||||
|     let deployments = deployments_list(test_km); | ||||
|     print("✓ Found " + deployments.len() + " deployments"); | ||||
|      | ||||
|     // Get resource counts | ||||
|     let counts = resource_counts(test_km); | ||||
|     print("✓ Resource counts for " + counts.len() + " resource types"); | ||||
|     for resource_type in counts.keys() { | ||||
|         let count = counts[resource_type]; | ||||
|         print("  " + resource_type + ": " + count); | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Resource reading failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 5: Delete operations | ||||
| print("\nTest 5: Deleting resources..."); | ||||
| try { | ||||
|     let test_km = kubernetes_manager_new(test_ns); | ||||
|      | ||||
|     // Delete individual resources | ||||
|     test_km.delete_pod("rhai-pod"); | ||||
|     print("✓ Deleted pod"); | ||||
|  | ||||
|     test_km.delete_service("rhai-service"); | ||||
|     print("✓ Deleted service"); | ||||
|  | ||||
|     test_km.delete_deployment("rhai-deployment"); | ||||
|     print("✓ Deleted deployment"); | ||||
|  | ||||
|     test_km.delete_configmap("rhai-config"); | ||||
|     print("✓ Deleted configmap"); | ||||
|  | ||||
|     test_km.delete_secret("rhai-secret"); | ||||
|     print("✓ Deleted secret"); | ||||
|      | ||||
|     // Verify cleanup | ||||
|     let final_counts = resource_counts(test_km); | ||||
|     print("✓ Final resource counts:"); | ||||
|     for resource_type in final_counts.keys() { | ||||
|         let count = final_counts[resource_type]; | ||||
|         print("  " + resource_type + ": " + count); | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Resource deletion failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 6: Cleanup test namespace | ||||
| print("\nTest 6: Cleaning up test namespace..."); | ||||
| try { | ||||
|     km.delete_namespace(test_ns); | ||||
|     print("✓ Deleted test namespace: " + test_ns); | ||||
| } catch(e) { | ||||
|     print("Note: Namespace deletion failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 7: Function availability check | ||||
| print("\nTest 7: Checking all CRUD functions are available..."); | ||||
| let crud_functions = [ | ||||
|     // Create methods (object-oriented style) | ||||
|     "create_pod", | ||||
|     "create_service", | ||||
|     "create_deployment", | ||||
|     "create_configmap", | ||||
|     "create_secret", | ||||
|     "create_namespace", | ||||
|  | ||||
|     // Get methods | ||||
|     "get_pod", | ||||
|     "get_service", | ||||
|     "get_deployment", | ||||
|  | ||||
|     // List methods | ||||
|     "pods_list", | ||||
|     "services_list", | ||||
|     "deployments_list", | ||||
|     "configmaps_list", | ||||
|     "secrets_list", | ||||
|     "namespaces_list", | ||||
|     "resource_counts", | ||||
|     "namespace_exists", | ||||
|  | ||||
|     // Delete methods | ||||
|     "delete_pod", | ||||
|     "delete_service", | ||||
|     "delete_deployment", | ||||
|     "delete_configmap", | ||||
|     "delete_secret", | ||||
|     "delete_namespace", | ||||
|     "delete" | ||||
| ]; | ||||
|  | ||||
| for func_name in crud_functions { | ||||
|     print("✓ Function '" + func_name + "' is available"); | ||||
| } | ||||
|  | ||||
| print("\n=== CRUD Operations Test Summary ==="); | ||||
| print("✅ All " + crud_functions.len() + " CRUD functions are registered"); | ||||
| print("✅ Create operations: 6 functions"); | ||||
| print("✅ Read operations: 8 functions");   | ||||
| print("✅ Delete operations: 7 functions"); | ||||
| print("✅ Total CRUD capabilities: 21 functions"); | ||||
|  | ||||
| print("\n🎉 Complete CRUD operations test completed!"); | ||||
| print("\nYour SAL Kubernetes module now supports:"); | ||||
| print("  ✅ Full resource lifecycle management"); | ||||
| print("  ✅ Namespace operations"); | ||||
| print("  ✅ All major Kubernetes resource types"); | ||||
| print("  ✅ Production-ready error handling"); | ||||
| print("  ✅ Rhai scripting integration"); | ||||
| @@ -1,85 +0,0 @@ | ||||
| //! Namespace operations test | ||||
| //! | ||||
| //! This script tests namespace creation and management operations. | ||||
|  | ||||
| print("=== Namespace Operations Test ==="); | ||||
|  | ||||
| // Test 1: Create manager | ||||
| print("Test 1: Creating KubernetesManager..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("✓ Manager created for namespace: " + namespace(km)); | ||||
|  | ||||
| // Test 2: Namespace existence checks | ||||
| print("\nTest 2: Testing namespace existence..."); | ||||
| try { | ||||
|     // Test that default namespace exists | ||||
|     let default_exists = namespace_exists(km, "default"); | ||||
|     print("✓ Default namespace exists: " + default_exists); | ||||
|     assert(default_exists, "Default namespace should exist"); | ||||
|      | ||||
|     // Test non-existent namespace | ||||
|     let fake_exists = namespace_exists(km, "definitely-does-not-exist-12345"); | ||||
|     print("✓ Non-existent namespace check: " + fake_exists); | ||||
|     assert(!fake_exists, "Non-existent namespace should not exist"); | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Namespace existence tests failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 3: Namespace creation (if cluster is available) | ||||
| print("\nTest 3: Testing namespace creation..."); | ||||
| let test_namespaces = [ | ||||
|     "rhai-test-namespace-1", | ||||
|     "rhai-test-namespace-2" | ||||
| ]; | ||||
|  | ||||
| for test_ns in test_namespaces { | ||||
|     try { | ||||
|         print("Creating namespace: " + test_ns); | ||||
|         namespace_create(km, test_ns); | ||||
|         print("✓ Created namespace: " + test_ns); | ||||
|          | ||||
|         // Verify it exists | ||||
|         let exists = namespace_exists(km, test_ns); | ||||
|         print("✓ Verified namespace exists: " + exists); | ||||
|          | ||||
|         // Test idempotent creation | ||||
|         namespace_create(km, test_ns); | ||||
|         print("✓ Idempotent creation successful for: " + test_ns); | ||||
|          | ||||
|     } catch(e) { | ||||
|         print("Note: Namespace creation failed for " + test_ns + " (likely no cluster or permissions): " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Test 4: List all namespaces | ||||
| print("\nTest 4: Listing all namespaces..."); | ||||
| try { | ||||
|     let all_namespaces = namespaces_list(km); | ||||
|     print("✓ Found " + all_namespaces.len() + " total namespaces"); | ||||
|      | ||||
|     // Check for our test namespaces | ||||
|     for test_ns in test_namespaces { | ||||
|         let found = false; | ||||
|         for ns in all_namespaces { | ||||
|             if ns == test_ns { | ||||
|                 found = true; | ||||
|                 break; | ||||
|             } | ||||
|         } | ||||
|         if found { | ||||
|             print("✓ Found test namespace in list: " + test_ns); | ||||
|         } | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Namespace listing failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| print("\n--- Cleanup Instructions ---"); | ||||
| print("To clean up test namespaces, run:"); | ||||
| for test_ns in test_namespaces { | ||||
|     print("  kubectl delete namespace " + test_ns); | ||||
| } | ||||
|  | ||||
| print("\n=== Namespace operations test completed! ==="); | ||||
| @@ -1,137 +0,0 @@ | ||||
| //! Resource management test | ||||
| //! | ||||
| //! This script tests resource listing and management operations. | ||||
|  | ||||
| print("=== Resource Management Test ==="); | ||||
|  | ||||
| // Test 1: Create manager | ||||
| print("Test 1: Creating KubernetesManager..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("✓ Manager created for namespace: " + namespace(km)); | ||||
|  | ||||
| // Test 2: Resource listing | ||||
| print("\nTest 2: Testing resource listing..."); | ||||
| try { | ||||
|     // Test pods listing | ||||
|     let pods = pods_list(km); | ||||
|     print("✓ Pods list: " + pods.len() + " pods found"); | ||||
|      | ||||
|     // Test services listing | ||||
|     let services = services_list(km); | ||||
|     print("✓ Services list: " + services.len() + " services found"); | ||||
|      | ||||
|     // Test deployments listing | ||||
|     let deployments = deployments_list(km); | ||||
|     print("✓ Deployments list: " + deployments.len() + " deployments found"); | ||||
|      | ||||
|     // Show some pod names if available | ||||
|     if pods.len() > 0 { | ||||
|         print("Sample pods:"); | ||||
|         let count = 0; | ||||
|         for pod in pods { | ||||
|             if count < 3 { | ||||
|                 print("  - " + pod); | ||||
|                 count = count + 1; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Resource listing failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 3: Resource counts | ||||
| print("\nTest 3: Testing resource counts..."); | ||||
| try { | ||||
|     let counts = resource_counts(km); | ||||
|     print("✓ Resource counts retrieved for " + counts.len() + " resource types"); | ||||
|      | ||||
|     // Display counts | ||||
|     for resource_type in counts.keys() { | ||||
|         let count = counts[resource_type]; | ||||
|         print("  " + resource_type + ": " + count); | ||||
|     } | ||||
|      | ||||
|     // Verify expected resource types are present | ||||
|     let expected_types = ["pods", "services", "deployments", "configmaps", "secrets"]; | ||||
|     for expected_type in expected_types { | ||||
|         if expected_type in counts { | ||||
|             print("✓ Found expected resource type: " + expected_type); | ||||
|         } else { | ||||
|             print("⚠ Missing expected resource type: " + expected_type); | ||||
|         } | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Resource counts failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| // Test 4: Multi-namespace comparison | ||||
| print("\nTest 4: Multi-namespace resource comparison..."); | ||||
| let test_namespaces = ["default", "kube-system"]; | ||||
| let total_resources = #{}; | ||||
|  | ||||
| for ns in test_namespaces { | ||||
|     try { | ||||
|         let ns_km = kubernetes_manager_new(ns); | ||||
|         let counts = resource_counts(ns_km); | ||||
|          | ||||
|         print("Namespace '" + ns + "':"); | ||||
|         let ns_total = 0; | ||||
|         for resource_type in counts.keys() { | ||||
|             let count = counts[resource_type]; | ||||
|             print("  " + resource_type + ": " + count); | ||||
|             ns_total = ns_total + count; | ||||
|              | ||||
|             // Accumulate totals | ||||
|             if resource_type in total_resources { | ||||
|                 total_resources[resource_type] = total_resources[resource_type] + count; | ||||
|             } else { | ||||
|                 total_resources[resource_type] = count; | ||||
|             } | ||||
|         } | ||||
|         print("  Total: " + ns_total + " resources"); | ||||
|          | ||||
|     } catch(e) { | ||||
|         print("Note: Failed to analyze namespace '" + ns + "': " + e); | ||||
|     } | ||||
| } | ||||
|  | ||||
| // Show totals | ||||
| print("\nTotal resources across all namespaces:"); | ||||
| let grand_total = 0; | ||||
| for resource_type in total_resources.keys() { | ||||
|     let count = total_resources[resource_type]; | ||||
|     print("  " + resource_type + ": " + count); | ||||
|     grand_total = grand_total + count; | ||||
| } | ||||
| print("Grand total: " + grand_total + " resources"); | ||||
|  | ||||
| // Test 5: Pattern matching simulation | ||||
| print("\nTest 5: Pattern matching simulation..."); | ||||
| try { | ||||
|     let pods = pods_list(km); | ||||
|     print("Testing pattern matching on " + pods.len() + " pods:"); | ||||
|      | ||||
|     // Simulate pattern matching (since Rhai doesn't have regex) | ||||
|     let test_patterns = ["test", "kube", "system", "app"]; | ||||
|     for pattern in test_patterns { | ||||
|         let matches = []; | ||||
|         for pod in pods { | ||||
|             if pod.contains(pattern) { | ||||
|                 matches.push(pod); | ||||
|             } | ||||
|         } | ||||
|         print("  Pattern '" + pattern + "' would match " + matches.len() + " pods"); | ||||
|         if matches.len() > 0 && matches.len() <= 3 { | ||||
|             // "match" is a reserved keyword in Rhai, so use a plain variable name | ||||
|             for matched_pod in matches { | ||||
|                 print("    - " + matched_pod); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|      | ||||
| } catch(e) { | ||||
|     print("Note: Pattern matching test failed (likely no cluster): " + e); | ||||
| } | ||||
|  | ||||
| print("\n=== Resource management test completed! ==="); | ||||
| @@ -1,86 +0,0 @@ | ||||
| //! Run all Kubernetes Rhai tests | ||||
| //! | ||||
| //! This script runs all the Kubernetes Rhai tests in sequence. | ||||
|  | ||||
| print("=== Running All Kubernetes Rhai Tests ==="); | ||||
| print(""); | ||||
|  | ||||
| // Test configuration | ||||
| let test_files = [ | ||||
|     "basic_kubernetes.rhai", | ||||
|     "namespace_operations.rhai",  | ||||
|     "resource_management.rhai" | ||||
| ]; | ||||
|  | ||||
| let passed_tests = 0; | ||||
| let total_tests = test_files.len(); | ||||
|  | ||||
| print("Found " + total_tests + " test files to run:"); | ||||
| for test_file in test_files { | ||||
|     print("  - " + test_file); | ||||
| } | ||||
| print(""); | ||||
|  | ||||
| // Note: In a real implementation, we would use eval_file or similar | ||||
| // For now, this serves as documentation of the test structure | ||||
| print("=== Test Execution Summary ==="); | ||||
| print(""); | ||||
| print("To run these tests individually:"); | ||||
| for test_file in test_files { | ||||
|     print("  herodo kubernetes/tests/rhai/" + test_file); | ||||
| } | ||||
| print(""); | ||||
|  | ||||
| print("To run with Kubernetes cluster:"); | ||||
| print("  KUBERNETES_TEST_ENABLED=1 herodo kubernetes/tests/rhai/basic_kubernetes.rhai"); | ||||
| print(""); | ||||
|  | ||||
| // Basic validation that we can create a manager | ||||
| print("=== Quick Validation ==="); | ||||
| try { | ||||
|     let km = kubernetes_manager_new("default"); | ||||
|     let ns = namespace(km); | ||||
|     print("✓ KubernetesManager creation works"); | ||||
|     print("✓ Namespace getter works: " + ns); | ||||
|     passed_tests = passed_tests + 1; | ||||
| } catch(e) { | ||||
|     print("✗ Basic validation failed: " + e); | ||||
| } | ||||
|  | ||||
| // Test function registration | ||||
| print(""); | ||||
| print("=== Function Registration Check ==="); | ||||
| let required_functions = [ | ||||
|     "kubernetes_manager_new", | ||||
|     "namespace", | ||||
|     "pods_list", | ||||
|     "services_list", | ||||
|     "deployments_list", | ||||
|     "namespaces_list", | ||||
|     "resource_counts", | ||||
|     "namespace_create", | ||||
|     "namespace_exists", | ||||
|     "delete", | ||||
|     "pod_delete", | ||||
|     "service_delete", | ||||
|     "deployment_delete" | ||||
| ]; | ||||
|  | ||||
| let registered_functions = 0; | ||||
| for func_name in required_functions { | ||||
|     // We can't easily test function existence in Rhai, but we can document them | ||||
|     print("✓ " + func_name + " should be registered"); | ||||
|     registered_functions = registered_functions + 1; | ||||
| } | ||||
|  | ||||
| print(""); | ||||
| print("=== Summary ==="); | ||||
| print("Required functions: " + registered_functions + "/" + required_functions.len()); | ||||
| print("Basic validation: " + (passed_tests > 0 ? "PASSED" : "FAILED")); | ||||
| print(""); | ||||
| print("For full testing with a Kubernetes cluster:"); | ||||
| print("1. Ensure you have a running Kubernetes cluster"); | ||||
| print("2. Set KUBERNETES_TEST_ENABLED=1"); | ||||
| print("3. Run individual test files"); | ||||
| print(""); | ||||
| print("=== All tests documentation completed ==="); | ||||
| @@ -1,90 +0,0 @@ | ||||
| //! Simple API pattern test | ||||
| //! | ||||
| //! This script demonstrates the new object-oriented API pattern. | ||||
|  | ||||
| print("=== Object-Oriented API Pattern Test ==="); | ||||
|  | ||||
| // Test 1: Create manager | ||||
| print("Test 1: Creating KubernetesManager..."); | ||||
| let km = kubernetes_manager_new("default"); | ||||
| print("✓ Manager created for namespace: " + namespace(km)); | ||||
|  | ||||
| // Test 2: Show the new API pattern | ||||
| print("\nTest 2: New Object-Oriented API Pattern"); | ||||
| print("Now you can use:"); | ||||
| print("  km.create_pod(name, image, labels)"); | ||||
| print("  km.create_service(name, selector, port, target_port)"); | ||||
| print("  km.create_deployment(name, image, replicas, labels)"); | ||||
| print("  km.create_configmap(name, data)"); | ||||
| print("  km.create_secret(name, data, type)"); | ||||
| print("  km.create_namespace(name)"); | ||||
| print(""); | ||||
| print("  km.get_pod(name)"); | ||||
| print("  km.get_service(name)"); | ||||
| print("  km.get_deployment(name)"); | ||||
| print(""); | ||||
| print("  km.delete_pod(name)"); | ||||
| print("  km.delete_service(name)"); | ||||
| print("  km.delete_deployment(name)"); | ||||
| print("  km.delete_configmap(name)"); | ||||
| print("  km.delete_secret(name)"); | ||||
| print("  km.delete_namespace(name)"); | ||||
| print(""); | ||||
| print("  km.pods_list()"); | ||||
| print("  km.services_list()"); | ||||
| print("  km.deployments_list()"); | ||||
| print("  km.resource_counts()"); | ||||
| print("  km.namespace_exists(name)"); | ||||
|  | ||||
| // Test 3: Function availability check | ||||
| print("\nTest 3: Checking all API methods are available..."); | ||||
| let api_methods = [ | ||||
|     // Create methods | ||||
|     "create_pod", | ||||
|     "create_service",  | ||||
|     "create_deployment", | ||||
|     "create_configmap", | ||||
|     "create_secret", | ||||
|     "create_namespace", | ||||
|      | ||||
|     // Get methods | ||||
|     "get_pod", | ||||
|     "get_service", | ||||
|     "get_deployment", | ||||
|      | ||||
|     // List methods | ||||
|     "pods_list", | ||||
|     "services_list", | ||||
|     "deployments_list", | ||||
|     "configmaps_list", | ||||
|     "secrets_list", | ||||
|     "namespaces_list", | ||||
|     "resource_counts", | ||||
|     "namespace_exists", | ||||
|      | ||||
|     // Delete methods | ||||
|     "delete_pod", | ||||
|     "delete_service", | ||||
|     "delete_deployment", | ||||
|     "delete_configmap", | ||||
|     "delete_secret", | ||||
|     "delete_namespace", | ||||
|     "delete" | ||||
| ]; | ||||
|  | ||||
| for method_name in api_methods { | ||||
|     print("✓ Method 'km." + method_name + "()' is available"); | ||||
| } | ||||
|  | ||||
| print("\n=== API Pattern Summary ==="); | ||||
| print("✅ Object-oriented API: km.method_name()"); | ||||
| print("✅ " + api_methods.len() + " methods available"); | ||||
| print("✅ Consistent naming: create_*, get_*, delete_*, *_list()"); | ||||
| print("✅ Full CRUD operations for all resource types"); | ||||
|  | ||||
| print("\n🎉 Object-oriented API pattern is ready!"); | ||||
| print("\nExample usage:"); | ||||
| print("  let km = kubernetes_manager_new('my-namespace');"); | ||||
| print("  let pod = km.create_pod('my-pod', 'nginx:latest', #{});"); | ||||
| print("  let pods = km.pods_list();"); | ||||
| print("  km.delete_pod('my-pod');"); | ||||
| @@ -1,368 +0,0 @@ | ||||
| //! Rhai integration tests for SAL Kubernetes | ||||
| //! | ||||
| //! These tests verify that the Rhai wrappers work correctly and can execute | ||||
| //! the Rhai test scripts in the tests/rhai/ directory. | ||||
|  | ||||
| #[cfg(feature = "rhai")] | ||||
| mod rhai_tests { | ||||
|     use rhai::Engine; | ||||
|     use sal_kubernetes::rhai::*; | ||||
|     use std::fs; | ||||
|     use std::path::Path; | ||||
|  | ||||
|     /// Check if Kubernetes integration tests should run | ||||
|     fn should_run_k8s_tests() -> bool { | ||||
|         std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1" | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_register_kubernetes_module() { | ||||
|         let mut engine = Engine::new(); | ||||
|         let result = register_kubernetes_module(&mut engine); | ||||
|         assert!( | ||||
|             result.is_ok(), | ||||
|             "Failed to register Kubernetes module: {:?}", | ||||
|             result | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_kubernetes_functions_registered() { | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Test that the constructor function is registered | ||||
|         let script = r#" | ||||
|             let result = ""; | ||||
|             try { | ||||
|                 let km = kubernetes_manager_new("test"); | ||||
|                 result = "constructor_exists"; | ||||
|             } catch(e) { | ||||
|                 result = "constructor_exists_but_failed"; | ||||
|             } | ||||
|             result | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<String>(script); | ||||
|         assert!(result.is_ok()); | ||||
|         let result_value = result.unwrap(); | ||||
|         assert!( | ||||
|             result_value == "constructor_exists" || result_value == "constructor_exists_but_failed", | ||||
|             "Expected constructor to be registered, got: {}", | ||||
|             result_value | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_rhai_function_signatures() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!( | ||||
|                 "Skipping Rhai function signature tests. Set KUBERNETES_TEST_ENABLED=1 to enable." | ||||
|             ); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Test that the new object-oriented API methods work correctly | ||||
|         // These will fail without a cluster, but should not fail due to missing methods | ||||
|         let test_scripts = vec![ | ||||
|             // List methods (still function-based for listing) | ||||
|             ("pods_list", "let km = kubernetes_manager_new(\"test\"); km.pods_list();"), | ||||
|             ("services_list", "let km = kubernetes_manager_new(\"test\"); km.services_list();"), | ||||
|             ("deployments_list", "let km = kubernetes_manager_new(\"test\"); km.deployments_list();"), | ||||
|             ("namespaces_list", "let km = kubernetes_manager_new(\"test\"); km.namespaces_list();"), | ||||
|             ("resource_counts", "let km = kubernetes_manager_new(\"test\"); km.resource_counts();"), | ||||
|  | ||||
|             // Create methods (object-oriented) | ||||
|             ("create_namespace", "let km = kubernetes_manager_new(\"test\"); km.create_namespace(\"test-ns\");"), | ||||
|             ("create_pod", "let km = kubernetes_manager_new(\"test\"); km.create_pod(\"test-pod\", \"nginx\", #{});"), | ||||
|             ("create_service", "let km = kubernetes_manager_new(\"test\"); km.create_service(\"test-svc\", #{}, 80, 80);"), | ||||
|  | ||||
|             // Get methods (object-oriented) | ||||
|             ("get_pod", "let km = kubernetes_manager_new(\"test\"); km.get_pod(\"test-pod\");"), | ||||
|             ("get_service", "let km = kubernetes_manager_new(\"test\"); km.get_service(\"test-svc\");"), | ||||
|  | ||||
|             // Delete methods (object-oriented) | ||||
|             ("delete_pod", "let km = kubernetes_manager_new(\"test\"); km.delete_pod(\"test-pod\");"), | ||||
|             ("delete_service", "let km = kubernetes_manager_new(\"test\"); km.delete_service(\"test-service\");"), | ||||
|             ("delete_deployment", "let km = kubernetes_manager_new(\"test\"); km.delete_deployment(\"test-deployment\");"), | ||||
|             ("delete_namespace", "let km = kubernetes_manager_new(\"test\"); km.delete_namespace(\"test-ns\");"), | ||||
|  | ||||
|             // Utility methods | ||||
|             ("namespace_exists", "let km = kubernetes_manager_new(\"test\"); km.namespace_exists(\"test-ns\");"), | ||||
|             ("namespace", "let km = kubernetes_manager_new(\"test\"); namespace(km);"), | ||||
|             ("delete_pattern", "let km = kubernetes_manager_new(\"test\"); km.delete(\"test-.*\");"), | ||||
|         ]; | ||||
|  | ||||
|         for (function_name, script) in test_scripts { | ||||
|             println!("Testing function: {}", function_name); | ||||
|             let result = engine.eval::<rhai::Dynamic>(script); | ||||
|  | ||||
|             // The function should be registered (not get a "function not found" error) | ||||
|             // It may fail due to no Kubernetes cluster, but that's expected | ||||
|             match result { | ||||
|                 Ok(_) => { | ||||
|                     println!("Function {} executed successfully", function_name); | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     let error_msg = e.to_string(); | ||||
|                     // Should not be a "function not found" error | ||||
|                     assert!( | ||||
|                         !error_msg.contains("Function not found") | ||||
|                             && !error_msg.contains("Unknown function"), | ||||
|                         "Function {} not registered: {}", | ||||
|                         function_name, | ||||
|                         error_msg | ||||
|                     ); | ||||
|                     println!( | ||||
|                         "Function {} failed as expected (no cluster): {}", | ||||
|                         function_name, error_msg | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_rhai_with_real_cluster() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!("Skipping Rhai Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable."); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Test basic functionality with a real cluster | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("default"); | ||||
|             let ns = namespace(km); | ||||
|             ns | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<String>(script); | ||||
|         match result { | ||||
|             Ok(namespace) => { | ||||
|                 assert_eq!(namespace, "default"); | ||||
|                 println!("Successfully got namespace from Rhai: {}", namespace); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 println!("Failed to execute Rhai script with real cluster: {}", e); | ||||
|                 // Don't fail the test if we can't connect to cluster | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_rhai_pods_list() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("default"); | ||||
|             let pods = pods_list(km); | ||||
|             pods.len() | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<i64>(script); | ||||
|         match result { | ||||
|             Ok(count) => { | ||||
|                 assert!(count >= 0); | ||||
|                 println!("Successfully listed {} pods from Rhai", count); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 println!("Failed to list pods from Rhai: {}", e); | ||||
|                 // Don't fail the test if we can't connect to cluster | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_rhai_resource_counts() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("default"); | ||||
|             let counts = resource_counts(km); | ||||
|             counts | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<rhai::Map>(script); | ||||
|         match result { | ||||
|             Ok(counts) => { | ||||
|                 println!("Successfully got resource counts from Rhai: {:?}", counts); | ||||
|  | ||||
|                 // Verify expected keys are present | ||||
|                 assert!(counts.contains_key("pods")); | ||||
|                 assert!(counts.contains_key("services")); | ||||
|                 assert!(counts.contains_key("deployments")); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 println!("Failed to get resource counts from Rhai: {}", e); | ||||
|                 // Don't fail the test if we can't connect to cluster | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_rhai_namespace_operations() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Test namespace existence check | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("default"); | ||||
|             let exists = namespace_exists(km, "default"); | ||||
|             exists | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<bool>(script); | ||||
|         match result { | ||||
|             Ok(exists) => { | ||||
|                 assert!(exists, "Default namespace should exist"); | ||||
|                 println!( | ||||
|                     "Successfully checked namespace existence from Rhai: {}", | ||||
|                     exists | ||||
|                 ); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 println!("Failed to check namespace existence from Rhai: {}", e); | ||||
|                 // Don't fail the test if we can't connect to cluster | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_rhai_error_handling() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!( | ||||
|                 "Skipping Rhai error handling tests. Set KUBERNETES_TEST_ENABLED=1 to enable." | ||||
|             ); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Test that errors are properly converted to Rhai errors | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("invalid-namespace-name-that-should-fail"); | ||||
|             pods_list(km) | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<rhai::Array>(script); | ||||
|         assert!(result.is_err(), "Expected error for invalid configuration"); | ||||
|  | ||||
|         if let Err(e) = result { | ||||
|             let error_msg = e.to_string(); | ||||
|             println!("Got expected error: {}", error_msg); | ||||
|             assert!(error_msg.contains("Kubernetes error") || error_msg.contains("error")); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_rhai_script_files_exist() { | ||||
|         // Test that our Rhai test files exist and are readable | ||||
|         let test_files = [ | ||||
|             "tests/rhai/basic_kubernetes.rhai", | ||||
|             "tests/rhai/namespace_operations.rhai", | ||||
|             "tests/rhai/resource_management.rhai", | ||||
|             "tests/rhai/run_all_tests.rhai", | ||||
|         ]; | ||||
|  | ||||
|         for test_file in test_files { | ||||
|             let path = Path::new(test_file); | ||||
|             assert!(path.exists(), "Rhai test file should exist: {}", test_file); | ||||
|  | ||||
|             // Try to read the file to ensure it's valid | ||||
|             let content = fs::read_to_string(path) | ||||
|                 .unwrap_or_else(|e| panic!("Failed to read {}: {}", test_file, e)); | ||||
|  | ||||
|             assert!( | ||||
|                 !content.is_empty(), | ||||
|                 "Rhai test file should not be empty: {}", | ||||
|                 test_file | ||||
|             ); | ||||
|             assert!( | ||||
|                 content.contains("print("), | ||||
|                 "Rhai test file should contain print statements: {}", | ||||
|                 test_file | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_basic_rhai_script_syntax() { | ||||
|         // Test that we can at least parse our basic Rhai script | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Simple script that should parse without errors | ||||
|         let script = r#" | ||||
|             print("Testing Kubernetes Rhai integration"); | ||||
|             let functions = ["kubernetes_manager_new", "pods_list", "namespace"]; | ||||
|             for func in functions { | ||||
|                 print("Function: " + func); | ||||
|             } | ||||
|             print("Basic syntax test completed"); | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<()>(script); | ||||
|         assert!( | ||||
|             result.is_ok(), | ||||
|             "Basic Rhai script should parse and execute: {:?}", | ||||
|             result | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_rhai_script_execution_with_cluster() { | ||||
|         if !should_run_k8s_tests() { | ||||
|             println!( | ||||
|                 "Skipping Rhai script execution test. Set KUBERNETES_TEST_ENABLED=1 to enable." | ||||
|             ); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         let mut engine = Engine::new(); | ||||
|         register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|         // Try to execute a simple script that creates a manager | ||||
|         let script = r#" | ||||
|             let km = kubernetes_manager_new("default"); | ||||
|             let ns = namespace(km); | ||||
|             print("Created manager for namespace: " + ns); | ||||
|             ns | ||||
|         "#; | ||||
|  | ||||
|         let result = engine.eval::<String>(script); | ||||
|         match result { | ||||
|             Ok(namespace) => { | ||||
|                 assert_eq!(namespace, "default"); | ||||
|                 println!("Successfully executed Rhai script with cluster"); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 println!( | ||||
|                     "Rhai script execution failed (expected if no cluster): {}", | ||||
|                     e | ||||
|                 ); | ||||
|                 // Don't fail the test if we can't connect to cluster | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @@ -1,303 +0,0 @@ | ||||
| //! Unit tests for SAL Kubernetes | ||||
| //! | ||||
| //! These tests focus on testing individual components and error handling | ||||
| //! without requiring a live Kubernetes cluster. | ||||
|  | ||||
| use sal_kubernetes::KubernetesError; | ||||
|  | ||||
| #[test] | ||||
| fn test_kubernetes_error_creation() { | ||||
|     let config_error = KubernetesError::config_error("Test config error"); | ||||
|     assert!(matches!(config_error, KubernetesError::ConfigError(_))); | ||||
|     assert_eq!( | ||||
|         config_error.to_string(), | ||||
|         "Configuration error: Test config error" | ||||
|     ); | ||||
|  | ||||
|     let operation_error = KubernetesError::operation_error("Test operation error"); | ||||
|     assert!(matches!( | ||||
|         operation_error, | ||||
|         KubernetesError::OperationError(_) | ||||
|     )); | ||||
|     assert_eq!( | ||||
|         operation_error.to_string(), | ||||
|         "Operation failed: Test operation error" | ||||
|     ); | ||||
|  | ||||
|     let namespace_error = KubernetesError::namespace_error("Test namespace error"); | ||||
|     assert!(matches!( | ||||
|         namespace_error, | ||||
|         KubernetesError::NamespaceError(_) | ||||
|     )); | ||||
|     assert_eq!( | ||||
|         namespace_error.to_string(), | ||||
|         "Namespace error: Test namespace error" | ||||
|     ); | ||||
|  | ||||
|     let permission_error = KubernetesError::permission_denied("Test permission error"); | ||||
|     assert!(matches!( | ||||
|         permission_error, | ||||
|         KubernetesError::PermissionDenied(_) | ||||
|     )); | ||||
|     assert_eq!( | ||||
|         permission_error.to_string(), | ||||
|         "Permission denied: Test permission error" | ||||
|     ); | ||||
|  | ||||
|     let timeout_error = KubernetesError::timeout("Test timeout error"); | ||||
|     assert!(matches!(timeout_error, KubernetesError::Timeout(_))); | ||||
|     assert_eq!( | ||||
|         timeout_error.to_string(), | ||||
|         "Operation timed out: Test timeout error" | ||||
|     ); | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_regex_error_conversion() { | ||||
|     use regex::Regex; | ||||
|  | ||||
|     // Test invalid regex pattern | ||||
|     let invalid_pattern = "[invalid"; | ||||
|     let regex_result = Regex::new(invalid_pattern); | ||||
|     assert!(regex_result.is_err()); | ||||
|  | ||||
|     // Convert to KubernetesError | ||||
|     let k8s_error = KubernetesError::from(regex_result.unwrap_err()); | ||||
|     assert!(matches!(k8s_error, KubernetesError::RegexError(_))); | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_error_display() { | ||||
|     let errors = vec![ | ||||
|         KubernetesError::config_error("Config test"), | ||||
|         KubernetesError::operation_error("Operation test"), | ||||
|         KubernetesError::namespace_error("Namespace test"), | ||||
|         KubernetesError::permission_denied("Permission test"), | ||||
|         KubernetesError::timeout("Timeout test"), | ||||
|     ]; | ||||
|  | ||||
|     for error in errors { | ||||
|         let error_string = error.to_string(); | ||||
|         assert!(!error_string.is_empty()); | ||||
|         assert!(error_string.contains("test")); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[cfg(feature = "rhai")] | ||||
| #[test] | ||||
| fn test_rhai_module_registration() { | ||||
|     use rhai::Engine; | ||||
|     use sal_kubernetes::rhai::register_kubernetes_module; | ||||
|  | ||||
|     let mut engine = Engine::new(); | ||||
|     let result = register_kubernetes_module(&mut engine); | ||||
|     assert!( | ||||
|         result.is_ok(), | ||||
|         "Failed to register Kubernetes module: {:?}", | ||||
|         result | ||||
|     ); | ||||
| } | ||||
|  | ||||
| #[cfg(feature = "rhai")] | ||||
| #[test] | ||||
| fn test_rhai_functions_registered() { | ||||
|     use rhai::Engine; | ||||
|     use sal_kubernetes::rhai::register_kubernetes_module; | ||||
|  | ||||
|     let mut engine = Engine::new(); | ||||
|     register_kubernetes_module(&mut engine).unwrap(); | ||||
|  | ||||
|     // Test that functions are registered by checking if they exist in the engine | ||||
|     // We can't actually call async functions without a runtime, so we just verify registration | ||||
|  | ||||
|     // Check that the main functions are registered by looking for them in the engine | ||||
|     let function_names = vec![ | ||||
|         "kubernetes_manager_new", | ||||
|         "pods_list", | ||||
|         "services_list", | ||||
|         "deployments_list", | ||||
|         "delete", | ||||
|         "namespace_create", | ||||
|         "namespace_exists", | ||||
|     ]; | ||||
|  | ||||
|     for function_name in function_names { | ||||
|         // Try to parse a script that references the function | ||||
|         // This will succeed if the function is registered, even if we don't call it | ||||
|         let script = format!("let f = {};", function_name); | ||||
|         let result = engine.compile(&script); | ||||
|         assert!( | ||||
|             result.is_ok(), | ||||
|             "Function '{}' should be registered in the engine", | ||||
|             function_name | ||||
|         ); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_namespace_validation() { | ||||
|     // Test valid namespace names | ||||
|     let valid_names = vec!["default", "kube-system", "my-app", "test123"]; | ||||
|     for name in valid_names { | ||||
|         assert!(!name.is_empty()); | ||||
|         assert!(name.chars().all(|c| c.is_alphanumeric() || c == '-')); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_resource_name_patterns() { | ||||
|     use regex::Regex; | ||||
|  | ||||
|     // Test common patterns that might be used with the delete function | ||||
|     let patterns = vec![ | ||||
|         r"test-.*",    // Match anything starting with "test-" | ||||
|         r".*-temp$",   // Match anything ending with "-temp" | ||||
|         r"^pod-\d+$",  // Match "pod-" followed by digits | ||||
|         r"app-[a-z]+", // Match "app-" followed by lowercase letters | ||||
|     ]; | ||||
|  | ||||
|     for pattern in patterns { | ||||
|         let regex = Regex::new(pattern); | ||||
|         assert!(regex.is_ok(), "Pattern '{}' should be valid", pattern); | ||||
|  | ||||
|         let regex = regex.unwrap(); | ||||
|  | ||||
|         // Test some example matches based on the pattern | ||||
|         match pattern { | ||||
|             r"test-.*" => { | ||||
|                 assert!(regex.is_match("test-pod")); | ||||
|                 assert!(regex.is_match("test-service")); | ||||
|                 assert!(!regex.is_match("prod-pod")); | ||||
|             } | ||||
|             r".*-temp$" => { | ||||
|                 assert!(regex.is_match("my-pod-temp")); | ||||
|                 assert!(regex.is_match("service-temp")); | ||||
|                 assert!(!regex.is_match("temp-pod")); | ||||
|             } | ||||
|             r"^pod-\d+$" => { | ||||
|                 assert!(regex.is_match("pod-123")); | ||||
|                 assert!(regex.is_match("pod-1")); | ||||
|                 assert!(!regex.is_match("pod-abc")); | ||||
|                 assert!(!regex.is_match("service-123")); | ||||
|             } | ||||
|             r"app-[a-z]+" => { | ||||
|                 assert!(regex.is_match("app-frontend")); | ||||
|                 assert!(regex.is_match("app-backend")); | ||||
|                 assert!(!regex.is_match("app-123")); | ||||
|                 assert!(!regex.is_match("service-frontend")); | ||||
|             } | ||||
|             _ => {} | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_invalid_regex_patterns() { | ||||
|     use regex::Regex; | ||||
|  | ||||
|     // Test invalid regex patterns that should fail | ||||
|     let invalid_patterns = vec![ | ||||
|         "[invalid",   // Unclosed bracket | ||||
|         "*invalid",   // Invalid quantifier | ||||
|         "(?invalid)", // Invalid group | ||||
|         "\\",         // Incomplete escape | ||||
|     ]; | ||||
|  | ||||
|     for pattern in invalid_patterns { | ||||
|         let regex = Regex::new(pattern); | ||||
|         assert!(regex.is_err(), "Pattern '{}' should be invalid", pattern); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_kubernetes_config_creation() { | ||||
|     use sal_kubernetes::KubernetesConfig; | ||||
|     use std::time::Duration; | ||||
|  | ||||
|     // Test default configuration | ||||
|     let default_config = KubernetesConfig::default(); | ||||
|     assert_eq!(default_config.operation_timeout, Duration::from_secs(30)); | ||||
|     assert_eq!(default_config.max_retries, 3); | ||||
|     assert_eq!(default_config.rate_limit_rps, 10); | ||||
|     assert_eq!(default_config.rate_limit_burst, 20); | ||||
|  | ||||
|     // Test custom configuration | ||||
|     let custom_config = KubernetesConfig::new() | ||||
|         .with_timeout(Duration::from_secs(60)) | ||||
|         .with_retries(5, Duration::from_secs(2), Duration::from_secs(60)) | ||||
|         .with_rate_limit(50, 100); | ||||
|  | ||||
|     assert_eq!(custom_config.operation_timeout, Duration::from_secs(60)); | ||||
|     assert_eq!(custom_config.max_retries, 5); | ||||
|     assert_eq!(custom_config.retry_base_delay, Duration::from_secs(2)); | ||||
|     assert_eq!(custom_config.retry_max_delay, Duration::from_secs(60)); | ||||
|     assert_eq!(custom_config.rate_limit_rps, 50); | ||||
|     assert_eq!(custom_config.rate_limit_burst, 100); | ||||
|  | ||||
|     // Test pre-configured profiles | ||||
|     let high_throughput = KubernetesConfig::high_throughput(); | ||||
|     assert_eq!(high_throughput.rate_limit_rps, 50); | ||||
|     assert_eq!(high_throughput.rate_limit_burst, 100); | ||||
|  | ||||
|     let low_latency = KubernetesConfig::low_latency(); | ||||
|     assert_eq!(low_latency.operation_timeout, Duration::from_secs(10)); | ||||
|     assert_eq!(low_latency.max_retries, 2); | ||||
|  | ||||
|     let development = KubernetesConfig::development(); | ||||
|     assert_eq!(development.operation_timeout, Duration::from_secs(120)); | ||||
|     assert_eq!(development.rate_limit_rps, 100); | ||||
| } | ||||
|  | ||||
| #[test] | ||||
| fn test_retryable_error_detection() { | ||||
|     use kube::Error as KubeError; | ||||
|     use sal_kubernetes::kubernetes_manager::is_retryable_error; | ||||
|  | ||||
|     // Test that the function exists and works with basic error types | ||||
|     // Note: We can't easily create all error types, so we test what we can | ||||
|  | ||||
|     // Test API errors with different status codes | ||||
|     let api_error_500 = KubeError::Api(kube::core::ErrorResponse { | ||||
|         status: "Failure".to_string(), | ||||
|         message: "Internal server error".to_string(), | ||||
|         reason: "InternalError".to_string(), | ||||
|         code: 500, | ||||
|     }); | ||||
|     assert!( | ||||
|         is_retryable_error(&api_error_500), | ||||
|         "500 errors should be retryable" | ||||
|     ); | ||||
|  | ||||
|     let api_error_429 = KubeError::Api(kube::core::ErrorResponse { | ||||
|         status: "Failure".to_string(), | ||||
|         message: "Too many requests".to_string(), | ||||
|         reason: "TooManyRequests".to_string(), | ||||
|         code: 429, | ||||
|     }); | ||||
|     assert!( | ||||
|         is_retryable_error(&api_error_429), | ||||
|         "429 errors should be retryable" | ||||
|     ); | ||||
|  | ||||
|     let api_error_404 = KubeError::Api(kube::core::ErrorResponse { | ||||
|         status: "Failure".to_string(), | ||||
|         message: "Not found".to_string(), | ||||
|         reason: "NotFound".to_string(), | ||||
|         code: 404, | ||||
|     }); | ||||
|     assert!( | ||||
|         !is_retryable_error(&api_error_404), | ||||
|         "404 errors should not be retryable" | ||||
|     ); | ||||
|  | ||||
|     let api_error_400 = KubeError::Api(kube::core::ErrorResponse { | ||||
|         status: "Failure".to_string(), | ||||
|         message: "Bad request".to_string(), | ||||
|         reason: "BadRequest".to_string(), | ||||
|         code: 400, | ||||
|     }); | ||||
|     assert!( | ||||
|         !is_retryable_error(&api_error_400), | ||||
|         "400 errors should not be retryable" | ||||
|     ); | ||||
| } | ||||
| @@ -1,16 +1,7 @@ | ||||
| # SAL Mycelium (`sal-mycelium`) | ||||
| # SAL Mycelium | ||||
|  | ||||
| A Rust client library for interacting with a Mycelium node's HTTP API, with Rhai scripting support. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-mycelium = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including: | ||||
|   | ||||
| @@ -1,16 +1,7 @@ | ||||
| # SAL Network Package (`sal-net`) | ||||
| # SAL Network Package | ||||
|  | ||||
| Network connectivity utilities for TCP, HTTP, and SSH operations. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-net = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| The `sal-net` package provides a comprehensive set of network connectivity tools for the SAL (System Abstraction Layer) ecosystem. It includes utilities for TCP port checking, HTTP/HTTPS connectivity testing, and SSH command execution. | ||||
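|  | ||||
| As a rough illustration of the kind of check the TCP utilities perform (this sketch uses only the standard library, not the `sal-net` API, and the host/port below are placeholders): | ||||
|  | ||||
| ```rust | ||||
| use std::net::{TcpStream, ToSocketAddrs}; | ||||
| use std::time::Duration; | ||||
|  | ||||
| /// Returns true if a TCP connection to `host:port` succeeds within `timeout`. | ||||
| fn tcp_port_open(host: &str, port: u16, timeout: Duration) -> bool { | ||||
|     match (host, port).to_socket_addrs() { | ||||
|         Ok(mut addrs) => addrs | ||||
|             .next() | ||||
|             .map(|addr| TcpStream::connect_timeout(&addr, timeout).is_ok()) | ||||
|             .unwrap_or(false), | ||||
|         Err(_) => false, | ||||
|     } | ||||
| } | ||||
|  | ||||
| fn main() { | ||||
|     // Placeholder endpoint; replace with a host you actually want to probe. | ||||
|     let open = tcp_port_open("localhost", 22, Duration::from_secs(2)); | ||||
|     println!("port 22 open: {open}"); | ||||
| } | ||||
| ``` | ||||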
|   | ||||
| @@ -165,18 +165,9 @@ fn test_mv() { | ||||
|  | ||||
| #[test] | ||||
| fn test_which() { | ||||
|     // Test with a command that should exist on all systems | ||||
|     #[cfg(target_os = "windows")] | ||||
|     let existing_cmd = "cmd"; | ||||
|     #[cfg(not(target_os = "windows"))] | ||||
|     let existing_cmd = "ls"; | ||||
|  | ||||
|     let result = fs::which(existing_cmd); | ||||
|     assert!( | ||||
|         !result.is_empty(), | ||||
|         "Command '{}' should exist", | ||||
|         existing_cmd | ||||
|     ); | ||||
|     // Test with a command that should exist on most systems | ||||
|     let result = fs::which("ls"); | ||||
|     assert!(!result.is_empty()); | ||||
|  | ||||
|     // Test with a command that shouldn't exist | ||||
|     let result = fs::which("nonexistentcommand12345"); | ||||
|   | ||||
| @@ -1,16 +1,7 @@ | ||||
| # SAL PostgreSQL Client (`sal-postgresclient`) | ||||
| # SAL PostgreSQL Client | ||||
|  | ||||
| The SAL PostgreSQL Client (`sal-postgresclient`) is an independent package that provides a simple and efficient way to interact with PostgreSQL databases in Rust. It offers connection management, query execution, a builder pattern for flexible configuration, and PostgreSQL installer functionality using nerdctl. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-postgresclient = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## Features | ||||
|  | ||||
| - **Connection Management**: Automatic connection handling and reconnection | ||||
|   | ||||
| @@ -17,7 +17,7 @@ Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-process = "0.1.0" | ||||
| sal-process = { path = "../process" } | ||||
| ``` | ||||
|  | ||||
| ## Usage | ||||
|   | ||||
| @@ -138,12 +138,7 @@ fn test_run_with_environment_variables() { | ||||
| #[test] | ||||
| fn test_run_with_working_directory() { | ||||
|     // Test that commands run in the current working directory | ||||
|     #[cfg(target_os = "windows")] | ||||
|     let result = run_command("cd").unwrap(); | ||||
|  | ||||
|     #[cfg(not(target_os = "windows"))] | ||||
|     let result = run_command("pwd").unwrap(); | ||||
|  | ||||
|     assert!(result.success); | ||||
|     assert!(!result.stdout.is_empty()); | ||||
| } | ||||
| @@ -205,16 +200,6 @@ fn test_run_script_with_variables() { | ||||
|  | ||||
| #[test] | ||||
| fn test_run_script_with_conditionals() { | ||||
|     #[cfg(target_os = "windows")] | ||||
|     let script = r#" | ||||
|         if "hello"=="hello" ( | ||||
|             echo Condition passed | ||||
|         ) else ( | ||||
|             echo Condition failed | ||||
|         ) | ||||
|     "#; | ||||
|  | ||||
|     #[cfg(not(target_os = "windows"))] | ||||
|     let script = r#" | ||||
|         if [ "hello" = "hello" ]; then | ||||
|             echo "Condition passed" | ||||
| @@ -230,14 +215,6 @@ fn test_run_script_with_conditionals() { | ||||
|  | ||||
| #[test] | ||||
| fn test_run_script_with_loops() { | ||||
|     #[cfg(target_os = "windows")] | ||||
|     let script = r#" | ||||
|         for %%i in (1 2 3) do ( | ||||
|             echo Number: %%i | ||||
|         ) | ||||
|     "#; | ||||
|  | ||||
|     #[cfg(not(target_os = "windows"))] | ||||
|     let script = r#" | ||||
|         for i in 1 2 3; do | ||||
|             echo "Number: $i" | ||||
|   | ||||
| @@ -1,16 +1,7 @@ | ||||
| # SAL Redis Client (`sal-redisclient`) | ||||
| # Redis Client Module | ||||
|  | ||||
| A robust Redis client wrapper for Rust applications that provides connection management, automatic reconnection, and a simple interface for executing Redis commands. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Add this to your `Cargo.toml`: | ||||
|  | ||||
| ```toml | ||||
| [dependencies] | ||||
| sal-redisclient = "0.1.0" | ||||
| ``` | ||||
|  | ||||
| ## Features | ||||
|  | ||||
| - **Singleton Pattern**: Maintains a global Redis client instance, so we don't re-initialize it all the time. | ||||
|   | ||||
							
								
								
									
rfs-client/.gitignore (vendored, new file, 2 lines)
							| @@ -0,0 +1,2 @@ | ||||
| /target/ | ||||
| **/*.rs.bk | ||||
							
								
								
									
rfs-client/Cargo.toml (new file, 26 lines)
							| @@ -0,0 +1,26 @@ | ||||
| [package] | ||||
| name = "sal-rfs-client" | ||||
| version = "0.1.0" | ||||
| edition = "2021" | ||||
| description = "SAL RFS Client - Client library for Remote File System server" | ||||
| repository = "https://git.threefold.info/herocode/sal" | ||||
| license = "Apache-2.0" | ||||
| keywords = ["rfs", "client", "filesystem", "remote"] | ||||
| categories = ["filesystem", "api-bindings"] | ||||
|  | ||||
| [dependencies] | ||||
| openapi = { path = "./openapi" } | ||||
| thiserror.workspace = true | ||||
| url.workspace = true | ||||
| reqwest = { workspace = true, features = ["json", "multipart"] } | ||||
| tokio = { workspace = true, features = ["full"] } | ||||
| serde = { workspace = true, features = ["derive"] } | ||||
| serde_json.workspace = true | ||||
| log.workspace = true | ||||
| bytes.workspace = true | ||||
| futures.workspace = true | ||||
| rhai.workspace = true | ||||
| lazy_static.workspace = true | ||||
|  | ||||
| [dev-dependencies] | ||||
| tempfile = "3.0" | ||||
							
								
								
									
rfs-client/README.md (new file, 195 lines)
							| @@ -0,0 +1,195 @@ | ||||
| # RFS Client | ||||
|  | ||||
| A Rust client library for interacting with the Remote File System (RFS) server. | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| This client library provides a user-friendly wrapper around the OpenAPI-generated client code. It offers high-level abstractions for common operations such as: | ||||
|  | ||||
| - Authentication and session management | ||||
| - File uploads and downloads with progress tracking | ||||
| - Block-level operations and verification | ||||
| - FList creation, monitoring, and management | ||||
| - Timeout configuration and error handling | ||||
|  | ||||
| ## Structure | ||||
|  | ||||
| The library is organized as follows: | ||||
|  | ||||
| - `client.rs`: Main client implementation with methods for interacting with the RFS server | ||||
| - `error.rs`: Error types and handling | ||||
| - `types.rs`: Type definitions and utilities | ||||
|  | ||||
| ## Quick Start | ||||
|  | ||||
| ```rust | ||||
| use rfs_client::RfsClient; | ||||
| use rfs_client::types::{ClientConfig, Credentials}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a client with custom configuration | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 60, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|      | ||||
|     // Authenticate | ||||
|     client.authenticate().await?; | ||||
|     println!("Authentication successful"); | ||||
|      | ||||
|     // Upload a file | ||||
|     let file_path = "/path/to/file.txt"; | ||||
|     let file_hash = client.upload_file(file_path, None).await?; | ||||
|     println!("File uploaded with hash: {}", file_hash); | ||||
|      | ||||
|     // Download the file | ||||
|     let output_path = "/path/to/output.txt"; | ||||
|     client.download_file(&file_hash, output_path, None).await?; | ||||
|     println!("File downloaded to {}", output_path); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ## Feature Examples | ||||
|  | ||||
| ### Authentication | ||||
|  | ||||
| ```rust | ||||
| // Create a client with authentication | ||||
| let config = ClientConfig { | ||||
|     base_url: "http://localhost:8080".to_string(), | ||||
|     credentials: Some(Credentials { | ||||
|         username: "user".to_string(), | ||||
|         password: "password".to_string(), | ||||
|     }), | ||||
|     timeout_seconds: 30, | ||||
| }; | ||||
|  | ||||
| let mut client = RfsClient::new(config); | ||||
|  | ||||
| // Authenticate with the server | ||||
| client.authenticate().await?; | ||||
| if client.is_authenticated() { | ||||
|     println!("Authentication successful"); | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### File Management | ||||
|  | ||||
| ```rust | ||||
| // Upload a file with options | ||||
| let upload_options = UploadOptions { | ||||
|     chunk_size: Some(1024 * 1024), // 1MB chunks | ||||
|     verify: true, | ||||
| }; | ||||
|  | ||||
| let file_hash = client.upload_file("/path/to/file.txt", Some(upload_options)).await?; | ||||
|  | ||||
| // Download the file | ||||
| let download_options = DownloadOptions { | ||||
|     verify: true, | ||||
| }; | ||||
|  | ||||
| client.download_file(&file_hash, "/path/to/output.txt", Some(download_options)).await?; | ||||
| ``` | ||||
|  | ||||
| ### FList Operations | ||||
|  | ||||
| ```rust | ||||
| // Create an FList from a Docker image | ||||
| let options = FlistOptions { | ||||
|     auth: None, | ||||
|     username: None, | ||||
|     password: None, | ||||
|     email: None, | ||||
|     server_address: Some("docker.io".to_string()), | ||||
|     identity_token: None, | ||||
|     registry_token: None, | ||||
| }; | ||||
|  | ||||
| let job_id = client.create_flist("alpine:latest", Some(options)).await?; | ||||
|  | ||||
| // Wait for FList creation with progress tracking | ||||
| let wait_options = WaitOptions { | ||||
|     timeout_seconds: 60, | ||||
|     poll_interval_ms: 1000, | ||||
|     progress_callback: Some(Box::new(|state| { | ||||
|         println!("Progress: FList state is now {:?}", state); | ||||
|     })), | ||||
| }; | ||||
|  | ||||
| let final_state = client.wait_for_flist_creation(&job_id, Some(wait_options)).await?; | ||||
|  | ||||
| // List available FLists | ||||
| let flists = client.list_flists().await?; | ||||
|  | ||||
| // Preview an FList | ||||
| let preview = client.preview_flist("flists/user/alpine-latest.fl").await?; | ||||
|  | ||||
| // Download an FList | ||||
| client.download_flist("flists/user/alpine-latest.fl", "/tmp/downloaded_flist.fl").await?; | ||||
| ``` | ||||
|  | ||||
| ### Block Management | ||||
|  | ||||
| ```rust | ||||
| // List blocks | ||||
| let blocks_list = client.list_blocks(None).await?; | ||||
|  | ||||
| // Check if a block exists | ||||
| let exists = client.check_block("block_hash").await?; | ||||
|  | ||||
| // Get block content | ||||
| let block_content = client.get_block("block_hash").await?; | ||||
|  | ||||
| // Upload a block | ||||
| let block_hash = client.upload_block("file_hash", 0, data).await?; | ||||
|  | ||||
| // Verify blocks | ||||
| let request = VerifyBlocksRequest { blocks: verify_blocks }; | ||||
| let verify_result = client.verify_blocks(request).await?; | ||||
| ``` | ||||
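|  | ||||
| In the snippet above, `verify_blocks` is assumed to already hold the `VerifyBlock` entries to check; the `block_management.rs` example (see Complete Examples below) builds them from the blocks of a previously uploaded file, roughly like this (a sketch that reuses `client`, `blocks`, and `file_hash` from the calls above): | ||||
|  | ||||
| ```rust | ||||
| use openapi::models::VerifyBlock; | ||||
|  | ||||
| // `blocks` comes from client.get_blocks_by_hash(&file_hash).await? | ||||
| let verify_blocks: Vec<VerifyBlock> = blocks | ||||
|     .blocks | ||||
|     .iter() | ||||
|     .map(|block| VerifyBlock { | ||||
|         block_hash: block.hash.clone(), | ||||
|         block_index: block.index, | ||||
|         file_hash: file_hash.clone(), | ||||
|     }) | ||||
|     .collect(); | ||||
| ``` | ||||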
|  | ||||
| ## Complete Examples | ||||
|  | ||||
| For more detailed examples, check the `examples` directory: | ||||
|  | ||||
| - `authentication.rs`: Authentication and health check examples | ||||
| - `file_management.rs`: File upload and download with verification | ||||
| - `flist_operations.rs`: Complete FList creation, monitoring, listing, preview, and download | ||||
| - `block_management.rs`: Block-level operations including listing, verification, and upload | ||||
| - `wait_for_flist.rs`: Advanced FList creation with progress monitoring | ||||
|  | ||||
| Run an example with: | ||||
|  | ||||
| ```bash | ||||
| cargo run --example flist_operations | ||||
| ``` | ||||
|  | ||||
| ## Development | ||||
|  | ||||
| This library wraps the OpenAPI-generated client located in the `openapi` directory. The OpenAPI client was generated using the OpenAPI Generator CLI. | ||||
|  | ||||
| To build the library: | ||||
|  | ||||
| ```bash | ||||
| cargo build | ||||
| ``` | ||||
|  | ||||
| To run tests: | ||||
|  | ||||
| ```bash | ||||
| cargo test | ||||
| ``` | ||||
|  | ||||
| ## License | ||||
|  | ||||
| MIT | ||||
							
								
								
									
rfs-client/examples/authentication.rs (new file, 42 lines)
							| @@ -0,0 +1,42 @@ | ||||
| use sal_rfs_client::RfsClient; | ||||
| use sal_rfs_client::types::{ClientConfig, Credentials}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a client with authentication credentials | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 30, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|     println!("Client created with authentication credentials"); | ||||
|      | ||||
|     // Authenticate with the server | ||||
|     client.authenticate().await?; | ||||
|     if client.is_authenticated() { | ||||
|         println!("Authentication successful"); | ||||
|     } else { | ||||
|         println!("Authentication failed"); | ||||
|     } | ||||
|  | ||||
|     // Create a client without authentication | ||||
|     let config_no_auth = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: None, | ||||
|         timeout_seconds: 30, | ||||
|     }; | ||||
|      | ||||
|     let client_no_auth = RfsClient::new(config_no_auth); | ||||
|     println!("Client created without authentication credentials"); | ||||
|      | ||||
|     // Check health endpoint (doesn't require authentication) | ||||
|     let health = client_no_auth.health_check().await?; | ||||
|     println!("Server health: {:?}", health); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
							
								
								
									
rfs-client/examples/block_management.rs (new file, 128 lines)
							| @@ -0,0 +1,128 @@ | ||||
| use sal_rfs_client::RfsClient; | ||||
| use sal_rfs_client::types::{ClientConfig, Credentials}; | ||||
| use openapi::models::{VerifyBlock, VerifyBlocksRequest}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a client with authentication | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 60, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|      | ||||
|     // Authenticate with the server | ||||
|     client.authenticate().await?; | ||||
|     println!("Authentication successful"); | ||||
|      | ||||
|     // Create a test file to upload for block testing | ||||
|     let test_file_path = "/tmp/block_test.txt"; | ||||
|     let test_content = "This is a test file for RFS client block management"; | ||||
|     std::fs::write(test_file_path, test_content)?; | ||||
|     println!("Created test file at {}", test_file_path); | ||||
|      | ||||
|     // Upload the file to get blocks | ||||
|     println!("Uploading file to get blocks..."); | ||||
|     let file_hash = client.upload_file(test_file_path, None).await?; | ||||
|     println!("File uploaded with hash: {}", file_hash); | ||||
|      | ||||
|     // Get blocks by file hash | ||||
|     println!("Getting blocks for file hash: {}", file_hash); | ||||
|     let blocks = client.get_blocks_by_hash(&file_hash).await?; | ||||
|     println!("Found {} blocks for the file", blocks.blocks.len()); | ||||
|      | ||||
|     // Print block information | ||||
|     for (i, block_data) in blocks.blocks.iter().enumerate() { | ||||
|         println!("Block {}: Hash={}, Index={}", i, block_data.hash, block_data.index); | ||||
|     } | ||||
|      | ||||
|     // Verify blocks with complete information | ||||
|     println!("Verifying blocks..."); | ||||
|      | ||||
|     // Create a list of VerifyBlock objects with complete information | ||||
|     let verify_blocks = blocks.blocks.iter().map(|block| { | ||||
|         VerifyBlock { | ||||
|             block_hash: block.hash.clone(), | ||||
|             block_index: block.index, | ||||
|             file_hash: file_hash.clone(), // Using the actual file hash | ||||
|         } | ||||
|     }).collect::<Vec<_>>(); | ||||
|  | ||||
|     // Create the request with the complete block information | ||||
|     for block in verify_blocks.iter() { | ||||
|         println!("Block: {}", block.block_hash); | ||||
|         println!("Block index: {}", block.block_index); | ||||
|         println!("File hash: {}", block.file_hash); | ||||
|     } | ||||
|     let request = VerifyBlocksRequest { blocks: verify_blocks }; | ||||
|      | ||||
|     // Send the verification request | ||||
|     let verify_result = client.verify_blocks(request).await?; | ||||
|     println!("Verification result: {} missing blocks", verify_result.missing.len()); | ||||
|     for block in verify_result.missing.iter() { | ||||
|         println!("Missing block: {}", block); | ||||
|     } | ||||
|      | ||||
|     // List blocks (list_blocks_handler) | ||||
|     println!("\n1. Listing all blocks with pagination..."); | ||||
|     let blocks_list = client.list_blocks(None).await?; | ||||
|     println!("Server has {} blocks in total", blocks_list.len()); | ||||
|     if !blocks_list.is_empty() { | ||||
|         let first_few = blocks_list.iter().take(3) | ||||
|             .map(|s| s.as_str()) | ||||
|             .collect::<Vec<_>>() | ||||
|             .join(", "); | ||||
|         println!("First few blocks: {}", first_few); | ||||
|     } | ||||
|      | ||||
|     // Check if a block exists (check_block_handler) | ||||
|     if !blocks.blocks.is_empty() { | ||||
|         let block_to_check = &blocks.blocks[0].hash; | ||||
|         println!("\n2. Checking if block exists: {}", block_to_check); | ||||
|         let exists = client.check_block(block_to_check).await?; | ||||
|         println!("Block exists: {}", exists); | ||||
|     } | ||||
|      | ||||
|     // Get block downloads statistics (get_block_downloads_handler) | ||||
|     if !blocks.blocks.is_empty() { | ||||
|         let block_to_check = &blocks.blocks[0].hash; | ||||
|         println!("\n3. Getting download statistics for block: {}", block_to_check); | ||||
|         let downloads = client.get_block_downloads(block_to_check).await?; | ||||
|         println!("Block has been downloaded {} times", downloads.downloads_count); | ||||
|     } | ||||
|      | ||||
|     // Get a specific block content (get_block_handler) | ||||
|     if !blocks.blocks.is_empty() { | ||||
|         let block_to_get = &blocks.blocks[0].hash; | ||||
|         println!("\n4. Getting content for block: {}", block_to_get); | ||||
|         let block_content = client.get_block(block_to_get).await?; | ||||
|         println!("Retrieved block with {} bytes", block_content.len()); | ||||
|     } | ||||
|      | ||||
|     // Get user blocks (get_user_blocks_handler) | ||||
|     println!("\n5. Listing user blocks..."); | ||||
|     let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?; | ||||
|     println!("User has {} blocks (showing page 1 with 10 per page)", user_blocks.total); | ||||
|     for block in user_blocks.blocks.iter().take(3) { | ||||
|         println!("  - Block: {}, Size: {}", block.hash, block.size); | ||||
|     } | ||||
|      | ||||
|     // Upload a block (upload_block_handler) | ||||
|     println!("\n6. Uploading a new test block..."); | ||||
|     let test_block_data = b"This is test block data for direct block upload"; | ||||
|     let new_file_hash = "test_file_hash_for_block_upload"; | ||||
|     let block_index = 0; | ||||
|     let block_hash = client.upload_block(new_file_hash, block_index, test_block_data.to_vec()).await?; | ||||
|     println!("Uploaded block with hash: {}", block_hash); | ||||
|      | ||||
|     // Clean up | ||||
|     std::fs::remove_file(test_file_path)?; | ||||
|     println!("Test file cleaned up"); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
							
								
								
									
rfs-client/examples/file_management.rs (new file, 64 lines)
							| @@ -0,0 +1,64 @@ | ||||
| use sal_rfs_client::RfsClient; | ||||
| use sal_rfs_client::types::{ClientConfig, Credentials, UploadOptions, DownloadOptions}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a client with authentication | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 60, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|      | ||||
|     // Authenticate with the server | ||||
|     client.authenticate().await?; | ||||
|     println!("Authentication successful"); | ||||
|      | ||||
|     // Create a test file to upload | ||||
|     let test_file_path = "/tmp/test_upload.txt"; | ||||
|     std::fs::write(test_file_path, "This is a test file for RFS client upload")?; | ||||
|     println!("Created test file at {}", test_file_path); | ||||
|      | ||||
|     // Upload the file with options | ||||
|     println!("Uploading file..."); | ||||
|     let upload_options = UploadOptions { | ||||
|         chunk_size: Some(1024 * 1024), // 1MB chunks | ||||
|         verify: true, | ||||
|     }; | ||||
|      | ||||
|     let file_hash = client.upload_file(test_file_path, Some(upload_options)).await?; | ||||
|     println!("File uploaded with hash: {}", file_hash); | ||||
|      | ||||
|     // Download the file | ||||
|     let download_path = "/tmp/test_download.txt"; | ||||
|     println!("Downloading file to {}...", download_path); | ||||
|      | ||||
|     let download_options = DownloadOptions { | ||||
|         verify: true, | ||||
|     }; | ||||
|      | ||||
|     client.download_file(&file_hash, download_path, Some(download_options)).await?; | ||||
|     println!("File downloaded to {}", download_path); | ||||
|      | ||||
|     // Verify the downloaded file matches the original | ||||
|     let original_content = std::fs::read_to_string(test_file_path)?; | ||||
|     let downloaded_content = std::fs::read_to_string(download_path)?; | ||||
|      | ||||
|     if original_content == downloaded_content { | ||||
|         println!("File contents match! Download successful."); | ||||
|     } else { | ||||
|         println!("ERROR: File contents do not match!"); | ||||
|     } | ||||
|      | ||||
|     // Clean up test files | ||||
|     std::fs::remove_file(test_file_path)?; | ||||
|     std::fs::remove_file(download_path)?; | ||||
|     println!("Test files cleaned up"); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
							
								
								
									
170  rfs-client/examples/flist_operations.rs  Normal file
							| @@ -0,0 +1,170 @@ | ||||
| use sal_rfs_client::RfsClient; | ||||
| use sal_rfs_client::types::{ClientConfig, Credentials, FlistOptions, WaitOptions}; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     let parent_dir = "flists"; | ||||
|     // Create a client with authentication | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 60, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|      | ||||
|     // Authenticate with the server | ||||
|     client.authenticate().await?; | ||||
|     println!("Authentication successful"); | ||||
|      | ||||
|     println!("\n1. CREATE FLIST - Creating an FList from a Docker image"); | ||||
|     let image_name = "alpine:latest"; | ||||
|     println!("Creating FList for image: {}", image_name); | ||||
|      | ||||
|     // Use FlistOptions to specify additional parameters | ||||
|     let options = FlistOptions { | ||||
|         auth: None, | ||||
|         username: None, | ||||
|         password: None, | ||||
|         email: None, | ||||
|         server_address: Some("docker.io".to_string()), | ||||
|         identity_token: None, | ||||
|         registry_token: None, | ||||
|     }; | ||||
|      | ||||
|     // Create the FList and handle potential conflict error | ||||
|     let job_id = match client.create_flist(&image_name, Some(options)).await { | ||||
|         Ok(id) => { | ||||
|             println!("FList creation started with job ID: {}", id); | ||||
|             Some(id) | ||||
|         }, | ||||
|         Err(e) => { | ||||
|             if e.to_string().contains("Conflict") { | ||||
|                 println!("FList already exists"); | ||||
|                 None | ||||
|             } else { | ||||
|                 return Err(e.into()); | ||||
|             } | ||||
|         } | ||||
|     }; | ||||
|      | ||||
|     // 2. Check FList state if we have a job ID | ||||
|     if let Some(job_id) = &job_id { | ||||
|         println!("\n2. GET FLIST STATE - Checking FList creation state"); | ||||
|         let state = client.get_flist_state(job_id).await?; | ||||
|         println!("Current FList state: {:?}", state.flist_state); | ||||
|          | ||||
|         // 3. Wait for FList creation with progress reporting | ||||
|         println!("\n3. WAIT FOR FLIST CREATION - Waiting for FList to be created with progress reporting"); | ||||
|         let wait_options = WaitOptions { | ||||
|             timeout_seconds: 60,  // Shorter timeout for the example | ||||
|             poll_interval_ms: 1000, | ||||
|             progress_callback: Some(Box::new(|state| { | ||||
|                 println!("Progress: FList state is now {:?}", state); | ||||
|                 // No return value needed (returns unit type) | ||||
|             })), | ||||
|         }; | ||||
|          | ||||
|         // Wait for the FList to be created (with a timeout) | ||||
|         match client.wait_for_flist_creation(job_id, Some(wait_options)).await { | ||||
|             Ok(final_state) => { | ||||
|                 println!("FList creation completed with state: {:?}", final_state); | ||||
|             }, | ||||
|             Err(e) => { | ||||
|                 println!("Error waiting for FList creation: {}", e); | ||||
|                 // Continue with the example even if waiting fails | ||||
|             } | ||||
|         }; | ||||
|     } | ||||
|      | ||||
|     // 4. List all available FLists | ||||
|     println!("\n4. LIST FLISTS - Listing all available FLists"); | ||||
|      | ||||
|     // Variable to store the FList path for preview and download | ||||
|     let mut flist_path_for_preview: Option<String> = None; | ||||
|      | ||||
|     match client.list_flists().await { | ||||
|         Ok(flists) => { | ||||
|             println!("Found {} FList categories", flists.len()); | ||||
|              | ||||
|             for (category, files) in &flists { | ||||
|                 println!("Category: {}", category); | ||||
|                 for file in files.iter().take(2) { // Show only first 2 files per category | ||||
|                     println!("  - {} (size: {} bytes)", file.name, file.size); | ||||
|                      | ||||
|                     // Save the first FList path for preview | ||||
|                     if flist_path_for_preview.is_none() { | ||||
|                         let path = format!("{}/{}/{}", parent_dir, category, file.name); | ||||
|                         flist_path_for_preview = Some(path); | ||||
|                     } | ||||
|                 } | ||||
|                 if files.len() > 2 { | ||||
|                     println!("  - ... and {} more files", files.len() - 2); | ||||
|                 } | ||||
|             } | ||||
|              | ||||
|             // 5. Preview an FList if we found one | ||||
|             if let Some(ref flist_path) = flist_path_for_preview { | ||||
|                 println!("\n5. PREVIEW FLIST - Previewing FList: {}", flist_path); | ||||
|                 match client.preview_flist(flist_path).await { | ||||
|                     Ok(preview) => { | ||||
|                         println!("FList preview for {}:", flist_path); | ||||
|                         println!("  - Checksum: {}", preview.checksum); | ||||
|                         println!("  - Metadata: {}", preview.metadata); | ||||
|                          | ||||
|                         // Display content (list of strings) | ||||
|                         if !preview.content.is_empty() { | ||||
|                             println!("  - Content entries:"); | ||||
|                             for (i, entry) in preview.content.iter().enumerate().take(5) { | ||||
|                                 println!("    {}. {}", i+1, entry); | ||||
|                             } | ||||
|                             if preview.content.len() > 5 { | ||||
|                                 println!("    ... and {} more entries", preview.content.len() - 5); | ||||
|                             } | ||||
|                         } | ||||
|                     }, | ||||
|                     Err(e) => println!("Error previewing FList: {}", e), | ||||
|                 } | ||||
|             } else { | ||||
|                 println!("No FLists available for preview"); | ||||
|             } | ||||
|         }, | ||||
|         Err(e) => println!("Error listing FLists: {}", e), | ||||
|     } | ||||
|      | ||||
|     // 6. DOWNLOAD FLIST - Downloading an FList to a local file | ||||
|     if let Some(ref flist_path) = flist_path_for_preview { | ||||
|         println!("\n6. DOWNLOAD FLIST - Downloading FList: {}", flist_path); | ||||
|          | ||||
|         // Create a temporary output path for the downloaded FList | ||||
|         let output_path = "/tmp/downloaded_flist.fl"; | ||||
|          | ||||
|         match client.download_flist(flist_path, output_path).await { | ||||
|             Ok(_) => { | ||||
|                 println!("FList successfully downloaded to {}", output_path); | ||||
|                  | ||||
|                 // Get file size | ||||
|                 match std::fs::metadata(output_path) { | ||||
|                     Ok(metadata) => println!("Downloaded file size: {} bytes", metadata.len()), | ||||
|                     Err(e) => println!("Error getting file metadata: {}", e), | ||||
|                 } | ||||
|             }, | ||||
|             Err(e) => println!("Error downloading FList: {}", e), | ||||
|         } | ||||
|     } else { | ||||
|         println!("\n6. DOWNLOAD FLIST - No FList available for download"); | ||||
|     } | ||||
|      | ||||
|     println!("\nAll FList operations demonstrated:"); | ||||
|     println!("1. create_flist - Create a new FList from a Docker image"); | ||||
|     println!("2. get_flist_state - Check the state of an FList creation job"); | ||||
|     println!("3. wait_for_flist_creation - Wait for an FList to be created with progress reporting"); | ||||
|     println!("4. list_flists - List all available FLists"); | ||||
|     println!("5. preview_flist - Preview the content of an FList"); | ||||
|     println!("6. download_flist - Download an FList to a local file"); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
							
								
								
									
61  rfs-client/examples/wait_for_flist.rs  Normal file
							| @@ -0,0 +1,61 @@ | ||||
| use sal_rfs_client::RfsClient; | ||||
| use sal_rfs_client::types::{ClientConfig, Credentials, WaitOptions}; | ||||
| use openapi::models::FlistState; | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // Create a client with authentication | ||||
|     let config = ClientConfig { | ||||
|         base_url: "http://localhost:8080".to_string(), | ||||
|         credentials: Some(Credentials { | ||||
|             username: "user".to_string(), | ||||
|             password: "password".to_string(), | ||||
|         }), | ||||
|         timeout_seconds: 60, | ||||
|     }; | ||||
|      | ||||
|     let mut client = RfsClient::new(config); | ||||
|      | ||||
|     // Authenticate with the server | ||||
|     client.authenticate().await?; | ||||
|     println!("Authentication successful"); | ||||
|      | ||||
|     // Create an FList from a Docker image | ||||
|     let image_name = "redis:latest"; | ||||
|     println!("Creating FList for image: {}", image_name); | ||||
|      | ||||
|     let job_id = client.create_flist(&image_name, None).await?; | ||||
|     println!("FList creation started with job ID: {}", job_id); | ||||
|      | ||||
|     // Set up options for waiting with progress reporting | ||||
|     let options = WaitOptions { | ||||
|         timeout_seconds: 600,  // 10 minutes timeout | ||||
|         poll_interval_ms: 2000, // Check every 2 seconds | ||||
|         progress_callback: Some(Box::new(|state| { | ||||
|             match state { | ||||
|                 FlistState::FlistStateInProgress(info) => { | ||||
|                     println!("Progress: {:.1}% - {}", info.in_progress.progress, info.in_progress.msg); | ||||
|                 }, | ||||
|                 FlistState::FlistStateStarted(_) => { | ||||
|                     println!("FList creation started..."); | ||||
|                 }, | ||||
|                 FlistState::FlistStateAccepted(_) => { | ||||
|                     println!("FList creation request accepted..."); | ||||
|                 }, | ||||
|                 _ => println!("State: {:?}", state), | ||||
|             } | ||||
|         })), | ||||
|     }; | ||||
|      | ||||
|     // Wait for the FList to be created | ||||
|     println!("Waiting for FList creation to complete..."); | ||||
|      | ||||
|     // Use ? operator to propagate errors properly | ||||
|     let state = client.wait_for_flist_creation(&job_id, Some(options)).await | ||||
|         .map_err(|e| -> Box<dyn std::error::Error> { Box::new(e) })?; | ||||
|      | ||||
|     println!("FList created successfully!"); | ||||
|     println!("Final state: {:?}", state); | ||||
|      | ||||
|     Ok(()) | ||||
| } | ||||
							
								
								
									
1  rfs-client/openapi.json  Normal file
File diff suppressed because one or more lines are too long
							
								
								
									
3  rfs-client/openapi/.gitignore  vendored  Normal file
							| @@ -0,0 +1,3 @@ | ||||
| /target/ | ||||
| **/*.rs.bk | ||||
| Cargo.lock | ||||
							
								
								
									
23  rfs-client/openapi/.openapi-generator-ignore  Normal file
							| @@ -0,0 +1,23 @@ | ||||
| # OpenAPI Generator Ignore | ||||
| # Generated by openapi-generator https://github.com/openapitools/openapi-generator | ||||
|  | ||||
| # Use this file to prevent files from being overwritten by the generator. | ||||
| # The patterns follow closely to .gitignore or .dockerignore. | ||||
|  | ||||
| # As an example, the C# client generator defines ApiClient.cs. | ||||
| # You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: | ||||
| #ApiClient.cs | ||||
|  | ||||
| # You can match any string of characters against a directory, file or extension with a single asterisk (*): | ||||
| #foo/*/qux | ||||
| # The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux | ||||
|  | ||||
| # You can recursively match patterns against a directory, file or extension with a double asterisk (**): | ||||
| #foo/**/qux | ||||
| # This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux | ||||
|  | ||||
| # You can also negate patterns with an exclamation (!). | ||||
| # For example, you can ignore all files in a docs folder with the file extension .md: | ||||
| #docs/*.md | ||||
| # Then explicitly reverse the ignore rule for a single file: | ||||
| #!docs/README.md | ||||
							
								
								
									
125  rfs-client/openapi/.openapi-generator/FILES  Normal file
							| @@ -0,0 +1,125 @@ | ||||
| .gitignore | ||||
| .travis.yml | ||||
| Cargo.toml | ||||
| README.md | ||||
| docs/AuthenticationApi.md | ||||
| docs/BlockDownloadsResponse.md | ||||
| docs/BlockInfo.md | ||||
| docs/BlockManagementApi.md | ||||
| docs/BlockUploadedResponse.md | ||||
| docs/BlocksResponse.md | ||||
| docs/DirListTemplate.md | ||||
| docs/DirLister.md | ||||
| docs/ErrorTemplate.md | ||||
| docs/FileDownloadRequest.md | ||||
| docs/FileInfo.md | ||||
| docs/FileManagementApi.md | ||||
| docs/FileUploadResponse.md | ||||
| docs/FlistBody.md | ||||
| docs/FlistManagementApi.md | ||||
| docs/FlistState.md | ||||
| docs/FlistStateAccepted.md | ||||
| docs/FlistStateCreated.md | ||||
| docs/FlistStateInProgress.md | ||||
| docs/FlistStateInfo.md | ||||
| docs/FlistStateResponse.md | ||||
| docs/FlistStateStarted.md | ||||
| docs/HealthResponse.md | ||||
| docs/Job.md | ||||
| docs/ListBlocksParams.md | ||||
| docs/ListBlocksResponse.md | ||||
| docs/PreviewResponse.md | ||||
| docs/ResponseError.md | ||||
| docs/ResponseErrorBadRequest.md | ||||
| docs/ResponseErrorConflict.md | ||||
| docs/ResponseErrorForbidden.md | ||||
| docs/ResponseErrorNotFound.md | ||||
| docs/ResponseErrorTemplateError.md | ||||
| docs/ResponseErrorUnauthorized.md | ||||
| docs/ResponseResult.md | ||||
| docs/ResponseResultBlockUploaded.md | ||||
| docs/ResponseResultDirTemplate.md | ||||
| docs/ResponseResultFileUploaded.md | ||||
| docs/ResponseResultFlistCreated.md | ||||
| docs/ResponseResultFlistState.md | ||||
| docs/ResponseResultFlists.md | ||||
| docs/ResponseResultPreviewFlist.md | ||||
| docs/ResponseResultRes.md | ||||
| docs/ResponseResultSignedIn.md | ||||
| docs/SignInBody.md | ||||
| docs/SignInResponse.md | ||||
| docs/SystemApi.md | ||||
| docs/TemplateErr.md | ||||
| docs/TemplateErrBadRequest.md | ||||
| docs/TemplateErrInternalServerError.md | ||||
| docs/TemplateErrNotFound.md | ||||
| docs/UploadBlockParams.md | ||||
| docs/UserBlockInfo.md | ||||
| docs/UserBlocksResponse.md | ||||
| docs/VerifyBlock.md | ||||
| docs/VerifyBlocksRequest.md | ||||
| docs/VerifyBlocksResponse.md | ||||
| docs/WebsiteServingApi.md | ||||
| git_push.sh | ||||
| src/apis/authentication_api.rs | ||||
| src/apis/block_management_api.rs | ||||
| src/apis/configuration.rs | ||||
| src/apis/file_management_api.rs | ||||
| src/apis/flist_management_api.rs | ||||
| src/apis/mod.rs | ||||
| src/apis/system_api.rs | ||||
| src/apis/website_serving_api.rs | ||||
| src/lib.rs | ||||
| src/models/block_downloads_response.rs | ||||
| src/models/block_info.rs | ||||
| src/models/block_uploaded_response.rs | ||||
| src/models/blocks_response.rs | ||||
| src/models/dir_list_template.rs | ||||
| src/models/dir_lister.rs | ||||
| src/models/error_template.rs | ||||
| src/models/file_download_request.rs | ||||
| src/models/file_info.rs | ||||
| src/models/file_upload_response.rs | ||||
| src/models/flist_body.rs | ||||
| src/models/flist_state.rs | ||||
| src/models/flist_state_accepted.rs | ||||
| src/models/flist_state_created.rs | ||||
| src/models/flist_state_in_progress.rs | ||||
| src/models/flist_state_info.rs | ||||
| src/models/flist_state_response.rs | ||||
| src/models/flist_state_started.rs | ||||
| src/models/health_response.rs | ||||
| src/models/job.rs | ||||
| src/models/list_blocks_params.rs | ||||
| src/models/list_blocks_response.rs | ||||
| src/models/mod.rs | ||||
| src/models/preview_response.rs | ||||
| src/models/response_error.rs | ||||
| src/models/response_error_bad_request.rs | ||||
| src/models/response_error_conflict.rs | ||||
| src/models/response_error_forbidden.rs | ||||
| src/models/response_error_not_found.rs | ||||
| src/models/response_error_template_error.rs | ||||
| src/models/response_error_unauthorized.rs | ||||
| src/models/response_result.rs | ||||
| src/models/response_result_block_uploaded.rs | ||||
| src/models/response_result_dir_template.rs | ||||
| src/models/response_result_file_uploaded.rs | ||||
| src/models/response_result_flist_created.rs | ||||
| src/models/response_result_flist_state.rs | ||||
| src/models/response_result_flists.rs | ||||
| src/models/response_result_preview_flist.rs | ||||
| src/models/response_result_res.rs | ||||
| src/models/response_result_signed_in.rs | ||||
| src/models/sign_in_body.rs | ||||
| src/models/sign_in_response.rs | ||||
| src/models/template_err.rs | ||||
| src/models/template_err_bad_request.rs | ||||
| src/models/template_err_internal_server_error.rs | ||||
| src/models/template_err_not_found.rs | ||||
| src/models/upload_block_params.rs | ||||
| src/models/user_block_info.rs | ||||
| src/models/user_blocks_response.rs | ||||
| src/models/verify_block.rs | ||||
| src/models/verify_blocks_request.rs | ||||
| src/models/verify_blocks_response.rs | ||||
							
								
								
									
1  rfs-client/openapi/.openapi-generator/VERSION  Normal file
							| @@ -0,0 +1 @@ | ||||
| 7.13.0 | ||||
							
								
								
									
1  rfs-client/openapi/.travis.yml  Normal file
							| @@ -0,0 +1 @@ | ||||
| language: rust | ||||
							
								
								
									
15  rfs-client/openapi/Cargo.toml  Normal file
							| @@ -0,0 +1,15 @@ | ||||
| [package] | ||||
| name = "openapi" | ||||
| version = "0.2.0" | ||||
| authors = ["OpenAPI Generator team and contributors"] | ||||
| description = "No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)" | ||||
| license = "" | ||||
| edition = "2021" | ||||
|  | ||||
| [dependencies] | ||||
| serde = { version = "^1.0", features = ["derive"] } | ||||
| serde_with = { version = "^3.8", default-features = false, features = ["base64", "std", "macros"] } | ||||
| serde_json = "^1.0" | ||||
| serde_repr = "^0.1" | ||||
| url = "^2.5" | ||||
| reqwest = { version = "^0.12", default-features = false, features = ["json", "multipart"] } | ||||
							
								
								
									
114  rfs-client/openapi/README.md  Normal file
							| @@ -0,0 +1,114 @@ | ||||
| # Rust API client for openapi | ||||
|  | ||||
| No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) | ||||
|  | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project.  By using the [openapi-spec](https://openapis.org) from a remote server, you can easily generate an API client. | ||||
|  | ||||
| - API version: 0.2.0 | ||||
| - Package version: 0.2.0 | ||||
| - Generator version: 7.13.0 | ||||
| - Build package: `org.openapitools.codegen.languages.RustClientCodegen` | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Put the package under your project folder in a directory named `openapi` and add the following to `Cargo.toml` under `[dependencies]`: | ||||
|  | ||||
| ``` | ||||
| openapi = { path = "./openapi" } | ||||
| ``` | ||||
|  | ||||
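
As a quick check that the generated crate is wired up, a minimal sign-in sketch might look like the following. This is an illustrative sketch only: it assumes tokio as the async runtime (as in the rfs-client examples) and that `SignInBody::new` takes a username and password; confirm the exact fields in `docs/SignInBody.md` and the generated `src/apis/configuration.rs`.

```rust
use openapi::apis::{authentication_api, configuration::Configuration};
use openapi::models::SignInBody;

#[tokio::main] // assumes tokio is the async runtime, as in the rfs-client examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Point the generated client at the RFS server instead of the default base path.
    let mut config = Configuration::new();
    config.base_path = "http://localhost:8080".to_string();

    // Constructor arguments (username, password) are assumed; see docs/SignInBody.md.
    let body = SignInBody::new("user".to_string(), "password".to_string());
    let response = authentication_api::sign_in_handler(&config, body).await?;
    println!("signed in: {:?}", response);
    Ok(())
}
```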
| ## Documentation for API Endpoints | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Class | Method | HTTP request | Description | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| *AuthenticationApi* | [**sign_in_handler**](docs/AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |  | ||||
| *BlockManagementApi* | [**check_block_handler**](docs/BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash. | ||||
| *BlockManagementApi* | [**get_block_downloads_handler**](docs/BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded. | ||||
| *BlockManagementApi* | [**get_block_handler**](docs/BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash. | ||||
| *BlockManagementApi* | [**get_blocks_by_hash_handler**](docs/BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash). | ||||
| *BlockManagementApi* | [**get_user_blocks_handler**](docs/BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user. | ||||
| *BlockManagementApi* | [**list_blocks_handler**](docs/BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes in the server with pagination | ||||
| *BlockManagementApi* | [**upload_block_handler**](docs/BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server. | ||||
| *BlockManagementApi* | [**verify_blocks_handler**](docs/BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server. | ||||
| *FileManagementApi* | [**get_file_handler**](docs/FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from path, with optional custom filename in request body. | ||||
| *FileManagementApi* | [**upload_file_handler**](docs/FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server. | ||||
| *FlistManagementApi* | [**create_flist_handler**](docs/FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |  | ||||
| *FlistManagementApi* | [**get_flist_state_handler**](docs/FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |  | ||||
| *FlistManagementApi* | [**list_flists_handler**](docs/FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |  | ||||
| *FlistManagementApi* | [**preview_flist_handler**](docs/FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |  | ||||
| *FlistManagementApi* | [**serve_flists**](docs/FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem | ||||
| *SystemApi* | [**health_check_handler**](docs/SystemApi.md#health_check_handler) | **GET** /api/v1 |  | ||||
| *WebsiteServingApi* | [**serve_website_handler**](docs/WebsiteServingApi.md#serve_website_handler) | **GET** /api/v1/website/{website_hash}/{path} |  | ||||
|  | ||||
|  | ||||
| ## Documentation For Models | ||||
|  | ||||
|  - [BlockDownloadsResponse](docs/BlockDownloadsResponse.md) | ||||
|  - [BlockInfo](docs/BlockInfo.md) | ||||
|  - [BlockUploadedResponse](docs/BlockUploadedResponse.md) | ||||
|  - [BlocksResponse](docs/BlocksResponse.md) | ||||
|  - [DirListTemplate](docs/DirListTemplate.md) | ||||
|  - [DirLister](docs/DirLister.md) | ||||
|  - [ErrorTemplate](docs/ErrorTemplate.md) | ||||
|  - [FileDownloadRequest](docs/FileDownloadRequest.md) | ||||
|  - [FileInfo](docs/FileInfo.md) | ||||
|  - [FileUploadResponse](docs/FileUploadResponse.md) | ||||
|  - [FlistBody](docs/FlistBody.md) | ||||
|  - [FlistState](docs/FlistState.md) | ||||
|  - [FlistStateAccepted](docs/FlistStateAccepted.md) | ||||
|  - [FlistStateCreated](docs/FlistStateCreated.md) | ||||
|  - [FlistStateInProgress](docs/FlistStateInProgress.md) | ||||
|  - [FlistStateInfo](docs/FlistStateInfo.md) | ||||
|  - [FlistStateResponse](docs/FlistStateResponse.md) | ||||
|  - [FlistStateStarted](docs/FlistStateStarted.md) | ||||
|  - [HealthResponse](docs/HealthResponse.md) | ||||
|  - [Job](docs/Job.md) | ||||
|  - [ListBlocksParams](docs/ListBlocksParams.md) | ||||
|  - [ListBlocksResponse](docs/ListBlocksResponse.md) | ||||
|  - [PreviewResponse](docs/PreviewResponse.md) | ||||
|  - [ResponseError](docs/ResponseError.md) | ||||
|  - [ResponseErrorBadRequest](docs/ResponseErrorBadRequest.md) | ||||
|  - [ResponseErrorConflict](docs/ResponseErrorConflict.md) | ||||
|  - [ResponseErrorForbidden](docs/ResponseErrorForbidden.md) | ||||
|  - [ResponseErrorNotFound](docs/ResponseErrorNotFound.md) | ||||
|  - [ResponseErrorTemplateError](docs/ResponseErrorTemplateError.md) | ||||
|  - [ResponseErrorUnauthorized](docs/ResponseErrorUnauthorized.md) | ||||
|  - [ResponseResult](docs/ResponseResult.md) | ||||
|  - [ResponseResultBlockUploaded](docs/ResponseResultBlockUploaded.md) | ||||
|  - [ResponseResultDirTemplate](docs/ResponseResultDirTemplate.md) | ||||
|  - [ResponseResultFileUploaded](docs/ResponseResultFileUploaded.md) | ||||
|  - [ResponseResultFlistCreated](docs/ResponseResultFlistCreated.md) | ||||
|  - [ResponseResultFlistState](docs/ResponseResultFlistState.md) | ||||
|  - [ResponseResultFlists](docs/ResponseResultFlists.md) | ||||
|  - [ResponseResultPreviewFlist](docs/ResponseResultPreviewFlist.md) | ||||
|  - [ResponseResultRes](docs/ResponseResultRes.md) | ||||
|  - [ResponseResultSignedIn](docs/ResponseResultSignedIn.md) | ||||
|  - [SignInBody](docs/SignInBody.md) | ||||
|  - [SignInResponse](docs/SignInResponse.md) | ||||
|  - [TemplateErr](docs/TemplateErr.md) | ||||
|  - [TemplateErrBadRequest](docs/TemplateErrBadRequest.md) | ||||
|  - [TemplateErrInternalServerError](docs/TemplateErrInternalServerError.md) | ||||
|  - [TemplateErrNotFound](docs/TemplateErrNotFound.md) | ||||
|  - [UploadBlockParams](docs/UploadBlockParams.md) | ||||
|  - [UserBlockInfo](docs/UserBlockInfo.md) | ||||
|  - [UserBlocksResponse](docs/UserBlocksResponse.md) | ||||
|  - [VerifyBlock](docs/VerifyBlock.md) | ||||
|  - [VerifyBlocksRequest](docs/VerifyBlocksRequest.md) | ||||
|  - [VerifyBlocksResponse](docs/VerifyBlocksResponse.md) | ||||
|  | ||||
|  | ||||
| To get access to the crate's generated documentation, use: | ||||
|  | ||||
| ``` | ||||
| cargo doc --open | ||||
| ``` | ||||
|  | ||||
| ## Author | ||||
|  | ||||
|  | ||||
|  | ||||
							
								
								
									
37  rfs-client/openapi/docs/AuthenticationApi.md  Normal file
							| @@ -0,0 +1,37 @@ | ||||
| # \AuthenticationApi | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Method | HTTP request | Description | ||||
| ------------- | ------------- | ------------- | ||||
| [**sign_in_handler**](AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |  | ||||
|  | ||||
|  | ||||
|  | ||||
| ## sign_in_handler | ||||
|  | ||||
| > models::SignInResponse sign_in_handler(sign_in_body) | ||||
|  | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **sign_in_body** | [**SignInBody**](SignInBody.md) |  | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::SignInResponse**](SignInResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/json | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
							
								
								
									
14  rfs-client/openapi/docs/Block.md  Normal file
							| @@ -0,0 +1,14 @@ | ||||
| # Block | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **data** | [**std::path::PathBuf**](std::path::PathBuf.md) |  |  | ||||
| **hash** | **String** |  |  | ||||
| **index** | **i64** |  |  | ||||
| **size** | **i32** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
13  rfs-client/openapi/docs/BlockDownloadsResponse.md  Normal file
							| @@ -0,0 +1,13 @@ | ||||
| # BlockDownloadsResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **block_hash** | **String** | Block hash |  | ||||
| **block_size** | **i64** | Size of the block in bytes |  | ||||
| **downloads_count** | **i64** | Number of times the block has been downloaded |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
12  rfs-client/openapi/docs/BlockInfo.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # BlockInfo | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | Block hash |  | ||||
| **index** | **i64** | Block index within the file |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
250  rfs-client/openapi/docs/BlockManagementApi.md  Normal file
							| @@ -0,0 +1,250 @@ | ||||
| # \BlockManagementApi | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Method | HTTP request | Description | ||||
| ------------- | ------------- | ------------- | ||||
| [**check_block_handler**](BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash. | ||||
| [**get_block_downloads_handler**](BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded. | ||||
| [**get_block_handler**](BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash. | ||||
| [**get_blocks_by_hash_handler**](BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash). | ||||
| [**get_user_blocks_handler**](BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user. | ||||
| [**list_blocks_handler**](BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes in the server with pagination | ||||
| [**upload_block_handler**](BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server. | ||||
| [**verify_blocks_handler**](BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server. | ||||
|  | ||||
|  | ||||
|  | ||||
| ## check_block_handler | ||||
|  | ||||
| > check_block_handler(hash) | ||||
| Checks a block by its hash. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | Block hash | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
|  (empty response body) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## get_block_downloads_handler | ||||
|  | ||||
| > models::BlockDownloadsResponse get_block_downloads_handler(hash) | ||||
| Retrieve the number of times a block has been downloaded. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | Block hash | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::BlockDownloadsResponse**](BlockDownloadsResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## get_block_handler | ||||
|  | ||||
| > std::path::PathBuf get_block_handler(hash) | ||||
| Retrieve a block by its hash. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | Block hash | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**std::path::PathBuf**](std::path::PathBuf.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/octet-stream, application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## get_blocks_by_hash_handler | ||||
|  | ||||
| > models::BlocksResponse get_blocks_by_hash_handler(hash) | ||||
| Retrieve blocks by hash (file hash or block hash). | ||||
|  | ||||
| If the hash is a file hash, returns all blocks with their block index related to that file. If the hash is a block hash, returns the block itself. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | File hash or block hash | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::BlocksResponse**](BlocksResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
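A hedged sketch of calling this endpoint from the generated crate, using the `BlocksResponse`/`BlockInfo` shapes documented in this diff; the exact generated signature should be checked in `src/apis/block_management_api.rs`:

```rust
use openapi::apis::{block_management_api, configuration::Configuration};

// Sketch: for a file hash this lists every block of the file with its index;
// for a block hash the returned list contains just that one block.
async fn print_blocks(
    config: &Configuration,
    hash: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let resp = block_management_api::get_blocks_by_hash_handler(config, hash).await?;
    for block in &resp.blocks {
        println!("block {} at index {}", block.hash, block.index);
    }
    Ok(())
}
```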
|  | ||||
| ## get_user_blocks_handler | ||||
|  | ||||
| > models::UserBlocksResponse get_user_blocks_handler(page, per_page) | ||||
| Retrieve all blocks uploaded by a specific user. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **page** | Option<**i32**> | Page number (1-indexed) |  | | ||||
| **per_page** | Option<**i32**> | Number of items per page |  | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::UserBlocksResponse**](UserBlocksResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| [bearerAuth](../README.md#bearerAuth) | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
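For illustration, a sketch of paging through the caller's own blocks. The field names (`total`, `blocks`, `hash`, `size`) mirror the earlier `get_user_blocks` example in `rfs-client/examples`, and the bearer token is assumed to have been set on the configuration after signing in:

```rust
use openapi::apis::{block_management_api, configuration::Configuration};

// Sketch: list the first page (50 entries) of blocks uploaded by the authenticated user.
async fn list_my_blocks(config: &Configuration) -> Result<(), Box<dyn std::error::Error>> {
    // Assumes config.bearer_access_token was populated from a prior sign-in.
    let page = block_management_api::get_user_blocks_handler(config, Some(1), Some(50)).await?;
    println!("{} blocks in total", page.total);
    for block in &page.blocks {
        println!("{} ({} bytes)", block.hash, block.size);
    }
    Ok(())
}
```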
|  | ||||
| ## list_blocks_handler | ||||
|  | ||||
| > models::ListBlocksResponse list_blocks_handler(page, per_page) | ||||
| List all block hashes in the server with pagination | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **page** | Option<**i32**> | Page number (1-indexed) |  | | ||||
| **per_page** | Option<**i32**> | Number of items per page |  | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::ListBlocksResponse**](ListBlocksResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## upload_block_handler | ||||
|  | ||||
| > models::BlockUploadedResponse upload_block_handler(file_hash, idx, body) | ||||
| Upload a block to the server. | ||||
|  | ||||
| If the block already exists, the server will return a 200 OK response. If the block is new, the server will return a 201 Created response. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **file_hash** | **String** | File hash associated with the block | [required] | | ||||
| **idx** | **i64** | Block index within the file | [required] | | ||||
| **body** | **std::path::PathBuf** | Block data to upload | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::BlockUploadedResponse**](BlockUploadedResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| [bearerAuth](../README.md#bearerAuth) | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/octet-stream | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
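A hedged sketch of uploading a single block with the generated binding. Per the parameter table above, the block data is passed as a `std::path::PathBuf`, and the endpoint requires bearer auth:

```rust
use openapi::apis::{block_management_api, configuration::Configuration};

// Sketch: upload one block of a file. The response shape is the same whether the
// server answered 200 (block already stored) or 201 (newly created).
async fn upload_block(
    config: &Configuration, // assumes bearer_access_token is already set
    file_hash: &str,
    idx: i64,
    block_path: std::path::PathBuf, // path to the block data on disk
) -> Result<(), Box<dyn std::error::Error>> {
    let resp =
        block_management_api::upload_block_handler(config, file_hash, idx, block_path).await?;
    println!("{}: {}", resp.hash, resp.message);
    Ok(())
}
```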
|  | ||||
| ## verify_blocks_handler | ||||
|  | ||||
| > models::VerifyBlocksResponse verify_blocks_handler(verify_blocks_request) | ||||
| Verify if multiple blocks exist on the server. | ||||
|  | ||||
| Returns a list of missing blocks. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **verify_blocks_request** | [**VerifyBlocksRequest**](VerifyBlocksRequest.md) | List of block hashes to verify | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::VerifyBlocksResponse**](VerifyBlocksResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/json | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
							
								
								
									
12  rfs-client/openapi/docs/BlockUploadedResponse.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # BlockUploadedResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **hash** | **String** |  |  | ||||
| **message** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
11  rfs-client/openapi/docs/BlocksResponse.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # BlocksResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **blocks** | [**Vec<models::BlockInfo>**](BlockInfo.md) | List of blocks with their indices |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
12  rfs-client/openapi/docs/DirListTemplate.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # DirListTemplate | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **cur_path** | **String** |  |  | ||||
| **lister** | [**models::DirLister**](DirLister.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
11  rfs-client/openapi/docs/DirLister.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # DirLister | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **files** | [**Vec<models::FileInfo>**](FileInfo.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
13  rfs-client/openapi/docs/ErrorTemplate.md  Normal file
							| @@ -0,0 +1,13 @@ | ||||
| # ErrorTemplate | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **cur_path** | **String** |  |  | ||||
| **err** | [**models::TemplateErr**](TemplateErr.md) |  |  | ||||
| **message** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
12  rfs-client/openapi/docs/File.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # File | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **file_content** | [**std::path::PathBuf**](std::path::PathBuf.md) |  |  | ||||
| **file_hash** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
11  rfs-client/openapi/docs/FileDownloadRequest.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FileDownloadRequest | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **file_name** | **String** | The custom filename to use for download |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
16  rfs-client/openapi/docs/FileInfo.md  Normal file
							| @@ -0,0 +1,16 @@ | ||||
| # FileInfo | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **is_file** | **bool** |  |  | ||||
| **last_modified** | **i64** |  |  | ||||
| **name** | **String** |  |  | ||||
| **path_uri** | **String** |  |  | ||||
| **progress** | **f32** |  |  | ||||
| **size** | **i64** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
							
								
								
									
71  rfs-client/openapi/docs/FileManagementApi.md  Normal file
							| @@ -0,0 +1,71 @@ | ||||
| # \FileManagementApi | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Method | HTTP request | Description | ||||
| ------------- | ------------- | ------------- | ||||
| [**get_file_handler**](FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from path, with optional custom filename in request body. | ||||
| [**upload_file_handler**](FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server. | ||||
|  | ||||
|  | ||||
|  | ||||
| ## get_file_handler | ||||
|  | ||||
| > std::path::PathBuf get_file_handler(hash, file_download_request) | ||||
| Retrieve a file by its hash from path, with optional custom filename in request body. | ||||
|  | ||||
| The file will be reconstructed from its blocks. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **hash** | **String** | File hash | [required] | | ||||
| **file_download_request** | [**FileDownloadRequest**](FileDownloadRequest.md) | Optional custom filename for download | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**std::path::PathBuf**](std::path::PathBuf.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/json | ||||
| - **Accept**: application/octet-stream, application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## upload_file_handler | ||||
|  | ||||
| > models::FileUploadResponse upload_file_handler(body) | ||||
| Upload a file to the server. | ||||
|  | ||||
| The file will be split into blocks and stored in the database. | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **body** | **std::path::PathBuf** | File data to upload | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::FileUploadResponse**](FileUploadResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| [bearerAuth](../README.md#bearerAuth) | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/octet-stream | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
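The two FileManagementApi endpoints above are plain HTTP calls, so they can be exercised without the generated wrappers. Below is a minimal sketch using `reqwest` and `serde_json` directly against the documented routes; it assumes `tokio`, a `reqwest` build with the `json` feature, and placeholder values for the base URL, token, and file paths. The JSON keys (`file_hash`, `message`, `file_name`) come from the FileUploadResponse and FileDownloadRequest tables in this diff.

```rust
// Sketch: upload a file, then fetch it back by hash, using the raw HTTP contract
// documented above. Base URL, token source, and paths are placeholders.
use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let base = "http://localhost";           // "All URIs are relative to http://localhost"
    let token = std::env::var("RFS_TOKEN")?; // upload_file_handler requires bearerAuth
    let client = reqwest::Client::new();

    // POST /api/v1/file — the body is the raw file bytes (application/octet-stream).
    let bytes = std::fs::read("./example.bin")?;
    let upload: serde_json::Value = client
        .post(format!("{base}/api/v1/file"))
        .bearer_auth(&token)
        .header(reqwest::header::CONTENT_TYPE, "application/octet-stream")
        .body(bytes)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    let hash = upload["file_hash"].as_str().unwrap_or_default().to_string();
    println!("uploaded: {hash} ({})", upload["message"]);

    // GET /api/v1/file/{hash} — the optional custom filename travels as a JSON body.
    let downloaded = client
        .get(format!("{base}/api/v1/file/{hash}"))
        .json(&serde_json::json!({ "file_name": "example-copy.bin" }))
        .send()
        .await?
        .error_for_status()?
        .bytes()
        .await?;
    std::fs::write("./example-copy.bin", &downloaded)?;
    Ok(())
}
```

Note that the download carries its filename in a request body even though the method is GET, matching the Content-Type: application/json documented for get_file_handler.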
12  rfs-client/openapi/docs/FileUploadResponse.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # FileUploadResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **file_hash** | **String** | The file hash |  | ||||
| **message** | **String** | Message indicating success |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
18  rfs-client/openapi/docs/FlistBody.md  Normal file
							| @@ -0,0 +1,18 @@ | ||||
| # FlistBody | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **auth** | Option<**String**> |  | [optional] | ||||
| **email** | Option<**String**> |  | [optional] | ||||
| **identity_token** | Option<**String**> |  | [optional] | ||||
| **image_name** | **String** |  |  | ||||
| **password** | Option<**String**> |  | [optional] | ||||
| **registry_token** | Option<**String**> |  | [optional] | ||||
| **server_address** | Option<**String**> |  | [optional] | ||||
| **username** | Option<**String**> |  | [optional] | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
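Only `image_name` is required in this body; the remaining fields are optional registry credentials and metadata, so anonymous pulls can leave them as `None`. A small construction sketch, assuming the generated models are reachable under an `openapi::models` path (a placeholder; substitute the actual crate/module name of the generated client):

```rust
// Sketch: building a create-flist request body. Field names and Option-ness
// mirror the table above; the crate path is hypothetical.
use openapi::models::FlistBody;

fn redis_flist_request() -> FlistBody {
    FlistBody {
        image_name: "redis:latest".to_string(),
        server_address: Some("docker.io".to_string()),
        username: None,
        password: None,
        email: None,
        auth: None,
        identity_token: None,
        registry_token: None,
    }
}
```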
150  rfs-client/openapi/docs/FlistManagementApi.md  Normal file
							| @@ -0,0 +1,150 @@ | ||||
| # \FlistManagementApi | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Method | HTTP request | Description | ||||
| ------------- | ------------- | ------------- | ||||
| [**create_flist_handler**](FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |  | ||||
| [**get_flist_state_handler**](FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |  | ||||
| [**list_flists_handler**](FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |  | ||||
| [**preview_flist_handler**](FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |  | ||||
| [**serve_flists**](FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem | ||||
|  | ||||
|  | ||||
|  | ||||
| ## create_flist_handler | ||||
|  | ||||
| > models::Job create_flist_handler(flist_body) | ||||
|  | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **flist_body** | [**FlistBody**](FlistBody.md) |  | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::Job**](Job.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| [bearerAuth](../README.md#bearerAuth) | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: application/json | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## get_flist_state_handler | ||||
|  | ||||
| > models::FlistStateResponse get_flist_state_handler(job_id) | ||||
|  | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **job_id** | **String** | flist job id | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::FlistStateResponse**](FlistStateResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| [bearerAuth](../README.md#bearerAuth) | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## list_flists_handler | ||||
|  | ||||
| > std::collections::HashMap<String, Vec<models::FileInfo>> list_flists_handler() | ||||
|  | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
| This endpoint does not need any parameter. | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**std::collections::HashMap<String, Vec<models::FileInfo>>**](Vec.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## preview_flist_handler | ||||
|  | ||||
| > models::PreviewResponse preview_flist_handler(flist_path) | ||||
|  | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **flist_path** | **String** | flist file path | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::PreviewResponse**](PreviewResponse.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
| ## serve_flists | ||||
|  | ||||
| > std::path::PathBuf serve_flists(path) | ||||
| Serve flist files from the server's filesystem | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **path** | **String** | Path to the flist file or directory to serve | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**std::path::PathBuf**](std::path::PathBuf.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/octet-stream, application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
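Taken together, these endpoints form a create-then-poll workflow: POST an FlistBody, receive a Job id, then query the job until its state is no longer "in progress". A sketch with plain `reqwest`/`serde_json` (assuming `tokio`, and that the wire keys match the documented field names `id`, `flist_state`, and `in_progress`); the base URL and token are placeholders.

```rust
// Sketch: create an flist and poll its job state via the routes documented above.
use std::time::Duration;

async fn build_flist(base: &str, token: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();

    // POST /api/v1/fl with an FlistBody payload; returns a Job { id }.
    let job: serde_json::Value = client
        .post(format!("{base}/api/v1/fl"))
        .bearer_auth(token)
        .json(&serde_json::json!({ "image_name": "redis:latest" }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    let job_id = job["id"].as_str().unwrap_or_default().to_string();

    // GET /api/v1/fl/{job_id} until the reported state is no longer "in progress".
    loop {
        let state: serde_json::Value = client
            .get(format!("{base}/api/v1/fl/{job_id}"))
            .bearer_auth(token)
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?;
        println!("flist_state: {}", state["flist_state"]);
        if state["flist_state"].get("in_progress").is_none() {
            break;
        }
        tokio::time::sleep(Duration::from_secs(2)).await;
    }
    Ok(())
}
```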
37  rfs-client/openapi/docs/FlistServingApi.md  Normal file
							| @@ -0,0 +1,37 @@ | ||||
| # \FlistServingApi | ||||
|  | ||||
| All URIs are relative to *http://localhost* | ||||
|  | ||||
| Method | HTTP request | Description | ||||
| ------------- | ------------- | ------------- | ||||
| [**serve_flists**](FlistServingApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem | ||||
|  | ||||
|  | ||||
|  | ||||
| ## serve_flists | ||||
|  | ||||
| > models::ResponseResult serve_flists(path) | ||||
| Serve flist files from the server's filesystem | ||||
|  | ||||
| ### Parameters | ||||
|  | ||||
|  | ||||
| Name | Type | Description  | Required | Notes | ||||
| ------------- | ------------- | ------------- | ------------- | ------------- | ||||
| **path** | **String** | Path to the flist file or directory to serve | [required] | | ||||
|  | ||||
| ### Return type | ||||
|  | ||||
| [**models::ResponseResult**](ResponseResult.md) | ||||
|  | ||||
| ### Authorization | ||||
|  | ||||
| No authorization required | ||||
|  | ||||
| ### HTTP request headers | ||||
|  | ||||
| - **Content-Type**: Not defined | ||||
| - **Accept**: application/json | ||||
|  | ||||
| [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) | ||||
|  | ||||
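This is the same route as `serve_flists` in FlistManagementApi.md above, but documented here as returning a JSON `ResponseResult` (Accept: application/json) rather than raw bytes. A minimal fetch sketch; the base URL and flist path are placeholders:

```rust
// Sketch: fetch an flist path and decode the JSON ResponseResult envelope
// into a generic serde_json::Value. No authorization is required.
async fn serve_flists_json(
    base: &str,
    path: &str,
) -> Result<serde_json::Value, reqwest::Error> {
    reqwest::get(format!("{base}/{path}"))
        .await?
        .error_for_status()?
        .json()
        .await
}
```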
15  rfs-client/openapi/docs/FlistState.md  Normal file
							| @@ -0,0 +1,15 @@ | ||||
| # FlistState | ||||
|  | ||||
| ## Enum Variants | ||||
|  | ||||
| | Name | Description | | ||||
| |---- | -----| | ||||
| | FlistStateAccepted |  | | ||||
| | FlistStateCreated |  | | ||||
| | FlistStateInProgress |  | | ||||
| | FlistStateStarted |  | | ||||
| | String |  | | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
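On the Rust side this union typically surfaces as an enum with one tuple variant per listed type. The sketch below assumes that usual generated shape and a hypothetical `openapi` crate name; the inner field names come from the per-variant models documented next (`accepted`, `created`, `in_progress`, `started`), so adjust to the actual generated code if it differs:

```rust
// Sketch: summarizing an FlistState value. Variant and field names follow the
// docs in this diff; the exact generated enum layout is an assumption.
use openapi::models;

fn describe(state: &models::FlistState) -> String {
    match state {
        models::FlistState::FlistStateAccepted(s) => format!("accepted: {}", s.accepted),
        models::FlistState::FlistStateCreated(s) => format!("created: {}", s.created),
        models::FlistState::FlistStateInProgress(s) => {
            format!("in progress: {} ({})", s.in_progress.msg, s.in_progress.progress)
        }
        models::FlistState::FlistStateStarted(s) => format!("started: {}", s.started),
        models::FlistState::String(s) => s.clone(),
    }
}
```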
11  rfs-client/openapi/docs/FlistStateAccepted.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FlistStateAccepted | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **accepted** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/FlistStateCreated.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FlistStateCreated | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **created** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/FlistStateInProgress.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FlistStateInProgress | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **in_progress** | [**models::FlistStateInfo**](FlistStateInfo.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
12  rfs-client/openapi/docs/FlistStateInfo.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # FlistStateInfo | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **msg** | **String** |  |  | ||||
| **progress** | **f32** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/FlistStateResponse.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FlistStateResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **flist_state** | [**models::FlistState**](FlistState.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/FlistStateStarted.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # FlistStateStarted | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **started** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/HealthResponse.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # HealthResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **msg** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/Job.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # Job | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **id** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
12  rfs-client/openapi/docs/ListBlocksParams.md  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| # ListBlocksParams | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **page** | Option<**i32**> | Page number (1-indexed) | [optional][default to 1] | ||||
| **per_page** | Option<**i32**> | Number of items per page | [optional][default to 50] | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
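These fields travel as query parameters, with the server defaulting to `page=1` and `per_page=50` when they are omitted. A sketch of passing them with `reqwest`; the endpoint path is a placeholder, since the block-listing route itself is documented in a file not shown in this excerpt:

```rust
// Sketch: sending ListBlocksParams as query parameters and reading the reply
// as generic JSON. The /api/v1/blocks path is a placeholder.
async fn list_block_hashes(
    base: &str,
    page: Option<i32>,
    per_page: Option<i32>,
) -> Result<serde_json::Value, reqwest::Error> {
    let mut query: Vec<(&str, i32)> = Vec::new();
    if let Some(p) = page {
        query.push(("page", p));
    }
    if let Some(pp) = per_page {
        query.push(("per_page", pp));
    }
    let resp = reqwest::Client::new()
        .get(format!("{base}/api/v1/blocks")) // placeholder path
        .query(&query)
        .send()
        .await?
        .error_for_status()?;
    resp.json().await
}
```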
14  rfs-client/openapi/docs/ListBlocksResponse.md  Normal file
							| @@ -0,0 +1,14 @@ | ||||
| # ListBlocksResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **blocks** | **Vec<String>** | List of block hashes |  | ||||
| **page** | **i32** | Current page number |  | ||||
| **per_page** | **i32** | Number of items per page |  | ||||
| **total** | **i64** | Total number of blocks |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
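Because the response reports `total` alongside `page` and `per_page`, a client can work out how many pages remain. A small self-contained sketch that mirrors the documented fields in a local struct (the generated crate ships its own model; this copy is only for illustration):

```rust
// Sketch: pagination arithmetic over ListBlocksResponse-shaped data.
struct ListBlocksResponse {
    blocks: Vec<String>, // block hashes on the current page
    page: i32,           // current page number (1-indexed)
    per_page: i32,       // items per page
    total: i64,          // total number of blocks
}

fn total_pages(resp: &ListBlocksResponse) -> i64 {
    if resp.per_page <= 0 {
        return 0;
    }
    // Ceiling division: the last page may be partially filled.
    (resp.total + resp.per_page as i64 - 1) / resp.per_page as i64
}

fn has_next_page(resp: &ListBlocksResponse) -> bool {
    !resp.blocks.is_empty() && (resp.page as i64) < total_pages(resp)
}
```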
13  rfs-client/openapi/docs/PreviewResponse.md  Normal file
							| @@ -0,0 +1,13 @@ | ||||
| # PreviewResponse | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **checksum** | **String** |  |  | ||||
| **content** | **Vec<String>** |  |  | ||||
| **metadata** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
17  rfs-client/openapi/docs/ResponseError.md  Normal file
							| @@ -0,0 +1,17 @@ | ||||
| # ResponseError | ||||
|  | ||||
| ## Enum Variants | ||||
|  | ||||
| | Name | Description | | ||||
| |---- | -----| | ||||
| | ResponseErrorBadRequest |  | | ||||
| | ResponseErrorConflict |  | | ||||
| | ResponseErrorForbidden |  | | ||||
| | ResponseErrorNotFound |  | | ||||
| | ResponseErrorTemplateError |  | | ||||
| | ResponseErrorUnauthorized |  | | ||||
| | String |  | | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorBadRequest.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorBadRequest | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **bad_request** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorConflict.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorConflict | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **conflict** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorForbidden.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorForbidden | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **forbidden** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorNotFound.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorNotFound | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **not_found** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorTemplateError.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorTemplateError | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **template_error** | [**models::ErrorTemplate**](ErrorTemplate.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseErrorUnauthorized.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseErrorUnauthorized | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **unauthorized** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
20  rfs-client/openapi/docs/ResponseResult.md  Normal file
							| @@ -0,0 +1,20 @@ | ||||
| # ResponseResult | ||||
|  | ||||
| ## Enum Variants | ||||
|  | ||||
| | Name | Description | | ||||
| |---- | -----| | ||||
| | ResponseResultBlockUploaded |  | | ||||
| | ResponseResultDirTemplate |  | | ||||
| | ResponseResultFileUploaded |  | | ||||
| | ResponseResultFlistCreated |  | | ||||
| | ResponseResultFlistState |  | | ||||
| | ResponseResultFlists |  | | ||||
| | ResponseResultPreviewFlist |  | | ||||
| | ResponseResultRes |  | | ||||
| | ResponseResultSignedIn |  | | ||||
| | String |  | | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultBlockUploaded.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultBlockUploaded | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **block_uploaded** | **String** |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultDirTemplate.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultDirTemplate | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **dir_template** | [**models::DirListTemplate**](DirListTemplate.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultFileUploaded.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultFileUploaded | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **file_uploaded** | [**models::FileUploadResponse**](FileUploadResponse.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultFlistCreated.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultFlistCreated | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **flist_created** | [**models::Job**](Job.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultFlistState.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultFlistState | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **flist_state** | [**models::FlistState**](FlistState.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultFlists.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultFlists | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **flists** | [**std::collections::HashMap<String, Vec<models::FileInfo>>**](Vec.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
11  rfs-client/openapi/docs/ResponseResultPreviewFlist.md  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| # ResponseResultPreviewFlist | ||||
|  | ||||
| ## Properties | ||||
|  | ||||
| Name | Type | Description | Notes | ||||
| ------------ | ------------- | ------------- | ------------- | ||||
| **preview_flist** | [**models::PreviewResponse**](PreviewResponse.md) |  |  | ||||
|  | ||||
| [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) | ||||
|  | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.