// REGION: API // api: orchestrator::Context { cfg: crate::config::types::Config, log: crate::logging::LogOptions } // api: orchestrator::Context::new(cfg: crate::config::types::Config, log: crate::logging::LogOptions) -> Self // api: orchestrator::run(ctx: &Context) -> crate::Result<()> // REGION: API-END // // REGION: RESPONSIBILITIES // - High-level one-shot flow controller: idempotency check, device discovery, // partition planning and application, filesystem creation, mounting, reporting. // - Enforces abort-on-first-error semantics across subsystems. // Non-goals: direct device IO or shelling out; delegates to subsystem modules. // REGION: RESPONSIBILITIES-END // // REGION: EXTENSION_POINTS // ext: pluggable DeviceProvider for discovery (mocking/testing). // ext: dry-run mode (future) to emit planned actions without applying. // ext: hooks before/after each phase for metrics or additional validation. // REGION: EXTENSION_POINTS-END // // REGION: SAFETY // safety: must never proceed to filesystem creation if partition planning/apply failed. // safety: must exit success without changes when idempotency detection indicates provisioned. // safety: must ensure reporting only on overall success (no partial-success report). // REGION: SAFETY-END // // REGION: ERROR_MAPPING // errmap: subsystem errors bubble up as crate::Error::* without stringly-typed loss. // errmap: external tool failures are expected as Error::Tool from util layer. // REGION: ERROR_MAPPING-END // // REGION: TODO // todo: implement orchestration steps in phases with structured logs and timing. // todo: add per-phase tracing spans and outcome summaries. // REGION: TODO-END //! High-level orchestration for zosstorage. //! //! Drives the one-shot provisioning flow: //! - Idempotency detection //! - Device discovery //! - Partition planning and application //! - Filesystem planning and creation //! - Mount planning and application //! 
- Report generation and write use crate::{ types::Config, logging::LogOptions, device::{discover, DeviceFilter, Disk}, idempotency, partition, Error, Result, }; use humantime::format_rfc3339; use regex::Regex; use serde_json::{json, to_value}; use std::fs; use std::time::SystemTime; use tracing::{debug, info, warn}; /// Execution context holding resolved configuration and environment flags. #[derive(Debug, Clone)] pub struct Context { /// Validated configuration. pub cfg: Config, /// Logging options in effect. pub log: LogOptions, /// When true, print detection and planning summary to stdout (JSON). pub show: bool, /// Optional report path override (when provided by CLI --report). pub report_path_override: Option, } impl Context { /// Construct a new context from config and logging options. pub fn new(cfg: Config, log: LogOptions) -> Self { Self { cfg, log, show: false, report_path_override: None, } } /// Enable or disable preview JSON emission to stdout. /// /// When set to true (e.g. via `--show`), orchestrator: /// - Prints a compact JSON summary to stdout /// - Skips empty-disk enforcement to allow non-destructive planning /// /// Returns the updated Context for builder-style chaining. pub fn with_show(mut self, show: bool) -> Self { self.show = show; self } /// Override the report output path used by preview mode. /// /// When provided (e.g. via `--report /path/file.json`), orchestrator: /// - Writes the same JSON summary to the given path /// - Continues to respect `--show` (stdout) when also set /// /// Returns the updated Context for builder-style chaining. pub fn with_report_path(mut self, path: Option) -> Self { self.report_path_override = path; self } } /// Run the one-shot provisioning flow. /// /// Returns Ok(()) on success and also on success-noop when already provisioned. /// Any validation or execution failure aborts with an error. 
pub fn run(ctx: &Context) -> Result<()> { info!("orchestrator: starting run() with topology {:?}", ctx.cfg.topology); // 1) Idempotency pre-flight: if already provisioned, optionally emit summary then exit success. match idempotency::detect_existing_state()? { Some(state) => { info!("orchestrator: already provisioned"); if ctx.show || ctx.report_path_override.is_some() { let now = format_rfc3339(SystemTime::now()).to_string(); let state_json = to_value(&state).map_err(|e| { Error::Report(format!("failed to serialize StateReport: {}", e)) })?; let summary = json!({ "version": "v1", "timestamp": now, "status": "already_provisioned", "state": state_json }); if ctx.show { println!("{}", summary); } if let Some(path) = &ctx.report_path_override { fs::write(path, summary.to_string()) .map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?; info!("orchestrator: wrote idempotency report to {}", path); } } return Ok(()); } None => { debug!("orchestrator: not provisioned; continuing"); } } // 2) Device discovery using compiled filter from config. let filter = build_device_filter(&ctx.cfg)?; let disks = discover(&filter)?; info!("orchestrator: discovered {} eligible disk(s)", disks.len()); // 3) Emptiness enforcement: skip in preview mode (--show/--report) to allow planning output. let preview = ctx.show || ctx.report_path_override.is_some(); if ctx.cfg.partitioning.require_empty_disks && !preview { enforce_empty_disks(&disks)?; info!("orchestrator: all target disks verified empty"); } else if ctx.cfg.partitioning.require_empty_disks && preview { warn!("orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement"); } else { warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement"); } // 4) Partition planning (declarative only; application not yet implemented in this step). 
let plan = partition::plan_partitions(&disks, &ctx.cfg)?; debug!( "orchestrator: partition plan ready (alignment={} MiB, disks={})", plan.alignment_mib, plan.disks.len() ); for dp in &plan.disks { debug!("plan for {}: {} part(s)", dp.disk.path, dp.parts.len()); } // Note: // - Applying partitions, creating filesystems, mounting, and reporting // will be wired in subsequent steps. For now this performs pre-flight // checks and planning to exercise real code paths safely. info!("orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)"); // Optional: emit JSON summary via --show or write via --report if ctx.show || ctx.report_path_override.is_some() { let summary = build_summary_json(&disks, &plan, &ctx.cfg)?; if ctx.show { // Print compact JSON to stdout println!("{}", summary); } if let Some(path) = &ctx.report_path_override { // Best-effort write (non-atomic for now, pending report::write_report implementation) fs::write(path, summary.to_string()).map_err(|e| { Error::Report(format!("failed to write report to {}: {}", path, e)) })?; info!("orchestrator: wrote summary report to {}", path); } } Ok(()) } /// Build a DeviceFilter from the runtime configuration. /// /// Compiles include/exclude regex patterns and carries the minimum-size threshold /// as well as the removable-device policy (allow_removable). /// /// Errors: /// - Returns Error::Validation when a regex pattern is invalid. 
fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
    let mut include = Vec::new();
    let mut exclude = Vec::new();
    // Compile each pattern eagerly so an invalid regex aborts before discovery runs.
    for pat in &cfg.device_selection.include_patterns {
        let re = Regex::new(pat).map_err(|e| {
            Error::Validation(format!("invalid include regex '{}': {}", pat, e))
        })?;
        include.push(re);
    }
    for pat in &cfg.device_selection.exclude_patterns {
        let re = Regex::new(pat).map_err(|e| {
            Error::Validation(format!("invalid exclude regex '{}': {}", pat, e))
        })?;
        exclude.push(re);
    }
    Ok(DeviceFilter {
        include,
        exclude,
        min_size_gib: cfg.device_selection.min_size_gib,
        allow_removable: cfg.device_selection.allow_removable,
    })
}

/// Enforce empty-disk policy for all discovered target disks.
///
/// For each disk:
/// - Uses idempotency::is_empty_disk() to verify no partitions or FS signatures exist
/// - Returns Error::Validation on the first non-empty disk encountered
///
/// This function MUST NOT be called when running in preview mode, as orchestrator
/// skips emptiness enforcement to allow planning on live systems.
fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
    for d in disks {
        let empty = idempotency::is_empty_disk(d)?;
        if !empty {
            return Err(Error::Validation(format!(
                "target disk {} is not empty (partitions or signatures present)",
                d.path
            )));
        }
    }
    Ok(())
}

/// Map a partition role to its stable JSON string representation
/// used in preview/report payloads.
#[inline]
fn role_str(role: partition::PartRole) -> &'static str {
    match role {
        partition::PartRole::BiosBoot => "bios_boot",
        partition::PartRole::Esp => "esp",
        partition::PartRole::Data => "data",
        partition::PartRole::Cache => "cache",
    }
}

/// Build the preview JSON payload for `--show` / `--report`.
/// /// Includes: /// - version, timestamp, status="planned" /// - topology string, alignment and empty-disk policy flag /// - discovered disks (path, size_bytes, rotational, model, serial) /// - partition_plan per disk (role, size_mib or null for remainder, gpt_name) /// - filesystems_planned: inferred FS kinds per topology and labels /// - mount: scheme summary and target template (e.g., "/var/cache/{UUID}") /// /// This function is non-destructive and performs no probing beyond the provided inputs. fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result { // Disks summary let disks_json: Vec = disks .iter() .map(|d| { json!({ "path": d.path, "size_bytes": d.size_bytes, "rotational": d.rotational, "model": d.model, "serial": d.serial, }) }) .collect(); // Partition plan summary (spec-level) let mut plan_json: Vec = Vec::new(); for dp in &plan.disks { let parts: Vec = dp .parts .iter() .map(|p| { json!({ "role": role_str(p.role), "size_mib": p.size_mib, // null means "remainder" "gpt_name": p.gpt_name, }) }) .collect(); plan_json.push(json!({ "disk": dp.disk.path, "parts": parts })); } // Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology let topo_str = match cfg.topology { crate::types::Topology::Single => "single", crate::types::Topology::DualIndependent => "dual_independent", crate::types::Topology::SsdHddBcachefs => "ssd_hdd_bcachefs", crate::types::Topology::BtrfsRaid1 => "btrfs_raid1", }; // Count roles across plan to infer filesystems let mut esp_count = 0usize; let mut data_count = 0usize; let mut cache_count = 0usize; for dp in &plan.disks { for p in &dp.parts { match p.role { partition::PartRole::Esp => esp_count += 1, partition::PartRole::Data => data_count += 1, partition::PartRole::Cache => cache_count += 1, partition::PartRole::BiosBoot => {} } } } let mut filesystems_planned: Vec = Vec::new(); // ESP -> vfat (typically mounted by bootloader; no runtime target here) if esp_count 
> 0 { filesystems_planned.push(json!({ "kind": "vfat", "from_roles": ["esp"], "label": cfg.filesystem.vfat.label, "planned_mountpoint": null })); } // Data/cache-driven FS + mount targets. Mount scheme is per-UUID under base_dir. let target_template = format!("{}/{{UUID}}", cfg.mount.base_dir); match cfg.topology { crate::types::Topology::SsdHddBcachefs => { if cache_count > 0 && data_count > 0 { filesystems_planned.push(json!({ "kind": "bcachefs", "from_roles": ["cache", "data"], "label": cfg.filesystem.bcachefs.label, "planned_mountpoint_template": target_template, })); } } crate::types::Topology::BtrfsRaid1 => { // One multi-device btrfs across all data partitions if data_count >= 2 { filesystems_planned.push(json!({ "kind": "btrfs", "from_roles": ["data"], "devices_planned": data_count, "label": cfg.filesystem.btrfs.label, "planned_mountpoint_template": target_template, })); } else if data_count == 1 { filesystems_planned.push(json!({ "kind": "btrfs", "from_roles": ["data"], "label": cfg.filesystem.btrfs.label, "planned_mountpoint_template": target_template, "note": "only one data partition present; raid1 requires >= 2", })); } } _ => { // One btrfs per data partition for _ in 0..data_count { filesystems_planned.push(json!({ "kind": "btrfs", "from_roles": ["data"], "label": cfg.filesystem.btrfs.label, "planned_mountpoint_template": target_template, })); } } } let mount_scheme = json!({ "scheme": "per_uuid", "base_dir": cfg.mount.base_dir, "fstab_enabled": cfg.mount.fstab_enabled, "target_template": target_template, }); let now = format_rfc3339(SystemTime::now()).to_string(); let summary = json!({ "version": "v1", "timestamp": now, "status": "planned", "topology": topo_str, "alignment_mib": plan.alignment_mib, "require_empty_disks": plan.require_empty_disks, "disks": disks_json, "partition_plan": plan_json, "filesystems_planned": filesystems_planned, "mount": mount_scheme }); Ok(summary) }