diff --git a/src/cli/args.rs b/src/cli/args.rs
index a50e514..82889ac 100644
--- a/src/cli/args.rs
+++ b/src/cli/args.rs
@@ -86,15 +86,24 @@ pub struct Cli {
     /// Overrides config.device_selection.allow_removable when provided
     #[arg(long = "allow-removable", default_value_t = false)]
     pub allow_removable: bool,
-
+
+    /// Attempt to mount existing filesystems based on on-disk headers; no partitioning or mkfs.
+    /// Non-destructive mounting flow; uses UUID= sources and policy from config.
+    #[arg(long = "mount-existing", default_value_t = false)]
+    pub mount_existing: bool,
+
+    /// Report current initialized filesystems and mounts without performing changes.
+    #[arg(long = "report-current", default_value_t = false)]
+    pub report_current: bool,
+
     /// Print detection and planning summary as JSON to stdout (non-default)
     #[arg(long = "show", default_value_t = false)]
     pub show: bool,
-
+
     /// Write detection/planning JSON report to the given path (overrides config.report.path)
     #[arg(long = "report")]
     pub report: Option<String>,
-
+
     /// Execute destructive actions (apply mode). When false, runs preview-only.
     #[arg(long = "apply", default_value_t = false)]
     pub apply: bool,
diff --git a/src/config/loader.rs b/src/config/loader.rs
index a533645..17c596a 100644
--- a/src/config/loader.rs
+++ b/src/config/loader.rs
@@ -1,9 +1,8 @@
 //! Configuration loading, merging, and validation (loader).
 //!
 //! Precedence (highest to lowest):
-//! - Kernel cmdline key `zosstorage.config=`
-//! - CLI flags
-//! - On-disk config file at /etc/zosstorage/config.yaml (if present)
+//! - CLI flags (and optional `--config PATH` when provided)
+//! - Kernel cmdline key `zosstorage.topo=`
 //! - Built-in defaults
 //!
 //! See [docs/SCHEMA.md](../../docs/SCHEMA.md) for the schema details.
@@ -26,7 +25,7 @@
 // REGION: EXTENSION_POINTS-END
 //
 // REGION: SAFETY
-// safety: precedence enforced (kernel > CLI flags > CLI --config > /etc file > defaults).
+// safety: precedence enforced (CLI flags > kernel cmdline > built-in defaults).
 // safety: reserved GPT names and labels validated to avoid destructive operations later.
 // REGION: SAFETY-END
 //
@@ -45,17 +44,18 @@ use std::path::Path;
 use crate::{cli::Cli, Error, Result};
 use crate::types::*;
-use serde_json::{Map, Value};
+use serde_json::{Map, Value, json};
 use base64::Engine as _;
 
-/// Load defaults, merge on-disk config, overlay CLI, and finally kernel cmdline key.
+/// Load defaults, merge optional CLI --config, overlay CLI flags (highest precedence),
+/// then consider kernel cmdline topology only if CLI omitted it.
 /// Returns a validated Config on success.
 ///
 /// Behavior:
 /// - Starts from built-in defaults (documented in docs/SCHEMA.md)
-/// - If /etc/zosstorage/config.yaml exists, merge it
-/// - If CLI --config is provided, merge that (overrides file defaults)
-/// - If kernel cmdline provides `zosstorage.config=...`, merge that last (highest precedence)
+/// - Skips implicit /etc reads in initramfs
+/// - If CLI --config is provided, merge that (overrides defaults)
+/// - If kernel cmdline provides `zosstorage.topo=...` and CLI did NOT specify `--topology`, apply it
 /// - Returns Error::Unimplemented when --force is used
 pub fn load_and_merge(cli: &Cli) -> Result<Config> {
     if cli.force {
@@ -65,12 +65,8 @@ pub fn load_and_merge(cli: &Cli) -> Result<Config> {
     // 1) Start with defaults
     let mut merged = to_value(default_config())?;
 
-    // 2) Merge default on-disk config if present
-    let default_cfg_path = "/etc/zosstorage/config.yaml";
-    if Path::new(default_cfg_path).exists() {
-        let v = load_yaml_value(default_cfg_path)?;
-        merge_value(&mut merged, v);
-    }
+    // 2) (initramfs) Skipped reading default on-disk config to avoid dependency on /etc.
+    // If a config is needed, pass it via --config PATH.
 
     // 3) Merge CLI referenced config (if any)
     if let Some(cfg_path) = &cli.config {
@@ -82,25 +78,17 @@ pub fn load_and_merge(cli: &Cli) -> Result<Config> {
     let cli_overlay = cli_overlay_value(cli);
     merge_value(&mut merged, cli_overlay);
 
-    // 5) Merge kernel cmdline referenced config (if any)
-    if let Some(src) = kernel_cmdline_config_source()? {
-        match src {
-            KernelConfigSource::Path(kpath) => {
-                let v = load_yaml_value(&kpath)?;
-                merge_value(&mut merged, v);
-            }
-            KernelConfigSource::Data(yaml) => {
-                let v: serde_json::Value = serde_yaml::from_str(&yaml)
-                    .map_err(|e| Error::Config(format!("failed to parse YAML from data: URL: {}", e)))?;
-                merge_value(&mut merged, v);
-            }
-        }
-    }
+    // 5) Kernel cmdline topology (only if CLI did not specify topology), e.g., `zosstorage.topo=dual-independent`
+    if cli.topology.is_none() {
+        if let Some(topo) = kernel_cmdline_topology() {
+            merge_value(&mut merged, json!({"topology": topo.to_string()}));
+        }
+    }
 
-// Finalize
-let cfg: Config = serde_json::from_value(merged).map_err(|e| Error::Other(e.into()))?;
-validate(&cfg)?;
-Ok(cfg)
+    // Finalize
+    let cfg: Config = serde_json::from_value(merged).map_err(|e| Error::Other(e.into()))?;
+    validate(&cfg)?;
+    Ok(cfg)
 }
 
 /// Validate semantic correctness of the configuration.
@@ -331,6 +319,38 @@ fn kernel_cmdline_config_source() -> Result<Option<KernelConfigSource>> {
     Ok(None)
 }
 
+fn kernel_cmdline_topology() -> Option<Topology> {
+    let cmdline = fs::read_to_string("/proc/cmdline").unwrap_or_default();
+    for token in cmdline.split_whitespace() {
+        if let Some(mut val) = token.strip_prefix("zosstorage.topo=") {
+            // Trim surrounding quotes if any
+            if val.len() >= 2 && ((val.starts_with('"') && val.ends_with('"')) || (val.starts_with('\'') && val.ends_with('\''))) {
+                val = &val[1..val.len() - 1];
+            }
+            let val_norm = val.trim();
+            if let Some(t) = parse_topology_token(val_norm) {
+                return Some(t);
+            }
+        }
+    }
+    None
+}
+
+/// Helper to parse known topology tokens in kebab- or snake-case.
+fn parse_topology_token(s: &str) -> Option<Topology> {
+    // Normalize underscores to hyphens for simpler matching.
+    let k = s.trim().to_ascii_lowercase().replace('_', "-");
+    match k.as_str() {
+        "btrfs-single" => Some(Topology::BtrfsSingle),
+        "bcachefs-single" => Some(Topology::BcachefsSingle),
+        "dual-independent" => Some(Topology::DualIndependent),
+        "ssd-hdd-bcachefs" => Some(Topology::SsdHddBcachefs),
+        "bcachefs2-copy" | "bcachefs-2copy" | "bcachefs-2-copy" => Some(Topology::Bcachefs2Copy),
+        "btrfs-raid1" => Some(Topology::BtrfsRaid1),
+        _ => None,
+    }
+}
+
 /// Built-in defaults for the entire configuration (schema version 1).
 fn default_config() -> Config {
     Config {
@@ -354,7 +374,7 @@ fn default_config() -> Config {
             allow_removable: false,
             min_size_gib: 10,
         },
-        topology: Topology::BtrfsSingle,
+        topology: Topology::DualIndependent,
         partitioning: Partitioning {
             alignment_mib: 1,
             require_empty_disks: true,
diff --git a/src/fs/plan.rs b/src/fs/plan.rs
index 42c42b9..80e6b16 100644
--- a/src/fs/plan.rs
+++ b/src/fs/plan.rs
@@ -49,6 +49,7 @@ use crate::{
     Error,
 };
 use tracing::{debug, warn};
+use std::fs;
 
 /// Filesystem kinds supported by zosstorage.
 #[derive(Debug, Clone, Copy)]
@@ -342,6 +343,95 @@ fn parse_blkid_export(s: &str) -> std::collections::HashMap<String, String> {
     map
 }
 
+/// Probe existing filesystems on the system and return their identities (kind, uuid, label).
+///
+/// This inspects /proc/partitions and uses `blkid -o export` on each device to detect:
+/// - Data filesystems: Btrfs or Bcachefs with label "ZOSDATA"
+/// - ESP filesystems: Vfat with label "ZOSBOOT"
+/// Multi-device filesystems (e.g., btrfs) are de-duplicated by UUID.
+///
+/// Returns:
+/// - Vec<FsResult> with at most one entry per filesystem UUID.
+pub fn probe_existing_filesystems() -> Result<Vec<FsResult>> {
+    let Some(blkid) = which_tool("blkid")? else {
+        return Err(Error::Filesystem("blkid not found in PATH; cannot probe existing filesystems".into()));
+    };
+
+    let content = fs::read_to_string("/proc/partitions")
+        .map_err(|e| Error::Filesystem(format!("/proc/partitions read error: {}", e)))?;
+
+    let mut results_by_uuid: std::collections::HashMap<String, FsResult> = std::collections::HashMap::new();
+
+    for line in content.lines() {
+        let line = line.trim();
+        if line.is_empty() || line.starts_with("major") {
+            continue;
+        }
+        // Format: major minor #blocks name
+        let parts: Vec<&str> = line.split_whitespace().collect();
+        if parts.len() < 4 {
+            continue;
+        }
+        let name = parts[3];
+        // Skip pseudo devices commonly not relevant (loop, ram, zram, fd)
+        if name.starts_with("loop")
+            || name.starts_with("ram")
+            || name.starts_with("zram")
+            || name.starts_with("fd")
+        {
+            continue;
+        }
+
+        let dev_path = format!("/dev/{}", name);
+        // Probe with blkid -o export; ignore non-zero statuses meaning "nothing found"
+        let out = match run_cmd_capture(&[blkid.as_str(), "-o", "export", dev_path.as_str()]) {
+            Ok(o) => o,
+            Err(Error::Tool { status, .. }) if status != 0 => {
+                // No recognizable signature; skip
+                continue;
+            }
+            Err(_) => {
+                // Unexpected failure; skip this device
+                continue;
+            }
+        };
+
+        let map = parse_blkid_export(&out.stdout);
+        let ty = map.get("TYPE").cloned().unwrap_or_default();
+        let label = map
+            .get("ID_FS_LABEL").cloned()
+            .or_else(|| map.get("LABEL").cloned())
+            .unwrap_or_default();
+        let uuid = map
+            .get("ID_FS_UUID").cloned()
+            .or_else(|| map.get("UUID").cloned());
+
+        let (kind_opt, expected_label) = match ty.as_str() {
+            "btrfs" => (Some(FsKind::Btrfs), "ZOSDATA"),
+            "bcachefs" => (Some(FsKind::Bcachefs), "ZOSDATA"),
+            "vfat" => (Some(FsKind::Vfat), "ZOSBOOT"),
+            _ => (None, ""),
+        };
+
+        if let (Some(kind), Some(u)) = (kind_opt, uuid) {
+            // Enforce reserved label semantics
+            if !expected_label.is_empty() && label != expected_label {
+                continue;
+            }
+
+            // Deduplicate multi-device filesystems by UUID; record first-seen device
+            results_by_uuid.entry(u.clone()).or_insert(FsResult {
+                kind,
+                devices: vec![dev_path.clone()],
+                uuid: u,
+                label: label.clone(),
+            });
+        }
+    }
+
+    Ok(results_by_uuid.into_values().collect())
+}
+
 #[cfg(test)]
 mod tests_parse {
     use super::parse_blkid_export;
diff --git a/src/main.rs b/src/main.rs
index a971e79..4b38e95 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -52,6 +52,8 @@ fn real_main() -> Result<()> {
     let ctx = orchestrator::Context::new(cfg, log_opts)
         .with_show(cli.show)
         .with_apply(cli.apply)
+        .with_mount_existing(cli.mount_existing)
+        .with_report_current(cli.report_current)
         .with_report_path(cli.report.clone());
     orchestrator::run(&ctx)
 }
diff --git a/src/orchestrator/run.rs b/src/orchestrator/run.rs
index b34f0e5..ad9cc9e 100644
--- a/src/orchestrator/run.rs
+++ b/src/orchestrator/run.rs
@@ -69,6 +69,10 @@ pub struct Context {
     pub show: bool,
     /// When true, perform destructive actions (apply mode).
     pub apply: bool,
+    /// When true, attempt to mount existing filesystems based on on-disk headers (non-destructive).
+    pub mount_existing: bool,
+    /// When true, emit a report of currently initialized filesystems and mounts (non-destructive).
+    pub report_current: bool,
     /// Optional report path override (when provided by CLI --report).
     pub report_path_override: Option<String>,
 }
@@ -81,6 +85,8 @@ impl Context {
             log,
             show: false,
             apply: false,
+            mount_existing: false,
+            report_current: false,
             report_path_override: None,
         }
     }
@@ -118,6 +124,18 @@ impl Context {
         self.report_path_override = path;
         self
     }
+
+    /// Enable or disable mount-existing mode (non-destructive).
+    pub fn with_mount_existing(mut self, mount_existing: bool) -> Self {
+        self.mount_existing = mount_existing;
+        self
+    }
+
+    /// Enable or disable reporting of current state (non-destructive).
+    pub fn with_report_current(mut self, report_current: bool) -> Self {
+        self.report_current = report_current;
+        self
+    }
 }
 
 /// Run the one-shot provisioning flow.
@@ -127,15 +145,164 @@ pub fn run(ctx: &Context) -> Result<()> {
     info!("orchestrator: starting run() with topology {:?}", ctx.cfg.topology);
 
+    // Enforce mutually exclusive execution modes among: --mount-existing, --report-current, --apply
+    let selected_modes =
+        (ctx.mount_existing as u8)
+            + (ctx.report_current as u8)
+            + (ctx.apply as u8);
+    if selected_modes > 1 {
+        return Err(Error::Validation(
+            "choose only one mode: --mount-existing | --report-current | --apply".into(),
+        ));
+    }
+
+    // Mode 1: Mount existing filesystems (non-destructive), based on on-disk headers.
+    if ctx.mount_existing {
+        info!("orchestrator: mount-existing mode");
+        let fs_results = zfs::probe_existing_filesystems()?;
+        if fs_results.is_empty() {
+            return Err(Error::Mount(
+                "no existing filesystems with reserved labels (ZOSBOOT/ZOSDATA) were found".into(),
+            ));
+        }
+        let mplan = crate::mount::plan_mounts(&fs_results, &ctx.cfg)?;
+        let mres = crate::mount::apply_mounts(&mplan)?;
+        crate::mount::maybe_write_fstab(&mres, &ctx.cfg)?;
+
+        // Optional JSON summary for mount-existing
+        if ctx.show || ctx.report_path_override.is_some() || ctx.report_current {
+            let now = format_rfc3339(SystemTime::now()).to_string();
+            let fs_json: Vec<serde_json::Value> = fs_results
+                .iter()
+                .map(|r| {
+                    let kind_str = match r.kind {
+                        zfs::FsKind::Vfat => "vfat",
+                        zfs::FsKind::Btrfs => "btrfs",
+                        zfs::FsKind::Bcachefs => "bcachefs",
+                    };
+                    json!({
+                        "kind": kind_str,
+                        "uuid": r.uuid,
+                        "label": r.label,
+                        "devices": r.devices,
+                    })
+                })
+                .collect();
+
+            let mounts_json: Vec<serde_json::Value> = mres
+                .iter()
+                .map(|m| {
+                    json!({
+                        "source": m.source,
+                        "target": m.target,
+                        "fstype": m.fstype,
+                        "options": m.options,
+                    })
+                })
+                .collect();
+
+            let summary = json!({
+                "version": "v1",
+                "timestamp": now,
+                "status": "mounted_existing",
+                "filesystems": fs_json,
+                "mounts": mounts_json,
+            });
+
+            if ctx.show || ctx.report_current {
+                println!("{}", summary);
+            }
+            if let Some(path) = &ctx.report_path_override {
+                fs::write(path, summary.to_string()).map_err(|e| {
+                    Error::Report(format!("failed to write report to {}: {}", path, e))
+                })?;
+                info!("orchestrator: wrote mount-existing report to {}", path);
+            }
+        }
+
+        return Ok(());
+    }
+
+    // Mode 2: Report current initialized filesystems and mounts (non-destructive).
+    if ctx.report_current {
+        info!("orchestrator: report-current mode");
+        let fs_results = zfs::probe_existing_filesystems()?;
+
+        // Parse /proc/mounts and include only our relevant targets.
+        let mounts_content = fs::read_to_string("/proc/mounts").unwrap_or_default();
+        let mounts_json: Vec<serde_json::Value> = mounts_content
+            .lines()
+            .filter_map(|line| {
+                let mut it = line.split_whitespace();
+                let source = it.next()?;
+                let target = it.next()?;
+                let fstype = it.next()?;
+                let options = it.next().unwrap_or("");
+                if target.starts_with("/var/mounts/")
+                    || target == "/var/cache/system"
+                    || target == "/var/cache/etc"
+                    || target == "/var/cache/modules"
+                    || target == "/var/cache/vm-meta"
+                {
+                    Some(json!({
+                        "source": source,
+                        "target": target,
+                        "fstype": fstype,
+                        "options": options
+                    }))
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        let fs_json: Vec<serde_json::Value> = fs_results
+            .iter()
+            .map(|r| {
+                let kind_str = match r.kind {
+                    zfs::FsKind::Vfat => "vfat",
+                    zfs::FsKind::Btrfs => "btrfs",
+                    zfs::FsKind::Bcachefs => "bcachefs",
+                };
+                json!({
+                    "kind": kind_str,
+                    "uuid": r.uuid,
+                    "label": r.label,
+                    "devices": r.devices
+                })
+            })
+            .collect();
+
+        let now = format_rfc3339(SystemTime::now()).to_string();
+        let summary = json!({
+            "version": "v1",
+            "timestamp": now,
+            "status": "observed",
+            "filesystems": fs_json,
+            "mounts": mounts_json
+        });
+
+        // In report-current mode, default to stdout; also honor --report path when provided.
+        println!("{}", summary);
+        if let Some(path) = &ctx.report_path_override {
+            fs::write(path, summary.to_string()).map_err(|e| {
+                Error::Report(format!("failed to write report to {}: {}", path, e))
+            })?;
+            info!("orchestrator: wrote report-current to {}", path);
+        }
+        return Ok(());
+    }
+
+    // Default path: plan (and optionally apply) for empty-disk initialization workflow.
+    // 1) Idempotency pre-flight: if already provisioned, optionally emit summary then exit success.
     match idempotency::detect_existing_state()? {
         Some(state) => {
             info!("orchestrator: already provisioned");
             if ctx.show || ctx.report_path_override.is_some() {
                 let now = format_rfc3339(SystemTime::now()).to_string();
-                let state_json = to_value(&state).map_err(|e| {
-                    Error::Report(format!("failed to serialize StateReport: {}", e))
-                })?;
+                let state_json = to_value(&state)
+                    .map_err(|e| Error::Report(format!("failed to serialize StateReport: {}", e)))?;
                 let summary = json!({
                     "version": "v1",
                     "timestamp": now,
@@ -146,8 +313,9 @@ pub fn run(ctx: &Context) -> Result<()> {
                     println!("{}", summary);
                 }
                 if let Some(path) = &ctx.report_path_override {
-                    fs::write(path, summary.to_string())
-                        .map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
+                    fs::write(path, summary.to_string()).map_err(|e| {
+                        Error::Report(format!("failed to write report to {}: {}", path, e))
+                    })?;
                     info!("orchestrator: wrote idempotency report to {}", path);
                 }
             }
@@ -174,7 +342,7 @@ pub fn run(ctx: &Context) -> Result<()> {
         warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement");
     }
 
-    // 4) Partition planning (declarative only; application not yet implemented in this step).
+    // 4) Partition planning (declarative).
     let plan = partition::plan_partitions(&disks, &ctx.cfg)?;
     debug!(
         "orchestrator: partition plan ready (alignment={} MiB, disks={})",
@@ -197,7 +365,10 @@ pub fn run(ctx: &Context) -> Result<()> {
 
     // Filesystem planning and creation
     let fs_plan = zfs::plan_filesystems(&part_results, &ctx.cfg)?;
-    info!("orchestrator: filesystem plan contains {} spec(s)", fs_plan.specs.len());
+    info!(
+        "orchestrator: filesystem plan contains {} spec(s)",
+        fs_plan.specs.len()
+    );
     let fs_results = zfs::make_filesystems(&fs_plan, &ctx.cfg)?;
     info!("orchestrator: created {} filesystem(s)", fs_results.len());
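
Illustrative sketch (not part of the diff above): how the kernel-cmdline topology parsing introduced in src/config/loader.rs is expected to behave. Booting with `zosstorage.topo=dual_independent` should select Topology::DualIndependent unless `--topology` is passed on the CLI. The sketch assumes Topology derives Debug and PartialEq, which this diff does not show; the accepted tokens mirror the match arms of parse_topology_token.

#[cfg(test)]
mod topology_token_sketch {
    use super::{parse_topology_token, Topology};

    // Assumption: Topology derives Debug + PartialEq; if it does not, rewrite
    // the assertions with matches!() instead of assert_eq!().
    #[test]
    fn parses_known_tokens() {
        // Snake_case and mixed case are normalized to lowercase kebab-case before matching.
        assert_eq!(parse_topology_token("dual_independent"), Some(Topology::DualIndependent));
        assert_eq!(parse_topology_token("SSD-HDD-BCACHEFS"), Some(Topology::SsdHddBcachefs));
        assert_eq!(parse_topology_token("bcachefs-2copy"), Some(Topology::Bcachefs2Copy));
        // Unknown tokens are rejected, so the configured default topology applies.
        assert_eq!(parse_topology_token("raid10"), None);
    }
}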