feat: first-draft preview-capable zosstorage

- CLI: add topology selection (-t/--topology), preview flags (--show/--report), and removable policy override (--allow-removable) (src/cli/args.rs)
- Config: built-in sensible defaults; deterministic overlays for logging, fstab, removable, topology (src/config/loader.rs)
- Device: discovery via /proc + /sys with include/exclude regex and removable policy (src/device/discovery.rs)
- Idempotency: detection via blkid; safe emptiness checks (src/idempotency/mod.rs)
- Partition: topology-driven planning (Single, DualIndependent, BtrfsRaid1, SsdHddBcachefs) (src/partition/plan.rs)
- FS: planning + creation (mkfs.vfat, mkfs.btrfs, bcachefs format) and UUID capture via blkid (src/fs/plan.rs)
- Orchestrator: pre-flight with preview JSON (disks, partition_plan, filesystems_planned, mount scheme); skips emptiness checks in preview mode and supports stdout and file output (src/orchestrator/run.rs)
- Util/Logging/Types/Errors: process execution, tracing, shared types (src/util/mod.rs, src/logging/mod.rs, src/types.rs, src/errors.rs)
- Docs: add README with exhaustive usage and preview JSON shape (README.md)

Builds cleanly; unit tests pass for discovery, util, the idempotency helpers, and the fs parser.
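
Example preview invocation (illustrative; the exact topology value spellings are documented in the README, assumed here):

  zosstorage --topology single --show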
commit 507bc172c2
Date: 2025-09-29 11:37:07 +02:00
38 files changed, 6558 insertions(+), 0 deletions(-)

src/partition/mod.rs (new file)

@@ -0,0 +1,12 @@
//! Partition module barrel.
//!
//! Re-exports the concrete planning/apply implementation from plan.rs to avoid a large mod.rs.
//! See [src/partition/plan.rs](plan.rs) for details.
//
// REGION: API
// api: partition::plan::*
// REGION: API-END
pub mod plan;
pub use plan::*;
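
// Example (illustrative): the barrel re-export lets callers import the
// planner without knowing the internal file layout:
//   use crate::partition::{plan_partitions, apply_partitions};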

src/partition/plan.rs (new file)

@@ -0,0 +1,290 @@
// REGION: API
// api: partition::PartRole { BiosBoot, Esp, Data, Cache }
// api: partition::PartitionSpec { role: PartRole, size_mib: Option<u64>, gpt_name: String }
// api: partition::DiskPlan { disk: crate::device::Disk, parts: Vec<PartitionSpec> }
// api: partition::PartitionPlan { alignment_mib: u64, disks: Vec<DiskPlan>, require_empty_disks: bool }
// api: partition::PartitionResult { disk: String, part_number: u32, role: PartRole, gpt_name: String, uuid: String, start_mib: u64, size_mib: u64, device_path: String }
// api: partition::plan_partitions(disks: &[crate::device::Disk], cfg: &crate::types::Config) -> crate::Result<PartitionPlan>
// api: partition::apply_partitions(plan: &PartitionPlan) -> crate::Result<Vec<PartitionResult>>
// REGION: API-END
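//
// Example flow (illustrative):
//   let plan = partition::plan_partitions(&disks, &cfg)?;
//   let results = partition::apply_partitions(&plan)?;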
//
// REGION: RESPONSIBILITIES
// - Compute a declarative GPT partitioning plan per topology with 1 MiB alignment.
// - Apply the plan safely via system tools (sgdisk) using util wrappers.
// Non-goals: filesystem creation, mounting, reporting.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: support additional partition roles (e.g., metadata) via PartRole extension.
// ext: device-specific alignment or reserved areas configurable via cfg in the future.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must verify require_empty_disks before any modification.
// safety: must ensure unique partition GUIDs; identical labels are allowed when expected (e.g., ESP ZOSBOOT).
// safety: must call udev settle after partition table writes.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: external tool failure -> crate::Error::Tool { tool, status, stderr }.
// errmap: validation and planning errors -> crate::Error::Partition with clear context.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement topology-aware layout including SSD/HDD cache/backing with gpt_name zoscache.
// todo: integrate blkid probing to confirm absence of FS signatures prior to changes.
// REGION: TODO-END
//! GPT partition planning and application.
//!
//! Provides declarative planning APIs and an apply step that will later
//! shell out to system tools (sgdisk) wrapped via util helpers.
//!
//! See [`plan_partitions`] and [`apply_partitions`] below.
use crate::{types::{Config, Topology}, device::Disk, Error, Result};
/// Partition roles supported by zosstorage.
#[derive(Debug, Clone, Copy)]
pub enum PartRole {
/// Tiny BIOS boot partition (no filesystem).
BiosBoot,
/// EFI System Partition (vfat, label ZOSBOOT).
Esp,
/// Primary data partition.
Data,
/// Cache partition (for bcachefs SSD roles).
Cache,
}
/// Declarative spec for a partition on a disk.
#[derive(Debug, Clone)]
pub struct PartitionSpec {
/// Role of this partition.
pub role: PartRole,
/// Explicit size in MiB; None means "use remainder".
pub size_mib: Option<u64>,
/// GPT partition name (zosboot, zosdata, zoscache).
pub gpt_name: String,
}
/// Plan for a single disk.
#[derive(Debug, Clone)]
pub struct DiskPlan {
/// Target disk.
pub disk: Disk,
/// Ordered partition specs for the disk.
pub parts: Vec<PartitionSpec>,
}
/// Full partitioning plan across all target disks.
#[derive(Debug, Clone)]
pub struct PartitionPlan {
/// Alignment in MiB (1 by default).
pub alignment_mib: u64,
/// Plans per disk.
pub disks: Vec<DiskPlan>,
/// When true, abort if any target disk is not empty.
pub require_empty_disks: bool,
}
/// Result of applying partitioning on a particular disk.
#[derive(Debug, Clone)]
pub struct PartitionResult {
/// Parent disk path (e.g., /dev/nvme0n1).
pub disk: String,
/// Partition index number (1-based).
pub part_number: u32,
/// Role assigned to this partition.
pub role: PartRole,
/// GPT partition name used.
pub gpt_name: String,
/// Partition GUID.
pub uuid: String,
/// Start offset in MiB.
pub start_mib: u64,
/// Size in MiB.
pub size_mib: u64,
/// Partition device path (e.g., /dev/nvme0n1p2).
pub device_path: String,
}
/**
Compute GPT-only plan per topology and constraints.
Layout defaults:
- BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
- ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
- Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
- Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
Topology mapping:
- Single: use first eligible disk; create BIOS (opt) + ESP + Data
- DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
- BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
- SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
SSD: BIOS (opt) + ESP + Cache; HDD: Data
*/
pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
let align = cfg.partitioning.alignment_mib;
let require_empty = cfg.partitioning.require_empty_disks;
if disks.is_empty() {
return Err(Error::Partition("no disks provided to partition planner".into()));
}
let mut plans: Vec<DiskPlan> = Vec::new();
match cfg.topology {
Topology::Single => {
let d0 = &disks[0];
let mut parts = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts });
}
Topology::DualIndependent => {
if disks.len() < 2 {
return Err(Error::Partition("DualIndependent topology requires at least 2 disks".into()));
}
let d0 = &disks[0];
let d1 = &disks[1];
// Disk 0: BIOS (opt) + ESP + Data
let mut parts0 = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts0.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts0.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts0.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
// Disk 1: Data only
let mut parts1 = Vec::new();
parts1.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
}
Topology::BtrfsRaid1 => {
if disks.len() < 2 {
return Err(Error::Partition("BtrfsRaid1 topology requires at least 2 disks".into()));
}
let d0 = &disks[0];
let d1 = &disks[1];
// Disk 0: BIOS (opt) + ESP + Data
let mut parts0 = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts0.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts0.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts0.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
// Disk 1: Data only (for RAID1)
let mut parts1 = Vec::new();
parts1.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
}
Topology::SsdHddBcachefs => {
// Choose SSD (rotational=false) and HDD (rotational=true)
let ssd = disks.iter().find(|d| !d.rotational)
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an SSD (non-rotational) disk".into()))?;
let hdd = disks.iter().find(|d| d.rotational)
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an HDD (rotational) disk".into()))?;
// SSD: BIOS (opt) + ESP + Cache remainder
let mut parts_ssd = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts_ssd.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts_ssd.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts_ssd.push(PartitionSpec {
role: PartRole::Cache,
size_mib: None,
gpt_name: cfg.partitioning.cache.gpt_name.clone(),
});
plans.push(DiskPlan { disk: ssd.clone(), parts: parts_ssd });
// HDD: Data remainder
let mut parts_hdd = Vec::new();
parts_hdd.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: hdd.clone(), parts: parts_hdd });
}
}
Ok(PartitionPlan {
alignment_mib: align,
disks: plans,
require_empty_disks: require_empty,
})
}
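
/// Illustrative helper (sketch, not part of this commit): render a
/// [`PartitionPlan`] as human-readable lines using only the fields defined
/// above. Handy when eyeballing `--show` preview output during development.
#[allow(dead_code)]
fn describe_plan(plan: &PartitionPlan) -> Vec<String> {
    let mut lines = Vec::new();
    for dp in &plan.disks {
        for (idx, spec) in dp.parts.iter().enumerate() {
            // `size_mib == None` means "use the remainder of the disk".
            let size = spec
                .size_mib
                .map(|m| format!("{m} MiB"))
                .unwrap_or_else(|| "remainder".to_string());
            lines.push(format!("p{}: {:?} '{}' ({})", idx + 1, spec.role, spec.gpt_name, size));
        }
    }
    lines
}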
/// Apply the partition plan using system utilities (sgdisk) via util wrappers.
///
/// Safety:
/// - Must verify target disks are empty when required.
/// - Must ensure unique partition GUIDs.
/// - Should call udev settle after changes.
pub fn apply_partitions(_plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
// To be implemented: sgdisk orchestration + udev settle + GUID collection
todo!("shell out to sgdisk, trigger udev settle, collect partition GUIDs")
}