topology: add bcachefs-2copy; add bcachefs-single; rename single->btrfs-single; update planner, fs mapping, CLI, defaults, preview topo strings, README

2025-09-29 18:02:53 +02:00
parent cd63506d3c
commit 2d43005b07
7 changed files with 115 additions and 17 deletions


@@ -24,7 +24,7 @@ Key modules
 - [src/mount/ops.rs](src/mount/ops.rs)
 Features at a glance
-- Topology-driven planning with built-in defaults: Single, DualIndependent, BtrfsRaid1, SsdHddBcachefs
+- Topology-driven planning with built-in defaults: BtrfsSingle, BcachefsSingle, DualIndependent, Bcachefs2Copy, BtrfsRaid1, SsdHddBcachefs
 - Non-destructive preview: --show/--report outputs JSON summary (disks, partition plan, filesystems, planned mountpoints)
 - Safe discovery: excludes removable media by default (USB sticks) unless explicitly allowed
 - Config-optional: the tool runs without any YAML; sensible defaults are always present and may be overridden/merged by config
@@ -45,7 +45,7 @@ Binary is target/release/zosstorage.
 CLI usage
 - Topology selection (config optional):
-  -t, --topology single|dual-independent|btrfs-raid1|ssd-hdd-bcachefs
+  -t, --topology btrfs-single|bcachefs-single|dual-independent|bcachefs-2copy|btrfs-raid1|ssd-hdd-bcachefs
 - Preview (non-destructive):
   --show Print JSON summary to stdout
   --report PATH Write JSON summary to a file
@@ -61,7 +61,7 @@ CLI usage
 Examples
 - Single disk plan with debug logs:
-  sudo ./zosstorage --show -t single -l debug
+  sudo ./zosstorage --show -t btrfs-single -l debug
 - RAID1 btrfs across two disks; print and write summary:
   sudo ./zosstorage --show --report /run/zosstorage/plan.json -t btrfs-raid1 -l debug -L
 - SSD+HDD bcachefs plan, include removable devices (for lab cases):
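The new names drop into the same preview workflow; as a usage sketch built only from the flags documented above (privileges and output as in the existing examples):

  sudo ./zosstorage --show -t bcachefs-single -l debug
  sudo ./zosstorage --show --report /run/zosstorage/plan.json -t bcachefs-2copy -l debug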


@@ -55,18 +55,22 @@ impl std::fmt::Display for LogLevelArg {
 #[derive(Debug, Clone, Copy, ValueEnum)]
 #[value(rename_all = "kebab_case")]
 pub enum TopologyArg {
-    Single,
+    BtrfsSingle,
+    BcachefsSingle,
     DualIndependent,
     SsdHddBcachefs,
+    Bcachefs2Copy,
     BtrfsRaid1,
 }
 impl std::fmt::Display for TopologyArg {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let s = match self {
-            TopologyArg::Single => "single",
+            TopologyArg::BtrfsSingle => "btrfs_single",
+            TopologyArg::BcachefsSingle => "bcachefs_single",
             TopologyArg::DualIndependent => "dual_independent",
             TopologyArg::SsdHddBcachefs => "ssd_hdd_bcachefs",
+            TopologyArg::Bcachefs2Copy => "bcachefs_2copy",
             TopologyArg::BtrfsRaid1 => "btrfs_raid1",
         };
         f.write_str(s)
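The CLI enum mirrors the config-level Topology variant-for-variant; the conversion site is not part of this diff, but a minimal sketch of such a mapping (hypothetical helper name) would be:

  // Hypothetical helper; the crate may perform this mapping elsewhere, e.g. when merging CLI args into Config.
  fn to_topology(arg: TopologyArg) -> Topology {
      match arg {
          TopologyArg::BtrfsSingle => Topology::BtrfsSingle,
          TopologyArg::BcachefsSingle => Topology::BcachefsSingle,
          TopologyArg::DualIndependent => Topology::DualIndependent,
          TopologyArg::SsdHddBcachefs => Topology::SsdHddBcachefs,
          TopologyArg::Bcachefs2Copy => Topology::Bcachefs2Copy,
          TopologyArg::BtrfsRaid1 => Topology::BtrfsRaid1,
      }
  }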


@@ -187,9 +187,11 @@ pub fn validate(cfg: &Config) -> Result<()> {
     // Topology-specific quick checks (basic for now)
     match cfg.topology {
-        Topology::Single => {} // nothing special
+        Topology::BtrfsSingle => {} // nothing special
+        Topology::BcachefsSingle => {}
         Topology::DualIndependent => {}
         Topology::SsdHddBcachefs => {}
+        Topology::Bcachefs2Copy => {}
         Topology::BtrfsRaid1 => {
             // No enforced requirement here beyond presence of two disks at runtime.
             if cfg.filesystem.btrfs.raid_profile != "raid1" && cfg.filesystem.btrfs.raid_profile != "none" {
@@ -352,7 +354,7 @@ fn default_config() -> Config {
             allow_removable: false,
             min_size_gib: 10,
         },
-        topology: Topology::Single,
+        topology: Topology::BtrfsSingle,
         partitioning: Partitioning {
             alignment_mib: 1,
             require_empty_disks: true,
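Since the tool is config-optional, overriding the default topology from YAML only needs this one field; a minimal sketch, assuming the top-level key matches Config's topology field and serde's snake_case variant names:

  # Minimal override sketch; all other sections fall back to the built-in defaults.
  topology: bcachefs_single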


@@ -152,8 +152,36 @@ pub fn plan_filesystems(
                 label: cfg.filesystem.btrfs.label.clone(),
             });
         }
-        _ => {
-            // Map each Data partition to individual Btrfs filesystems.
+        Topology::Bcachefs2Copy => {
+            // Group all Data partitions into a single Bcachefs filesystem across multiple devices (2-copy semantics).
+            let data_devs: Vec<String> = parts
+                .iter()
+                .filter(|p| matches!(p.role, PartRole::Data))
+                .map(|p| p.device_path.clone())
+                .collect();
+            if data_devs.len() < 2 {
+                return Err(Error::Filesystem(
+                    "Bcachefs2Copy topology requires at least 2 data partitions".to_string(),
+                ));
+            }
+            specs.push(FsSpec {
+                kind: FsKind::Bcachefs,
+                devices: data_devs,
+                label: cfg.filesystem.bcachefs.label.clone(),
+            });
+        }
+        Topology::BcachefsSingle => {
+            // Single-device bcachefs on the sole Data partition.
+            let data = parts.iter().find(|p| matches!(p.role, PartRole::Data))
+                .ok_or_else(|| Error::Filesystem("expected a Data partition for BcachefsSingle topology".to_string()))?;
+            specs.push(FsSpec {
+                kind: FsKind::Bcachefs,
+                devices: vec![data.device_path.clone()],
+                label: cfg.filesystem.bcachefs.label.clone(),
+            });
+        }
+        Topology::BtrfsSingle | Topology::DualIndependent => {
+            // Map Data partition(s) to Btrfs (single device per partition for DualIndependent).
             for p in parts.iter().filter(|p| matches!(p.role, PartRole::Data)) {
                 specs.push(FsSpec {
                     kind: FsKind::Btrfs,
@@ -258,12 +286,11 @@ pub fn make_filesystems(plan: &FsPlan) -> Result<Vec<FsResult>> {
             let Some(ref mkfs) = bcachefs_tool else {
                 return Err(Error::Filesystem("bcachefs not found in PATH".into()));
             };
-            if spec.devices.len() < 2 {
-                return Err(Error::Filesystem("bcachefs requires at least two devices (cache + backing)".into()));
+            if spec.devices.is_empty() {
+                return Err(Error::Filesystem("bcachefs requires at least one device".into()));
             }
-            // bcachefs format --label LABEL dev_cache dev_backing ...
-            // TODO(fs): map compression/checksum/cache-mode flags from config in a follow-up.
-            // This is deferred per current scope to focus on btrfs RAID profile wiring.
+            // bcachefs format --label LABEL dev_cache dev_backing ... (single-device also supported)
+            // TODO(fs): map compression/checksum/cache-mode and data/metadata replica flags in a follow-up.
             let mut args: Vec<String> = vec![mkfs.clone(), "format".into(), "--label".into(), spec.label.clone()];
             args.extend(spec.devices.iter().cloned());
             let args_ref: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
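With the device check relaxed, the same invocation path now covers single- and multi-device bcachefs. For a Bcachefs2Copy plan the command assembled by the code above would look roughly like the following (device paths and label are illustrative; replica flags are still deferred per the TODO):

  bcachefs format --label <bcachefs-label> /dev/sda3 /dev/sdb1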


@@ -341,9 +341,11 @@ fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config
     // Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology
     let topo_str = match cfg.topology {
-        crate::types::Topology::Single => "single",
+        crate::types::Topology::BtrfsSingle => "btrfs_single",
+        crate::types::Topology::BcachefsSingle => "bcachefs_single",
         crate::types::Topology::DualIndependent => "dual_independent",
         crate::types::Topology::SsdHddBcachefs => "ssd_hdd_bcachefs",
+        crate::types::Topology::Bcachefs2Copy => "bcachefs_2copy",
         crate::types::Topology::BtrfsRaid1 => "btrfs_raid1",
     };
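Assuming the summary JSON exposes this string under a topology-named field (the exact shape is built further down in build_summary_json and not shown here), a --show preview of the new two-disk layout would include a fragment like:

  "topology": "bcachefs_2copy"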


@@ -144,7 +144,29 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
     let mut plans: Vec<DiskPlan> = Vec::new();
     match cfg.topology {
-        Topology::Single => {
+        Topology::BtrfsSingle => {
+            let d0 = &disks[0];
+            let mut parts = Vec::new();
+            if add_bios {
+                parts.push(PartitionSpec {
+                    role: PartRole::BiosBoot,
+                    size_mib: Some(cfg.partitioning.bios_boot.size_mib),
+                    gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
+                });
+            }
+            parts.push(PartitionSpec {
+                role: PartRole::Esp,
+                size_mib: Some(cfg.partitioning.esp.size_mib),
+                gpt_name: cfg.partitioning.esp.gpt_name.clone(),
+            });
+            parts.push(PartitionSpec {
+                role: PartRole::Data,
+                size_mib: None,
+                gpt_name: cfg.partitioning.data.gpt_name.clone(),
+            });
+            plans.push(DiskPlan { disk: d0.clone(), parts });
+        }
+        Topology::BcachefsSingle => {
             let d0 = &disks[0];
             let mut parts = Vec::new();
             if add_bios {
@@ -240,6 +262,43 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
             });
             plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
         }
+        Topology::Bcachefs2Copy => {
+            if disks.len() < 2 {
+                return Err(Error::Partition("Bcachefs2Copy topology requires at least 2 disks".into()));
+            }
+            let d0 = &disks[0];
+            let d1 = &disks[1];
+            // Disk 0: BIOS (opt) + ESP + Data
+            let mut parts0 = Vec::new();
+            if add_bios {
+                parts0.push(PartitionSpec {
+                    role: PartRole::BiosBoot,
+                    size_mib: Some(cfg.partitioning.bios_boot.size_mib),
+                    gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
+                });
+            }
+            parts0.push(PartitionSpec {
+                role: PartRole::Esp,
+                size_mib: Some(cfg.partitioning.esp.size_mib),
+                gpt_name: cfg.partitioning.esp.gpt_name.clone(),
+            });
+            parts0.push(PartitionSpec {
+                role: PartRole::Data,
+                size_mib: None,
+                gpt_name: cfg.partitioning.data.gpt_name.clone(),
+            });
+            plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
+            // Disk 1: Data only
+            let mut parts1 = Vec::new();
+            parts1.push(PartitionSpec {
+                role: PartRole::Data,
+                size_mib: None,
+                gpt_name: cfg.partitioning.data.gpt_name.clone(),
+            });
+            plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
+        }
         Topology::SsdHddBcachefs => {
             // Choose SSD (rotational=false) and HDD (rotational=true)
             let ssd = disks.iter().find(|d| !d.rotational)
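Sketched per-disk layouts produced by the new arms (partition sizes come from cfg.partitioning; device names and ordering are illustrative):

  btrfs-single / bcachefs-single: disk0 = [bios_boot (optional)] [esp] [data = remainder]
  bcachefs-2copy:                 disk0 = [bios_boot (optional)] [esp] [data = remainder]
                                  disk1 = [data = remainder]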


@@ -29,11 +29,15 @@ pub struct DeviceSelection {
 #[serde(rename_all = "snake_case")]
 pub enum Topology {
     /// Single eligible disk; btrfs on remainder.
-    Single,
+    BtrfsSingle,
+    /// Single eligible disk; bcachefs on remainder.
+    BcachefsSingle,
     /// Two eligible disks; independent btrfs on each data partition.
     DualIndependent,
     /// SSD + HDD; bcachefs with SSD cache/promote and HDD backing.
     SsdHddBcachefs,
+    /// Two-disk bcachefs layout using both data partitions (2 copies semantics).
+    Bcachefs2Copy,
     /// Optional mirrored btrfs across two disks when explicitly requested.
     BtrfsRaid1,
 }
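The doc comments imply a minimum eligible-disk count per topology; a purely illustrative helper (hypothetical name, mirroring the runtime checks the planner performs) could capture it as:

  // Illustrative only: minimum eligible disks implied by each variant's doc comment.
  fn min_disks(topology: &Topology) -> usize {
      match topology {
          Topology::BtrfsSingle | Topology::BcachefsSingle => 1,
          Topology::DualIndependent
          | Topology::SsdHddBcachefs
          | Topology::Bcachefs2Copy
          | Topology::BtrfsRaid1 => 2,
      }
  }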