style: use Rust doc comments (///) instead of C-style /** */; clarify mount API header
@@ -95,15 +95,13 @@ pub struct FsResult {
     pub label: String,
 }

-/**
-Determine which partitions get which filesystem based on topology.
-
-Rules:
-- ESP partitions => Vfat with label from cfg.filesystem.vfat.label (reserved "ZOSBOOT")
-- Data partitions => Btrfs with label cfg.filesystem.btrfs.label ("ZOSDATA"), unless topology SsdHddBcachefs
-- SsdHddBcachefs => pair one Cache partition (SSD) with one Data partition (HDD) into one Bcachefs FsSpec with devices [cache, data] and label cfg.filesystem.bcachefs.label ("ZOSDATA")
-- DualIndependent/BtrfsRaid1 => map each Data partition to its own Btrfs FsSpec (raid profile concerns are handled later during mkfs)
-*/
+/// Determine which partitions get which filesystem based on topology.
+///
+/// Rules:
+/// - ESP partitions => Vfat with label from cfg.filesystem.vfat.label (reserved "ZOSBOOT")
+/// - Data partitions => Btrfs with label cfg.filesystem.btrfs.label ("ZOSDATA"), unless topology SsdHddBcachefs
+/// - SsdHddBcachefs => pair one Cache partition (SSD) with one Data partition (HDD) into one Bcachefs FsSpec with devices [cache, data] and label cfg.filesystem.bcachefs.label ("ZOSDATA")
+/// - DualIndependent/BtrfsRaid1 => map each Data partition to its own Btrfs FsSpec (raid profile concerns are handled later during mkfs)
 pub fn plan_filesystems(
     parts: &[PartitionResult],
     cfg: &Config,
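To make the rule set above concrete, here is a minimal stand-in sketch; PartRole, FsKind, and FsSpec are simplified placeholders, not the crate's actual definitions, and the SsdHddBcachefs Cache+Data pairing is elided:

// Simplified stand-ins for illustration; not the crate's real types.
enum PartRole { Esp, Data, Cache }

enum FsKind { Vfat, Btrfs }

struct FsSpec {
    kind: FsKind,
    devices: Vec<String>,
    label: String,
}

// Apply the documented defaults: ESP => Vfat/"ZOSBOOT", Data => Btrfs/"ZOSDATA".
// The SsdHddBcachefs pairing (one Cache + one Data into a single bcachefs spec)
// is handled separately in the real planner and skipped here.
fn plan_filesystems_sketch(parts: &[(PartRole, String)]) -> Vec<FsSpec> {
    parts
        .iter()
        .filter_map(|(role, dev)| match role {
            PartRole::Esp => Some(FsSpec {
                kind: FsKind::Vfat,
                devices: vec![dev.clone()],
                label: "ZOSBOOT".to_string(),
            }),
            PartRole::Data => Some(FsSpec {
                kind: FsKind::Btrfs,
                devices: vec![dev.clone()],
                label: "ZOSDATA".to_string(),
            }),
            PartRole::Cache => None, // consumed by the bcachefs pairing
        })
        .collect()
}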
@@ -1,5 +1,6 @@
 // REGION: API
-// api: mount::MountPlan { entries: Vec<(String /* source */, String /* target */, String /* fstype */, String /* options */)> }
+// api: mount::MountPlan { entries: Vec<(String, String, String, String)> }
+// note: tuple order = (source, target, fstype, options)
 // api: mount::MountResult { source: String, target: String, fstype: String, options: String }
 // api: mount::plan_mounts(fs_results: &[crate::fs::FsResult], cfg: &crate::config::types::Config) -> crate::Result<MountPlan>
 // api: mount::apply_mounts(plan: &MountPlan) -> crate::Result<Vec<MountResult>>
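As a quick illustration of the tuple order the new note pins down (all values here are made up):

fn main() {
    // (source, target, fstype, options) — the order documented above.
    let entry: (String, String, String, String) = (
        "/dev/sda2".to_string(),       // source (illustrative)
        "/var/cache/data".to_string(), // target (illustrative)
        "btrfs".to_string(),           // fstype
        "rw,noatime".to_string(),      // options (illustrative)
    );
    println!("{} -> {} [{}] ({})", entry.0, entry.1, entry.2, entry.3);
}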
@@ -81,13 +81,25 @@ impl Context {
         }
     }

-    /// Builder: enable showing summary to stdout.
+    /// Enable or disable preview JSON emission to stdout.
+    ///
+    /// When set to true (e.g. via `--show`), orchestrator:
+    /// - Prints a compact JSON summary to stdout
+    /// - Skips empty-disk enforcement to allow non-destructive planning
+    ///
+    /// Returns the updated Context for builder-style chaining.
     pub fn with_show(mut self, show: bool) -> Self {
         self.show = show;
         self
     }

-    /// Builder: override report path.
+    /// Override the report output path used by preview mode.
+    ///
+    /// When provided (e.g. via `--report /path/file.json`), orchestrator:
+    /// - Writes the same JSON summary to the given path
+    /// - Continues to respect `--show` (stdout) when also set
+    ///
+    /// Returns the updated Context for builder-style chaining.
     pub fn with_report_path(mut self, path: Option<String>) -> Self {
         self.report_path_override = path;
         self
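A minimal mirror of the builder chaining these doc comments describe; the Context here is reduced to the two fields touched in this hunk and is not the crate's full type:

#[derive(Default)]
struct Context {
    show: bool,
    report_path_override: Option<String>,
}

impl Context {
    fn with_show(mut self, show: bool) -> Self {
        self.show = show;
        self
    }
    fn with_report_path(mut self, path: Option<String>) -> Self {
        self.report_path_override = path;
        self
    }
}

fn main() {
    // Roughly what `--show --report /run/report.json` would set up.
    let ctx = Context::default()
        .with_show(true)
        .with_report_path(Some("/run/report.json".to_string()));
    assert!(ctx.show);
    assert_eq!(ctx.report_path_override.as_deref(), Some("/run/report.json"));
}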
@@ -185,6 +197,13 @@ pub fn run(ctx: &Context) -> Result<()> {
     Ok(())
 }

+/// Build a DeviceFilter from the runtime configuration.
+///
+/// Compiles include/exclude regex patterns and carries the minimum-size threshold
+/// as well as the removable-device policy (allow_removable).
+///
+/// Errors:
+/// - Returns Error::Validation when a regex pattern is invalid.
 fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
     let mut include = Vec::new();
     let mut exclude = Vec::new();
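The compile step the Errors section refers to could look like this sketch, assuming the regex crate; the Error::Validation variant is a stand-in for the crate's own error type:

use regex::Regex;

// Stand-in for the crate's error type; only the documented variant is shown.
#[derive(Debug)]
enum Error {
    Validation(String),
}

fn compile_patterns(patterns: &[String]) -> Result<Vec<Regex>, Error> {
    patterns
        .iter()
        .map(|p| {
            Regex::new(p).map_err(|e| Error::Validation(format!("invalid pattern {p:?}: {e}")))
        })
        .collect()
}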
@@ -210,6 +229,14 @@ fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
     })
 }

+/// Enforce empty-disk policy for all discovered target disks.
+///
+/// For each disk:
+/// - Uses idempotency::is_empty_disk() to verify no partitions or FS signatures exist
+/// - Returns Error::Validation on the first non-empty disk encountered
+///
+/// This function MUST NOT be called when running in preview mode, as orchestrator
+/// skips emptiness enforcement to allow planning on live systems.
 fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
     for d in disks {
         let empty = idempotency::is_empty_disk(d)?;
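The fail-fast loop the comment describes, sketched with placeholders (Disk and is_empty stand in for the real Disk type and idempotency::is_empty_disk):

struct Disk {
    path: String,
}

// Placeholder for idempotency::is_empty_disk(); always reports empty here.
fn is_empty(_d: &Disk) -> Result<bool, String> {
    Ok(true)
}

fn enforce_empty_disks_sketch(disks: &[Disk]) -> Result<(), String> {
    for d in disks {
        if !is_empty(d)? {
            // Abort on the first non-empty disk, per the doc comment.
            return Err(format!("disk {} is not empty", d.path));
        }
    }
    Ok(())
}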
@@ -223,6 +250,7 @@ fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
     Ok(())
 }

+#[inline]
 fn role_str(role: partition::PartRole) -> &'static str {
     match role {
         partition::PartRole::BiosBoot => "bios_boot",
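Only the bios_boot arm is visible in this hunk; a plausible completion of the mapping, with the remaining role strings inferred from the partition-plan docs rather than taken from the diff:

enum PartRole { BiosBoot, Esp, Data, Cache } // stand-in enum

#[inline]
fn role_str(role: PartRole) -> &'static str {
    match role {
        PartRole::BiosBoot => "bios_boot", // the arm shown in the hunk
        PartRole::Esp => "esp",            // remaining arms are guesses
        PartRole::Data => "data",
        PartRole::Cache => "cache",
    }
}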
@@ -232,6 +260,17 @@ fn role_str(role: partition::PartRole) -> &'static str {
     }
 }

+/// Build the preview JSON payload for `--show` / `--report`.
+///
+/// Includes:
+/// - version, timestamp, status="planned"
+/// - topology string, alignment and empty-disk policy flag
+/// - discovered disks (path, size_bytes, rotational, model, serial)
+/// - partition_plan per disk (role, size_mib or null for remainder, gpt_name)
+/// - filesystems_planned: inferred FS kinds per topology and labels
+/// - mount: scheme summary and target template (e.g., "/var/cache/{UUID}")
+///
+/// This function is non-destructive and performs no probing beyond the provided inputs.
 fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result<serde_json::Value> {
     // Disks summary
     let disks_json: Vec<serde_json::Value> = disks
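A sketch of the payload shape using serde_json's json! macro; the keys follow the doc comment above, and all values are illustrative:

use serde_json::json;

fn main() {
    let summary = json!({
        "version": "1",                      // illustrative
        "timestamp": "1970-01-01T00:00:00Z", // illustrative
        "status": "planned",
        "topology": "ssd_hdd_bcachefs",      // illustrative topology string
        "disks": [{
            "path": "/dev/nvme0n1",
            "size_bytes": 512_000_000_000u64,
            "rotational": false,
            "model": "example",
            "serial": "example"
        }],
        "mount": { "target_template": "/var/cache/{UUID}" }
    });
    println!("{}", serde_json::to_string_pretty(&summary).unwrap());
}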
@@ -109,22 +109,20 @@ pub struct PartitionResult {
     pub device_path: String,
 }

-/**
-Compute GPT-only plan per topology and constraints.
-
-Layout defaults:
-- BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
-- ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
-- Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
-- Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
-
-Topology mapping:
-- Single: use first eligible disk; create BIOS (opt) + ESP + Data
-- DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
-- BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
-- SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
-SSD: BIOS (opt) + ESP + Cache; HDD: Data
-*/
+/// Compute GPT-only plan per topology and constraints.
+///
+/// Layout defaults:
+/// - BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
+/// - ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
+/// - Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
+/// - Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
+///
+/// Topology mapping:
+/// - Single: use first eligible disk; create BIOS (opt) + ESP + Data
+/// - DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
+/// - BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
+/// - SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
+/// SSD: BIOS (opt) + ESP + Cache; HDD: Data
 pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
     let align = cfg.partitioning.alignment_mib;
     let require_empty = cfg.partitioning.require_empty_disks;
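For the SsdHddBcachefs arm, disk selection reduces to the rotational flag; a stand-in sketch (Disk is a placeholder for the real type):

struct Disk {
    path: String,
    rotational: bool,
}

// Need >=1 SSD (rotational=false) and >=1 HDD (rotational=true), per the doc comment.
fn pick_ssd_hdd(disks: &[Disk]) -> Option<(&Disk, &Disk)> {
    let ssd = disks.iter().find(|d| !d.rotational)?;
    let hdd = disks.iter().find(|d| d.rotational)?;
    Some((ssd, hdd))
}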