// REGION: API
// api: orchestrator::Context { cfg: crate::config::types::Config, log: crate::logging::LogOptions }
// api: orchestrator::Context::new(cfg: crate::config::types::Config, log: crate::logging::LogOptions) -> Self
// api: orchestrator::run(ctx: &Context) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - High-level one-shot flow controller: idempotency check, device discovery,
//   partition planning and application, filesystem creation, mounting, reporting.
// - Enforces abort-on-first-error semantics across subsystems.
// Non-goals: direct device IO or shelling out; delegates to subsystem modules.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: pluggable DeviceProvider for discovery (mocking/testing).
// ext: dry-run mode (future) to emit planned actions without applying.
// ext: hooks before/after each phase for metrics or additional validation.
// REGION: EXTENSION_POINTS-END
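//
// Hypothetical sketch of the DeviceProvider extension point noted above
// (illustrative only; no such trait exists in the crate yet, and the name and
// signature below are assumptions):
//
//     pub trait DeviceProvider {
//         fn discover(&self, filter: &DeviceFilter) -> crate::Result<Vec<Disk>>;
//     }
//
// A mock implementation returning a fixed Vec<Disk> would let the orchestration
// flow be exercised in tests without touching real block devices.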
//
// REGION: SAFETY
// safety: must never proceed to filesystem creation if partition planning/apply failed.
// safety: must exit successfully without changes when idempotency detection reports the system as already provisioned.
// safety: must ensure reporting only on overall success (no partial-success report).
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: subsystem errors bubble up as crate::Error::* without stringly-typed loss.
// errmap: external tool failures surface as Error::Tool from the util layer.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement orchestration steps in phases with structured logs and timing.
// todo: add per-phase tracing spans and outcome summaries.
// REGION: TODO-END
//! High-level orchestration for zosstorage.
//!
//! Drives the one-shot provisioning flow:
//! - Idempotency detection
//! - Device discovery
//! - Partition planning and application
//! - Filesystem planning and creation
//! - Mount planning and application
//! - Report generation and write
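//!
//! A minimal usage sketch (illustrative only; how `Config` and `LogOptions` are
//! obtained is assumed and not shown, the crate name and report path are
//! assumptions):
//!
//! ```ignore
//! use zosstorage::orchestrator::{run, Context};
//!
//! // cfg: a validated Config, log: the LogOptions already in effect (assumptions).
//! let ctx = Context::new(cfg, log)
//!     .with_show(true)                                        // print the JSON summary to stdout
//!     .with_report_path(Some("/tmp/zosstorage.json".into()))  // hypothetical path
//!     .with_apply(false);                                     // preview only, nothing destructive
//! run(&ctx)?;
//! ```
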
use crate::{
    types::{Config, Topology},
    logging::LogOptions,
    device::{discover, DeviceFilter, Disk},
    idempotency,
    partition,
    report::StateReport,
    fs as zfs,
    Error, Result,
};
use humantime::format_rfc3339;
use regex::Regex;
use serde_json::{json, to_value};
use std::fs;
use std::time::SystemTime;
use tracing::{debug, info, warn};

/// Execution context holding resolved configuration and environment flags.
#[derive(Debug, Clone)]
pub struct Context {
    /// Validated configuration.
    pub cfg: Config,
    /// Logging options in effect.
    pub log: LogOptions,
    /// When true, print detection and planning summary to stdout (JSON).
    pub show: bool,
    /// When true, perform destructive actions (apply mode).
    pub apply: bool,
    /// When true, attempt to mount existing filesystems based on on-disk headers (non-destructive).
    pub mount_existing: bool,
    /// When true, emit a report of currently initialized filesystems and mounts (non-destructive).
    pub report_current: bool,
    /// Optional report path override (when provided by CLI --report).
    pub report_path_override: Option<String>,
    /// True when topology was provided via CLI (--topology), giving it precedence.
    pub topo_from_cli: bool,
    /// True when topology was provided via kernel cmdline, giving it precedence if CLI omitted it.
    pub topo_from_cmdline: bool,
}

impl Context {
    /// Construct a new context from config and logging options.
    pub fn new(cfg: Config, log: LogOptions) -> Self {
        Self {
            cfg,
            log,
            show: false,
            apply: false,
            mount_existing: false,
            report_current: false,
            report_path_override: None,
            topo_from_cli: false,
            topo_from_cmdline: false,
        }
    }

    /// Enable or disable preview JSON emission to stdout.
    ///
    /// When set to true (e.g. via `--show`), the orchestrator:
    /// - Prints a compact JSON summary to stdout
    /// - Skips empty-disk enforcement to allow non-destructive planning
    ///
    /// Returns the updated Context for builder-style chaining.
    pub fn with_show(mut self, show: bool) -> Self {
        self.show = show;
        self
    }

    /// Enable or disable apply mode (destructive).
    ///
    /// When set to true (e.g. via `--apply`), the orchestrator:
    /// - Enforces the empty-disk policy (unless disabled in config)
    /// - Applies the partition plan, then (future) mkfs, mounts, and report
    pub fn with_apply(mut self, apply: bool) -> Self {
        self.apply = apply;
        self
    }

    /// Override the report output path used by preview mode.
    ///
    /// When provided (e.g. via `--report /path/file.json`), the orchestrator:
    /// - Writes the same JSON summary to the given path
    /// - Continues to respect `--show` (stdout) when also set
    ///
    /// Returns the updated Context for builder-style chaining.
    pub fn with_report_path(mut self, path: Option<String>) -> Self {
        self.report_path_override = path;
        self
    }

    /// Enable or disable mount-existing mode (non-destructive).
    pub fn with_mount_existing(mut self, mount_existing: bool) -> Self {
        self.mount_existing = mount_existing;
        self
    }

    /// Enable or disable reporting of current state (non-destructive).
    pub fn with_report_current(mut self, report_current: bool) -> Self {
        self.report_current = report_current;
        self
    }

    /// Mark that topology was provided via CLI (--topology).
    pub fn with_topology_from_cli(mut self, v: bool) -> Self {
        self.topo_from_cli = v;
        self
    }

    /// Mark that topology was provided via kernel cmdline (zosstorage.topology=).
    pub fn with_topology_from_cmdline(mut self, v: bool) -> Self {
        self.topo_from_cmdline = v;
        self
    }
}

#[derive(Debug, Clone, Copy)]
enum ProvisioningMode {
    Apply,
    Preview,
}

#[derive(Debug, Clone, Copy)]
enum AutoDecision {
    Apply,
    MountExisting,
}

#[derive(Debug)]
struct AutoSelection {
    decision: AutoDecision,
    fs_results: Option<Vec<zfs::FsResult>>,
    state: Option<StateReport>,
}

#[derive(Debug, Clone, Copy)]
enum ExecutionMode {
    ReportCurrent,
    MountExisting,
    Apply,
    Preview,
    Auto,
}

/// Run the one-shot provisioning flow.
///
/// Returns Ok(()) on success, and also as a success no-op when the system is already provisioned.
/// Any validation or execution failure aborts with an error.
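///
/// Mode selection precedence (mirroring the dispatch below):
/// --report-current, then --mount-existing, then --apply, then preview
/// (--show/--report without --apply), and finally automatic detection.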
pub fn run(ctx: &Context) -> Result<()> {
    info!("orchestrator: starting run()");

    let selected_modes =
        (ctx.mount_existing as u8) +
        (ctx.report_current as u8) +
        (ctx.apply as u8);
    if selected_modes > 1 {
        return Err(Error::Validation(
            "choose only one mode: --mount-existing | --report-current | --apply".into(),
        ));
    }

    let preview_requested = ctx.show || ctx.report_path_override.is_some();

    let initial_mode = if ctx.report_current {
        ExecutionMode::ReportCurrent
    } else if ctx.mount_existing {
        ExecutionMode::MountExisting
    } else if ctx.apply {
        ExecutionMode::Apply
    } else if preview_requested {
        ExecutionMode::Preview
    } else {
        ExecutionMode::Auto
    };

    match initial_mode {
        ExecutionMode::ReportCurrent => run_report_current(ctx),
        ExecutionMode::MountExisting => run_mount_existing(ctx, None, None),
        ExecutionMode::Apply => run_provisioning(ctx, ProvisioningMode::Apply, None),
        ExecutionMode::Preview => run_provisioning(ctx, ProvisioningMode::Preview, None),
        ExecutionMode::Auto => {
            let selection = auto_select_mode(ctx)?;
            match selection.decision {
                AutoDecision::MountExisting => {
                    run_mount_existing(ctx, selection.fs_results, selection.state)
                }
                AutoDecision::Apply => {
                    run_provisioning(ctx, ProvisioningMode::Apply, selection.state)
                }
            }
        }
    }
}

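/// Decide between apply and mount-existing when no explicit mode flag was given:
/// a previously recorded provisioning state, or any filesystem carrying the
/// reserved labels, selects mount-existing; otherwise fresh provisioning (apply)
/// is selected.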
fn auto_select_mode(ctx: &Context) -> Result<AutoSelection> {
    info!("orchestrator: auto-selecting execution mode");
    let state = idempotency::detect_existing_state()?;
    let fs_results = zfs::probe_existing_filesystems()?;

    if let Some(state) = state {
        info!("orchestrator: provisioned state detected; attempting mount-existing flow");
        return Ok(AutoSelection {
            decision: AutoDecision::MountExisting,
            fs_results: if fs_results.is_empty() { None } else { Some(fs_results) },
            state: Some(state),
        });
    }

    if !fs_results.is_empty() {
        info!(
            "orchestrator: detected {} filesystem(s) with reserved labels; selecting mount-existing",
            fs_results.len()
        );
        return Ok(AutoSelection {
            decision: AutoDecision::MountExisting,
            fs_results: Some(fs_results),
            state: None,
        });
    }

    info!(
        "orchestrator: no provisioned state or labeled filesystems detected; selecting apply mode (topology={:?})",
        ctx.cfg.topology
    );

    Ok(AutoSelection {
        decision: AutoDecision::Apply,
        fs_results: None,
        state: None,
    })
}

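/// Observe-only mode: probe filesystems carrying the reserved labels, scan
/// /proc/mounts for the known zosstorage targets, print a JSON summary, and
/// optionally write it to the report path override.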
fn run_report_current(ctx: &Context) -> Result<()> {
    info!("orchestrator: report-current mode");
    let fs_results = zfs::probe_existing_filesystems()?;

    let mounts_content = fs::read_to_string("/proc/mounts").unwrap_or_default();
    let mounts_json: Vec<serde_json::Value> = mounts_content
        .lines()
        .filter_map(|line| {
            let mut it = line.split_whitespace();
            let source = it.next()?;
            let target = it.next()?;
            let fstype = it.next()?;
            let options = it.next().unwrap_or("");
            if target.starts_with("/var/mounts/")
                || target == "/var/cache/system"
                || target == "/var/cache/etc"
                || target == "/var/cache/modules"
                || target == "/var/cache/vm-meta"
            {
                Some(json!({
                    "source": source,
                    "target": target,
                    "fstype": fstype,
                    "options": options
                }))
            } else {
                None
            }
        })
        .collect();

    let fs_json: Vec<serde_json::Value> = fs_results
        .iter()
        .map(|r| {
            let kind_str = match r.kind {
                zfs::FsKind::Vfat => "vfat",
                zfs::FsKind::Btrfs => "btrfs",
                zfs::FsKind::Bcachefs => "bcachefs",
            };
            json!({
                "kind": kind_str,
                "uuid": r.uuid,
                "label": r.label,
                "devices": r.devices
            })
        })
        .collect();

    let now = format_rfc3339(SystemTime::now()).to_string();
    let summary = json!({
        "version": "v1",
        "timestamp": now,
        "status": "observed",
        "filesystems": fs_json,
        "mounts": mounts_json
    });

    println!("{}", summary);
    if let Some(path) = &ctx.report_path_override {
        fs::write(path, summary.to_string()).map_err(|e| {
            Error::Report(format!("failed to write report to {}: {}", path, e))
        })?;
        info!("orchestrator: wrote report-current to {}", path);
    }
    Ok(())
}

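/// Mount filesystems that already exist on disk (non-destructive). Uses the
/// probe results passed in by the caller when available, otherwise re-probes;
/// fails if no filesystem with the reserved labels is found. Optionally emits
/// a "mounted_existing" JSON report, enriched with the detected state when known.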
fn run_mount_existing(
    ctx: &Context,
    fs_results_override: Option<Vec<zfs::FsResult>>,
    state_hint: Option<StateReport>,
) -> Result<()> {
    info!("orchestrator: mount-existing mode");
    let fs_results = match fs_results_override {
        Some(results) => results,
        None => zfs::probe_existing_filesystems()?,
    };
    if fs_results.is_empty() {
        return Err(Error::Mount(
            "no existing filesystems with reserved labels (ZOSBOOT/ZOSDATA) were found".into(),
        ));
    }

    let mplan = crate::mount::plan_mounts(&fs_results, &ctx.cfg)?;
    let mres = crate::mount::apply_mounts(&mplan)?;
    crate::mount::maybe_write_fstab(&mres, &ctx.cfg)?;

    if ctx.show || ctx.report_path_override.is_some() || ctx.report_current {
        let now = format_rfc3339(SystemTime::now()).to_string();
        let fs_json: Vec<serde_json::Value> = fs_results
            .iter()
            .map(|r| {
                let kind_str = match r.kind {
                    zfs::FsKind::Vfat => "vfat",
                    zfs::FsKind::Btrfs => "btrfs",
                    zfs::FsKind::Bcachefs => "bcachefs",
                };
                json!({
                    "kind": kind_str,
                    "uuid": r.uuid,
                    "label": r.label,
                    "devices": r.devices,
                })
            })
            .collect();

        let mounts_json: Vec<serde_json::Value> = mres
            .iter()
            .map(|m| {
                json!({
                    "source": m.source,
                    "target": m.target,
                    "fstype": m.fstype,
                    "options": m.options,
                })
            })
            .collect();

        let mut summary = json!({
            "version": "v1",
            "timestamp": now,
            "status": "mounted_existing",
            "filesystems": fs_json,
            "mounts": mounts_json,
        });

        if let Some(state) = state_hint {
            if let Ok(state_json) = to_value(&state) {
                if let Some(obj) = summary.as_object_mut() {
                    obj.insert("state".to_string(), state_json);
                }
            }
        }

        if ctx.show || ctx.report_current {
            println!("{}", summary);
        }
        if let Some(path) = &ctx.report_path_override {
            fs::write(path, summary.to_string()).map_err(|e| {
                Error::Report(format!("failed to write report to {}: {}", path, e))
            })?;
            info!("orchestrator: wrote mount-existing report to {}", path);
        }
    }

    Ok(())
}

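/// Full provisioning flow (preview or apply). Re-checks for an existing
/// provisioned state (delegating to mount-existing if found), discovers
/// eligible disks, enforces the empty-disk policy in apply mode, auto-selects
/// a topology when none was forced via CLI/kernel cmdline (1 disk ->
/// Topology::BtrfsSingle, 2 disks -> Topology::DualIndependent, otherwise
/// Topology::BtrfsRaid1), and plans partitions. In apply mode it then creates
/// partitions, filesystems, and mounts; in preview mode it only emits the
/// planning summary when --show/--report was requested.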
fn run_provisioning(
    ctx: &Context,
    mode: ProvisioningMode,
    state_hint: Option<StateReport>,
) -> Result<()> {
    let preview_outputs = ctx.show || ctx.report_path_override.is_some();

    let mut state_opt = state_hint;
    if state_opt.is_none() {
        state_opt = idempotency::detect_existing_state()?;
    }

    if let Some(state) = state_opt {
        info!("orchestrator: already provisioned; ensuring mounts are active");
        return run_mount_existing(ctx, None, Some(state));
    }

    let filter = build_device_filter(&ctx.cfg)?;
    let disks = discover(&filter)?;
    info!("orchestrator: discovered {} eligible disk(s)", disks.len());

    if ctx.cfg.partitioning.require_empty_disks {
        if matches!(mode, ProvisioningMode::Apply) {
            enforce_empty_disks(&disks)?;
            info!("orchestrator: all target disks verified empty");
        } else {
            warn!("orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement");
        }
    } else if matches!(mode, ProvisioningMode::Apply) {
        warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement");
    }

    let effective_cfg = {
        let mut c = ctx.cfg.clone();
        if !(ctx.topo_from_cli || ctx.topo_from_cmdline) {
            let auto_topo = if disks.len() == 1 {
                Topology::BtrfsSingle
            } else if disks.len() == 2 {
                Topology::DualIndependent
            } else {
                Topology::BtrfsRaid1
            };
            if c.topology != auto_topo {
                info!("orchestrator: topology auto-selected {:?}", auto_topo);
                c.topology = auto_topo;
            } else {
                info!("orchestrator: using configured topology {:?}", c.topology);
            }
        } else {
            info!("orchestrator: using overridden topology {:?}", c.topology);
        }
        c
    };

    let plan = partition::plan_partitions(&disks, &effective_cfg)?;
    debug!(
        "orchestrator: partition plan ready (alignment={} MiB, disks={})",
        plan.alignment_mib,
        plan.disks.len()
    );
    for dp in &plan.disks {
        debug!("plan for {}: {} part(s)", dp.disk.path, dp.parts.len());
    }

    if matches!(mode, ProvisioningMode::Apply) {
        info!("orchestrator: apply mode enabled; applying partition plan");
        let part_results = partition::apply_partitions(&plan)?;
        info!(
            "orchestrator: applied partitions on {} disk(s), total parts created: {}",
            plan.disks.len(),
            part_results.len()
        );

        let fs_plan = zfs::plan_filesystems(&part_results, &effective_cfg)?;
        info!(
            "orchestrator: filesystem plan contains {} spec(s)",
            fs_plan.specs.len()
        );
        let fs_results = zfs::make_filesystems(&fs_plan, &effective_cfg)?;
        info!("orchestrator: created {} filesystem(s)", fs_results.len());

        let mplan = crate::mount::plan_mounts(&fs_results, &effective_cfg)?;
        let mres = crate::mount::apply_mounts(&mplan)?;
        crate::mount::maybe_write_fstab(&mres, &effective_cfg)?;
        return Ok(());
    }

    info!("orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)");

    if preview_outputs {
        let summary = build_summary_json(&disks, &plan, &effective_cfg)?;
        if ctx.show {
            println!("{}", summary);
        }
        if let Some(path) = &ctx.report_path_override {
            fs::write(path, summary.to_string()).map_err(|e| {
                Error::Report(format!("failed to write report to {}: {}", path, e))
            })?;
            info!("orchestrator: wrote summary report to {}", path);
        }
    }

    Ok(())
}

/// Build a DeviceFilter from the runtime configuration.
///
/// Compiles include/exclude regex patterns and carries the minimum-size threshold
/// as well as the removable-device policy (allow_removable).
///
/// Errors:
/// - Returns Error::Validation when a regex pattern is invalid.
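///
/// Illustrative sketch (the patterns below are examples only, not shipped defaults):
///
/// ```ignore
/// // e.g. device_selection.include_patterns = ["^/dev/nvme\\d+n\\d+$"]
/// //      device_selection.exclude_patterns = ["^/dev/zram\\d+$"]
/// let filter = build_device_filter(&cfg)?;
/// assert_eq!(filter.include.len(), 1);
/// assert_eq!(filter.exclude.len(), 1);
/// ```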
fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
    let mut include = Vec::new();
    let mut exclude = Vec::new();

    for pat in &cfg.device_selection.include_patterns {
        let re = Regex::new(pat).map_err(|e| {
            Error::Validation(format!("invalid include regex '{}': {}", pat, e))
        })?;
        include.push(re);
    }
    for pat in &cfg.device_selection.exclude_patterns {
        let re = Regex::new(pat).map_err(|e| {
            Error::Validation(format!("invalid exclude regex '{}': {}", pat, e))
        })?;
        exclude.push(re);
    }

    Ok(DeviceFilter {
        include,
        exclude,
        min_size_gib: cfg.device_selection.min_size_gib,
        allow_removable: cfg.device_selection.allow_removable,
    })
}

/// Enforce the empty-disk policy for all discovered target disks.
///
/// For each disk:
/// - Uses idempotency::is_empty_disk() to verify no partitions or FS signatures exist
/// - Returns Error::Validation on the first non-empty disk encountered
///
/// This function MUST NOT be called when running in preview mode, as the orchestrator
/// skips emptiness enforcement to allow planning on live systems.
fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
    for d in disks {
        let empty = idempotency::is_empty_disk(d)?;
        if !empty {
            return Err(Error::Validation(format!(
                "target disk {} is not empty (partitions or signatures present)",
                d.path
            )));
        }
    }
    Ok(())
}

#[inline]
fn role_str(role: partition::PartRole) -> &'static str {
    match role {
        partition::PartRole::BiosBoot => "bios_boot",
        partition::PartRole::Esp => "esp",
        partition::PartRole::Data => "data",
        partition::PartRole::Cache => "cache",
    }
}

/// Build the preview JSON payload for `--show` / `--report`.
///
/// Includes:
/// - version, timestamp, status="planned"
/// - topology string, alignment and empty-disk policy flag
/// - discovered disks (path, size_bytes, rotational, model, serial)
/// - partition_plan per disk (role, size_mib or null for remainder, gpt_name)
/// - filesystems_planned: inferred FS kinds per topology and labels
/// - mount: scheme summary and target template (e.g., "/var/cache/{UUID}")
///
/// This function is non-destructive and performs no probing beyond the provided inputs.
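///
/// Abbreviated shape of the emitted payload (values are illustrative only):
///
/// ```text
/// {
///   "version": "v1",
///   "timestamp": "2024-01-01T00:00:00Z",
///   "status": "planned",
///   "topology": "…",
///   "alignment_mib": 1,
///   "require_empty_disks": true,
///   "disks": [ { "path": "/dev/sda", "size_bytes": 512110190592, … } ],
///   "partition_plan": [ { "disk": "/dev/sda", "parts": [ … ] } ],
///   "filesystems_planned": [ … ],
///   "mount": { "scheme": "per_uuid", … }
/// }
/// ```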
fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result<serde_json::Value> {
    // Disks summary
    let disks_json: Vec<serde_json::Value> = disks
        .iter()
        .map(|d| {
            json!({
                "path": d.path,
                "size_bytes": d.size_bytes,
                "rotational": d.rotational,
                "model": d.model,
                "serial": d.serial,
            })
        })
        .collect();

    // Partition plan summary (spec-level)
    let mut plan_json: Vec<serde_json::Value> = Vec::new();
    for dp in &plan.disks {
        let parts: Vec<serde_json::Value> = dp
            .parts
            .iter()
            .map(|p| {
                json!({
                    "role": role_str(p.role),
                    "size_mib": p.size_mib, // null means "remainder"
                    "gpt_name": p.gpt_name,
                })
            })
            .collect();
        plan_json.push(json!({
            "disk": dp.disk.path,
            "parts": parts
        }));
    }

    // Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology
    let topo_str = cfg.topology.to_string();

    // Count roles across plan to infer filesystems
    let mut esp_count = 0usize;
    let mut data_count = 0usize;
    let mut cache_count = 0usize;
    for dp in &plan.disks {
        for p in &dp.parts {
            match p.role {
                partition::PartRole::Esp => esp_count += 1,
                partition::PartRole::Data => data_count += 1,
                partition::PartRole::Cache => cache_count += 1,
                partition::PartRole::BiosBoot => {}
            }
        }
    }

    let mut filesystems_planned: Vec<serde_json::Value> = Vec::new();
    // ESP -> vfat (typically mounted by bootloader; no runtime target here)
    if esp_count > 0 {
        filesystems_planned.push(json!({
            "kind": "vfat",
            "from_roles": ["esp"],
            "label": cfg.filesystem.vfat.label,
            "planned_mountpoint": null
        }));
    }

    // Data/cache-driven FS + mount targets. Mount scheme is per-UUID under base_dir.
    let target_template = format!("{}/{{UUID}}", cfg.mount.base_dir);
    match cfg.topology {
        crate::types::Topology::SsdHddBcachefs => {
            if cache_count > 0 && data_count > 0 {
                filesystems_planned.push(json!({
                    "kind": "bcachefs",
                    "from_roles": ["cache", "data"],
                    "label": cfg.filesystem.bcachefs.label,
                    "planned_mountpoint_template": target_template,
                }));
            }
        }
        crate::types::Topology::BtrfsRaid1 => {
            // One multi-device btrfs across all data partitions
            if data_count >= 2 {
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "devices_planned": data_count,
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                }));
            } else if data_count == 1 {
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                    "note": "only one data partition present; raid1 requires >= 2",
                }));
            }
        }
        _ => {
            // One btrfs per data partition
            for _ in 0..data_count {
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                }));
            }
        }
    }

    let mount_scheme = json!({
        "scheme": "per_uuid",
        "base_dir": cfg.mount.base_dir,
        "fstab_enabled": cfg.mount.fstab_enabled,
        "target_template": target_template,
    });

    let now = format_rfc3339(SystemTime::now()).to_string();
    let summary = json!({
        "version": "v1",
        "timestamp": now,
        "status": "planned",
        "topology": topo_str,
        "alignment_mib": plan.alignment_mib,
        "require_empty_disks": plan.require_empty_disks,
        "disks": disks_json,
        "partition_plan": plan_json,
        "filesystems_planned": filesystems_planned,
        "mount": mount_scheme
    });

    Ok(summary)
}