forked from tfgrid/zosbuilder
Replace inline boot testing with standalone runit.sh runner for clarity:

- Remove scripts/lib/testing.sh source and boot_tests stage from build.sh
- Remove --skip-tests option from build.sh and rebuild-after-zinit.sh
- Update all docs to reference runit.sh for QEMU/cloud-hypervisor testing
- Add comprehensive claude.md as AI assistant entry point with guidelines

Testing is now fully decoupled from the build pipeline; use ./runit.sh for QEMU/cloud-hypervisor validation after builds complete.
#!/bin/bash
# Rebuild the initramfs (and, optionally, the kernel) after zinit config or init scripts change.
#
# Usage:
#   scripts/rebuild-after-zinit.sh                           # minimal rebuild of the initramfs only (no kernel rebuild)
#   scripts/rebuild-after-zinit.sh --run-tests               # parsed but no longer used; boot testing now runs separately via ./runit.sh
#   scripts/rebuild-after-zinit.sh --with-kernel             # also rebuild the kernel (re-embed the updated initramfs)
#   scripts/rebuild-after-zinit.sh --refresh-container-mods  # rebuild container /lib/modules if missing (kernel_modules stage)
#   scripts/rebuild-after-zinit.sh --verify-only             # only report detected changes, do not rebuild
#   scripts/rebuild-after-zinit.sh -- ...                    # pass extra args through to build.sh

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
STAGES_DIR="${PROJECT_ROOT}/.build-stages"

log() { echo "[rebuild-zinit] $*"; }

run_tests=0  # parsed for compatibility; boot tests now run separately via ./runit.sh
extra_args=()
rebuild_kernel=0
refresh_container_mods=0
verify_only=0

# Parse flags; pass through any remaining args to build.sh after --
while [[ $# -gt 0 ]]; do
  case "$1" in
    --run-tests)
      run_tests=1
      shift
      ;;
    --with-kernel)
      rebuild_kernel=1
      shift
      ;;
    --refresh-container-mods)
      refresh_container_mods=1
      shift
      ;;
    --verify-only)
      verify_only=1
      shift
      ;;
    --)
      shift
      extra_args=("$@")
      break
      ;;
    *)
      # Pass unknown args through to build.sh
      extra_args+=("$1")
      shift
      ;;
  esac
done
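
# Passthrough illustration (the build.sh flag below is hypothetical, shown only as an example):
#   scripts/rebuild-after-zinit.sh --with-kernel -- --some-build-flag
# Everything after "--" (and any flag not recognized above) lands in extra_args
# and is handed to build.sh unchanged.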

log "PROJECT_ROOT=${PROJECT_ROOT}"
log "STAGES_DIR=${STAGES_DIR}"

# Show current stage status before any changes (host-safe; does not require container)
log "Stage status (before):"
("${PROJECT_ROOT}/scripts/build.sh" --show-stages) || true

# ------------------------------------------------------------
# Container detection helper
# ------------------------------------------------------------
in_container() {
  [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]] || grep -q 'container' /proc/1/cgroup 2>/dev/null
}

# ------------------------------------------------------------
# Change detection (verify what changed since last archive build)
# ------------------------------------------------------------
marker_init="${STAGES_DIR}/initramfs_create.done"
marker_time=0
if [[ -f "$marker_init" ]]; then
  marker_time=$(stat -c %Y "$marker_init" 2>/dev/null || echo 0)
fi
log "Detecting changes since last initramfs_create marker: ${marker_init:-<none>}"

check_dir_changed() {
  local path="$1"
  local cutoff="$2"
  local count
  count=$(find "$path" -type f -printf '%T@ %p\n' 2>/dev/null | awk -v c="$cutoff" '$1 > c {n++} END {print n+0}')
  echo "${count:-0}"
}

list_some_changes() {
  local path="$1"
  local cutoff="$2"
  # list up to 5 example files
  find "$path" -type f -printf '%T@ %p\n' 2>/dev/null | awk -v c="$cutoff" '$1 > c {print $2}' | head -n 5
}
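
# Illustration (hypothetical output): find prints "<mtime-epoch> <path>" lines such as
#   1718000000.1234 config/zinit/network.yaml
# and awk keeps only entries whose mtime is newer than the marker's cutoff,
# reporting either a count (check_dir_changed) or up to 5 paths (list_some_changes).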

zinit_dir="${PROJECT_ROOT}/config/zinit"
init_file="${PROJECT_ROOT}/config/init"
modules_conf="${PROJECT_ROOT}/config/modules.conf"

zinit_changed=0
init_changed=0
modules_changed=0

if [[ -d "$zinit_dir" ]]; then
  zinit_changed=$(check_dir_changed "$zinit_dir" "$marker_time")
fi
if [[ -f "$init_file" ]]; then
  if [[ $(stat -c %Y "$init_file" 2>/dev/null || echo 0) -gt $marker_time ]]; then init_changed=1; fi
fi
if [[ -f "$modules_conf" ]]; then
  if [[ $(stat -c %Y "$modules_conf" 2>/dev/null || echo 0) -gt $marker_time ]]; then modules_changed=1; fi
fi

log "Changes since last archive:"
|
|
log " - config/zinit: ${zinit_changed} file(s) changed"
|
|
if [[ "$zinit_changed" -gt 0 ]]; then
|
|
list_some_changes "$zinit_dir" "$marker_time" | sed 's/^/ * /' || true
|
|
fi
|
|
log " - config/init: $([[ $init_changed -eq 1 ]] && echo changed || echo unchanged)"
|
|
log " - config/modules.conf: $([[ $modules_changed -eq 1 ]] && echo changed || echo unchanged)"
|
|
|
|
if [[ "$verify_only" -eq 1 ]]; then
|
|
log "verify-only set; exiting without rebuild"
|
|
exit 0
|
|
fi
|
|
|
|
# ------------------------------------------------------------
# Container /lib/modules/<FULL_VERSION> presence diagnostics
# (we never clear kernel_modules unless --refresh-container-mods is given)
# ------------------------------------------------------------
compute_full_kver() {
  # Read from configs without sourcing (safe in any shell)
  local build_conf="${PROJECT_ROOT}/config/build.conf"
  local kcfg="${PROJECT_ROOT}/config/kernel.config"
  local base_ver=""
  local localver=""
  if [[ -f "$build_conf" ]]; then
    base_ver="$(grep -E '^KERNEL_VERSION=' "$build_conf" | head -1 | cut -d= -f2 | tr -d '"')"
  fi
  if [[ -f "$kcfg" ]]; then
    localver="$(grep -E '^CONFIG_LOCALVERSION=' "$kcfg" | head -1 | cut -d'"' -f2)"
  fi
  echo "${base_ver}${localver}"
}

modules_dir_for_full() {
  local full="$1"
  echo "/lib/modules/${full}"
}
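
# Worked example (hypothetical values): with KERNEL_VERSION="6.12.44" in config/build.conf
# and CONFIG_LOCALVERSION="-Zero-OS" in config/kernel.config, compute_full_kver prints
# "6.12.44-Zero-OS" and modules_dir_for_full yields /lib/modules/6.12.44-Zero-OS.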

full_kver="$(compute_full_kver)"
container_modules_dir="$(modules_dir_for_full "$full_kver")"

log "Container modules version: ${full_kver:-<unknown>}"
if [[ -d "$container_modules_dir" ]]; then
  before_count=$(find "$container_modules_dir" -type f -name '*.ko*' 2>/dev/null | wc -l | tr -d ' ')
  log "Before build: ${container_modules_dir} exists with ${before_count} module file(s)"
else
  log "Before build: ${container_modules_dir} not present (fresh container scenario)"
fi

# Minimal set of stages to clear when zinit changes:
# - zinit_setup: recopy zinit YAML and init scripts into initramfs
# - validation: re-check initramfs contents
# - initramfs_create: recreate archive including updated zinit files
# - initramfs_test: re-test archive
# - kernel_build: re-embed updated initramfs into kernel (cleared only with --with-kernel)
# Boot testing is no longer a build stage; run ./runit.sh separately after the build.
stages_to_clear=(
  # Ensure new/changed module selections in [config/modules.conf](config/modules.conf)
  # are re-resolved and copied into the initramfs:
  "modules_setup"
  "modules_copy"

  # Re-copy /init into the initramfs root from [config/init](config/init)
  "init_script"

  # Ensure zinit YAML/init script changes are reapplied:
  "zinit_setup"

  # Re-validate and recreate archive (no kernel rebuild by default):
  "validation"
  "initramfs_create"
  "initramfs_test"
)

# Optionally rebuild container modules if requested (fresh container scenario)
if [[ "$refresh_container_mods" -eq 1 ]]; then
  stages_to_clear=("kernel_modules" "${stages_to_clear[@]}")
fi

# Optionally rebuild kernel (re-embed updated initramfs)
if [[ "$rebuild_kernel" -eq 1 ]]; then
  stages_to_clear+=("kernel_build")
fi

# Remove completion markers to force incremental rebuild of those stages
log "Planned markers to clear: ${stages_to_clear[*]}"
for s in "${stages_to_clear[@]}"; do
  marker="${STAGES_DIR}/${s}.done"
  if [[ -f "$marker" ]]; then
    log "Removing stage marker: ${marker}"
    rm -f "$marker"
  else
    log "Marker not present (already pending): ${s}"
  fi
done
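
# Markers are plain files named "<stage>.done" under ${STAGES_DIR}; for example, removing
# .build-stages/initramfs_create.done marks that stage as pending so build.sh re-runs it
# on the next invocation while leaving stages whose markers remain untouched.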

# Show stage status after marker removal (still host-safe)
log "Stage status (after marker removal):"
("${PROJECT_ROOT}/scripts/build.sh" --show-stages) || true

# Build
log "Starting incremental rebuild (zinit changes)"
# IMPORTANT: Do NOT pass --rebuild-from or --force-rebuild; that would force ALL stages to run.
# We rely exclusively on removed markers to minimally re-run only the necessary stages.
build_from_args=()

if in_container; then
  # Run directly when already inside the dev/build container
  # Note: Tests are run separately using runit.sh, not during build
  log "Running rebuild (in-container) - use runit.sh for testing"
  DEBUG=1 "${PROJECT_ROOT}/scripts/build.sh" "${build_from_args[@]}" "${extra_args[@]}"
else
  # Not in container: delegate to dev-container manager which ensures container exists and is running
  devctl="${PROJECT_ROOT}/scripts/dev-container.sh"
  if [[ ! -x "$devctl" ]]; then
    log "[ERROR] Dev container manager not found: ${devctl}"
    log "[HINT] Run ./scripts/build.sh directly (it can start a transient container), or start the dev container via ./scripts/dev-container.sh start"
    exit 1
  fi

  # Note: Tests are run separately using runit.sh, not during build
  log "Running rebuild via dev-container - use runit.sh for testing"
  "$devctl" build "${build_from_args[@]}" "${extra_args[@]}"
fi
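
# After the rebuild completes, validate the result separately (testing is not part of the
# build pipeline); for example, from the project root:
#   ./runit.sh
# runit.sh drives the QEMU/cloud-hypervisor boot tests; see that script for its options.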