Compare commits

..

6 Commits

12 changed files with 728 additions and 144 deletions

View File

@@ -17,6 +17,8 @@ eudev-netifnames
kmod kmod
fuse3 fuse3
pciutils pciutils
efitools
efibootmgr
# Console/terminal management # Console/terminal management
util-linux util-linux
@@ -47,7 +49,3 @@ haveged
openssh-server openssh-server
zellij zellij
# Essential debugging and monitoring tools included
# NO development tools, NO curl/wget, NO python, NO redis
# NO massive linux-firmware package
# Other tools will be loaded with RFS after network connectivity

View File

@@ -8,12 +8,12 @@
S3_ENDPOINT="https://hub.grid.tf" S3_ENDPOINT="https://hub.grid.tf"
# AWS region string expected by the S3-compatible API # AWS region string expected by the S3-compatible API
S3_REGION="us-east-1" S3_REGION="garage"
# Bucket and key prefix used for RFS store (content-addressed blobs) # Bucket and key prefix used for RFS store (content-addressed blobs)
# The RFS store path will be: s3://.../<S3_BUCKET>/<S3_PREFIX> # The RFS store path will be: s3://.../<S3_BUCKET>/<S3_PREFIX>
S3_BUCKET="zos" S3_BUCKET="zos"
S3_PREFIX="zosbuilder/store" S3_PREFIX="zos/store"
# Access credentials (required by rfs pack to push blobs) # Access credentials (required by rfs pack to push blobs)
S3_ACCESS_KEY="REPLACE_ME" S3_ACCESS_KEY="REPLACE_ME"
@@ -36,10 +36,10 @@ MANIFESTS_SUBPATH="manifests"
# Behavior flags (can be overridden by CLI flags or env) # Behavior flags (can be overridden by CLI flags or env)
# Whether to keep s3:// store as a fallback entry in the .fl after adding WEB_ENDPOINT # Whether to keep s3:// store as a fallback entry in the .fl after adding WEB_ENDPOINT
KEEP_S3_FALLBACK="false" KEEP_S3_FALLBACK="true"
# Whether to attempt uploading .fl manifests to S3 (requires MinIO Client: mc) # Whether to attempt uploading .fl manifests to S3 (requires MinIO Client: mc)
UPLOAD_MANIFESTS="false" UPLOAD_MANIFESTS="true"
# Read-only credentials for route URL in manifest (optional; defaults to write keys above) # Read-only credentials for route URL in manifest (optional; defaults to write keys above)
# These will be embedded into the flist 'route.url' so runtime mounts can read directly from Garage. # These will be embedded into the flist 'route.url' so runtime mounts can read directly from Garage.
@@ -53,5 +53,27 @@ READ_SECRET_KEY="REPLACE_ME_READ"
# - ROUTE_PATH: path to the blob route (default: /blobs) # - ROUTE_PATH: path to the blob route (default: /blobs)
# - ROUTE_REGION: region string for Garage (default: garage) # - ROUTE_REGION: region string for Garage (default: garage)
ROUTE_ENDPOINT="https://hub.grid.tf" ROUTE_ENDPOINT="https://hub.grid.tf"
ROUTE_PATH="/blobs" ROUTE_PATH="/zos/store"
ROUTE_REGION="garage" ROUTE_REGION="garage"
# RESP/DB-style blob store (design-time placeholders; optional)
# Enable to allow pack scripts or future rfs CLI to upload blobs to a RESP-compatible store.
# This does not change the existing S3 flow; RESP acts as an additional backend.
#
# Example URI semantics (see docs/rfs-flists.md additions):
# resp://host:port/db?prefix=blobs
#   resp+tls://host:port/db?prefix=blobs&ca=/etc/ssl/certs/ca.pem
# resp+sentinel://sentinelHost:26379/mymaster?prefix=blobs
#
# Minimal keys for a direct RESP endpoint
RESP_ENABLED="false"
RESP_ENDPOINT="localhost:6379" # host:port
RESP_DB="0" # integer DB index
RESP_PREFIX="zos/blobs" # namespace/prefix for content-addressed keys
RESP_USERNAME="" # optional
RESP_PASSWORD="" # optional
RESP_TLS="false" # true/false
RESP_CA="" # path to CA bundle when RESP_TLS=true
# Optional: Sentinel topology (overrides RESP_ENDPOINT for discovery)
RESP_SENTINEL="" # sentinelHost:port (comma-separated for multiple)
RESP_MASTER="" # Sentinel master name (e.g., "mymaster")

View File

@@ -5,4 +5,4 @@ if ! getent group dhcpcd >/dev/null 2>&1; then addgroup -S dhcpcd 2>/dev/null ||
if ! getent passwd dhcpcd >/dev/null 2>&1; then adduser -S -D -s /sbin/nologin -G dhcpcd dhcpcd 2>/dev/null || true; fi if ! getent passwd dhcpcd >/dev/null 2>&1; then adduser -S -D -s /sbin/nologin -G dhcpcd dhcpcd 2>/dev/null || true; fi
# Exec dhcpcd (will run as root if it cannot drop to dhcpcd user) # Exec dhcpcd (will run as root if it cannot drop to dhcpcd user)
interfaces=$(ip -br l | awk '!/lo/&&!/my0/{print $1}') interfaces=$(ip -br l | awk '!/lo/&&!/my0/{print $1}')
exec dhcpcd -B $interfaces exec dhcpcd -p -B $interfaces

View File

@@ -4,3 +4,4 @@ exec: /usr/bin/mycelium --key-file /tmp/mycelium_priv_key.bin
tcp://209.159.146.190:9651 tcp://5.78.122.16:9651 tcp://5.223.43.251:9651 tcp://142.93.217.194:9651 tcp://209.159.146.190:9651 tcp://5.78.122.16:9651 tcp://5.223.43.251:9651 tcp://142.93.217.194:9651
after: after:
- network - network
- udev-rfs

View File

@@ -1,5 +1,6 @@
exec: sh /etc/zinit/init/network.sh eth0 exec: sh /etc/zinit/init/network.sh
after: after:
- depmod - depmod
- udevd - udevd
- udev-trigger - udev-trigger
test: ping www.google.com

View File

@@ -204,3 +204,78 @@ Change Log
- Normalize INSTALL_DIR/COMPONENTS_DIR/KERNEL_DIR/DIST_DIR to absolute paths post-config load. - Normalize INSTALL_DIR/COMPONENTS_DIR/KERNEL_DIR/DIST_DIR to absolute paths post-config load.
- Add validation diagnostics prints (input/PWD/PROJECT_ROOT/INSTALL_DIR/resolved). - Add validation diagnostics prints (input/PWD/PROJECT_ROOT/INSTALL_DIR/resolved).
- Ensure shadow package in container for passwd/chpasswd; keep openssl and openssl-dev; remove perl earlier. - Ensure shadow package in container for passwd/chpasswd; keep openssl and openssl-dev; remove perl earlier.
Updates 2025-10-01
- Function index regenerated: see [scripts/functionlist.md](scripts/functionlist.md) for an authoritative map of all functions with current line numbers. Use it alongside the quick links below to jump into code fast.
- Key jump-points (current lines):
- Finalization: [bash.initramfs_finalize_customization()](scripts/lib/initramfs.sh:568)
- CPIO creation: [bash.initramfs_create_cpio()](scripts/lib/initramfs.sh:691)
- Validation: [bash.initramfs_validate()](scripts/lib/initramfs.sh:820)
- Kernel embed config: [bash.kernel_modify_config_for_initramfs()](scripts/lib/kernel.sh:130)
- Stage orchestrator entry: [bash.main_build_process()](scripts/build.sh:214)
- Repo-wide index: [scripts/functionlist.md](scripts/functionlist.md)
Roadmap / TODO (tracked in tool todo list)
- Zosception (zinit service graph and ordering)
- Define additional services and ordering for nested/recursive orchestration.
- Likely integration points:
- Networking readiness before RFS: [config/zinit/network.yaml](config/zinit/network.yaml)
- Early udev coldplug: [config/zinit/udev-trigger.yaml](config/zinit/udev-trigger.yaml)
- Post-RFS coldplug: [config/zinit/udev-rfs.yaml](config/zinit/udev-rfs.yaml)
- Ensure dependency edges are correct in the service DAG image (see docs/img_*.png).
- Add zosstorage to initramfs
- Source:
- If packaged: add to [config/packages.list](config/packages.list).
- If built from source: extend [bash.components_parse_sources_conf()](scripts/lib/components.sh:13) and add a build_* function; install via [bash.initramfs_copy_components()](scripts/lib/initramfs.sh:102).
- Zinit unit:
- Add YAML under [config/zinit/](config/zinit/) and hook into the network-ready path.
- Ordering:
- Start after "network" and before/with RFS mounts if it provides storage functionality used by rfs.
- RFS blob store backends (design + docs; http and s3 exist)
- Current S3 store URI construction: [bash.rfs_common_build_s3_store_uri()](scripts/rfs/common.sh:137)
- Flist manifest store patching: [bash.rfs_common_patch_flist_stores()](scripts/rfs/common.sh:385)
- Route URL patching: [bash.rfs_common_patch_flist_route_url()](scripts/rfs/common.sh:494)
- Packers entrypoints:
- [scripts/rfs/pack-modules.sh](scripts/rfs/pack-modules.sh:1)
- [scripts/rfs/pack-firmware.sh](scripts/rfs/pack-firmware.sh:1)
- Proposed additional backend: RESP/DB-style store
- Goal: Allow rfs to push/fetch content-addressed blobs via a RESP-compatible endpoint (e.g., Redis/KeyDB/Dragonfly-like), or a thin HTTP/RESP adapter.
- Draft URI scheme examples:
- resp://host:port/db?tls=0&prefix=blobs
- resp+tls://host:port/db?prefix=blobs&ca=/etc/ssl/certs/ca.pem
- resp+sentinel://sentinelHost:26379/mymaster?prefix=blobs
- Minimum operations:
- PUT blob: SETEX prefix/ab/cd/hash ttl file-bytes or HSET prefix/hash data file-bytes
- GET blob: GET or HGET
- HEAD/exists: EXISTS
- Optional: pipelined/mget for batch prefetch
- Client integration layers:
- Pack-time: extend rfs CLI store resolver (design doc first; scripts/rfs/common.sh can map scheme→uploader if CLI not ready).
- Manifest post-process: still supported; stores table may include multiple URIs (s3 + resp) for redundancy.
- Caching and retries:
- Local on-disk cache under dist/.rfs-cache keyed by hash with LRU GC.
- Exponential backoff on GET failures; fall back across stores in order.
- Auth:
- RESP: optional username/password in URI; TLS with cert pinning parameters.
- Keep secrets in config/rfs.conf or env; do not embed write creds in manifests (read-credential routes only).
- Deliverables:
- Design section in docs/rfs-flists.md (to be added)
- Config keys in config/rfs.conf.example for RESP endpoints
- Optional shim uploader script if CLI support lags.
- Documentation refresh tasks
- Cross-check this file's clickable references against [scripts/functionlist.md](scripts/functionlist.md) after changes in lib files.
- Keep “Branding behavior” and “Absolute Path Normalization” pointers aligned with:
- [bash.common.sh normalization](scripts/lib/common.sh:244)
- [bash.initramfs_finalize_customization()](scripts/lib/initramfs.sh:568)
Diagnostics-first reminder
- Use DEBUG=1 and stage markers for minimal rebuilds.
- Quick commands:
- Show stages: ./scripts/build.sh --show-stages
- Minimal rebuild after zinit/init edits: [scripts/rebuild-after-zinit.sh](scripts/rebuild-after-zinit.sh)
- Validate archive: [bash.initramfs_create_cpio()](scripts/lib/initramfs.sh:691), then [bash.initramfs_test_archive()](scripts/lib/initramfs.sh:953)

73
docs/TODO.md Normal file
View File

@@ -0,0 +1,73 @@
# Zero-OS Builder Persistent TODO
This canonical checklist is the single source of truth for ongoing work. It mirrors the live task tracker but is versioned in-repo for review and PRs. Jump-points reference exact functions and files for quick triage.
## High-level
- [x] Regenerate repository function index: [scripts/functionlist.md](../scripts/functionlist.md)
- [x] Refresh NOTES with jump-points and roadmap: [docs/NOTES.md](NOTES.md)
- [x] Extend RFS design with RESP/DB-style backend: [docs/rfs-flists.md](rfs-flists.md)
- [x] Make Rust components Git fetch non-destructive: [bash.components_download_git()](../scripts/lib/components.sh:72)
- [ ] Update zinit config for "zosception" workflow: [config/zinit/](../config/zinit/)
- [ ] Add zosstorage to the initramfs (package/build/install + zinit unit)
- [ ] Validate via minimal rebuild and boot tests; refine depmod/udev docs
- [ ] Commit and push documentation and configuration updates (post-zosception/zosstorage)
## Zosception (zinit service graph and ordering)
- [ ] Define service graph changes and ordering constraints
- Reference current triggers:
- Early coldplug: [config/zinit/udev-trigger.yaml](../config/zinit/udev-trigger.yaml)
- Network readiness: [config/zinit/network.yaml](../config/zinit/network.yaml)
- Post-mount coldplug: [config/zinit/udev-rfs.yaml](../config/zinit/udev-rfs.yaml)
- [ ] Add/update units under [config/zinit/](../config/zinit/) with proper after/needs/wants
- [ ] Validate with stage runner and logs
- Stage runner: [bash.stage_run()](../scripts/lib/stages.sh:99)
- Main flow: [bash.main_build_process()](../scripts/build.sh:214)
## ZOS Storage in initramfs
- [ ] Decide delivery mechanism:
- [ ] APK via [config/packages.list](../config/packages.list)
- [ ] Source build via [bash.components_parse_sources_conf()](../scripts/lib/components.sh:13) with a new build function
- [ ] Copy/install into initramfs
- Components copy: [bash.initramfs_copy_components()](../scripts/lib/initramfs.sh:102)
- Zinit setup: [bash.initramfs_setup_zinit()](../scripts/lib/initramfs.sh:13)
- [ ] Create zinit unit(s) for zosstorage startup and ordering
- Place after network and before RFS if it provides storage used by rfs
- [ ] Add smoke command to confirm presence in image (e.g., which/--version) during [bash.initramfs_validate()](../scripts/lib/initramfs.sh:820)
## RFS backends — implementation follow-up (beyond design)
- [ ] RESP uploader shim for packers (non-breaking)
- Packers entrypoints: [scripts/rfs/pack-modules.sh](../scripts/rfs/pack-modules.sh), [scripts/rfs/pack-firmware.sh](../scripts/rfs/pack-firmware.sh)
- Config loader: [bash.rfs_common_load_rfs_s3_config()](../scripts/rfs/common.sh:82) → extend to parse RESP_* (non-breaking)
- Store URI builder (S3 exists): [bash.rfs_common_build_s3_store_uri()](../scripts/rfs/common.sh:137)
- Manifest patching remains:
- Stores table: [bash.rfs_common_patch_flist_stores()](../scripts/rfs/common.sh:385)
- route.url: [bash.rfs_common_patch_flist_route_url()](../scripts/rfs/common.sh:494)
- [ ] Connectivity checks and retries for RESP path
- [ ] Local cache for pack-time (optional)
## Validation and boot tests
- [ ] Minimal rebuild after zinit/units/edit
- Helper: [scripts/rebuild-after-zinit.sh](../scripts/rebuild-after-zinit.sh)
- [ ] Validate contents before CPIO
- Create: [bash.initramfs_create_cpio()](../scripts/lib/initramfs.sh:691)
- Validate: [bash.initramfs_validate()](../scripts/lib/initramfs.sh:820)
- [ ] QEMU / cloud-hypervisor smoke tests
- Test suite: [bash.testing_run_all()](../scripts/lib/testing.sh:299)
- [ ] Kernel embed path and versioning sanity
- Embed config: [bash.kernel_modify_config_for_initramfs()](../scripts/lib/kernel.sh:130)
- Full version logic: [bash.kernel_get_full_version()](../scripts/lib/kernel.sh:14)
## Operational conventions (keep)
- [ ] Diagnostics-first changes; add logs before fixes
- [ ] Absolute path normalization respected
- Normalization: [scripts/lib/common.sh](../scripts/lib/common.sh:244)
Notes
- Keep this file in sync with the live tracker. Reference it in PR descriptions.
- Use the clickable references above for rapid navigation.

View File

@@ -165,3 +165,45 @@ Use the helper to inspect a manifest, optionally listing entries and testing a l
- scripts/rfs/verify-flist.sh -m dist/flists/firmware-latest.fl --tree - scripts/rfs/verify-flist.sh -m dist/flists/firmware-latest.fl --tree
- Inspect + mount test to a temp dir: - Inspect + mount test to a temp dir:
- sudo scripts/rfs/verify-flist.sh -m dist/flists/modules-6.12.44-Zero-OS.fl --mount - sudo scripts/rfs/verify-flist.sh -m dist/flists/modules-6.12.44-Zero-OS.fl --mount
## Additional blob store backends (design)
This extends the existing S3/HTTP approach with a RESP/DB-style backend option for rfs blob storage. It is a design-only addition; CLI and scripts will be extended in a follow-up.
Scope
- Keep S3 flow intact via [scripts/rfs/common.sh](scripts/rfs/common.sh:137), [scripts/rfs/common.sh](scripts/rfs/common.sh:385), and [scripts/rfs/common.sh](scripts/rfs/common.sh:494).
- Introduce RESP URIs that can be encoded in config and, later, resolved by rfs or a thin uploader shim invoked by:
- [scripts/rfs/pack-modules.sh](scripts/rfs/pack-modules.sh:1)
- [scripts/rfs/pack-firmware.sh](scripts/rfs/pack-firmware.sh:1)
URI schemes (draft)
- resp://host:port/db?prefix=blobs
- resp+tls://host:port/db?prefix=blobs&ca=/etc/ssl/certs/ca.pem
- resp+sentinel://sentinelHost:26379/mymaster?prefix=blobs
- Credentials may be provided via URI userinfo or config (recommended: config only).
Operations (minimal set)
- PUT blob: write content-addressed key (e.g., prefix/ab/cd/hash)
- GET blob: fetch by exact key
- Exists/HEAD: presence test by key
- Optional batching: pipelined MGET for prefetch
Config keys (see example additions in config/rfs.conf.example)
- RESP_ENDPOINT (host:port), RESP_DB (integer), RESP_PREFIX (path namespace)
- RESP_USERNAME/RESP_PASSWORD (optional), RESP_TLS=0/1 (+ RESP_CA if needed)
- RESP_SENTINEL and RESP_MASTER for sentinel deployments
Manifests and routes
- Keep S3 store in flist stores table (fallback) while enabling route.url patching to HTTP/S3 for read-only access:
- Patch stores table as today via [scripts/rfs/common.sh](scripts/rfs/common.sh:385)
- Patch route.url as today via [scripts/rfs/common.sh](scripts/rfs/common.sh:494)
- RESP may be used primarily for pack-time blob uploads or as an additional store the CLI can consume later.
Security
- Do not embed write credentials in manifests.
- Read-only credentials may be embedded in route.url if required, mirroring S3 pattern.
Next steps
- Implement RESP uploader shim called from pack scripts; keep the CLI S3 flow unchanged.
- Extend config loader in [scripts/rfs/common.sh](scripts/rfs/common.sh:82) to parse RESP_* variables.
- Add verification routines to sanity-check connectivity before pack.

View File

@@ -1,55 +1,141 @@
# Function List - scripts/lib Library # Function List - Repository (scripts and libraries)
This document lists all functions currently defined under [scripts/lib](scripts/lib) with their source locations. This document lists functions defined under scripts/ and scripts/lib with source locations.
Regenerated from repository on 2025-10-01.
## alpine.sh - Alpine Linux operations ## Top-level build scripts
File: [scripts/lib/alpine.sh](scripts/lib/alpine.sh)
- [alpine_extract_miniroot()](scripts/lib/alpine.sh:14) - Download and extract Alpine miniroot File: [scripts/build.sh](scripts/build.sh)
- [alpine_setup_chroot()](scripts/lib/alpine.sh:70) - Setup chroot mounts and resolv.conf - [show_usage()](scripts/build.sh:49)
- [alpine_cleanup_chroot()](scripts/lib/alpine.sh:115) - Unmount chroot mounts - [parse_arguments()](scripts/build.sh:88)
- [alpine_install_packages()](scripts/lib/alpine.sh:142) - Install packages from packages.list - [setup_build_environment()](scripts/build.sh:133)
- [alpine_aggressive_cleanup()](scripts/lib/alpine.sh:211) - Reduce image size by removing docs/locales/etc - [verify_configuration_files()](scripts/build.sh:174)
- [alpine_configure_repos()](scripts/lib/alpine.sh:321) - Configure APK repositories - [main_build_process()](scripts/build.sh:214)
- [alpine_configure_system()](scripts/lib/alpine.sh:339) - Configure hostname, hosts, timezone, profile - [stage_alpine_extract()](scripts/build.sh:223)
- [alpine_install_firmware()](scripts/lib/alpine.sh:392) - Install required firmware packages - [stage_alpine_configure()](scripts/build.sh:227)
- [stage_alpine_packages()](scripts/build.sh:232)
- [stage_alpine_firmware()](scripts/build.sh:236)
- [stage_components_build()](scripts/build.sh:240)
- [stage_components_verify()](scripts/build.sh:244)
- [stage_kernel_modules()](scripts/build.sh:248)
- [stage_zinit_setup()](scripts/build.sh:265)
- [stage_init_script()](scripts/build.sh:269)
- [stage_components_copy()](scripts/build.sh:273)
- [stage_modules_setup()](scripts/build.sh:277)
- [stage_modules_copy()](scripts/build.sh:286)
- [stage_rfs_flists()](scripts/build.sh:299)
- [stage_cleanup()](scripts/build.sh:366)
- [stage_validation()](scripts/build.sh:370)
- [stage_initramfs_create()](scripts/build.sh:374)
- [stage_initramfs_test()](scripts/build.sh:385)
- [stage_kernel_build()](scripts/build.sh:398)
- [stage_boot_tests()](scripts/build.sh:415)
- [main()](scripts/build.sh:470)
File: [scripts/clean.sh](scripts/clean.sh)
- [show_usage()](scripts/clean.sh:21)
- [parse_arguments()](scripts/clean.sh:50)
- [clean_build_artifacts()](scripts/clean.sh:90)
- [clean_downloads()](scripts/clean.sh:127)
- [clean_container_images()](scripts/clean.sh:155)
- [show_space_recovery()](scripts/clean.sh:176)
- [verify_cleanup()](scripts/clean.sh:203)
- [main()](scripts/clean.sh:240)
File: [scripts/dev-container.sh](scripts/dev-container.sh)
- [show_usage()](scripts/dev-container.sh:19)
- [ensure_builder_image()](scripts/dev-container.sh:44)
- [dev_container_start()](scripts/dev-container.sh:70)
- [dev_container_stop()](scripts/dev-container.sh:109)
- [dev_container_shell()](scripts/dev-container.sh:121)
- [dev_container_build()](scripts/dev-container.sh:139)
- [dev_container_clean()](scripts/dev-container.sh:168)
- [dev_container_status()](scripts/dev-container.sh:180)
- [dev_container_logs()](scripts/dev-container.sh:202)
- [main()](scripts/dev-container.sh:214)
File: [scripts/make-grub-usb.sh](scripts/make-grub-usb.sh)
- [error()](scripts/make-grub-usb.sh:45)
- [info()](scripts/make-grub-usb.sh:46)
- [warn()](scripts/make-grub-usb.sh:47)
- [die()](scripts/make-grub-usb.sh:48)
- [require_root()](scripts/make-grub-usb.sh:50)
- [command_exists()](scripts/make-grub-usb.sh:54)
- [parse_args()](scripts/make-grub-usb.sh:56)
- [confirm_dangerous()](scripts/make-grub-usb.sh:81)
- [check_prereqs()](scripts/make-grub-usb.sh:93)
- [resolve_defaults()](scripts/make-grub-usb.sh:101)
- [umount_partitions()](scripts/make-grub-usb.sh:116)
- [partition_device_gpt()](scripts/make-grub-usb.sh:139)
- [format_esp()](scripts/make-grub-usb.sh:158)
- [mount_esp()](scripts/make-grub-usb.sh:165)
- [install_grub()](scripts/make-grub-usb.sh:171)
- [copy_kernel_initrd()](scripts/make-grub-usb.sh:180)
- [write_grub_cfg()](scripts/make-grub-usb.sh:190)
- [cleanup()](scripts/make-grub-usb.sh:226)
- [main()](scripts/make-grub-usb.sh:235)
File: [scripts/rebuild-after-zinit.sh](scripts/rebuild-after-zinit.sh)
- [log()](scripts/rebuild-after-zinit.sh:17)
- [in_container()](scripts/rebuild-after-zinit.sh:67)
- [check_dir_changed()](scripts/rebuild-after-zinit.sh:81)
- [list_some_changes()](scripts/rebuild-after-zinit.sh:89)
- [compute_full_kver()](scripts/rebuild-after-zinit.sh:131)
- [modules_dir_for_full()](scripts/rebuild-after-zinit.sh:146)
File: [scripts/test.sh](scripts/test.sh)
- [show_usage()](scripts/test.sh:20)
- [parse_arguments()](scripts/test.sh:46)
- [run_tests()](scripts/test.sh:105)
- [main()](scripts/test.sh:182)
## Library scripts
## common.sh - Core utilities
File: [scripts/lib/common.sh](scripts/lib/common.sh) File: [scripts/lib/common.sh](scripts/lib/common.sh)
- [log_info()](scripts/lib/common.sh:31) - [log_info()](scripts/lib/common.sh:31)
- [log_warn()](scripts/lib/common.sh:36) - [log_warn()](scripts/lib/common.sh:36)
- [log_error()](scripts/lib/common.sh:41) - [log_error()](scripts/lib/common.sh:41)
- [log_debug()](scripts/lib/common.sh:46) - [log_debug()](scripts/lib/common.sh:46)
- [safe_execute()](scripts/lib/common.sh:54) - [safe_execute()](scripts/lib/common.sh:54)
- [section_header()](scripts/lib/common.sh:79) - [safe_execute_stream()](scripts/lib/common.sh:77)
- [command_exists()](scripts/lib/common.sh:89) - [section_header()](scripts/lib/common.sh:87)
- [in_container()](scripts/lib/common.sh:94) - [command_exists()](scripts/lib/common.sh:97)
- [check_dependencies()](scripts/lib/common.sh:99) - [in_container()](scripts/lib/common.sh:102)
- [safe_mkdir()](scripts/lib/common.sh:142) - [check_dependencies()](scripts/lib/common.sh:107)
- [safe_rmdir()](scripts/lib/common.sh:149) - [safe_mkdir()](scripts/lib/common.sh:150)
- [safe_copy()](scripts/lib/common.sh:158) - [safe_rmdir()](scripts/lib/common.sh:157)
- [is_absolute_path()](scripts/lib/common.sh:166) - [safe_copy()](scripts/lib/common.sh:166)
- [resolve_path()](scripts/lib/common.sh:171) - [is_absolute_path()](scripts/lib/common.sh:174)
- [get_file_size()](scripts/lib/common.sh:181) - [resolve_path()](scripts/lib/common.sh:179)
- [wait_for_file()](scripts/lib/common.sh:191) - [get_file_size()](scripts/lib/common.sh:189)
- [cleanup_on_exit()](scripts/lib/common.sh:205) - [wait_for_file()](scripts/lib/common.sh:199)
- [cleanup_on_exit()](scripts/lib/common.sh:213)
File: [scripts/lib/alpine.sh](scripts/lib/alpine.sh)
- [alpine_extract_miniroot()](scripts/lib/alpine.sh:14)
- [alpine_setup_chroot()](scripts/lib/alpine.sh:70)
- [alpine_cleanup_chroot()](scripts/lib/alpine.sh:115)
- [alpine_install_packages()](scripts/lib/alpine.sh:142)
- [alpine_aggressive_cleanup()](scripts/lib/alpine.sh:211)
- [alpine_configure_repos()](scripts/lib/alpine.sh:321)
- [alpine_configure_system()](scripts/lib/alpine.sh:339)
- [alpine_install_firmware()](scripts/lib/alpine.sh:392)
## components.sh - Component management
File: [scripts/lib/components.sh](scripts/lib/components.sh) File: [scripts/lib/components.sh](scripts/lib/components.sh)
- [components_parse_sources_conf()](scripts/lib/components.sh:13) - [components_parse_sources_conf()](scripts/lib/components.sh:13)
- [components_download_git()](scripts/lib/components.sh:72) - [components_download_git()](scripts/lib/components.sh:72)
- [components_download_release()](scripts/lib/components.sh:104) - [components_download_release()](scripts/lib/components.sh:174)
- [components_process_extra_options()](scripts/lib/components.sh:144) - [components_process_extra_options()](scripts/lib/components.sh:214)
- [components_build_component()](scripts/lib/components.sh:183) - [components_build_component()](scripts/lib/components.sh:253)
- [components_setup_rust_env()](scripts/lib/components.sh:217) - [components_setup_rust_env()](scripts/lib/components.sh:287)
- [build_zinit()](scripts/lib/components.sh:252) - [build_zinit()](scripts/lib/components.sh:322)
- [build_rfs()](scripts/lib/components.sh:299) - [build_rfs()](scripts/lib/components.sh:369)
- [build_mycelium()](scripts/lib/components.sh:346) - [build_mycelium()](scripts/lib/components.sh:417)
- [install_rfs()](scripts/lib/components.sh:386) - [install_rfs()](scripts/lib/components.sh:457)
- [install_corex()](scripts/lib/components.sh:409) - [install_corex()](scripts/lib/components.sh:480)
- [components_verify_installation()](scripts/lib/components.sh:436) - [components_verify_installation()](scripts/lib/components.sh:507)
- [components_cleanup()](scripts/lib/components.sh:472) - [components_cleanup()](scripts/lib/components.sh:543)
## docker.sh - Container runtime management
File: [scripts/lib/docker.sh](scripts/lib/docker.sh) File: [scripts/lib/docker.sh](scripts/lib/docker.sh)
- [docker_detect_runtime()](scripts/lib/docker.sh:14) - [docker_detect_runtime()](scripts/lib/docker.sh:14)
- [docker_verify_rootless()](scripts/lib/docker.sh:33) - [docker_verify_rootless()](scripts/lib/docker.sh:33)
@@ -58,36 +144,34 @@ File: [scripts/lib/docker.sh](scripts/lib/docker.sh)
- [docker_start_rootless()](scripts/lib/docker.sh:116) - [docker_start_rootless()](scripts/lib/docker.sh:116)
- [docker_run_build()](scripts/lib/docker.sh:154) - [docker_run_build()](scripts/lib/docker.sh:154)
- [docker_commit_builder()](scripts/lib/docker.sh:196) - [docker_commit_builder()](scripts/lib/docker.sh:196)
- [docker_cleanup()](scripts/lib/docker.sh:208) - [docker_cleanup()](scripts/lib/docker.sh:209)
- [docker_check_capabilities()](scripts/lib/docker.sh:248) - [docker_check_capabilities()](scripts/lib/docker.sh:248)
- [docker_setup_rootless()](scripts/lib/docker.sh:279) - [docker_setup_rootless()](scripts/lib/docker.sh:279)
## initramfs.sh - Initramfs assembly
File: [scripts/lib/initramfs.sh](scripts/lib/initramfs.sh) File: [scripts/lib/initramfs.sh](scripts/lib/initramfs.sh)
- [initramfs_setup_zinit()](scripts/lib/initramfs.sh:13) - [initramfs_setup_zinit()](scripts/lib/initramfs.sh:13)
- [initramfs_install_init_script()](scripts/lib/initramfs.sh:70) - [initramfs_install_init_script()](scripts/lib/initramfs.sh:75)
- [initramfs_copy_components()](scripts/lib/initramfs.sh:97) - [initramfs_copy_components()](scripts/lib/initramfs.sh:102)
- [initramfs_setup_modules()](scripts/lib/initramfs.sh:225) - [initramfs_setup_modules()](scripts/lib/initramfs.sh:230)
- [initramfs_resolve_module_dependencies()](scripts/lib/initramfs.sh:313) - [initramfs_resolve_module_dependencies()](scripts/lib/initramfs.sh:312)
- [initramfs_create_module_scripts()](scripts/lib/initramfs.sh:422) - [resolve_single_module()](scripts/lib/initramfs.sh:348)
- [initramfs_strip_and_upx()](scripts/lib/initramfs.sh:486) - [initramfs_create_module_scripts()](scripts/lib/initramfs.sh:421)
- [initramfs_finalize_customization()](scripts/lib/initramfs.sh:569) - [initramfs_strip_and_upx()](scripts/lib/initramfs.sh:485)
- [initramfs_create_cpio()](scripts/lib/initramfs.sh:642) - [initramfs_finalize_customization()](scripts/lib/initramfs.sh:568)
- [initramfs_validate()](scripts/lib/initramfs.sh:710) - [initramfs_create_cpio()](scripts/lib/initramfs.sh:691)
- [initramfs_test_archive()](scripts/lib/initramfs.sh:809) - [initramfs_validate()](scripts/lib/initramfs.sh:820)
- [initramfs_copy_resolved_modules()](scripts/lib/initramfs.sh:846) - [initramfs_test_archive()](scripts/lib/initramfs.sh:953)
- [initramfs_copy_resolved_modules()](scripts/lib/initramfs.sh:991)
## kernel.sh - Kernel building
File: [scripts/lib/kernel.sh](scripts/lib/kernel.sh) File: [scripts/lib/kernel.sh](scripts/lib/kernel.sh)
- [kernel_get_full_version()](scripts/lib/kernel.sh:14) - [kernel_get_full_version()](scripts/lib/kernel.sh:14)
- [kernel_download_source()](scripts/lib/kernel.sh:28) - [kernel_download_source()](scripts/lib/kernel.sh:28)
- [kernel_apply_config()](scripts/lib/kernel.sh:82) - [kernel_apply_config()](scripts/lib/kernel.sh:82)
- [kernel_modify_config_for_initramfs()](scripts/lib/kernel.sh:129) - [kernel_modify_config_for_initramfs()](scripts/lib/kernel.sh:130)
- [kernel_build_with_initramfs()](scripts/lib/kernel.sh:174) - [kernel_build_with_initramfs()](scripts/lib/kernel.sh:174)
- [kernel_build_modules()](scripts/lib/kernel.sh:228) - [kernel_build_modules()](scripts/lib/kernel.sh:243)
- [kernel_cleanup()](scripts/lib/kernel.sh:284) - [kernel_cleanup()](scripts/lib/kernel.sh:298)
## stages.sh - Build stage tracking
File: [scripts/lib/stages.sh](scripts/lib/stages.sh) File: [scripts/lib/stages.sh](scripts/lib/stages.sh)
- [stages_init()](scripts/lib/stages.sh:12) - [stages_init()](scripts/lib/stages.sh:12)
- [stage_is_completed()](scripts/lib/stages.sh:33) - [stage_is_completed()](scripts/lib/stages.sh:33)
@@ -97,16 +181,46 @@ File: [scripts/lib/stages.sh](scripts/lib/stages.sh)
- [stage_run()](scripts/lib/stages.sh:99) - [stage_run()](scripts/lib/stages.sh:99)
- [stages_status()](scripts/lib/stages.sh:134) - [stages_status()](scripts/lib/stages.sh:134)
## testing.sh - Boot testing
File: [scripts/lib/testing.sh](scripts/lib/testing.sh) File: [scripts/lib/testing.sh](scripts/lib/testing.sh)
- [testing_qemu_boot()](scripts/lib/testing.sh:14) - [testing_qemu_boot()](scripts/lib/testing.sh:14)
- [testing_qemu_basic_boot()](scripts/lib/testing.sh:55) - [testing_qemu_basic_boot()](scripts/lib/testing.sh:55)
- [testing_qemu_serial_boot()](scripts/lib/testing.sh:90) - [testing_qemu_serial_boot()](scripts/lib/testing.sh:90)
- [testing_qemu_interactive_boot()](scripts/lib/testing.sh:113) - [testing_qemu_interactive_boot()](scripts/lib/testing.sh:114)
- [testing_cloud_hypervisor_boot()](scripts/lib/testing.sh:135) - [testing_cloud_hypervisor_boot()](scripts/lib/testing.sh:135)
- [testing_cloud_hypervisor_basic()](scripts/lib/testing.sh:171) - [testing_cloud_hypervisor_basic()](scripts/lib/testing.sh:172)
- [testing_cloud_hypervisor_serial()](scripts/lib/testing.sh:206) - [testing_cloud_hypervisor_serial()](scripts/lib/testing.sh:206)
- [testing_analyze_boot_log()](scripts/lib/testing.sh:227) - [testing_analyze_boot_log()](scripts/lib/testing.sh:228)
- [testing_run_all()](scripts/lib/testing.sh:299) - [testing_run_all()](scripts/lib/testing.sh:299)
## RFS tooling
File: [scripts/rfs/common.sh](scripts/rfs/common.sh)
- [rfs_common_project_root()](scripts/rfs/common.sh:12)
- [rfs_common_load_build_kernel_version()](scripts/rfs/common.sh:42)
- [rfs_common_load_rfs_s3_config()](scripts/rfs/common.sh:82)
- [rfs_common_build_s3_store_uri()](scripts/rfs/common.sh:137)
- [rfs_common_locate_rfs()](scripts/rfs/common.sh:171)
- [rfs_common_require_sqlite3()](scripts/rfs/common.sh:198)
- [rfs_common_locate_modules_dir()](scripts/rfs/common.sh:214)
- [rfs_common_locate_firmware_dir()](scripts/rfs/common.sh:244)
- [rfs_common_validate_modules_metadata()](scripts/rfs/common.sh:264)
- [rfs_common_install_all_alpine_firmware_packages()](scripts/rfs/common.sh:298)
- [rfs_common_patch_flist_stores()](scripts/rfs/common.sh:385)
- [rfs_common_build_route_url()](scripts/rfs/common.sh:453)
- [rfs_common_patch_flist_route_url()](scripts/rfs/common.sh:494)
- [rfs_common_prepare_output()](scripts/rfs/common.sh:525)
- [rfs_common_firmware_tag()](scripts/rfs/common.sh:533)
File: [scripts/rfs/pack-modules.sh](scripts/rfs/pack-modules.sh)
- [section()](scripts/rfs/pack-modules.sh:15)
File: [scripts/rfs/pack-firmware.sh](scripts/rfs/pack-firmware.sh)
- [section()](scripts/rfs/pack-firmware.sh:17)
File: [scripts/rfs/verify-flist.sh](scripts/rfs/verify-flist.sh)
- [usage()](scripts/rfs/verify-flist.sh:11)
- [section()](scripts/rfs/verify-flist.sh:26)
Notes:
- Line numbers reflect current repository state; re-run generation after edits.
- Nested/local functions are included under their parent section when applicable.

View File

@@ -34,32 +34,41 @@ function components_parse_sources_conf() {
local component_count=0 local component_count=0
# Hardcode known components to bypass parsing issues for now # Read entries from sources.conf (TYPE NAME URL VERSION BUILD_FUNCTION [EXTRA])
log_info "Building ThreeFold components (hardcoded for reliability)" while IFS= read -r _raw || [[ -n "$_raw" ]]; do
# Strip comments and trim whitespace
local line="${_raw%%#*}"
line="${line#"${line%%[![:space:]]*}"}"
line="${line%"${line##*[![:space:]]}"}"
[[ -z "$line" ]] && continue
# Component 1: zinit local type name url version build_func extra
component_count=$((component_count + 1)) # shellcheck disable=SC2086
log_info "Processing component ${component_count}: zinit (git)" read -r type name url version build_func extra <<< "$line"
components_download_git "zinit" "https://github.com/threefoldtech/zinit" "master" "$components_dir"
components_build_component "zinit" "build_zinit" "$components_dir"
# Component 2: mycelium if [[ -z "${type:-}" || -z "${name:-}" || -z "${url:-}" || -z "${version:-}" || -z "${build_func:-}" ]]; then
component_count=$((component_count + 1)) log_warn "Skipping malformed entry: ${_raw}"
log_info "Processing component ${component_count}: mycelium (git)" continue
components_download_git "mycelium" "https://github.com/threefoldtech/mycelium" "v0.6.1" "$components_dir" fi
components_build_component "mycelium" "build_mycelium" "$components_dir"
# Component 3: rfs (pre-built release) component_count=$((component_count + 1))
component_count=$((component_count + 1)) log_info "Processing component ${component_count}: ${name} (${type})"
log_info "Processing component ${component_count}: rfs (release)"
components_download_git "rfs" "https://github.com/threefoldtech/rfs" "development" "$components_dir"
components_build_component "rfs" "build_rfs" "$components_dir"
# Component 4: corex case "$type" in
component_count=$((component_count + 1)) git)
log_info "Processing component ${component_count}: corex (release)" components_download_git "$name" "$url" "$version" "$components_dir"
components_download_release "corex" "https://github.com/threefoldtech/corex/releases/download/2.1.4/corex-2.1.4-amd64-linux-static" "2.1.4" "$components_dir" "rename=corex" ;;
components_build_component "corex" "install_corex" "$components_dir" release)
components_download_release "$name" "$url" "$version" "$components_dir" "$extra"
;;
*)
log_error "Unknown component type in sources.conf: ${type}"
return 1
;;
esac
components_build_component "$name" "$build_func" "$components_dir"
done < "$sources_file"
if [[ $component_count -eq 0 ]]; then if [[ $component_count -eq 0 ]]; then
log_warn "No components found in sources configuration" log_warn "No components found in sources configuration"
@@ -68,7 +77,7 @@ function components_parse_sources_conf() {
fi fi
} }
# Download Git repository # Download Git repository (reuse tree; only reclone if invalid or version not reachable)
function components_download_git() { function components_download_git() {
local name="$1" local name="$1"
local url="$2" local url="$2"
@@ -80,24 +89,94 @@ function components_download_git() {
local target_dir="${components_dir}/${name}" local target_dir="${components_dir}/${name}"
log_info "Repository: ${url}" log_info "Repository: ${url}"
log_info "Version/Branch: ${version}" log_info "Version/Branch/Tag: ${version}"
log_info "Target directory: ${target_dir}" log_info "Target directory: ${target_dir}"
# Always do fresh clone to avoid git state issues # Ensure parent exists
if [[ -d "$target_dir" ]]; then safe_mkdir "$components_dir"
log_info "Removing existing ${name} directory for fresh clone"
safe_execute rm -rf "$target_dir" # Decide whether we can reuse the existing working tree
local need_fresh_clone="0"
if [[ -d "$target_dir/.git" ]]; then
if ! git -C "$target_dir" rev-parse --is-inside-work-tree >/dev/null 2>&1; then
log_warn "Existing ${name} directory is not a valid git repo; will reclone"
need_fresh_clone="1"
fi
elif [[ -d "$target_dir" ]]; then
log_warn "Existing ${name} directory without .git; will reclone"
need_fresh_clone="1"
fi fi
log_info "Cloning ${name} from ${url}" if [[ "$need_fresh_clone" == "1" || ! -d "$target_dir" ]]; then
safe_execute git clone --depth 1 --branch "$version" "$url" "$target_dir" log_info "Cloning ${name} (fresh) from ${url}"
safe_execute git clone "$url" "$target_dir"
fi
# Verify checkout # Ensure origin URL is correct (do not delete the tree if URL changed)
safe_execute cd "$target_dir" local current_url
local current_ref=$(git rev-parse HEAD) current_url=$(git -C "$target_dir" remote get-url origin 2>/dev/null || echo "")
log_info "Current commit: ${current_ref}" if [[ -n "$current_url" && "$current_url" != "$url" ]]; then
log_info "Updating origin URL: ${current_url} -> ${url}"
safe_execute git -C "$target_dir" remote set-url origin "$url"
elif [[ -z "$current_url" ]]; then
log_info "Setting origin URL to ${url}"
safe_execute git -C "$target_dir" remote add origin "$url" || true
fi
log_info "Git component download complete: ${name}" # Fetch updates and tags
safe_execute git -C "$target_dir" fetch --tags --prune origin
# Resolve desired commit for the requested version/branch/tag
local desired_rev=""
if git -C "$target_dir" rev-parse --verify "${version}^{commit}" >/dev/null 2>&1; then
desired_rev=$(git -C "$target_dir" rev-parse --verify "${version}^{commit}")
elif git -C "$target_dir" rev-parse --verify "origin/${version}^{commit}" >/dev/null 2>&1; then
desired_rev=$(git -C "$target_dir" rev-parse --verify "origin/${version}^{commit}")
else
log_warn "Version '${version}' not directly resolvable; fetching explicitly"
if git -C "$target_dir" fetch origin "${version}" --depth 1; then
desired_rev=$(git -C "$target_dir" rev-parse --verify FETCH_HEAD)
fi
fi
# Fallback: shallow clone at the requested ref if we still can't resolve
if [[ -z "$desired_rev" ]]; then
log_warn "Could not resolve revision for '${version}'. Performing fresh shallow clone at requested ref."
safe_execute rm -rf "${target_dir}.tmp"
if safe_execute git clone --depth 1 --branch "$version" "$url" "${target_dir}.tmp"; then
safe_execute rm -rf "$target_dir"
safe_execute mv "${target_dir}.tmp" "$target_dir"
desired_rev=$(git -C "$target_dir" rev-parse HEAD)
else
log_error "Failed to clone ${url} at '${version}'"
return 1
fi
fi
local current_rev
current_rev=$(git -C "$target_dir" rev-parse HEAD 2>/dev/null || echo "")
log_info "Current commit: ${current_rev:-<none>}"
log_info "Desired commit: ${desired_rev}"
if [[ -n "$current_rev" && "$current_rev" == "$desired_rev" ]]; then
log_info "Repository already at requested version; reusing working tree"
else
log_info "Checking out requested version"
# Prefer named refs when available; otherwise detach to exact commit
if git -C "$target_dir" show-ref --verify --quiet "refs/heads/${version}"; then
safe_execute git -C "$target_dir" checkout -f "${version}"
elif git -C "$target_dir" show-ref --verify --quiet "refs/remotes/origin/${version}"; then
safe_execute git -C "$target_dir" checkout -f -B "${version}" "origin/${version}"
elif git -C "$target_dir" show-ref --verify --quiet "refs/tags/${version}"; then
safe_execute git -C "$target_dir" checkout -f "tags/${version}"
else
safe_execute git -C "$target_dir" checkout -f --detach "${desired_rev}"
fi
# Initialize submodules if present (non-fatal)
safe_execute git -C "$target_dir" submodule update --init --recursive || true
fi
log_info "Git component ready: ${name} @ $(git -C "$target_dir" rev-parse --short HEAD)"
} }
# Download release binary/archive # Download release binary/archive
@@ -328,7 +407,6 @@ function build_rfs() {
return 1 return 1
fi fi
# remove rust-toolchain.toml, as not needed with latest release # remove rust-toolchain.toml, as not needed with latest release
safe_execute rm rust-toolchain.toml
# Build with musl target # Build with musl target
safe_execute cargo build --release --target "$RUST_TARGET" --features build-binary safe_execute cargo build --release --target "$RUST_TARGET" --features build-binary
@@ -439,29 +517,55 @@ function components_verify_installation() {
section_header "Verifying Component Build" section_header "Verifying Component Build"
# List of expected built binaries and their locations in components directory local ok_count=0
local expected_binaries=(
"zinit/target/x86_64-unknown-linux-musl/release/zinit"
"rfs/target/x86_64-unknown-linux-musl/release/rfs"
"mycelium/myceliumd/target/x86_64-unknown-linux-musl/release/mycelium"
"corex/corex"
)
local missing_count=0 local missing_count=0
for binary in "${expected_binaries[@]}"; do # zinit
local full_path="${components_dir}/${binary}" local zinit_bin="${components_dir}/zinit/target/x86_64-unknown-linux-musl/release/zinit"
if [[ -f "$full_path" && -x "$full_path" ]]; then if [[ -x "$zinit_bin" ]]; then
local size=$(get_file_size "$full_path") log_info "✓ zinit ($(get_file_size "$zinit_bin")) at: ${zinit_bin#${components_dir}/}"
log_info "✓ Built ${binary##*/} (${size}) at: ${binary}" ((ok_count++))
else else
log_error "Missing or not executable: ${binary}" log_error "zinit missing: ${zinit_bin#${components_dir}/}"
((missing_count++)) ((missing_count++))
fi fi
done
# rfs: accept both built and prebuilt locations
local rfs_built="${components_dir}/rfs/target/x86_64-unknown-linux-musl/release/rfs"
local rfs_release="${components_dir}/rfs/rfs"
if [[ -x "$rfs_built" ]]; then
log_info "✓ rfs (built) ($(get_file_size "$rfs_built")) at: ${rfs_built#${components_dir}/}"
((ok_count++))
elif [[ -x "$rfs_release" ]]; then
log_info "✓ rfs (release) ($(get_file_size "$rfs_release")) at: ${rfs_release#${components_dir}/}"
((ok_count++))
else
log_error "✗ rfs missing: checked rfs/target/.../rfs and rfs/rfs"
((missing_count++))
fi
# mycelium
local mycelium_bin="${components_dir}/mycelium/myceliumd/target/x86_64-unknown-linux-musl/release/mycelium"
if [[ -x "$mycelium_bin" ]]; then
log_info "✓ mycelium ($(get_file_size "$mycelium_bin")) at: ${mycelium_bin#${components_dir}/}"
((ok_count++))
else
log_error "✗ mycelium missing: ${mycelium_bin#${components_dir}/}"
((missing_count++))
fi
# corex
local corex_bin="${components_dir}/corex/corex"
if [[ -x "$corex_bin" ]]; then
log_info "✓ corex ($(get_file_size "$corex_bin")) at: ${corex_bin#${components_dir}/}"
((ok_count++))
else
log_error "✗ corex missing: ${corex_bin#${components_dir}/}"
((missing_count++))
fi
if [[ $missing_count -eq 0 ]]; then if [[ $missing_count -eq 0 ]]; then
log_info "All components built successfully" log_info "All components built/installed successfully"
return 0 return 0
else else
log_error "${missing_count} components missing or failed to build" log_error "${missing_count} components missing or failed to build"

View File

@@ -617,7 +617,6 @@ EOF
log_info "Branding enabled: updating /etc/issue to Zero-OS branding" log_info "Branding enabled: updating /etc/issue to Zero-OS branding"
cat > "${initramfs_dir}/etc/issue" << 'EOF' cat > "${initramfs_dir}/etc/issue" << 'EOF'
Zero-OS \r \m Zero-OS \r \m
Built on \l
EOF EOF
else else
@@ -780,7 +779,7 @@ function initramfs_create_cpio() {
case "$compression" in case "$compression" in
"xz") "xz")
log_info "Creating XZ compressed CPIO archive" log_info "Creating XZ compressed CPIO archive"
safe_execute find . -print0 | cpio -o -H newc -0 | xz -${XZ_COMPRESSION_LEVEL} --check=crc32 > "$output_file_abs" safe_execute find . -print0 | cpio -o -H newc -0 | xz -T 8 -${XZ_COMPRESSION_LEVEL} --check=crc32 > "$output_file_abs"
;; ;;
"gzip"|"gz") "gzip"|"gz")
log_info "Creating gzip compressed CPIO archive" log_info "Creating gzip compressed CPIO archive"

155
scripts/rfs/pack-tree.sh Executable file
View File

@@ -0,0 +1,155 @@
#!/bin/bash
# Pack an arbitrary directory tree into an RFS flist and upload blobs to S3 (Garage)
# - Uses config from config/rfs.conf (or rfs.conf.example fallback)
# - Packs the specified directory (default: repository root)
# - Patches manifest route URL with read-only S3 creds
# - Optionally patches stores to WEB_ENDPOINT and uploads the .fl via MinIO client
#
# Usage:
# scripts/rfs/pack-tree.sh [-p PATH] [-n MANIFEST_BASENAME] [--web-endpoint URL] [--keep-s3-fallback] [--no-upload]
# Examples:
# scripts/rfs/pack-tree.sh
# scripts/rfs/pack-tree.sh -p ./components/zinit -n zinit-src
# WEB_ENDPOINT=https://hub.grid.tf/zos/zosbuilder/store scripts/rfs/pack-tree.sh -p dist --keep-s3-fallback
set -euo pipefail
# Resolve the directory containing this script so shared helpers can be
# sourced relative to it regardless of the caller's working directory.
HERE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=/dev/null
# Provides log_info/log_warn/log_error, safe_execute and the rfs_common_* helpers.
source "${HERE}/common.sh"
section() { echo -e "\n==== $* ====\n"; }
# Print CLI usage to stdout.
# The here-doc delimiter is quoted so the help text is emitted literally,
# with no parameter or command expansion.
print_help() {
  cat <<'EOF'
Pack a directory to an RFS flist and upload blobs to S3, using config/rfs.conf.

Options:
  -p, --path PATH       Source directory to pack (default: repository root)
  -n, --name NAME       Manifest base name (without .fl). Default: tree-<basename(PATH)>-<YYYYMMDDHHMMSS>
  --web-endpoint URL    Override WEB_ENDPOINT for stores patching (default from rfs.conf)
  --keep-s3-fallback    Keep s3:// store rows alongside HTTPS store in manifest
  --no-upload           Do not upload the .fl manifest via MinIO client even if enabled in config
  -h, --help            Show this help

Environment/config:
  Reads S3 and route settings from config/rfs.conf (or rfs.conf.example).
  Honors: WEB_ENDPOINT, KEEP_S3_FALLBACK, UPLOAD_MANIFESTS.
EOF
}
# Defaults (all overridable via the CLI flags parsed below)
SRC_PATH=""
MANIFEST_NAME=""
ARG_WEB_ENDPOINT=""
ARG_KEEP_S3="false"
ARG_NO_UPLOAD="false"

# Parse args.
# Value-taking options explicitly check that a value is present; previously a
# trailing '-p' (no value) died under 'set -u' with a cryptic
# "$2: unbound variable" instead of a usable error message.
while [[ $# -gt 0 ]]; do
  case "$1" in
    -p|--path)
      [[ $# -ge 2 ]] || { echo "Option $1 requires a value" >&2; exit 1; }
      SRC_PATH="$2"; shift 2;;
    -n|--name)
      [[ $# -ge 2 ]] || { echo "Option $1 requires a value" >&2; exit 1; }
      MANIFEST_NAME="$2"; shift 2;;
    --web-endpoint)
      [[ $# -ge 2 ]] || { echo "Option $1 requires a value" >&2; exit 1; }
      ARG_WEB_ENDPOINT="$2"; shift 2;;
    --keep-s3-fallback)
      ARG_KEEP_S3="true"; shift 1;;
    --no-upload)
      ARG_NO_UPLOAD="true"; shift 1;;
    -h|--help)
      print_help; exit 0;;
    *)
      echo "Unknown argument: $1" >&2; print_help; exit 1;;
  esac
done
# Determine PROJECT_ROOT and default the source path to it
PROJECT_ROOT="${PROJECT_ROOT:-$(rfs_common_project_root)}"
if [[ -z "${SRC_PATH}" ]]; then
  SRC_PATH="${PROJECT_ROOT}"
fi

# Validate BEFORE normalizing: for a nonexistent relative path the 'cd' in the
# normalization step fails inside $(...) and, under 'set -e', kills the script
# with a raw shell error before the friendly message below could ever run.
if [[ ! -d "${SRC_PATH}" ]]; then
  log_error "Source path is not a directory: ${SRC_PATH}"
  exit 1
fi

# Normalize SRC_PATH to absolute
if [[ "${SRC_PATH}" != /* ]]; then
  SRC_PATH="$(cd "${SRC_PATH}" && pwd)"
fi

# Compute default manifest name if not given: tree-<basename>-<UTC timestamp>
if [[ -z "${MANIFEST_NAME}" ]]; then
  base="$(basename "${SRC_PATH}")"
  ts="$(date -u +%Y%m%d%H%M%S)"
  MANIFEST_NAME="tree-${base}-${ts}"
fi
# Manifest filename always carries exactly one .fl suffix.
MANIFEST_FILE="${MANIFEST_NAME%.fl}.fl"
section "Loading RFS and kernel configuration"
# Kernel version for consistent logs (not strictly required for generic pack)
rfs_common_load_build_kernel_version
# Loads S3_*, WEB_ENDPOINT, KEEP_S3_FALLBACK, UPLOAD_MANIFESTS from config/rfs.conf
# (falls back to rfs.conf.example) — see scripts/rfs/common.sh.
rfs_common_load_rfs_s3_config
# Allow CLI override for WEB_ENDPOINT and KEEP_S3_FALLBACK
if [[ -n "${ARG_WEB_ENDPOINT}" ]]; then
  WEB_ENDPOINT="${ARG_WEB_ENDPOINT}"
fi
if [[ "${ARG_KEEP_S3}" == "true" ]]; then
  KEEP_S3_FALLBACK="true"
fi
# Build RFS_S3_STORE_URI from the S3 settings and resolve RFS_BIN.
rfs_common_build_s3_store_uri
rfs_common_locate_rfs
# Prepare output — returns the absolute path where the .fl manifest will be written.
MANIFEST_PATH="$(rfs_common_prepare_output "${MANIFEST_FILE}")"
section "Packing directory to flist"
log_info "Source path: ${SRC_PATH}"
log_info "Manifest: ${MANIFEST_PATH}"
log_info "Store: ${RFS_S3_STORE_URI}"
# Pack the tree: content-addressed blobs are pushed to the S3 store,
# the manifest (sqlite .fl) is written locally at MANIFEST_PATH.
safe_execute "${RFS_BIN}" pack --debug -m "${MANIFEST_PATH}" -s "${RFS_S3_STORE_URI}" "${SRC_PATH}"
section "Patching route.url in manifest to S3 read-only endpoint"
# Rewrites the manifest's route URL to use read-only credentials so the
# write keys are not embedded in the distributed .fl file.
rfs_common_build_route_url
rfs_common_patch_flist_route_url "${MANIFEST_PATH}"
# Optionally rewrite the manifest's store rows to a public HTTPS endpoint so
# consumers can fetch blobs without S3 credentials.
if [[ -n "${WEB_ENDPOINT:-}" ]]; then
  section "Patching stores to HTTPS web endpoint"
  log_info "WEB_ENDPOINT=${WEB_ENDPOINT}"
  log_info "KEEP_S3_FALLBACK=${KEEP_S3_FALLBACK:-false}"
  rfs_common_patch_flist_stores "${MANIFEST_PATH}" "${WEB_ENDPOINT}" "${KEEP_S3_FALLBACK:-false}"
else
  log_warn "WEB_ENDPOINT not set; leaving manifest stores as-is (s3:// only)"
fi
# Optional manifest upload via MinIO client
UPLOAD="${UPLOAD_MANIFESTS:-false}"
if [[ "${ARG_NO_UPLOAD}" == "true" ]]; then
  UPLOAD="false"
fi
if [[ "${UPLOAD}" == "true" ]]; then
  section "Uploading manifest .fl via MinIO client (mcli/mc)"
  # Prefer the newer 'mcli' binary name, fall back to 'mc'; missing client
  # downgrades to a warning rather than failing the whole pack.
  if command -v mcli >/dev/null 2>&1; then
    MCLI_BIN="mcli"
  elif command -v mc >/dev/null 2>&1; then
    MCLI_BIN="mc"
  else
    log_warn "MinIO Client not found (expected mcli or mc); skipping manifest upload"
    MCLI_BIN=""
  fi
  if [[ -n "${MCLI_BIN}" ]]; then
    local_subpath="${MANIFESTS_SUBPATH:-manifests}"
    # NOTE(review): passing S3_SECRET_KEY as an argv element can leak through
    # process listings or safe_execute's command logging — consider the
    # MC_HOST_<alias> environment variable instead.
    safe_execute "${MCLI_BIN}" alias set rfs "${S3_ENDPOINT}" "${S3_ACCESS_KEY}" "${S3_SECRET_KEY}"
    # Destination: rfs/<bucket>/<prefix>/<manifests-subpath>/<name>.fl
    dst="rfs/${S3_BUCKET}/${S3_PREFIX%/}/${local_subpath%/}/${MANIFEST_FILE}"
    log_info "${MCLI_BIN} cp ${MANIFEST_PATH} ${dst}"
    safe_execute "${MCLI_BIN}" cp "${MANIFEST_PATH}" "${dst}"
  fi
else
  log_info "UPLOAD_MANIFESTS=false; skipping manifest upload"
fi
section "Done"
log_info "Packed: ${MANIFEST_PATH}"