feat: Add support for virt package

- Add sal-virt package to the workspace members
- Update MONOREPO_CONVERSION_PLAN.md to reflect the
  completion of sal-process and sal-virt packages
- Update src/lib.rs to include sal-virt
- Update src/postgresclient to use sal-virt instead of local
  virt module
- Update tests to use sal-virt
Mahmoud-Emad
2025-06-23 02:37:14 +03:00
parent 3e3d0a1d45
commit 455f84528b
112 changed files with 2924 additions and 579 deletions

View File

@@ -47,7 +47,7 @@ pub use sal_redisclient as redisclient;
pub mod rhai;
pub use sal_text as text;
pub mod vault;
pub mod virt;
pub use sal_virt as virt;
pub use sal_zinit_client as zinit_client;
// Version information

View File

@@ -10,7 +10,7 @@ use std::process::Command;
use std::thread;
use std::time::Duration;
use crate::virt::nerdctl::Container;
use sal_virt::nerdctl::Container;
use std::error::Error;
use std::fmt;

View File

@@ -138,7 +138,7 @@ mod postgres_client_tests {
#[cfg(test)]
mod postgres_installer_tests {
use super::*;
use crate::virt::nerdctl::Container;
use sal_virt::nerdctl::Container;
#[test]
fn test_postgres_installer_config() {

View File

@@ -1,253 +0,0 @@
//! Rhai wrappers for Buildah module functions
//!
//! This module provides Rhai wrappers for the functions in the Buildah module.
use rhai::{Engine, EvalAltResult, Array, Dynamic, Map};
use std::collections::HashMap;
use crate::virt::buildah::{BuildahError, Image, Builder, ContentOperations};
use crate::process::CommandResult;
/// Register Buildah module functions with the Rhai engine
///
/// # Arguments
///
/// * `engine` - The Rhai engine to register the functions with
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Ok if registration was successful, Err otherwise
pub fn register_bah_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register types
register_bah_types(engine)?;
// Register Builder constructor
engine.register_fn("bah_new", bah_new);
// Register Builder instance methods
engine.register_fn("run", builder_run);
engine.register_fn("run_with_isolation", builder_run_with_isolation);
engine.register_fn("copy", builder_copy);
engine.register_fn("add", builder_add);
engine.register_fn("commit", builder_commit);
engine.register_fn("remove", builder_remove);
engine.register_fn("reset", builder_reset);
engine.register_fn("config", builder_config);
// Register Builder instance methods for entrypoint, cmd, and content operations
engine.register_fn("set_entrypoint", builder_set_entrypoint);
engine.register_fn("set_cmd", builder_set_cmd);
engine.register_fn("write_content", builder_write_content);
engine.register_fn("read_content", builder_read_content);
// Register Builder static methods
engine.register_fn("images", builder_images);
engine.register_fn("image_remove", builder_image_remove);
engine.register_fn("image_pull", builder_image_pull);
engine.register_fn("image_push", builder_image_push);
engine.register_fn("image_tag", builder_image_tag);
engine.register_fn("build", builder_build);
engine.register_fn("read_content", builder_read_content);
Ok(())
}
/// Register Buildah module types with the Rhai engine
fn register_bah_types(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register Builder type
engine.register_type_with_name::<Builder>("BuildahBuilder");
// Register getters for Builder properties
engine.register_get("container_id", get_builder_container_id);
engine.register_get("name", get_builder_name);
engine.register_get("image", get_builder_image);
engine.register_get("debug_mode", get_builder_debug);
engine.register_set("debug_mode", set_builder_debug);
// Register Image type and methods (same as before)
engine.register_type_with_name::<Image>("BuildahImage");
// Register getters for Image properties
engine.register_get("id", |img: &mut Image| img.id.clone());
engine.register_get("names", |img: &mut Image| {
let mut array = Array::new();
for name in &img.names {
array.push(Dynamic::from(name.clone()));
}
array
});
// Add a 'name' getter that returns the first name or a default
engine.register_get("name", |img: &mut Image| {
if img.names.is_empty() {
"<none>".to_string()
} else {
img.names[0].clone()
}
});
engine.register_get("size", |img: &mut Image| img.size.clone());
engine.register_get("created", |img: &mut Image| img.created.clone());
Ok(())
}
// Helper functions for error conversion
fn bah_error_to_rhai_error<T>(result: Result<T, BuildahError>) -> Result<T, Box<EvalAltResult>> {
result.map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Buildah error: {}", e).into(),
rhai::Position::NONE
))
})
}
// Helper function to convert Rhai Map to Rust HashMap
fn convert_map_to_hashmap(options: Map) -> Result<HashMap<String, String>, Box<EvalAltResult>> {
let mut config_options = HashMap::<String, String>::new();
for (key, value) in options.iter() {
if let Ok(value_str) = value.clone().into_string() {
// Convert SmartString to String
config_options.insert(key.to_string(), value_str);
} else {
return Err(Box::new(EvalAltResult::ErrorRuntime(
format!("Option '{}' must be a string", key).into(),
rhai::Position::NONE
)));
}
}
Ok(config_options)
}
/// Create a new Builder
pub fn bah_new(name: &str, image: &str) -> Result<Builder, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::new(name, image))
}
// Builder instance methods
pub fn builder_run(builder: &mut Builder, command: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.run(command))
}
pub fn builder_run_with_isolation(builder: &mut Builder, command: &str, isolation: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.run_with_isolation(command, isolation))
}
pub fn builder_copy(builder: &mut Builder, source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.copy(source, dest))
}
pub fn builder_add(builder: &mut Builder, source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.add(source, dest))
}
pub fn builder_commit(builder: &mut Builder, image_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.commit(image_name))
}
pub fn builder_remove(builder: &mut Builder) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.remove())
}
pub fn builder_config(builder: &mut Builder, options: Map) -> Result<CommandResult, Box<EvalAltResult>> {
// Convert Rhai Map to Rust HashMap
let config_options = convert_map_to_hashmap(options)?;
bah_error_to_rhai_error(builder.config(config_options))
}
/// Set the entrypoint for the container
pub fn builder_set_entrypoint(builder: &mut Builder, entrypoint: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.set_entrypoint(entrypoint))
}
/// Set the default command for the container
pub fn builder_set_cmd(builder: &mut Builder, cmd: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.set_cmd(cmd))
}
/// Write content to a file in the container
pub fn builder_write_content(builder: &mut Builder, content: &str, dest_path: &str) -> Result<CommandResult, Box<EvalAltResult>> {
if let Some(container_id) = builder.container_id() {
bah_error_to_rhai_error(ContentOperations::write_content(container_id, content, dest_path))
} else {
Err(Box::new(EvalAltResult::ErrorRuntime(
"No container ID available".into(),
rhai::Position::NONE
)))
}
}
/// Read content from a file in the container
pub fn builder_read_content(builder: &mut Builder, source_path: &str) -> Result<String, Box<EvalAltResult>> {
if let Some(container_id) = builder.container_id() {
bah_error_to_rhai_error(ContentOperations::read_content(container_id, source_path))
} else {
Err(Box::new(EvalAltResult::ErrorRuntime(
"No container ID available".into(),
rhai::Position::NONE
)))
}
}
// Builder static methods
pub fn builder_images(_builder: &mut Builder) -> Result<Array, Box<EvalAltResult>> {
let images = bah_error_to_rhai_error(Builder::images())?;
// Convert Vec<Image> to Rhai Array
let mut array = Array::new();
for image in images {
array.push(Dynamic::from(image));
}
Ok(array)
}
pub fn builder_image_remove(_builder: &mut Builder, image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::image_remove(image))
}
pub fn builder_image_pull(_builder: &mut Builder, image: &str, tls_verify: bool) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::image_pull(image, tls_verify))
}
pub fn builder_image_push(_builder: &mut Builder, image: &str, destination: &str, tls_verify: bool) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::image_push(image, destination, tls_verify))
}
pub fn builder_image_tag(_builder: &mut Builder, image: &str, new_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::image_tag(image, new_name))
}
// Getter functions for Builder properties
pub fn get_builder_container_id(builder: &mut Builder) -> String {
match builder.container_id() {
Some(id) => id.clone(),
None => "".to_string(),
}
}
pub fn get_builder_name(builder: &mut Builder) -> String {
builder.name().to_string()
}
pub fn get_builder_image(builder: &mut Builder) -> String {
builder.image().to_string()
}
/// Get the debug flag from a Builder
pub fn get_builder_debug(builder: &mut Builder) -> bool {
builder.debug()
}
/// Set the debug flag on a Builder
pub fn set_builder_debug(builder: &mut Builder, debug: bool) {
builder.set_debug(debug);
}
// Reset function for Builder
pub fn builder_reset(builder: &mut Builder) -> Result<(), Box<EvalAltResult>> {
bah_error_to_rhai_error(builder.reset())
}
// Build function for Builder
pub fn builder_build(_builder: &mut Builder, tag: &str, context_dir: &str, file: &str, isolation: &str) -> Result<CommandResult, Box<EvalAltResult>> {
bah_error_to_rhai_error(Builder::build(Some(tag), context_dir, file, Some(isolation)))
}

View File

@@ -3,15 +3,13 @@
//! This module provides integration with the Rhai scripting language,
//! allowing SAL functions to be called from Rhai scripts.
mod buildah;
mod core;
pub mod error;
mod nerdctl;
// OS module is now provided by sal-os package
// Platform module is now provided by sal-os package
mod postgresclient;
mod rfs;
// Virt modules (buildah, nerdctl, rfs) are now provided by sal-virt package
mod vault;
// zinit module is now in sal-zinit-client package
@@ -58,13 +56,8 @@ pub use sal_process::rhai::{
which,
};
// Re-export buildah functions
pub use buildah::bah_new;
pub use buildah::register_bah_module;
// Re-export nerdctl functions
pub use nerdctl::register_nerdctl_module;
pub use nerdctl::{
// Re-export virt functions from sal-virt package
pub use sal_virt::rhai::nerdctl::{
nerdctl_copy,
nerdctl_exec,
nerdctl_image_build,
@@ -83,9 +76,9 @@ pub use nerdctl::{
nerdctl_run_with_port,
nerdctl_stop,
};
// Re-export RFS module
pub use rfs::register as register_rfs_module;
pub use sal_virt::rhai::{
bah_new, register_bah_module, register_nerdctl_module, register_rfs_module,
};
// Re-export git module from sal-git package
pub use sal_git::rhai::register_git_module;
@@ -138,11 +131,8 @@ pub fn register(engine: &mut Engine) -> Result<(), Box<rhai::EvalAltResult>> {
// Register Process module functions
sal_process::rhai::register_process_module(engine)?;
// Register Buildah module functions
buildah::register_bah_module(engine)?;
// Register Nerdctl module functions
nerdctl::register_nerdctl_module(engine)?;
// Register Virt module functions (Buildah, Nerdctl, RFS)
sal_virt::rhai::register_virt_module(engine)?;
// Register Git module functions
sal_git::rhai::register_git_module(engine)?;
@@ -159,8 +149,7 @@ pub fn register(engine: &mut Engine) -> Result<(), Box<rhai::EvalAltResult>> {
// Register Net module functions
sal_net::rhai::register_net_module(engine)?;
// Register RFS module functions
rfs::register(engine)?;
// RFS module functions are now registered as part of sal_virt above
// Register Crypto module functions
vault::register_crypto_module(engine)?;

View File

@@ -1,580 +0,0 @@
//! Rhai wrappers for Nerdctl module functions
//!
//! This module provides Rhai wrappers for the functions in the Nerdctl module.
use rhai::{Engine, EvalAltResult, Array, Dynamic, Map};
use crate::virt::nerdctl::{self, NerdctlError, Image, Container};
use crate::process::CommandResult;
// Helper functions for error conversion with improved context
fn nerdctl_error_to_rhai_error<T>(result: Result<T, NerdctlError>) -> Result<T, Box<EvalAltResult>> {
result.map_err(|e| {
// Create a more detailed error message based on the error type
let error_message = match &e {
NerdctlError::CommandExecutionFailed(io_err) => {
format!("Failed to execute nerdctl command: {}. This may indicate nerdctl is not installed or not in PATH.", io_err)
},
NerdctlError::CommandFailed(msg) => {
format!("Nerdctl command failed: {}. Check container status and logs for more details.", msg)
},
NerdctlError::JsonParseError(msg) => {
format!("Failed to parse nerdctl JSON output: {}. This may indicate an incompatible nerdctl version.", msg)
},
NerdctlError::ConversionError(msg) => {
format!("Data conversion error: {}. This may indicate unexpected output format from nerdctl.", msg)
},
NerdctlError::Other(msg) => {
format!("Nerdctl error: {}. This is an unexpected error.", msg)
},
};
Box::new(EvalAltResult::ErrorRuntime(
error_message.into(),
rhai::Position::NONE
))
})
}
//
// Container Builder Pattern Implementation
//
/// Create a new Container
pub fn container_new(name: &str) -> Result<Container, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(Container::new(name))
}
/// Create a Container from an image
pub fn container_from_image(name: &str, image: &str) -> Result<Container, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(Container::from_image(name, image))
}
/// Reset the container configuration to defaults while keeping the name and image
pub fn container_reset(container: Container) -> Container {
container.reset()
}
/// Add a port mapping to a Container
pub fn container_with_port(container: Container, port: &str) -> Container {
container.with_port(port)
}
/// Add a volume mount to a Container
pub fn container_with_volume(container: Container, volume: &str) -> Container {
container.with_volume(volume)
}
/// Add an environment variable to a Container
pub fn container_with_env(container: Container, key: &str, value: &str) -> Container {
container.with_env(key, value)
}
/// Set the network for a Container
pub fn container_with_network(container: Container, network: &str) -> Container {
container.with_network(network)
}
/// Add a network alias to a Container
pub fn container_with_network_alias(container: Container, alias: &str) -> Container {
container.with_network_alias(alias)
}
/// Set CPU limit for a Container
pub fn container_with_cpu_limit(container: Container, cpus: &str) -> Container {
container.with_cpu_limit(cpus)
}
/// Set memory limit for a Container
pub fn container_with_memory_limit(container: Container, memory: &str) -> Container {
container.with_memory_limit(memory)
}
/// Set restart policy for a Container
pub fn container_with_restart_policy(container: Container, policy: &str) -> Container {
container.with_restart_policy(policy)
}
/// Set health check for a Container
pub fn container_with_health_check(container: Container, cmd: &str) -> Container {
container.with_health_check(cmd)
}
/// Add multiple port mappings to a Container
pub fn container_with_ports(mut container: Container, ports: Array) -> Container {
for port in ports.iter() {
if port.is_string() {
let port_str = port.clone().cast::<String>();
container = container.with_port(&port_str);
}
}
container
}
/// Add multiple volume mounts to a Container
pub fn container_with_volumes(mut container: Container, volumes: Array) -> Container {
for volume in volumes.iter() {
if volume.is_string() {
let volume_str = volume.clone().cast::<String>();
container = container.with_volume(&volume_str);
}
}
container
}
/// Add multiple environment variables to a Container
pub fn container_with_envs(mut container: Container, env_map: Map) -> Container {
for (key, value) in env_map.iter() {
if value.is_string() {
let value_str = value.clone().cast::<String>();
container = container.with_env(&key, &value_str);
}
}
container
}
/// Add multiple network aliases to a Container
pub fn container_with_network_aliases(mut container: Container, aliases: Array) -> Container {
for alias in aliases.iter() {
if alias.is_string() {
let alias_str = alias.clone().cast::<String>();
container = container.with_network_alias(&alias_str);
}
}
container
}
/// Set memory swap limit for a Container
pub fn container_with_memory_swap_limit(container: Container, memory_swap: &str) -> Container {
container.with_memory_swap_limit(memory_swap)
}
/// Set CPU shares for a Container
pub fn container_with_cpu_shares(container: Container, shares: &str) -> Container {
container.with_cpu_shares(shares)
}
/// Set health check with options for a Container
pub fn container_with_health_check_options(
container: Container,
cmd: &str,
interval: Option<&str>,
timeout: Option<&str>,
retries: Option<i64>,
start_period: Option<&str>
) -> Container {
// Convert i64 to u32 for retries
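// (an `as` cast wraps values outside the u32 range; Rhai integers are i64,
// so callers should pass small non-negative retry counts)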
let retries_u32 = retries.map(|r| r as u32);
container.with_health_check_options(cmd, interval, timeout, retries_u32, start_period)
}
/// Set snapshotter for a Container
pub fn container_with_snapshotter(container: Container, snapshotter: &str) -> Container {
container.with_snapshotter(snapshotter)
}
/// Set detach mode for a Container
pub fn container_with_detach(container: Container, detach: bool) -> Container {
container.with_detach(detach)
}
/// Build and run the Container
///
/// This function builds and runs the container using the configured options.
/// It provides detailed error information if the build fails.
pub fn container_build(container: Container) -> Result<Container, Box<EvalAltResult>> {
// Get container details for better error reporting
let container_name = container.name.clone();
let image = container.image.clone().unwrap_or_else(|| "none".to_string());
let ports = container.ports.clone();
let volumes = container.volumes.clone();
let env_vars = container.env_vars.clone();
// Try to build the container
let build_result = container.build();
// Handle the result with improved error context
match build_result {
Ok(built_container) => {
// Container built successfully
Ok(built_container)
},
Err(err) => {
// Add more context to the error
let enhanced_error = match err {
NerdctlError::CommandFailed(msg) => {
// Provide more detailed error information
let mut enhanced_msg = format!("Failed to build container '{}' from image '{}': {}",
container_name, image, msg);
// Add information about configured options that might be relevant
if !ports.is_empty() {
enhanced_msg.push_str(&format!("\nConfigured ports: {:?}", ports));
}
if !volumes.is_empty() {
enhanced_msg.push_str(&format!("\nConfigured volumes: {:?}", volumes));
}
if !env_vars.is_empty() {
enhanced_msg.push_str(&format!("\nConfigured environment variables: {:?}", env_vars));
}
// Add suggestions for common issues
if msg.contains("not found") || msg.contains("no such image") {
enhanced_msg.push_str("\nSuggestion: The specified image may not exist or may not be pulled yet. Try pulling the image first with nerdctl_image_pull().");
} else if msg.contains("port is already allocated") {
enhanced_msg.push_str("\nSuggestion: One of the specified ports is already in use. Try using a different port or stopping the container using that port.");
} else if msg.contains("permission denied") {
enhanced_msg.push_str("\nSuggestion: Permission issues detected. Check if you have the necessary permissions to create containers or access the specified volumes.");
}
NerdctlError::CommandFailed(enhanced_msg)
},
_ => err
};
nerdctl_error_to_rhai_error(Err(enhanced_error))
}
}
}
/// Start the Container and verify it's running
///
/// This function starts the container and verifies that it's actually running.
/// It returns detailed error information if the container fails to start or
/// if it starts but stops immediately.
pub fn container_start(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
// Get container details for better error reporting
let container_name = container.name.clone();
let container_id = container.container_id.clone().unwrap_or_else(|| "unknown".to_string());
// Try to start the container
let start_result = container.start();
// Handle the result with improved error context
match start_result {
Ok(result) => {
// Container started successfully
Ok(result)
},
Err(err) => {
// Add more context to the error
let enhanced_error = match err {
NerdctlError::CommandFailed(msg) => {
// Check if this is a "container already running" error, which is not really an error
if msg.contains("already running") {
return Ok(CommandResult {
stdout: format!("Container {} is already running", container_name),
stderr: "".to_string(),
success: true,
code: 0,
});
}
// Try to get more information about why the container might have failed to start
let mut enhanced_msg = format!("Failed to start container '{}' (ID: {}): {}",
container_name, container_id, msg);
// Try to check if the image exists
if let Some(image) = &container.image {
enhanced_msg.push_str(&format!("\nContainer was using image: {}", image));
}
NerdctlError::CommandFailed(enhanced_msg)
},
_ => err
};
nerdctl_error_to_rhai_error(Err(enhanced_error))
}
}
}
/// Stop the Container
pub fn container_stop(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(container.stop())
}
/// Remove the Container
pub fn container_remove(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(container.remove())
}
/// Execute a command in the Container
pub fn container_exec(container: &mut Container, command: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(container.exec(command))
}
/// Get container logs
pub fn container_logs(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
// Get container details for better error reporting
let container_name = container.name.clone();
let container_id = container.container_id.clone().unwrap_or_else(|| "unknown".to_string());
// Use the nerdctl::logs function
let logs_result = nerdctl::logs(&container_id);
match logs_result {
Ok(result) => {
Ok(result)
},
Err(err) => {
// Add more context to the error
let enhanced_error = NerdctlError::CommandFailed(
format!("Failed to get logs for container '{}' (ID: {}): {}",
container_name, container_id, err)
);
nerdctl_error_to_rhai_error(Err(enhanced_error))
}
}
}
/// Copy files between the Container and local filesystem
pub fn container_copy(container: &mut Container, source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(container.copy(source, dest))
}
/// Create a new Map with default run options
pub fn new_run_options() -> Map {
let mut map = Map::new();
map.insert("name".into(), Dynamic::UNIT);
map.insert("detach".into(), Dynamic::from(true));
map.insert("ports".into(), Dynamic::from(Array::new()));
map.insert("snapshotter".into(), Dynamic::from("native"));
map
}
//
// Container Function Wrappers
//
/// Wrapper for nerdctl::run
///
/// Run a container from an image.
pub fn nerdctl_run(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::run(image, None, true, None, None))
}
/// Run a container with a name
pub fn nerdctl_run_with_name(image: &str, name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::run(image, Some(name), true, None, None))
}
/// Run a container with a port mapping
pub fn nerdctl_run_with_port(image: &str, name: &str, port: &str) -> Result<CommandResult, Box<EvalAltResult>> {
let ports = vec![port];
nerdctl_error_to_rhai_error(nerdctl::run(image, Some(name), true, Some(&ports), None))
}
/// Wrapper for nerdctl::exec
///
/// Execute a command in a container.
pub fn nerdctl_exec(container: &str, command: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::exec(container, command))
}
/// Wrapper for nerdctl::copy
///
/// Copy files between container and local filesystem.
pub fn nerdctl_copy(source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::copy(source, dest))
}
/// Wrapper for nerdctl::stop
///
/// Stop a container.
pub fn nerdctl_stop(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::stop(container))
}
/// Wrapper for nerdctl::remove
///
/// Remove a container.
pub fn nerdctl_remove(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::remove(container))
}
/// Wrapper for nerdctl::list
///
/// List containers.
pub fn nerdctl_list(all: bool) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::list(all))
}
/// Wrapper for nerdctl::logs
///
/// Get container logs.
pub fn nerdctl_logs(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::logs(container))
}
//
// Image Function Wrappers
//
/// Wrapper for nerdctl::images
///
/// List images in local storage.
pub fn nerdctl_images() -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::images())
}
/// Wrapper for nerdctl::image_remove
///
/// Remove one or more images.
pub fn nerdctl_image_remove(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_remove(image))
}
/// Wrapper for nerdctl::image_push
///
/// Push an image to a registry.
pub fn nerdctl_image_push(image: &str, destination: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_push(image, destination))
}
/// Wrapper for nerdctl::image_tag
///
/// Add an additional name to a local image.
pub fn nerdctl_image_tag(image: &str, new_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_tag(image, new_name))
}
/// Wrapper for nerdctl::image_pull
///
/// Pull an image from a registry.
pub fn nerdctl_image_pull(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_pull(image))
}
/// Wrapper for nerdctl::image_commit
///
/// Commit a container to an image.
pub fn nerdctl_image_commit(container: &str, image_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_commit(container, image_name))
}
/// Wrapper for nerdctl::image_build
///
/// Build an image using a Dockerfile.
pub fn nerdctl_image_build(tag: &str, context_path: &str) -> Result<CommandResult, Box<EvalAltResult>> {
nerdctl_error_to_rhai_error(nerdctl::image_build(tag, context_path))
}
/// Register Nerdctl module functions with the Rhai engine
///
/// # Arguments
///
/// * `engine` - The Rhai engine to register the functions with
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Ok if registration was successful, Err otherwise
pub fn register_nerdctl_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register types
register_nerdctl_types(engine)?;
// Register Container constructor
engine.register_fn("nerdctl_container_new", container_new);
engine.register_fn("nerdctl_container_from_image", container_from_image);
// Register Container instance methods
engine.register_fn("reset", container_reset);
engine.register_fn("with_port", container_with_port);
engine.register_fn("with_volume", container_with_volume);
engine.register_fn("with_env", container_with_env);
engine.register_fn("with_network", container_with_network);
engine.register_fn("with_network_alias", container_with_network_alias);
engine.register_fn("with_cpu_limit", container_with_cpu_limit);
engine.register_fn("with_memory_limit", container_with_memory_limit);
engine.register_fn("with_restart_policy", container_with_restart_policy);
engine.register_fn("with_health_check", container_with_health_check);
engine.register_fn("with_ports", container_with_ports);
engine.register_fn("with_volumes", container_with_volumes);
engine.register_fn("with_envs", container_with_envs);
engine.register_fn("with_network_aliases", container_with_network_aliases);
engine.register_fn("with_memory_swap_limit", container_with_memory_swap_limit);
engine.register_fn("with_cpu_shares", container_with_cpu_shares);
engine.register_fn("with_health_check_options", container_with_health_check_options);
engine.register_fn("with_snapshotter", container_with_snapshotter);
engine.register_fn("with_detach", container_with_detach);
engine.register_fn("build", container_build);
engine.register_fn("start", container_start);
engine.register_fn("stop", container_stop);
engine.register_fn("remove", container_remove);
engine.register_fn("exec", container_exec);
engine.register_fn("logs", container_logs);
engine.register_fn("copy", container_copy);
// Register legacy container functions (for backward compatibility)
engine.register_fn("nerdctl_run", nerdctl_run);
engine.register_fn("nerdctl_run_with_name", nerdctl_run_with_name);
engine.register_fn("nerdctl_run_with_port", nerdctl_run_with_port);
engine.register_fn("new_run_options", new_run_options);
engine.register_fn("nerdctl_exec", nerdctl_exec);
engine.register_fn("nerdctl_copy", nerdctl_copy);
engine.register_fn("nerdctl_stop", nerdctl_stop);
engine.register_fn("nerdctl_remove", nerdctl_remove);
engine.register_fn("nerdctl_list", nerdctl_list);
engine.register_fn("nerdctl_logs", nerdctl_logs);
// Register image functions
engine.register_fn("nerdctl_images", nerdctl_images);
engine.register_fn("nerdctl_image_remove", nerdctl_image_remove);
engine.register_fn("nerdctl_image_push", nerdctl_image_push);
engine.register_fn("nerdctl_image_tag", nerdctl_image_tag);
engine.register_fn("nerdctl_image_pull", nerdctl_image_pull);
engine.register_fn("nerdctl_image_commit", nerdctl_image_commit);
engine.register_fn("nerdctl_image_build", nerdctl_image_build);
Ok(())
}
/// Register Nerdctl module types with the Rhai engine
fn register_nerdctl_types(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register Container type
engine.register_type_with_name::<Container>("NerdctlContainer");
// Register getters for Container properties
engine.register_get("name", |container: &mut Container| container.name.clone());
engine.register_get("container_id", |container: &mut Container| {
match &container.container_id {
Some(id) => id.clone(),
None => "".to_string(),
}
});
engine.register_get("image", |container: &mut Container| {
match &container.image {
Some(img) => img.clone(),
None => "".to_string(),
}
});
engine.register_get("ports", |container: &mut Container| {
let mut array = Array::new();
for port in &container.ports {
array.push(Dynamic::from(port.clone()));
}
array
});
engine.register_get("volumes", |container: &mut Container| {
let mut array = Array::new();
for volume in &container.volumes {
array.push(Dynamic::from(volume.clone()));
}
array
});
engine.register_get("detach", |container: &mut Container| container.detach);
// Register Image type and methods
engine.register_type_with_name::<Image>("NerdctlImage");
// Register getters for Image properties
engine.register_get("id", |img: &mut Image| img.id.clone());
engine.register_get("repository", |img: &mut Image| img.repository.clone());
engine.register_get("tag", |img: &mut Image| img.tag.clone());
engine.register_get("size", |img: &mut Image| img.size.clone());
engine.register_get("created", |img: &mut Image| img.created.clone());
Ok(())
}

View File

@@ -1,292 +0,0 @@
use rhai::{Engine, EvalAltResult, Map, Array};
use crate::virt::rfs::{
RfsBuilder, MountType, StoreSpec,
list_mounts, unmount_all, unmount, get_mount_info,
pack_directory, unpack, list_contents, verify
};
/// Register RFS functions with the Rhai engine
pub fn register(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register mount functions
engine.register_fn("rfs_mount", rfs_mount);
engine.register_fn("rfs_unmount", rfs_unmount);
engine.register_fn("rfs_list_mounts", rfs_list_mounts);
engine.register_fn("rfs_unmount_all", rfs_unmount_all);
engine.register_fn("rfs_get_mount_info", rfs_get_mount_info);
// Register pack functions
engine.register_fn("rfs_pack", rfs_pack);
engine.register_fn("rfs_unpack", rfs_unpack);
engine.register_fn("rfs_list_contents", rfs_list_contents);
engine.register_fn("rfs_verify", rfs_verify);
Ok(())
}
/// Mount a filesystem
///
/// # Arguments
///
/// * `source` - Source path or URL
/// * `target` - Target mount point
/// * `mount_type` - Mount type (e.g., "local", "ssh", "s3", "webdav")
/// * `options` - Mount options as a map
///
/// # Returns
///
/// * `Result<Map, Box<EvalAltResult>>` - Mount information or error
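///
/// Example (Rhai, illustrative; option names depend on the mount type):
/// `let info = rfs_mount("server:/data", "/mnt/data", "ssh", #{ "port": "2222" });`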
fn rfs_mount(source: &str, target: &str, mount_type: &str, options: Map) -> Result<Map, Box<EvalAltResult>> {
// Convert mount type string to MountType enum
let mount_type_enum = MountType::from_string(mount_type);
// Create a builder
let mut builder = RfsBuilder::new(source, target, mount_type_enum);
// Add options
for (key, value) in options.iter() {
if let Ok(value_str) = value.clone().into_string() {
builder = builder.with_option(key, &value_str);
}
}
// Mount the filesystem
let mount = builder.mount()
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to mount filesystem: {}", e).into(),
rhai::Position::NONE
)))?;
// Convert Mount to Map
let mut result = Map::new();
result.insert("id".into(), mount.id.into());
result.insert("source".into(), mount.source.into());
result.insert("target".into(), mount.target.into());
result.insert("fs_type".into(), mount.fs_type.into());
let options_array: Array = mount.options.iter()
.map(|opt| opt.clone().into())
.collect();
result.insert("options".into(), options_array.into());
Ok(result)
}
/// Unmount a filesystem
///
/// # Arguments
///
/// * `target` - Target mount point
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or error
fn rfs_unmount(target: &str) -> Result<(), Box<EvalAltResult>> {
unmount(target)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to unmount filesystem: {}", e).into(),
rhai::Position::NONE
)))
}
/// List all mounted filesystems
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - List of mounts or error
fn rfs_list_mounts() -> Result<Array, Box<EvalAltResult>> {
let mounts = list_mounts()
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to list mounts: {}", e).into(),
rhai::Position::NONE
)))?;
let mut result = Array::new();
for mount in mounts {
let mut mount_map = Map::new();
mount_map.insert("id".into(), mount.id.into());
mount_map.insert("source".into(), mount.source.into());
mount_map.insert("target".into(), mount.target.into());
mount_map.insert("fs_type".into(), mount.fs_type.into());
let options_array: Array = mount.options.iter()
.map(|opt| opt.clone().into())
.collect();
mount_map.insert("options".into(), options_array.into());
result.push(mount_map.into());
}
Ok(result)
}
/// Unmount all filesystems
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or error
fn rfs_unmount_all() -> Result<(), Box<EvalAltResult>> {
unmount_all()
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to unmount all filesystems: {}", e).into(),
rhai::Position::NONE
)))
}
/// Get information about a mounted filesystem
///
/// # Arguments
///
/// * `target` - Target mount point
///
/// # Returns
///
/// * `Result<Map, Box<EvalAltResult>>` - Mount information or error
fn rfs_get_mount_info(target: &str) -> Result<Map, Box<EvalAltResult>> {
let mount = get_mount_info(target)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to get mount info: {}", e).into(),
rhai::Position::NONE
)))?;
let mut result = Map::new();
result.insert("id".into(), mount.id.into());
result.insert("source".into(), mount.source.into());
result.insert("target".into(), mount.target.into());
result.insert("fs_type".into(), mount.fs_type.into());
let options_array: Array = mount.options.iter()
.map(|opt| opt.clone().into())
.collect();
result.insert("options".into(), options_array.into());
Ok(result)
}
/// Pack a directory into a filesystem layer
///
/// # Arguments
///
/// * `directory` - Directory to pack
/// * `output` - Output file
/// * `store_specs` - Store specifications as a string (e.g., "file:path=/path/to/store,s3:bucket=my-bucket")
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or error
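///
/// Example (Rhai, illustrative):
/// `rfs_pack("/data/src", "/tmp/layer.fl", "file:path=/tmp/store");`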
fn rfs_pack(directory: &str, output: &str, store_specs: &str) -> Result<(), Box<EvalAltResult>> {
// Parse store specs
let specs = parse_store_specs(store_specs);
// Pack the directory
pack_directory(directory, output, &specs)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to pack directory: {}", e).into(),
rhai::Position::NONE
)))
}
/// Unpack a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
/// * `directory` - Directory to unpack to
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or error
fn rfs_unpack(input: &str, directory: &str) -> Result<(), Box<EvalAltResult>> {
unpack(input, directory)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to unpack filesystem layer: {}", e).into(),
rhai::Position::NONE
)))
}
/// List the contents of a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - File listing or error
fn rfs_list_contents(input: &str) -> Result<String, Box<EvalAltResult>> {
list_contents(input)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to list contents: {}", e).into(),
rhai::Position::NONE
)))
}
/// Verify a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
///
/// # Returns
///
/// * `Result<bool, Box<EvalAltResult>>` - Whether the layer is valid or error
fn rfs_verify(input: &str) -> Result<bool, Box<EvalAltResult>> {
verify(input)
.map_err(|e| Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to verify filesystem layer: {}", e).into(),
rhai::Position::NONE
)))
}
/// Parse store specifications from a string
///
/// # Arguments
///
/// * `specs_str` - Store specifications as a string
///
/// # Returns
///
/// * `Vec<StoreSpec>` - Store specifications
fn parse_store_specs(specs_str: &str) -> Vec<StoreSpec> {
let mut result = Vec::new();
// Split by comma
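// Note: because this outer split also runs on commas, option values within
// a single spec cannot themselves contain commas.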
for spec_str in specs_str.split(',') {
// Skip empty specs
if spec_str.trim().is_empty() {
continue;
}
// Split by colon to get type and options
let parts: Vec<&str> = spec_str.split(':').collect();
if parts.is_empty() {
continue;
}
// Get spec type
let spec_type = parts[0].trim();
// Create store spec
let mut store_spec = StoreSpec::new(spec_type);
// Add options if any
if parts.len() > 1 {
let options_str = parts[1];
// Split options by comma
for option in options_str.split(',') {
// Split option by equals sign
let option_parts: Vec<&str> = option.split('=').collect();
if option_parts.len() == 2 {
store_spec = store_spec.with_option(option_parts[0].trim(), option_parts[1].trim());
}
}
}
result.push(store_spec);
}
result
}

View File

@@ -1,232 +0,0 @@
# SAL Buildah Module (`sal::virt::buildah`)
## Overview
The Buildah module in SAL provides a comprehensive Rust interface for interacting with the `buildah` command-line tool. It allows users to build OCI (Open Container Initiative) and Docker-compatible container images programmatically. The module offers both a high-level `Builder` API for step-by-step image construction and static functions for managing images in local storage.
A Rhai script interface for this module is also available via `sal::rhai::buildah`, making these functionalities accessible from `herodo` scripts.
## Core Components
### 1. `Builder` Struct (`sal::virt::buildah::Builder`)
The `Builder` struct is the primary entry point for constructing container images. It encapsulates a Buildah working container, created from a base image, and provides methods to modify this container and eventually commit it as a new image.
- **Creation**: `Builder::new(name: &str, image: &str) -> Result<Builder, BuildahError>`
- Creates a new working container (or re-attaches to an existing one with the same name) from the specified base `image`.
- **Debug Mode**: `builder.set_debug(true)` / `builder.debug()`
- Enables/disables verbose logging for Buildah commands executed by this builder instance.
#### Working Container Operations:
- `builder.run(command: &str) -> Result<CommandResult, BuildahError>`: Executes a shell command inside the working container (e.g., `buildah run <container> -- <command>`).
- `builder.run_with_isolation(command: &str, isolation: &str) -> Result<CommandResult, BuildahError>`: Runs a command with specified isolation (e.g., "chroot").
- `builder.copy(source_on_host: &str, dest_in_container: &str) -> Result<CommandResult, BuildahError>`: Copies files/directories from the host to the container (`buildah copy`).
- `builder.add(source_on_host: &str, dest_in_container: &str) -> Result<CommandResult, BuildahError>`: Adds files/directories to the container (`buildah add`), potentially handling URLs and archive extraction.
- `builder.config(options: HashMap<String, String>) -> Result<CommandResult, BuildahError>`: Modifies image metadata (e.g., environment variables, labels, entrypoint, cmd). Example options: `{"env": "MYVAR=value", "label": "mylabel=myvalue"}`.
- `builder.set_entrypoint(entrypoint: &str) -> Result<CommandResult, BuildahError>`: Sets the image entrypoint.
- `builder.set_cmd(cmd: &str) -> Result<CommandResult, BuildahError>`: Sets the default command for the image.
- `builder.commit(image_name: &str) -> Result<CommandResult, BuildahError>`: Commits the current state of the working container to a new image named `image_name`.
- `builder.remove() -> Result<CommandResult, BuildahError>`: Removes the working container (`buildah rm`).
- `builder.reset() -> Result<(), BuildahError>`: Removes the working container and resets the builder state.
### 2. Static Image Management Functions (on `Builder`)
These functions operate on images in the local Buildah storage and are not tied to a specific `Builder` instance.
- `Builder::images() -> Result<Vec<Image>, BuildahError>`: Lists all images available locally (`buildah images --json`). Returns a vector of `Image` structs.
- `Builder::image_remove(image_ref: &str) -> Result<CommandResult, BuildahError>`: Removes an image (`buildah rmi <image_ref>`).
- `Builder::image_pull(image_name: &str, tls_verify: bool) -> Result<CommandResult, BuildahError>`: Pulls an image from a registry (`buildah pull`).
- `Builder::image_push(image_ref: &str, destination: &str, tls_verify: bool) -> Result<CommandResult, BuildahError>`: Pushes an image to a registry (`buildah push`).
- `Builder::image_tag(image_ref: &str, new_name: &str) -> Result<CommandResult, BuildahError>`: Tags an image (`buildah tag`).
- `Builder::image_commit(container_ref: &str, image_name: &str, format: Option<&str>, squash: bool, rm: bool) -> Result<CommandResult, BuildahError>`: A static version to commit any existing container to an image, with options for format (e.g., "oci", "docker"), squashing layers, and removing the container post-commit.
- `Builder::build(tag: Option<&str>, context_dir: &str, file: &str, isolation: Option<&str>) -> Result<CommandResult, BuildahError>`: Builds an image from a Dockerfile/Containerfile (`buildah bud`).
*Note: Many static image functions also have a `_with_debug(..., debug: bool)` variant for explicit debug control.*
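For illustration, a minimal sketch of the static image functions, assuming the paths and signatures listed above (the function name `refresh_base_image` is just an example wrapper):

```rust
use sal::virt::buildah::{Builder, BuildahError};

fn refresh_base_image() -> Result<(), BuildahError> {
    // Pull a base image from a registry, verifying TLS certificates
    Builder::image_pull("docker.io/library/alpine:latest", true)?;

    // Give the pulled image a convenient local tag
    Builder::image_tag("docker.io/library/alpine:latest", "localhost/alpine:base")?;

    // Enumerate what is now in local storage
    for image in Builder::images()? {
        println!("{} -> {:?}", image.id, image.names);
    }
    Ok(())
}
```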
### 3. `Image` Struct (`sal::virt::buildah::Image`)
Represents a container image as listed by `buildah images`.
```rust
pub struct Image {
pub id: String, // Image ID
pub names: Vec<String>, // Image names/tags
pub size: String, // Image size
pub created: String, // Creation timestamp (as string)
}
```
### 4. `ContentOperations` (`sal::virt::buildah::ContentOperations`)
Provides static methods for reading and writing file content directly within a container, useful for dynamic configuration or inspection.
- `ContentOperations::write_content(container_id: &str, content: &str, dest_path_in_container: &str) -> Result<CommandResult, BuildahError>`: Writes string content to a file inside the specified container.
- `ContentOperations::read_content(container_id: &str, source_path_in_container: &str) -> Result<String, BuildahError>`: Reads the content of a file from within the specified container into a string.
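As a sketch, the two calls round-trip file content (assuming an active `Builder` that exposes its working container ID via `container_id()` as shown in the example further below):

```rust
use sal::virt::buildah::{Builder, BuildahError, ContentOperations};

fn inject_config(builder: &Builder) -> Result<(), BuildahError> {
    if let Some(id) = builder.container_id() {
        // Write a small config file, then read it back to confirm
        ContentOperations::write_content(id, "key=value", "/etc/app.conf")?;
        let round_trip = ContentOperations::read_content(id, "/etc/app.conf")?;
        assert_eq!(round_trip.trim(), "key=value");
    }
    Ok(())
}
```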
### 5. `BuildahError` Enum (`sal::virt::buildah::BuildahError`)
Defines the error types that can occur during Buildah operations:
- `CommandExecutionFailed(io::Error)`: The `buildah` command itself failed to start.
- `CommandFailed(String)`: The `buildah` command ran but returned a non-zero exit code or error.
- `JsonParseError(String)`: Failed to parse JSON output from Buildah.
- `ConversionError(String)`: Error during data conversion.
- `Other(String)`: Generic error.
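Since the variants carry different diagnostics, matching on them lets a caller distinguish a missing `buildah` binary from a command that ran and failed. A sketch (assuming `BuildahError` implements `Display`, as its use in formatted error messages elsewhere suggests):

```rust
use sal::virt::buildah::{Builder, BuildahError};

fn pull_with_diagnostics(image: &str) {
    match Builder::image_pull(image, true) {
        Ok(result) => println!("pulled: {}", result.stdout.trim()),
        Err(BuildahError::CommandExecutionFailed(io_err)) => {
            // The buildah binary itself could not be launched
            eprintln!("is buildah installed and on PATH? {}", io_err);
        }
        Err(BuildahError::CommandFailed(msg)) => eprintln!("buildah reported: {}", msg),
        Err(other) => eprintln!("unexpected error: {}", other),
    }
}
```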
## Key Design Points
The SAL Buildah module is designed with the following principles:
- **Builder Pattern**: The `Builder` struct (`sal::virt::buildah::Builder`) employs a builder pattern, enabling a fluent, step-by-step, and stateful approach to constructing container images. Each `Builder` instance manages a specific working container.
- **Separation of Concerns**:
- **Instance Methods**: Operations specific to a working container (e.g., `run`, `copy`, `config`, `commit`) are methods on the `Builder` instance.
- **Static Methods**: General image management tasks (e.g., listing images with `Builder::images()`, removing images with `Builder::image_remove()`, pulling, pushing, tagging, and building from a Dockerfile with `Builder::build()`) are provided as static functions on the `Builder` struct.
- **Direct Content Manipulation**: The `ContentOperations` struct provides static methods (`write_content`, `read_content`) to directly interact with files within a Buildah container. This is typically achieved by temporarily mounting the container or using `buildah add` with temporary files, abstracting the complexity from the user.
- **Debuggability**: Fine-grained control over `buildah` command logging is provided. The `builder.set_debug(true)` method enables verbose output for a specific `Builder` instance. Many static functions also offer `_with_debug(..., debug: bool)` variants. This is managed internally via a thread-local flag passed to the core `execute_buildah_command` function.
- **Comprehensive Rhai Integration**: Most functionalities of the Buildah module are exposed to Rhai scripts executed via `herodo`, allowing for powerful automation of image building workflows. This is facilitated by the `sal::rhai::buildah` module.
## Low-Level Command Execution
- `execute_buildah_command(args: &[&str]) -> Result<CommandResult, BuildahError>` (in `sal::virt::buildah::cmd`):
The core function that executes `buildah` commands. It handles debug logging based on a thread-local flag, which is managed by the higher-level `Builder` methods and `_with_debug` static function variants.
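A direct call might look like this (a sketch, assuming `execute_buildah_command` is publicly reachable at the path given above):

```rust
use sal::virt::buildah::cmd::execute_buildah_command;
use sal::virt::buildah::BuildahError;

fn list_images_raw() -> Result<String, BuildahError> {
    // Equivalent to running `buildah images --json` on the host
    let result = execute_buildah_command(&["images", "--json"])?;
    Ok(result.stdout)
}
```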
## Usage Example (Rust)
```rust
use sal::virt::buildah::{Builder, BuildahError, ContentOperations};
use std::collections::HashMap;
fn build_custom_image() -> Result<String, BuildahError> {
// Create a new builder from a base image (e.g., alpine)
let mut builder = Builder::new("my-custom-container", "docker.io/library/alpine:latest")?;
builder.set_debug(true);
// Run some commands
builder.run("apk add --no-cache curl")?;
builder.run("mkdir /app")?;
// Add a file
ContentOperations::write_content(builder.container_id().unwrap(), "Hello from SAL!", "/app/hello.txt")?;
// Set image configuration
let mut config_opts = HashMap::new();
config_opts.insert("workingdir".to_string(), "/app".to_string());
config_opts.insert("label".to_string(), "maintainer=sal-user".to_string());
builder.config(config_opts)?;
builder.set_entrypoint("["/usr/bin/curl"]")?;
builder.set_cmd("["http://ifconfig.me"]")?;
// Commit the image
let image_tag = "localhost/my-custom-image:latest";
builder.commit(image_tag)?;
println!("Successfully built image: {}", image_tag);
// Clean up the working container
builder.remove()?;
Ok(image_tag.to_string())
}
fn main() {
match build_custom_image() {
Ok(tag) => println!("Image {} created.", tag),
Err(e) => eprintln!("Error building image: {}", e),
}
}
```
## Rhai Scripting with `herodo`
The Buildah module's capabilities are extensively exposed to Rhai scripts, enabling automation of image building and management tasks via the `herodo` CLI tool. The `sal::rhai::buildah` module registers the necessary functions and types.
Below is a summary of commonly used Rhai functions for Buildah. (Note: `builder` refers to an instance of `BuildahBuilder`, typically obtained via `bah_new`.)
### Builder Object Management
- `bah_new(name: String, image: String) -> BuildahBuilder`: Creates a new Buildah builder instance (working container) from a base `image` with a given `name`.
- `builder.remove()`: Removes the working container associated with the `builder`.
- `builder.reset()`: Removes the working container and resets the `builder` state.
### Builder Configuration & Operations
- `builder.set_debug(is_debug: bool)`: Enables or disables verbose debug logging for commands executed by this `builder`.
- `builder.debug_mode` (property): Get or set the debug mode (e.g., `let mode = builder.debug_mode; builder.debug_mode = true;`).
- `builder.container_id` (property): Returns the ID of the working container (e.g., `let id = builder.container_id;`).
- `builder.name` (property): Returns the name of the builder/working container.
- `builder.image` (property): Returns the base image name used by the builder.
- `builder.run(command: String)`: Executes a shell command inside the `builder`'s working container.
- `builder.run_with_isolation(command: String, isolation: String)`: Runs a command with specified isolation (e.g., "chroot").
- `builder.copy(source_on_host: String, dest_in_container: String)`: Copies files/directories from the host to the `builder`'s container.
- `builder.add(source_on_host: String, dest_in_container: String)`: Adds files/directories to the `builder`'s container (can handle URLs and auto-extract archives).
- `builder.config(options: Map)`: Modifies image metadata. `options` is a Rhai map, e.g., `#{ "env": "MYVAR=value", "label": "foo=bar" }`.
- `builder.set_entrypoint(entrypoint: String)`: Sets the image entrypoint (e.g., `builder.set_entrypoint('["/app/run.sh"]')`).
- `builder.set_cmd(cmd: String)`: Sets the default command for the image (e.g., `builder.set_cmd('["--help"]')`).
- `builder.commit(image_tag: String)`: Commits the current state of the `builder`'s working container to a new image with `image_tag`.
### Content Operations (with a Builder instance)
- `bah_write_content(builder: BuildahBuilder, content: String, dest_path_in_container: String)`: Writes string `content` to a file at `dest_path_in_container` inside the `builder`'s container.
- `bah_read_content(builder: BuildahBuilder, source_path_in_container: String) -> String`: Reads the content of a file from `source_path_in_container` within the `builder`'s container.
### Global Image Operations
These functions generally correspond to static methods in Rust and operate on the local Buildah image storage.
- `bah_images() -> Array`: Lists all images available locally. Returns an array of `BuildahImage` objects.
- `bah_image_remove(image_ref: String)`: Removes an image (e.g., by ID or tag) from local storage.
- `bah_image_pull(image_name: String, tls_verify: bool)`: Pulls an image from a registry.
- `bah_image_push(image_ref: String, destination: String, tls_verify: bool)`: Pushes a local image to a registry.
- `bah_image_tag(image_ref: String, new_name: String)`: Adds a new tag (`new_name`) to an existing image (`image_ref`).
- `bah_build(tag: String, context_dir: String, file: String, isolation: String)`: Builds an image from a Dockerfile/Containerfile (equivalent to `buildah bud`). `file` is the path to the Dockerfile relative to `context_dir`. `isolation` can be e.g., "chroot".
### Example `herodo` Rhai Script (Revisited)
```rhai
// Create a new builder
let builder = bah_new("my-rhai-app", "docker.io/library/alpine:latest");
builder.debug_mode = true; // Enable debug logging for this builder
// Run commands in the container
builder.run("apk add --no-cache figlet curl");
builder.run("mkdir /data");
// Write content to a file in the container
bah_write_content(builder, "Hello from SAL Buildah via Rhai!", "/data/message.txt");
// Configure image metadata
builder.config(#{
"env": "APP_VERSION=1.0",
"label": "author=HerodoUser"
});
builder.set_entrypoint('["figlet"]');
builder.set_cmd('["Rhai Build"]');
// Commit the image
let image_name = "localhost/my-rhai-app:v1";
builder.commit(image_name);
print(`Image committed: ${image_name}`);
// Clean up the working container
builder.remove();
print("Builder container removed.");
// List local images
print("Current local images:");
let images = bah_images();
for img in images {
print(` ID: ${img.id}, Name(s): ${img.names}, Size: ${img.size}`);
}
// Example: Build from a Dockerfile (assuming Dockerfile exists at /tmp/build_context/Dockerfile)
// Ensure /tmp/build_context/Dockerfile exists with simple content like:
// FROM alpine
// RUN echo "Built with bah_build" > /built.txt
// CMD cat /built.txt
//
// if exist("/tmp/build_context/Dockerfile") {
// print("Building from Dockerfile...");
// bah_build("localhost/from-dockerfile:latest", "/tmp/build_context", "Dockerfile", "chroot");
// print("Dockerfile build complete.");
// bah_image_remove("localhost/from-dockerfile:latest"); // Clean up
// } else {
// print("Skipping Dockerfile build example: /tmp/build_context/Dockerfile not found.");
// }
```
This README provides a guide to using the SAL Buildah module. For more detailed information on specific functions and their parameters, consult the Rust doc comments within the source code.

View File

@@ -1,33 +0,0 @@
PREFIX := /usr/local
DATADIR := ${PREFIX}/share
MANDIR := $(DATADIR)/man
# Following go-md2man is guaranteed on host
GOMD2MAN ?= ../tests/tools/build/go-md2man
ifeq ($(shell uname -s),FreeBSD)
SED=gsed
else
SED=sed
endif
docs: $(patsubst %.md,%,$(wildcard *.md))
%.1: %.1.md
### sed is used to filter http/s links as well as relative links
### replaces "\" at the end of a line with two spaces
### this ensures that manpages are rendered correctly
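### for example (illustrative): "[buildah-from(1)](buildah-from.1.md)" is
### rewritten to plain "buildah-from(1)" before go-md2man sees it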
@$(SED) -e 's/\((buildah[^)]*\.md\(#.*\)\?)\)//g' \
-e 's/\[\(buildah[^]]*\)\]/\1/g' \
-e 's/\[\([^]]*\)](http[^)]\+)/\1/g' \
-e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' \
-e 's/\\$$/ /g' $< | \
$(GOMD2MAN) -in /dev/stdin -out $@
.PHONY: install
install:
install -d ${DESTDIR}/${MANDIR}/man1
install -m 0644 buildah*.1 ${DESTDIR}/${MANDIR}/man1
install -m 0644 links/buildah*.1 ${DESTDIR}/${MANDIR}/man1
.PHONY: clean
clean:
$(RM) buildah*.1

View File

@@ -1,162 +0,0 @@
# buildah-add "1" "April 2021" "buildah"
## NAME
buildah\-add - Add the contents of a file, URL, or a directory to a container.
## SYNOPSIS
**buildah add** [*options*] *container* *src* [[*src* ...] *dest*]
## DESCRIPTION
Adds the contents of a file, URL, or a directory to a container's working
directory or a specified location in the container. If a local source file
appears to be an archive, its contents are extracted and added instead of the
archive file itself. If a local directory is specified as a source, its
*contents* are copied to the destination.
## OPTIONS
**--add-history**
Add an entry to the history which will note the digest of the added content.
Defaults to false.
Note: You can also override the default value of --add-history by setting the
BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) when connecting to
registries for pulling images named with the **--from** flag, and when
connecting to HTTPS servers when fetching sources from locations specified with
HTTPS URLs. The default certificates directory is _/etc/containers/certs.d_.
**--checksum** *checksum*
Checksum the source content. The value of *checksum* must be a standard
container digest string. Only supported for HTTP sources.
**--chmod** *permissions*
Sets the access permissions of the destination content. Accepts the numerical format.
**--chown** *owner*:*group*
Sets the user and group ownership of the destination content.
**--contextdir** *directory*
Build context directory. Specifying a context directory causes Buildah to
chroot into that context directory. This means copying files pointed at
by symbolic links outside of the chroot will fail.
**--exclude** *pattern*
Exclude copying files matching the specified pattern. Option can be specified
multiple times. See containerignore(5) for supported formats.
**--from** *containerOrImage*
Use the root directory of the specified working container or image as the root
directory when resolving absolute source paths and the path of the context
directory. If an image needs to be pulled, options recognized by `buildah pull`
can be used.
**--ignorefile** *file*
Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
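For example, assuming a hypothetical context directory that contains both the content and an ignore file:
```
buildah add --contextdir /tmp/build_context --ignorefile /tmp/build_context/.containerignore containerID 'src' '/app/src'
```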
**--quiet**, **-q**
Refrain from printing a digest of the added content.
**--retry** *attempts*
Number of times to retry in case of failure when pulling images from registries
or retrieving content from HTTPS URLs.
Defaults to `3`.
**--retry-delay** *duration*
Duration of delay between retry attempts in case of failure when pulling images
from registries or retrieving content from HTTPS URLs.
Defaults to `2s`.
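For example, to retry a flaky download more aggressively (the URL and values are illustrative):
```
buildah add --retry 5 --retry-delay 10s containerID 'https://example.com/archive.tar.gz' '/tmp/'
```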
**--tls-verify** *bool-value*
Require verification of certificates when retrieving sources from HTTPS
locations, or when pulling images referred to with the **--from** flag
(defaults to true). TLS verification cannot be used when talking to an
insecure registry.
## EXAMPLE
buildah add containerID '/myapp/app.conf' '/myapp/app.conf'
buildah add --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
buildah add --chmod 660 containerID '/myapp/app.conf' '/myapp/app.conf'
buildah add containerID '/home/myuser/myproject.go'
buildah add containerID '/home/myuser/myfiles.tar' '/tmp'
buildah add containerID '/tmp/workingdir' '/tmp/workingdir'
buildah add containerID 'https://github.com/containers/buildah/blob/main/README.md' '/tmp'
buildah add containerID 'passwd' 'certs.d' /etc
## FILES
### .containerignore or .dockerignore
If a .containerignore or .dockerignore file exists in the context directory,
`buildah add` reads its contents. If both exist, then .containerignore is used.
When the `--ignorefile` option is specified Buildah reads it and
uses it to decide which content to exclude when copying content into the
working container.
Users can specify a series of Unix shell glob patterns in an ignore file to
identify files/directories to exclude.
Buildah supports a special wildcard string `**` which matches any number of
directories (including zero). For example, `**/*.go` will exclude all files that
end with .go that are found in all directories.
Example .containerignore/.dockerignore file:
```
# here are files we want to exclude
*/*.c
**/output*
src
```
`*/*.c`
Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
`**/output*`
Excludes files and directories starting with `output` from any directory.
`src`
Excludes files named src and the directory src as well as any content in it.
Lines starting with ! (exclamation mark) can be used to make exceptions to
exclusions. The following is an example .containerignore file that uses this
mechanism:
```
*.doc
!Help.doc
```
Exclude all doc files except Help.doc when copying content into the container.
This functionality is compatible with the handling of .containerignore files described here:
https://github.com/containers/common/blob/main/docs/containerignore.5.md
## SEE ALSO
buildah(1), containerignore(5)

File diff suppressed because it is too large

View File

@@ -1,393 +0,0 @@
# buildah-commit "1" "March 2017" "buildah"
## NAME
buildah\-commit - Create an image from a working container.
## SYNOPSIS
**buildah commit** [*options*] *container* [*image*]
## DESCRIPTION
Writes a new image using the specified container's read-write layer and if it
is based on an image, the layers of that image. If *image* does not begin
with a registry name component, `localhost` will be added to the name. If
*image* is not provided, the image will have no name. When an image has no
name, the `buildah images` command will display `<none>` in the `REPOSITORY` and
`TAG` columns.
The *image* value supports all transports from `containers-transports(5)`. If no transport is specified, the `containers-storage` (i.e., local storage) transport is used.
## RETURN VALUE
The image ID of the image that was created. On error, 1 is returned and errno is set.
## OPTIONS
**--add-file** *source[:destination]*
Read the contents of the file `source` and add it to the committed image as a
file at `destination`. If `destination` is not specified, the path of `source`
will be used. The new file will be owned by UID 0, GID 0, have 0644
permissions, and be given a current timestamp unless the **--timestamp** option
is also specified. This option can be specified multiple times.
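For example (the paths and image name are illustrative):
```
buildah commit --add-file ./motd:/etc/motd containerID localhost/myimage:latest
```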
**--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--change**, **-c** *"INSTRUCTION"*
Apply the change to the committed image that would have been made if it had
been built using a Containerfile which included the specified instruction.
This option can be specified multiple times.
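For example (the instructions shown are illustrative):
```
buildah commit --change 'ENV DEPLOY_ENV=production' --change 'EXPOSE 8080' containerID localhost/myimage:latest
```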
**--config** *filename*
Read a JSON-encoded version of an image configuration object from the specified
file, and merge the values from it with the configuration of the image being
committed.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--cw** *options*
Produce an image suitable for use as a confidential workload running in a
trusted execution environment (TEE) using krun (i.e., *crun* built with the
libkrun feature enabled and invoked as *krun*). Instead of the conventional
contents, the root filesystem of the image will contain an encrypted disk image
and configuration information for krun.
The value for *options* is a comma-separated list of key=value pairs, supplying
configuration information which is needed for producing the additional data
which will be included in the container image.
Recognized _keys_ are:
*attestation_url*: The location of a key broker / attestation server.
If a value is specified, the new image's workload ID, along with the passphrase
used to encrypt the disk image, will be registered with the server, and the
server's location will be stored in the container image.
At run-time, krun is expected to contact the server to retrieve the passphrase
using the workload ID, which is also stored in the container image.
If no value is specified, a *passphrase* value *must* be specified.
*cpus*: The number of virtual CPUs which the image expects to be run with at
run-time. If not specified, a default value will be supplied.
*firmware_library*: The location of the libkrunfw-sev shared library. If not
specified, `buildah` checks for its presence in a number of hard-coded
locations.
*memory*: The amount of memory which the image expects to be run with at
run-time, as a number of megabytes. If not specified, a default value will be
supplied.
*passphrase*: The passphrase to use to encrypt the disk image which will be
included in the container image.
If no value is specified, but an *attestation_url* value is specified, a
randomly-generated passphrase will be used.
The authors recommend setting an *attestation_url* but not a *passphrase*.
*slop*: Extra space to allocate for the disk image compared to the size of the
container image's contents, expressed either as a percentage (..%) or a size
value (bytes, or larger units if suffixes like KB or MB are present), or a sum
of two or more such specifications separated by "+". If not specified,
`buildah` guesses that 25% more space than the contents will be enough, but
this option is provided in case its guess is wrong. If the specified or
computed size is less than 10 megabytes, it will be increased to 10 megabytes.
*type*: The type of trusted execution environment (TEE) which the image should
be marked for use with. Accepted values are "SEV" (AMD Secure Encrypted
Virtualization - Encrypted State) and "SNP" (AMD Secure Encrypted
Virtualization - Secure Nested Paging). If not specified, defaults to "SNP".
*workload_id*: A workload identifier which will be recorded in the container
image, to be used at run-time for retrieving the passphrase which was used to
encrypt the disk image. If not specified, a semi-random value will be derived
from the base image's image ID.
**--disable-compression**, **-D**
Don't compress filesystem layers when building the image unless it is required
by the location where the image is being written. This is the default setting,
because image layers are compressed automatically when they are pushed to
registries, and images being written to local storage would only need to be
decompressed again to be stored. Compression can be forced in all cases by
specifying **--disable-compression=false**.
**--encrypt-layer** *layer(s)*
Layer(s) to encrypt: 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified.
**--encryption-key** *key*
The [protocol:keyfile] specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--format**, **-f** *[oci | docker]*
Control the format for the image manifest and configuration data. Recognized
formats include *oci* (OCI image-spec v1.0, the default) and *docker* (version
2, using schema format 2 for the manifest).
Note: You can also override the default format by setting the BUILDAH_FORMAT
environment variable. `export BUILDAH_FORMAT=docker`
**--identity-label** *bool-value*
Adds default identity label `io.buildah.version` if set. (default true).
**--iidfile** *ImageIDfile*
Write the image ID to the file.
**--manifest** "listName"
Name of the manifest list to which the built image will be added. Creates the manifest list
if it does not exist. This option is useful for building multi architecture images.
**--omit-history** *bool-value*
Omit build history information in the built image. (default false).
This option is useful for the cases where end users explicitly
want to set `--omit-history` to omit the optional `History` from
built images or when working with images built using build tools that
do not include `History` information in their images.
**--pull**
When the *--pull* flag is enabled or set explicitly to `true` (with
*--pull=true*), attempt to pull the latest versions of SBOM scanner images from
the registries listed in registries.conf if a local SBOM scanner image does not
exist or the image in the registry is newer than the one in local storage.
Raise an error if the SBOM scanner image is not in any listed registry and is
not present locally.
If the flag is disabled (with *--pull=false*), do not pull SBOM scanner images
from registries, use only local versions. Raise an error if a SBOM scanner
image is not present locally.
If the pull flag is set to `always` (with *--pull=always*), pull SBOM scanner
images from the registries listed in registries.conf. Raise an error if a SBOM
scanner image is not found in the registries, even if an image with the same
name is present locally.
If the pull flag is set to `missing` (with *--pull=missing*), pull SBOM scanner
images only if they could not be found in the local containers storage. Raise
an error if no image could be found and the pull fails.
If the pull flag is set to `never` (with *--pull=never*), do not pull SBOM
scanner images from registries, use only the local versions. Raise an error if
the image is not present locally.
**--quiet**, **-q**
When writing the output image, suppress progress output.
**--rm**
Remove the working container and its contents after creating the image.
Default leaves the container and its content in place.
**--sbom** *preset*
Generate SBOMs (Software Bills Of Materials) for the output image by scanning
the working container and build contexts using the named combination of scanner
image, scanner commands, and merge strategy. Must be specified with one or
more of **--sbom-image-output**, **--sbom-image-purl-output**, **--sbom-output**,
and **--sbom-purl-output**. Recognized presets, and the set of options which
they equate to:
- "syft", "syft-cyclonedx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output cyclonedx-json={OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "syft-spdx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output spdx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output spdx-json={OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo
- "trivy", "trivy-cyclonedx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format cyclonedx --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format cyclonedx --output {OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "trivy-spdx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format spdx-json --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format spdx-json --output {OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo
**--sbom-image-output** *path*
When generating SBOMs, store the generated SBOM in the specified path in the
output image. There is no default.
**--sbom-image-purl-output** *path*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the named file in the local
filesystem. There is no default.
**--sbom-merge-strategy** *method*
If more than one **--sbom-scanner-command** value is being used, use the
specified method to merge the output from later commands with output from
earlier commands. Recognized values include:
- cat
Concatenate the files.
- merge-cyclonedx-by-component-name-and-version
Merge the "component" fields of JSON documents, ignoring values from
documents when the combination of their "name" and "version" values is
already present. Documents are processed in the order in which they are
generated, which is the order in which the commands that generate them
were specified.
- merge-spdx-by-package-name-and-versioninfo
Merge the "package" fields of JSON documents, ignoring values from
documents when the combination of their "name" and "versionInfo" values is
already present. Documents are processed in the order in which they are
generated, which is the order in which the commands that generate them
were specified.
**--sbom-output** *file*
When generating SBOMs, store the generated SBOM in the named file on the local
filesystem. There is no default.
**--sbom-purl-output** *file*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the named file in the local
filesystem. There is no default.
**--sbom-scanner-command** *image*
Generate SBOMs by running the specified command from the scanner image. If
multiple commands are specified, they are run in the order in which they are
specified. These text substitutions are performed:
- {ROOTFS}
The root of the built image's filesystem, bind mounted.
- {CONTEXT}
The build context and additional build contexts, bind mounted.
- {OUTPUT}
The name of a temporary output file, to be read and merged with others or copied elsewhere.
**--sbom-scanner-image** *image*
Generate SBOMs using the specified scanner image.
**--sign-by** *fingerprint*
Sign the new image using the GPG key that matches the specified fingerprint.
**--squash**
Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
**--timestamp** *seconds*
Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
By default, the created timestamp is changed and written into the image manifest with every commit,
causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
**--unsetenv** *env*
Unset environment variables from the final image.
## EXAMPLE
This example saves an image based on the container.
`buildah commit containerID newImageName`
This example saves an image named newImageName based on the container and removes the working container.
`buildah commit --rm containerID newImageName`
This example commits to an OCI archive file named /tmp/newImageName based on the container.
`buildah commit containerID oci-archive:/tmp/newImageName`
This example saves an image with no name, removes the working container, and creates a new container using the image's ID.
`buildah from $(buildah commit --rm containerID)`
This example saves an image based on the container disabling compression.
`buildah commit --disable-compression containerID`
This example saves an image named newImageName based on the container disabling compression.
`buildah commit --disable-compression containerID newImageName`
This example commits the container to the image on the local registry while turning off tls verification.
`buildah commit --tls-verify=false containerID docker://localhost:5000/imageId`
This example commits the container to the image on the local registry using credentials and certificates for authentication.
`buildah commit --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
This example commits the container to the image on the local registry using credentials from the /tmp/auths/myauths.json file and certificates for authentication.
`buildah commit --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageName`
This example saves an image based on the container, but stores dates based on epoch time.
`buildah commit --timestamp=0 containerID newImageName`
### Building a multi-architecture image using the --manifest option (requires emulation software)
```
#!/bin/sh
build() {
ctr=$(./bin/buildah from --arch "$1" ubi8)
./bin/buildah run $ctr dnf install -y iputils
./bin/buildah commit --manifest ubi8ping $ctr
}
build arm
build amd64
build s390x
```
## ENVIRONMENT
**BUILD\_REGISTRY\_SOURCES**
BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
lists of registry names under the keys `insecureRegistries`,
`blockedRegistries`, and `allowedRegistries`.
When committing an image, if the image is to be given a name, the portion of
the name that corresponds to a registry is compared to the items in the
`blockedRegistries` list, and if it matches any of them, the commit attempt is
denied. If there are registries in the `allowedRegistries` list, and the
portion of the name that corresponds to the registry is not in the list, the
commit attempt is denied.
**TMPDIR**
The TMPDIR environment variable allows the user to specify where temporary files
are stored while pulling and pushing images. Defaults to '/var/tmp'.
## FILES
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**policy.json** (`/etc/containers/policy.json`)
Signature policy file. This defines the trust policy for container images. Controls which container registries can be used as image sources, and whether or not the tool should trust the images.
## SEE ALSO
buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5), containers-auth.json(5)

View File

@@ -1,302 +0,0 @@
# buildah-config "1" "March 2017" "buildah"
## NAME
buildah\-config - Update image configuration settings.
## SYNOPSIS
**buildah config** [*options*] *container*
## DESCRIPTION
Updates one or more of the settings kept for a container.
## OPTIONS
**--add-history**
Add an entry to the image's history which will note changes to the settings for
**--cmd**, **--entrypoint**, **--env**, **--healthcheck**, **--label**,
**--onbuild**, **--port**, **--shell**, **--stop-signal**, **--user**,
**--volume**, and **--workingdir**.
Defaults to false.
Note: You can also override the default value of --add-history by setting the
BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
**--annotation**, **-a** *annotation*=*annotation*
Add an image *annotation* (e.g. annotation=*annotation*) to the image manifest
of any images which will be built using the specified container. Can be used multiple times.
If *annotation* has a trailing `-`, then the *annotation* is removed from the config.
If the *annotation* is set to "-" then all annotations are removed from the config.
**--arch** *architecture*
Set the target *architecture* for any images which will be built using the
specified container. By default, if the container was based on an image, that
image's target architecture is kept, otherwise the host's architecture is
recorded.
**--author** *author*
Set contact information for the *author* for any images which will be built
using the specified container.
**--cmd** *command*
Set the default *command* to run for containers based on any images which will
be built using the specified container. When used in combination with an
*entry point*, this specifies the default parameters for the *entry point*.
**--comment** *comment*
Set the image-level comment for any images which will be built using the
specified container.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--created-by** *created*
Set the description of how the topmost layer was *created* for any images which
will be created using the specified container.
**--domainname** *domain*
Set the domainname to set when running containers based on any images built
using the specified container.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--entrypoint** *"command"* | *'["command", "arg1", ...]'*
Set the *entry point* for containers based on any images which will be built
using the specified container. buildah supports two formats for entrypoint. It
can be specified as a simple string, or as an array of commands.
Note: When the entrypoint is specified as a string, container runtimes will
ignore the `cmd` value of the container image. However if you use the array
form, then the cmd will be appended onto the end of the entrypoint cmd and be
executed together.
Note: The string form is appended to the `sh -c` command as the entrypoint. The array form
replaces entrypoint entirely.
String Format:
```
$ buildah from scratch
$ buildah config --entrypoint "/usr/bin/notashell" working-container
$ buildah inspect --format '{{ .OCIv1.Config.Entrypoint }}' working-container
[/bin/sh -c /usr/bin/notashell]
$ buildah inspect --format '{{ .Docker.Config.Entrypoint }}' working-container
[/bin/sh -c /usr/bin/notashell]
```
Array Format:
```
$ buildah config --entrypoint '["/usr/bin/notashell"]' working-container
$ buildah inspect --format '{{ .OCIv1.Config.Entrypoint }}' working-container
[/usr/bin/notashell]
$ buildah inspect --format '{{ .Docker.Config.Entrypoint }}' working-container
[/usr/bin/notashell]
```
**--env**, **-e** *env[=value]*
Add a value (e.g. env=*value*) to the environment for containers based on any
images which will be built using the specified container. Can be used multiple times.
If *env* is named but neither `=` nor a `value` is specified, then the value
will be taken from the current process environment.
If *env* has a trailing `-`, then the *env* is removed from the config.
If the *env* is set to "-" then all environment variables are removed from the config.
**--healthcheck** *command*
Specify a command which should be run to check if a container is running correctly.
Values can be *NONE*, "*CMD* ..." (run the specified command directly), or
"*CMD-SHELL* ..." (run the specified command using the system's shell), or the
empty value (remove a previously-set value and related settings).
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
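For example, combining this option with the interval and retry options described below (the probe URL is illustrative):
```
buildah config --healthcheck 'CMD-SHELL curl -f http://localhost:8080/healthz || exit 1' --healthcheck-interval 30s --healthcheck-retries 3 containerID
```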
**--healthcheck-interval** *interval*
Specify how often the command specified using the *--healthcheck* option should
be run.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--healthcheck-retries** *count*
Specify how many times the command specified using the *--healthcheck* option
can fail before the container is considered to be unhealthy.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--healthcheck-start-interval** *interval*
Specify the time between health checks during the start period.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--healthcheck-start-period** *interval*
Specify how much time can elapse after a container has started before a failure
to run the command specified using the *--healthcheck* option should be treated
as an indication that the container is failing. During this time period,
failures will be attributed to the container not yet having fully started, and
will not be counted as errors. After the command succeeds, or the time period
has elapsed, failures will be counted as errors.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--healthcheck-timeout** *interval*
Specify how long to wait after starting the command specified using the
*--healthcheck* option to wait for the command to return its exit status. If
the command has not returned within this time, it should be considered to have
failed.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--history-comment** *comment*
Sets a comment on the topmost layer in any images which will be created
using the specified container.
**--hostname** *host*
Set the hostname to set when running containers based on any images built using
the specified container.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--label**, **-l** *label*=*value*
Add an image *label* (e.g. label=*value*) to the image configuration of any
images which will be built using the specified container. Can be used multiple times.
If *label* has a trailing `-`, then the *label* is removed from the config.
If the *label* is set to "-" then all labels are removed from the config.
**--onbuild** *onbuild command*
Add an ONBUILD command to the image. ONBUILD commands are automatically run
when images are built based on the image you are creating.
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--os** *operating system*
Set the target *operating system* for any images which will be built using
the specified container. By default, if the container was based on an image,
its OS is kept, otherwise the host's OS's name is recorded.
**--os-feature** *feature*
Set the name of a required operating system *feature* for any images which will
be built using the specified container. By default, if the container was based
on an image, the base image's required OS feature list is kept, if it specified
one. This option is typically only meaningful when the image's OS is Windows.
If *feature* has a trailing `-`, then the *feature* is removed from the set of
required features which will be listed in the image. If the *feature* is set
to "-" then the entire features list is removed from the config.
**--os-version** *version*
Set the exact required operating system *version* for any images which will be
built using the specified container. By default, if the container was based on
an image, the base image's required OS version is kept, if it specified one.
This option is typically only meaningful when the image's OS is Windows, and is
typically set in Windows base images, so using this option is usually
unnecessary.
**--port**, **-p** *port/protocol*
Add a *port* to expose when running containers based on any images which
will be built using the specified container. Can be used multiple times.
To specify whether the port listens on TCP or UDP, use "port/protocol".
The default is TCP if the protocol is not specified. To expose the port on both TCP and UDP,
specify the port option multiple times. If *port* has a trailing `-` and is already set,
then the *port* is removed from the configuration. If the port is set to `-` then all exposed
ports settings are removed from the configuration.
**--shell** *shell*
Set the default *shell* to run inside of the container image.
The shell instruction allows the default shell used for the shell form of commands to be overridden. The default shell for Linux containers is "/bin/sh -c".
Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
**--stop-signal** *signal*
Set default *stop signal* for container. This signal will be sent when container is stopped, default is SIGINT.
**--unsetlabel** *label*
Unset the image label, causing the label not to be inherited from the base image.
**--user**, **-u** *user*[:*group*]
Set the default *user* to be used when running containers based on this image.
The user can be specified as a user name
or UID, optionally followed by a group name or GID, separated by a colon (':').
If names are used, the container should include entries for those names in its
*/etc/passwd* and */etc/group* files.
**--variant** *variant*
Set the target architecture *variant* for any images which will be built using
the specified container. By default, if the container was based on an image,
that image's target architecture and variant information is kept, otherwise the
host's architecture and variant are recorded.
**--volume**, **-v** *volume*
Add a location in the directory tree which should be marked as a *volume* in any images which will be built using the specified container. Can be used multiple times. If *volume* has a trailing `-`, and is already set, then the *volume* is removed from the config.
If the *volume* is set to "-" then all volumes are removed from the config.
**--workingdir** *directory*
Set the initial working *directory* for containers based on images which will
be built using the specified container.
## EXAMPLE
buildah config --author='Jane Austen' --workingdir='/etc/mycontainers' containerID
buildah config --entrypoint /entrypoint.sh containerID
buildah config --entrypoint '[ "/entrypoint.sh", "dev" ]' containerID
buildah config --env foo=bar --env PATH=$PATH containerID
buildah config --env foo- containerID
buildah config --label Name=Mycontainer --label Version=1.0 containerID
buildah config --label Name- containerID
buildah config --annotation note=myNote containerID
buildah config --annotation note- containerID
buildah config --volume /usr/myvol containerID
buildah config --volume /usr/myvol- containerID
buildah config --port 1234 --port 8080 containerID
buildah config --port 514/tcp --port 514/udp containerID
buildah config --env 1234=5678 containerID
buildah config --env 1234- containerID
buildah config --os-version 10.0.19042.1645 containerID
buildah config --os-feature win32k containerID
buildah config --os-feature win32k- containerID
## SEE ALSO
buildah(1)

View File

@@ -1,123 +0,0 @@
# buildah-containers "1" "March 2017" "buildah"
## NAME
buildah\-containers - List the working containers and their base images.
## SYNOPSIS
**buildah containers** [*options*]
## DESCRIPTION
Lists containers which appear to be Buildah working containers, their names and
IDs, and the names and IDs of the images from which they were initialized.
## OPTIONS
**--all**, **-a**
List information about all containers, including those which were not created
by and are not being used by Buildah. Containers created by Buildah are
denoted with an '*' in the 'BUILDER' column.
**--filter**, **-f**
Filter output based on conditions provided.
Valid filters are listed below:
| **Filter** | **Description** |
| --------------- | ------------------------------------------------------------------- |
| id | [ID] Container's ID |
| name | [Name] Container's name |
| ancestor | [ImageName] Image or descendant used to create container |
**--format**
Pretty-print containers using a Go template.
Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | -----------------------------------------|
| .ContainerID | Container ID |
| .Builder | Whether container was created by buildah |
| .ImageID | Image ID |
| .ImageName | Image name |
| .ContainerName | Container name |
**--json**
Output in JSON format.
**--noheading**, **-n**
Omit the table headings from the listing of containers.
**--notruncate**
Do not truncate IDs and image names in the output.
**--quiet**, **-q**
Displays only the container IDs.
## EXAMPLE
buildah containers
```
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
ccf84de04b80 * 53ce4390f2ad registry.access.redhat.com/ub... ubi8-working-container
45be1d806fc5 * 16ea53ea7c65 docker.io/library/busybox:latest busybox-working-container
```
buildah containers --quiet
```
ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
```
buildah containers -q --noheading --notruncate
```
ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
```
buildah containers --json
```
[
{
"id": "ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8",
"builder": true,
"imageid": "53ce4390f2adb1681eb1a90ec8b48c49c015e0a8d336c197637e7f65e365fa9e",
"imagename": "registry.access.redhat.com/ubi8:latest",
"containername": "ubi8-working-container"
},
{
"id": "45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07",
"builder": true,
"imageid": "16ea53ea7c652456803632d67517b78a4f9075a10bfdc4fc6b7b4cbf2bc98497",
"imagename": "docker.io/library/busybox:latest",
"containername": "busybox-working-container"
}
]
```
buildah containers --format "{{.ContainerID}} {{.ContainerName}}"
```
ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8 ubi8-working-container
45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07 busybox-working-container
```
buildah containers --format "Container ID: {{.ContainerID}}"
```
Container ID: ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
Container ID: 45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
```
buildah containers --filter ancestor=ubuntu
```
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
fbfd3505376e * 0ff04b2e7b63 docker.io/library/ubuntu:latest ubuntu-working-container
```
## SEE ALSO
buildah(1)

View File

@@ -1,169 +0,0 @@
# buildah-copy "1" "April 2021" "buildah"
## NAME
buildah\-copy - Copies the contents of a file, URL, or directory into a container's working directory.
## SYNOPSIS
**buildah copy** *container* *src* [[*src* ...] *dest*]
## DESCRIPTION
Copies the contents of a file, URL, or a directory to a container's working
directory or a specified location in the container. If a local directory is
specified as a source, its *contents* are copied to the destination.
## OPTIONS
**--add-history**
Add an entry to the history which will note the digest of the added content.
Defaults to false.
Note: You can also override the default value of --add-history by setting the
BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) when connecting to
registries for pulling images named with the **--from** flag. The default
certificates directory is _/etc/containers/certs.d_.
**--checksum** *checksum*
Checksum the source content. The value of *checksum* must be a standard
container digest string. Only supported for HTTP sources.
**--chmod** *permissions*
Sets the access permissions of the destination content. Accepts the numerical
format. If `--from` is not used, defaults to `0755`.
**--chown** *owner*:*group*
Sets the user and group ownership of the destination content. If `--from` is
not used, defaults to `0:0`.
**--contextdir** *directory*
Build context directory. Specifying a context directory causes Buildah to
chroot into the context directory. This means copying files pointed at
by symbolic links outside of the chroot will fail.
**--exclude** *pattern*
Exclude copying files matching the specified pattern. Option can be specified
multiple times. See containerignore(5) for supported formats.
**--from** *containerOrImage*
Use the root directory of the specified working container or image as the root
directory when resolving absolute source paths and the path of the context
directory. If an image needs to be pulled, options recognized by `buildah pull`
can be used. If `--chown` or `--chmod` are not used, permissions and ownership
are preserved.
**--ignorefile** *file*
Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
**--parents**
Preserve leading directories in the paths of items being copied, relative to either the
top of the build context, or to the "pivot point", a location in the source path marked
by a path component named "." (i.e., where "/./" occurs in the path).
**--quiet**, **-q**
Refrain from printing a digest of the copied content.
**--retry** *attempts*
Number of times to retry in case of failure when performing pull of images from registry.
Defaults to `3`.
**--retry-delay** *duration*
Duration of delay between retry attempts in case of failure when performing pull of images from registry.
Defaults to `2s`.
**--tls-verify** *bool-value*
Require verification of certificates when pulling images referred to with the
**--from** flag (defaults to true). TLS verification cannot be used when
talking to an insecure registry.
## EXAMPLE
buildah copy containerID '/myapp/app.conf' '/myapp/app.conf'
buildah copy --exclude=**/*.md docs containerID 'docs' '/docs'
buildah copy --parents containerID './x/a.txt' './y/a.txt' '/parents'
buildah copy --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
buildah copy --chmod 660 containerID '/myapp/app.conf' '/myapp/app.conf'
buildah copy containerID '/home/myuser/myproject.go'
buildah copy containerID '/home/myuser/myfiles.tar' '/tmp'
buildah copy containerID '/tmp/workingdir' '/tmp/workingdir'
buildah copy containerID 'https://github.com/containers/buildah' '/tmp'
buildah copy containerID 'passwd' 'certs.d' /etc
## FILES
### .containerignore/.dockerignore
If the .containerignore/.dockerignore file exists in the context directory,
`buildah copy` reads its contents. If both exist, then .containerignore is used.
When the `--ignorefile` option is specified Buildah reads it and
uses it to decide which content to exclude when copying content into the
working container.
Users can specify a series of Unix shell glob patterns in an ignore file to
identify files/directories to exclude.
Buildah supports a special wildcard string `**` which matches any number of
directories (including zero). For example, `**/*.go` will exclude all files that
end with .go that are found in all directories.
Example .containerignore/.dockerignore file:
```
# here are files we want to exclude
*/*.c
**/output*
src
```
`*/*.c`
Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
`**/output*`
Excludes files and directories starting with `output` from any directory.
`src`
Excludes files named src and the directory src as well as any content in it.
Lines starting with ! (exclamation mark) can be used to make exceptions to
exclusions. The following is an example .containerignore/.dockerignore file that uses this
mechanism:
```
*.doc
!Help.doc
```
Exclude all doc files except Help.doc when copying content into the container.
This functionality is compatible with the handling of .containerignore files described here:
https://github.com/containers/common/blob/main/docs/containerignore.5.md
## SEE ALSO
buildah(1), containerignore(5)

View File

@@ -1,244 +0,0 @@
# Buildah Essential Commands Guide
Buildah is a command-line tool for building OCI-compatible container images. Unlike other container build tools, Buildah doesn't require a daemon to be running and allows for granular control over the container building process.
## Creating Containers (Build Step)
### buildah from
Creates a new working container, either from scratch or using a specified image.
```bash
# Create a container from an image
buildah from [options] <image-name>
# Create a container from scratch
buildah from scratch
# Examples
buildah from fedora:latest
buildah from docker://ubuntu:22.04
buildah from --name my-container alpine:latest
```
Important options:
- `--name <name>`: Set a name for the container
- `--pull`: Pull image policy (missing, always, never, newer)
- `--authfile <path>`: Path to authentication file
- `--creds <username:password>`: Registry credentials
## Working with Containers
### buildah run
Runs a command inside of the container.
```bash
# Basic syntax
buildah run [options] <container-id> <command>
# Examples
buildah run my-container yum install -y httpd
buildah run my-container -- sh -c "echo 'Hello World' > /etc/motd"
buildah run --hostname myhost my-container ps -auxw
```
Important options:
- `--tty`, `-t`: Allocate a pseudo-TTY
- `--env`, `-e <env=value>`: Set environment variables
- `--volume`, `-v <host-dir:container-dir:opts>`: Mount a volume
- `--workingdir <directory>`: Set the working directory
### buildah copy
Copy files from the host into the container.
```bash
# Basic syntax
buildah copy [options] <container-id> <source> <destination>
# Examples
buildah copy my-container ./app /app
buildah copy my-container config.json /etc/myapp/
```
### buildah add
Add content from a file, URL, or directory to the container.
```bash
# Basic syntax
buildah add [options] <container-id> <source> <destination>
# Examples
buildah add my-container https://example.com/archive.tar.gz /tmp/
buildah add my-container ./local/dir /app/
```
## Configuring Containers
### buildah config
Updates container configuration settings.
```bash
# Basic syntax
buildah config [options] <container-id>
# Examples
buildah config --author="John Doe" my-container
buildah config --port 8080 my-container
buildah config --env PATH=$PATH my-container
buildah config --label version=1.0 my-container
buildah config --entrypoint "/entrypoint.sh" my-container
```
Important options:
- `--author <author>`: Set the author
- `--cmd <command>`: Set the default command
- `--entrypoint <command>`: Set the entry point
- `--env`, `-e <env=value>`: Set environment variables
- `--label`, `-l <label=value>`: Add image labels
- `--port`, `-p <port>`: Expose ports
- `--user`, `-u <user[:group]>`: Set the default user
- `--workingdir <directory>`: Set the working directory
- `--volume`, `-v <volume>`: Add a volume
## Building Images
### buildah commit
Create an image from a working container.
```bash
# Basic syntax
buildah commit [options] <container-id> [<image-name>]
# Examples
buildah commit my-container new-image:latest
buildah commit --format docker my-container docker.io/username/image:tag
buildah commit --rm my-container localhost/myimage:v1.0
```
Important options:
- `--format`, `-f`: Output format (oci or docker)
- `--rm`: Remove the container after committing
- `--quiet`, `-q`: Suppress output
- `--squash`: Squash all layers into a single layer
### buildah build
Build an image using instructions from Containerfiles or Dockerfiles.
```bash
# Basic syntax
buildah build [options] <context>
# Examples
buildah build .
buildah build -t myimage:latest .
buildah build -f Containerfile.custom .
buildah build --layers --format docker -t username/myapp:1.0 .
```
Important options:
- `--file`, `-f <Containerfile>`: Path to Containerfile/Dockerfile
- `--tag`, `-t <name:tag>`: Tag to apply to the built image
- `--layers`: Cache intermediate layers during build
- `--pull`: Force pull of newer base images
- `--no-cache`: Do not use cache during build
- `--build-arg <key=value>`: Set build-time variables
- `--format`: Output format (oci or docker)
## Managing Images
### buildah images
List images in local storage.
```bash
buildah images [options]
```
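For example (flags shown are optional):
```bash
buildah images
# Include intermediate images and show digests
buildah images --all --digests
```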
### buildah rmi
Remove one or more images.
```bash
buildah rmi [options] <image>
```
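For example (the image name is illustrative):
```bash
buildah rmi localhost/myimage:latest
# Remove all local images
buildah rmi --all
```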
### buildah push
Push an image to a registry.
```bash
# Basic syntax
buildah push [options] <image> [destination]
# Examples
buildah push myimage:latest docker://registry.example.com/myimage:latest
buildah push --tls-verify=false localhost/myimage docker://localhost:5000/myimage
```
Important options:
- `--authfile <path>`: Path to authentication file
- `--creds <username:password>`: Registry credentials
- `--tls-verify <bool>`: Require HTTPS and verify certificates
### buildah tag
Add an additional name to a local image.
```bash
# Basic syntax
buildah tag <image> <new-name>
# Example
buildah tag localhost/myimage:latest myimage:v1.0
```
### buildah pull
Pull an image from a registry.
```bash
# Basic syntax
buildah pull [options] <image-name>
# Examples
buildah pull docker.io/library/ubuntu:latest
buildah pull --tls-verify=false registry.example.com/myimage:latest
```
Important options:
- `--authfile <path>`: Path to authentication file
- `--creds <username:password>`: Registry credentials
- `--tls-verify <bool>`: Require HTTPS and verify certificates
## Typical Workflow Example
```bash
# Create a container from an existing image
container=$(buildah from fedora:latest)
# Run a command to install software
buildah run $container dnf install -y nginx
# Copy local configuration files to the container
buildah copy $container ./nginx.conf /etc/nginx/nginx.conf
# Configure container metadata
buildah config --port 80 $container
buildah config --label maintainer="example@example.com" $container
buildah config --entrypoint "/usr/sbin/nginx" $container
# Commit the container to create a new image
buildah commit --rm $container my-nginx:latest
# Or build using a Containerfile
buildah build -t my-nginx:latest .
# Push the image to a registry
buildah push my-nginx:latest docker://docker.io/username/my-nginx:latest
```

View File

@@ -1,758 +0,0 @@
# buildah-from "1" "March 2017" "buildah"
## NAME
buildah\-from - Creates a new working container, either from scratch or using a specified image as a starting point.
## SYNOPSIS
**buildah from** [*options*] *image*
## DESCRIPTION
Creates a working container based upon the specified image name. If the
supplied image name is "scratch" a new empty container is created. Image names
use a "transport":"details" format.
Multiple transports are supported:
**dir:**_path_
An existing local directory _path_ containing the manifest, layer tarballs, and signatures in individual files. This is a non-standardized format, primarily useful for debugging or noninvasive image inspection.
**docker://**_docker-reference_ (Default)
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `buildah login`. See containers-auth.json(5) for more information. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `docker login`.
If _docker-reference_ does not include a registry name, *localhost* will be consulted first, followed by any registries named in the registries configuration.
**docker-archive:**_path_
An image is retrieved as a `podman load` formatted file.
**docker-daemon:**_docker-reference_
An image _docker-reference_ stored in the docker daemon's internal storage. _docker-reference_ must include either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
**oci:**_path_**:**_tag_
An image tag in a directory compliant with "Open Container Image Layout Specification" at _path_.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
### DEPENDENCIES
Buildah resolves the path to the registry to pull from by using the /etc/containers/registries.conf
file, containers-registries.conf(5). If the `buildah from` command fails with an "image not known" error,
first verify that the registries.conf file is installed and configured appropriately.
## RETURN VALUE
The container ID of the container that was created. On error 1 is returned.
## OPTIONS
**--add-host**=[]
Add a custom host-to-IP mapping (host:ip)
Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times.
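For example (the hostname and address are placeholders):
```
buildah from --add-host internal-db.example.com:10.0.0.5 alpine:latest
```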
**--arch**="ARCH"
Set the ARCH of the image to be pulled to the provided value instead of using the architecture of the host. (Examples: arm, arm64, 386, amd64, ppc64le, s390x)
**--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cap-add**=*CAP\_xxx*
Add the specified capability to the default set of capabilities which will be
supplied for subsequent *buildah run* invocations which use this container.
Certain capabilities are granted by default; this option can be used to add
more.
**--cap-drop**=*CAP\_xxx*
Remove the specified capability from the default set of capabilities which will
be supplied for subsequent *buildah run* invocations which use this container.
The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER, CAP\_FSETID, CAP\_KILL,
CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP, CAP\_SETGID, CAP\_SETPCAP,
and CAP\_SETUID capabilities are granted by default; this option can be used to remove them. The list of default capabilities is managed in containers.conf(5).
If a capability is specified to both the **--cap-add** and **--cap-drop**
options, it will be dropped, regardless of the order in which the options were
given.
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--cgroup-parent**=""
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
**--cgroupns** *how*
Sets the configuration for cgroup namespaces when the container is subsequently
used for `buildah run`.
The configured value can be "" (the empty string) or "private" to indicate
that a new cgroup namespace should be created, or it can be "host" to indicate
that the cgroup namespace in which `buildah` itself is being run should be reused.
**--cidfile** *ContainerIDFile*
Write the container ID to the file.
**--cpu-period**=*0*
Limit the CPU CFS (Completely Fair Scheduler) period
Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
**--cpu-quota**=*0*
Limit the CPU CFS (Completely Fair Scheduler) quota
Limit the container's CPU usage. By default, containers run with the full
CPU resource. This flag tells the kernel to restrict the container's CPU usage
to the quota you specify.
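For example, to cap subsequent `buildah run` invocations in the new container at roughly half of one CPU, a 50ms quota per 100ms period could be used (values are illustrative):
```
buildah from --cpu-period=100000 --cpu-quota=50000 ubi8
```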
**--cpu-shares**, **-c**=*0*
CPU shares (relative weight)
By default, all containers get the same proportion of CPU cycles. This proportion
can be modified by changing the container's CPU share weighting relative
to the weighting of all other running containers.
To modify the proportion from the default of 1024, use the **--cpu-shares**
flag to set the weighting to 2 or higher.
The proportion will only apply when CPU-intensive processes are running.
When tasks in one container are idle, other containers can use the
left-over CPU time. The actual amount of CPU time will vary depending on
the number of containers running on the system.
For example, consider three containers, one has a cpu-share of 1024 and
two others have a cpu-share setting of 512. When processes in all three
containers attempt to use 100% of CPU, the first container would receive
50% of the total CPU time. If you add a fourth container with a cpu-share
of 1024, the first container only gets 33% of the CPU. The remaining containers
receive 16.5%, 16.5% and 33% of the CPU.
On a multi-core system, the shares of CPU time are distributed over all CPU
cores. Even if a container is limited to less than 100% of CPU time, it can
use 100% of each individual CPU core.
For example, consider a system with more than three cores. If you start one
container **{C0}** with **-c=512** running one process, and another container
**{C1}** with **-c=1024** running two processes, this can result in the following
division of CPU shares:
```
PID    container    CPU    CPU share
100    {C0}         0      100% of CPU0
101    {C1}         1      100% of CPU1
102    {C1}         2      100% of CPU2
```
**--cpuset-cpus**=""
CPUs in which to allow execution (0-3, 0,1)
**--cpuset-mems**=""
Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
then processes in your container will only use memory from the first
two memory nodes.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--decryption-key** *key[:passphrase]*
The [key[:passphrase]] to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
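For example, pulling an encrypted image with a key file (the path and image name are placeholders):
```
buildah from --decryption-key /etc/keys/private.pem docker://registry.example.com/encrypted/app:latest
```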
**--device**=*device*
Add a host device, or devices under a directory, to the environment of
subsequent **buildah run** invocations for the new working container. The
optional *permissions* parameter can be used to specify device permissions,
using any one or more of **r** for read, **w** for write, and **m** for
**mknod**(2).
Example: **--device=/dev/sdc:/dev/xvdc:rwm**.
Note: if _host-device_ is a symbolic link then it will be resolved first.
The container will only store the major and minor numbers of the host device.
The device to share can also be specified using a Container Device Interface
(CDI) specification (https://github.com/cncf-tags/container-device-interface).
Note: if the user only has access rights via a group, accessing the device
from inside a rootless container will fail. The **crun**(1) runtime offers a
workaround for this by adding the option **--annotation run.oci.keep_original_groups=1**.
**--dns**=[]
Set custom DNS servers
This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the `--dns` flag is necessary for every run.
The special value **none** can be specified to disable creation of /etc/resolv.conf in the container by Buildah. The /etc/resolv.conf file in the image will be used without changes.
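For example, a sketch of overriding the resolver configuration, or keeping the image's own `/etc/resolv.conf` (addresses and image name are placeholders):
```
# use a custom DNS server and search domain
buildah from --dns 192.0.2.1 --dns-search example.com docker.io/library/alpine
# do not create /etc/resolv.conf at all
buildah from --dns none docker.io/library/alpine
```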
**--dns-option**=[]
Set custom DNS options
**--dns-search**=[]
Set custom DNS search domains
**--format**, **-f** *oci* | *docker*
Control the format for the built image's manifest and configuration data.
Recognized formats include *oci* (OCI image-spec v1.0, the default) and
*docker* (version 2, using schema format 2 for the manifest).
Note: You can also override the default format by setting the BUILDAH\_FORMAT
environment variable. `export BUILDAH_FORMAT=docker`
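A short sketch of both ways to select the docker format (the image name is a placeholder):
```
buildah from --format docker docker.io/library/alpine
# or set the default for all subsequent buildah commands
export BUILDAH_FORMAT=docker
```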
**--group-add**=*group* | *keep-groups*
Assign additional groups to the primary user running within the container
process.
- `keep-groups` is a special flag that tells Buildah to keep the supplementary
group access.
Allows container to use the user's supplementary group access. If file systems
or devices are only accessible by the rootless user's group, this flag tells the
OCI runtime to pass the group access into the container. Currently only
available with the `crun` OCI runtime. Note: `keep-groups` is exclusive, other
groups cannot be specified with this flag.
**--http-proxy**
By default proxy environment variables are passed into the container if set
for the Buildah process. This can be disabled by setting the `--http-proxy`
option to `false`. The environment variables passed in include `http_proxy`,
`https_proxy`, `ftp_proxy`, `no_proxy`, and also the upper case versions of
those.
Defaults to `true`
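For instance, to keep the host's proxy variables out of the container (the image name is a placeholder):
```
buildah from --http-proxy=false docker.io/library/alpine
```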
**--ipc** *how*
Sets the configuration for IPC namespaces when the container is subsequently
used for `buildah run`.
The configured value can be "" (the empty string) or "container" to indicate
that a new IPC namespace should be created, or it can be "host" to indicate
that the IPC namespace in which `Buildah` itself is being run should be reused,
or it can be the path to an IPC namespace which is already in use by
another process.
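A minimal sketch of reusing the host's IPC namespace (the image name is a placeholder):
```
buildah from --ipc host docker.io/library/alpine
```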
**--isolation** *type*
Controls what type of isolation is used for running processes under `buildah
run`. Recognized types include *oci* (OCI-compatible runtime, the default),
*rootless* (OCI-compatible runtime invoked using a modified
configuration, with *--no-new-keyring* added to its *create* invocation,
reusing the host's network and UTS namespaces, and creating private IPC, PID,
mount, and user namespaces; the default for unprivileged users), and *chroot*
(an internal wrapper that leans more toward chroot(1) than container
technology, reusing the host's control group, network, IPC, and PID namespaces,
and creating private mount and UTS namespaces, and creating user namespaces
only when they're required for ID mapping).
Note: You can also override the default isolation type by setting the
BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
**--memory**, **-m**=""
Memory limit (format: <number>[<unit>], where unit = b, k, m or g)
Allows you to constrain the memory available to a container. If the host
supports swap memory, then the **-m** memory setting can be larger than physical
RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
not limited; the reported limit is then an extremely large value. The actual
limit may be rounded up to a multiple of the operating system's page size.
**--memory-swap**="LIMIT"
A limit value equal to memory plus swap. Must be used with the **-m**
(**--memory**) flag. The swap `LIMIT` should always be larger than the **-m**
(**--memory**) value. By default, the swap `LIMIT` will be set to double
the value of **--memory**.
The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
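For example, a sketch giving a container 512 MB of RAM and 1 GB of combined memory plus swap (the image name is a placeholder):
```
buildah from --memory 512m --memory-swap 1g docker.io/library/alpine
```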
**--name** *name*
A *name* for the working container
**--network**=*mode*, **--net**=*mode*
Sets the configuration for network namespaces when the container is subsequently
used for `buildah run`.
Valid _mode_ values are:
- **none**: no networking. Invalid if using **--dns**, **--dns-option**, or **--dns-search**;
- **host**: use the host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure;
- **ns:**_path_: path to a network namespace to join;
- **private**: create a new namespace for the container (default)
- **\<network name|ID\>**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users.
- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf:
- **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false.
- **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`).
- **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
- **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`).
- **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only).
- **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to.
- **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only).
- **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to.
- **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking
stack. \
This is only supported in rootless mode. \
By default, IPv4 and IPv6 addresses and routes, as well as the pod interface
name, are copied from the host. If port forwarding isn't configured, ports
are forwarded dynamically as services are bound on either side (init
namespace or container namespace). Port forwarding preserves the original
source IP address. Options described in pasta(1) can be specified as
comma-separated arguments. \
In terms of pasta(1) options, **--config-net** is given by default, in
order to configure networking when the container is started, and
**--no-map-gw** is also assumed by default, to avoid direct access from
container to host using the gateway address. The latter can be overridden
by passing **--map-gw** in the pasta-specific options (despite not being an
actual pasta(1) option). \
Also, **-t none** and **-u none** are passed to disable
automatic port forwarding based on bound ports. Similarly, **-T none** and
**-U none** are given to disable the same functionality from container to
host. \
Some examples:
- **pasta:--map-gw**: Allow the container to directly reach the host using the
gateway address.
- **pasta:--mtu,1500**: Specify a 1500 bytes MTU for the _tap_ interface in
the container.
- **pasta:--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,-m,1500,--no-ndp,--no-dhcpv6,--no-dhcp**,
    equivalent to default slirp4netns(1) options: disable IPv6, assign
    `10.0.2.0/24` to the `tap0` interface in the container, with gateway
    `10.0.2.2`, enable DNS forwarder reachable at `10.0.2.3`, set MTU to 1500
    bytes, disable NDP, DHCPv6 and DHCP support.
- **pasta:-I,tap0,--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,--no-ndp,--no-dhcpv6,--no-dhcp**,
    equivalent to default slirp4netns(1) options with Podman overrides: same as
    above, but leave the MTU at 65520 bytes
- **pasta:-t,auto,-u,auto,-T,auto,-U,auto**: enable automatic port forwarding
based on observed bound ports from both host and container sides
- **pasta:-T,5201**: enable forwarding of TCP port 5201 from container to
host, using the loopback interface instead of the tap interface for improved
performance
**--os**="OS"
Set the OS of the image to be pulled to the provided value instead of using the current operating system of the host.
**--pid** *how*
Sets the configuration for PID namespaces when the container is subsequently
used for `buildah run`.
The configured value can be "" (the empty string) or "container" to indicate
that a new PID namespace should be created, or it can be "host" to indicate
that the PID namespace in which `Buildah` itself is being run should be reused,
or it can be the path to a PID namespace which is already in use by another
process.
**--platform**="OS/ARCH[/VARIANT]"
Set the OS/ARCH of the image to be pulled
to the provided value instead of using the current operating system and
architecture of the host (for example `linux/arm`).
OS/ARCH pairs are those used by the Go Programming Language. In several cases
the ARCH value for a platform differs from one produced by other tools such as
the `arch` command. Valid OS and architecture name combinations are listed as
values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environment,
and can also be found by running `go tool dist list`.
While `buildah from` is happy to pull an image for any platform that exists,
`buildah run` will not be able to run binaries provided by that image without
the help of emulation provided by packages like `qemu-user-static`.
**NOTE:** The `--platform` option may not be used in combination with the `--arch`, `--os`, or `--variant` options.
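A short example of pulling for a foreign platform (the image name is a placeholder):
```
buildah from --platform linux/arm64 docker.io/library/alpine
```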
**--pull**
Pull image policy. The default is **missing**.
- **always**: Pull base and SBOM scanner images from the registries listed in
registries.conf. Raise an error if a base or SBOM scanner image is not found
in the registries, even if an image with the same name is present locally.
- **missing**: Pull base and SBOM scanner images only if they could not be found
  in the local containers storage. Raise an error if no image could be found and
  the pull fails.
- **never**: Do not pull base and SBOM scanner images from registries, use only
the local versions. Raise an error if the image is not present locally.
- **newer**: Pull base and SBOM scanner images from the registries listed in
  registries.conf if a newer version is available. Raise an error if a base or
  SBOM scanner image is not found in the registries and an image with the same
  name is not present locally.
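Two illustrative invocations (the image name is a placeholder):
```
# use only locally stored images; fail instead of pulling
buildah from --pull=never docker.io/library/alpine
# pull when the registry copy is newer than the local one
buildah from --pull=newer docker.io/library/alpine
```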
**--quiet**, **-q**
If an image needs to be pulled from the registry, suppress progress output.
**--retry** *attempts*
Number of times to retry in case of failure when performing pull of images from registry.
Defaults to `3`.
**--retry-delay** *duration*
Duration of delay between retry attempts in case of failure when performing pull of images from registry.
Defaults to `2s`.
**--security-opt**=[]
Security Options
"label=user:USER" : Set the label user for the container
"label=role:ROLE" : Set the label role for the container
"label=type:TYPE" : Set the label type for the container
"label=level:LEVEL" : Set the label level for the container
"label=disable" : Turn off label confinement for the container
"no-new-privileges" : Not supported
"seccomp=unconfined" : Turn off seccomp confinement for the container
"seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter
"apparmor=unconfined" : Turn off apparmor confinement for the container
"apparmor=your-profile" : Set the apparmor confinement profile for the container
**--shm-size**=""
Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
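For example (the image name is a placeholder):
```
buildah from --shm-size 128m docker.io/library/alpine
```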
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
**--ulimit** *type*=*soft-limit*[:*hard-limit*]
Specifies resource limits to apply to processes launched during `buildah run`.
This option can be specified multiple times. Recognized resource types
include:
"core": maximum core dump size (ulimit -c)
"cpu": maximum CPU time (ulimit -t)
"data": maximum size of a process's data segment (ulimit -d)
"fsize": maximum size of new files (ulimit -f)
"locks": maximum number of file locks (ulimit -x)
"memlock": maximum amount of locked memory (ulimit -l)
"msgqueue": maximum amount of data in message queues (ulimit -q)
"nice": niceness adjustment (nice -n, ulimit -e)
"nofile": maximum number of open files (ulimit -n)
"nofile": maximum number of open files (1048576); when run by root
"nproc": maximum number of processes (ulimit -u)
"nproc": maximum number of processes (1048576); when run by root
"rss": maximum size of a process's (ulimit -m)
"rtprio": maximum real-time scheduling priority (ulimit -r)
"rttime": maximum amount of real-time execution between blocking syscalls
"sigpending": maximum number of pending signals (ulimit -i)
"stack": maximum stack size (ulimit -s)
**--userns** *how*
Sets the configuration for user namespaces when the container is subsequently
used for `buildah run`.
The configured value can be "" (the empty string) or "container" to indicate
that a new user namespace should be created, it can be "host" to indicate that
the user namespace in which `Buildah` itself is being run should be reused, or
it can be the path to a user namespace which is already in use by another
process.
**--userns-gid-map** *mapping*
Directly specifies a GID mapping which should be used to set ownership, at the
filesystem level, on the working container's contents.
Commands run when handling `RUN` instructions will default to being run in
their own user namespaces, configured using the UID and GID maps.
Entries in this map take the form of one or more colon-separated triples of a starting
in-container GID, a corresponding starting host-level GID, and the number of
consecutive IDs which the map entry represents.
This option overrides the *remap-gids* setting in the *options* section of
/etc/containers/storage.conf.
If this option is not specified, but a global --userns-gid-map setting is
supplied, settings from the global option will be used.
If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-gid-map
are specified, but --userns-uid-map is specified, the GID map will be set to
use the same numeric values as the UID map.
**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
**--userns-gid-map-group** *group*
Specifies that a GID mapping which should be used to set ownership, at the
filesystem level, on the container's contents, can be found in entries in the
`/etc/subgid` file which correspond to the specified group.
Commands run using `buildah run` will default to being run in their own user
namespaces, configured using the UID and GID maps.
If --userns-uid-map-user is specified, but --userns-gid-map-group is not
specified, `Buildah` will assume that the specified user name is also a
suitable group name to use as the default setting for this option.
**--userns-uid-map** *mapping*
Directly specifies a UID mapping which should be used to set ownership, at the
filesystem level, on the working container's contents.
Commands run when handling `RUN` instructions will default to being run in
their own user namespaces, configured using the UID and GID maps.
Entries in this map take the form of one or more colon-separated triples of a starting
in-container UID, a corresponding starting host-level UID, and the number of
consecutive IDs which the map entry represents.
This option overrides the *remap-uids* setting in the *options* section of
/etc/containers/storage.conf.
If this option is not specified, but a global --userns-uid-map setting is
supplied, settings from the global option will be used.
If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-uid-map
are specified, but --userns-gid-map is specified, the UID map will be set to
use the same numeric values as the GID map.
**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
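A minimal sketch mapping container IDs 0-65535 to host IDs starting at 100000 for both users and groups (the image name and ID ranges are placeholders):
```
buildah from --userns-uid-map 0:100000:65536 --userns-gid-map 0:100000:65536 docker.io/library/alpine
```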
**--userns-uid-map-user** *user*
Specifies that a UID mapping which should be used to set ownership, at the
filesystem level, on the container's contents, can be found in entries in the
`/etc/subuid` file which correspond to the specified user.
Commands run using `buildah run` will default to being run in their own user
namespaces, configured using the UID and GID maps.
If --userns-gid-map-group is specified, but --userns-uid-map-user is not
specified, `Buildah` will assume that the specified group name is also a
suitable user name to use as the default setting for this option.
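For illustration, assuming a user and group named `build` with entries in `/etc/subuid` and `/etc/subgid` (both names are hypothetical):
```
buildah from --userns-uid-map-user build --userns-gid-map-group build docker.io/library/alpine
```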
**--uts** *how*
Sets the configuration for UTS namespaces when the container is subsequently
used for `buildah run`.
The configured value can be "" (the empty string) or "container" to indicate
that a new UTS namespace should be created, or it can be "host" to indicate
that the UTS namespace in which `Buildah` itself is being run should be reused,
or it can be the path to a UTS namespace which is already in use by another
process.
**--variant**=""
Set the architecture variant of the image to be pulled.
**--volume**, **-v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Buildah
bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah
container. The `OPTIONS` are a comma delimited list and can be:
* [rw|ro]
* [U]
* [z|Z|O]
* [`[r]shared`|`[r]slave`|`[r]private`|`[r]unbindable`] <sup>[[1]](#Footnote1)</sup>
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the
path you specify. For example, if you supply `/foo` as the host path,
Buildah copies the contents of `/foo` to the container filesystem on the host
and bind mounts that into the container.
You can specify multiple **-v** options to mount one or more mounts to a
container.
`Write Protected Volume Mounts`
You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
read-write mode, respectively. By default, the volumes are mounted read-write.
See examples.
`Chowning Volume Mounts`
By default, Buildah does not change the owner and group of source volume directories mounted into containers. If a container is created in a new user namespace, the UID and GID in the container may correspond to another UID and GID on the host.
The `:U` suffix tells Buildah to use the correct host UID and GID based on the UID and GID within the container, to change the owner and group of the source volume.
`Labeling Volume Mounts`
Labeling systems like SELinux require that proper labels are placed on volume
content mounted into a container. Without a label, the security system might
prevent the processes running inside the container from using the content. By
default, Buildah does not change the labels set by the OS.
To change a label in the container context, you can add either of two suffixes
`:z` or `:Z` to the volume mount. These suffixes tell Buildah to relabel file
objects on the shared volumes. The `z` option tells Buildah that two containers
share the volume content. As a result, Buildah labels the content with a shared
content label. Shared volume labels allow all containers to read/write content.
The `Z` option tells Buildah to label the content with a private unshared label.
Only the current container can use a private volume.
`Overlay Volume Mounts`
The `:O` flag tells Buildah to mount the directory from the host as a temporary storage using the Overlay file system. The `RUN` command containers are allowed to modify contents within the mountpoint and are stored in the container storage in a separate directory. In Overlay FS terms the source directory will be the lower, and the container storage directory will be the upper. Modifications to the mount point are destroyed when the `RUN` command finishes executing, similar to a tmpfs mount point.
Any subsequent execution of `RUN` commands sees the original source directory content, any changes from previous RUN commands no longer exist.
One use case of the `overlay` mount is sharing the package cache from the host into the container to allow speeding up builds.
Note:
- The `O` flag is not allowed to be specified with the `Z` or `z` flags. Content mounted into the container is labeled with the private label.
On SELinux systems, labels in the source directory need to be readable by the container label. If not, SELinux container separation must be disabled for the container to work.
- Modification of the directory volume mounted into the container with an overlay mount can cause unexpected failures. It is recommended that you do not modify the directory until the container finishes running.
By default bind mounted volumes are `private`. That means any mounts done
inside the container will not be visible on the host and vice versa. This behavior
can be changed by specifying a volume mount propagation property.
When the mount propagation policy is set to `shared`, any mounts completed inside
the container on that volume will be visible to both the host and container. When
the mount propagation policy is set to `slave`, one way mount propagation is enabled
and any mounts completed on the host for that volume will be visible only inside of the container.
To control the mount propagation property of the volume use the `:[r]shared`,
`:[r]slave`, `:[r]private` or `:[r]unbindable` propagation flag. The propagation property can
be specified only for bind mounted volumes and not for internal volumes or
named volumes. For mount propagation to work on the source mount point (the mount point
where source dir is mounted on) it has to have the right propagation properties. For
shared volumes, the source mount point has to be shared. And for slave volumes,
the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
Use `df <source-dir>` to determine the source mount, and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine the propagation
properties of the source mount. If the `findmnt` utility is not available, the
source mount point can be determined by looking at the mount entry in
`/proc/self/mountinfo`. Look at the `optional fields` and see if any propagation
properties are specified. `shared:X` means the mount is `shared`, `master:X`
means the mount is `slave`, and if nothing is there the mount is `private`. <sup>[[1]](#Footnote1)</sup>
To change propagation properties of a mount point use the `mount` command. For
example, to bind mount the source directory `/foo` do
`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
will convert /foo into a `shared` mount point. The propagation properties of the source
mount can be changed directly. For instance if `/` is the source mount for
`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
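A short illustrative session for inspecting and adjusting propagation, assuming `/foo` lives on the root mount (output will vary per system):
```
df /foo                              # find the source mount point backing /foo
findmnt -o TARGET,PROPAGATION /      # check its propagation properties
sudo mount --make-shared /           # convert the source mount to shared
```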
## EXAMPLE
```
buildah from --pull imagename
buildah from --pull docker://myregistry.example.com/imagename
buildah from docker-daemon:imagename:imagetag
buildah from --name mycontainer docker-archive:filename
buildah from oci-archive:filename
buildah from --name mycontainer dir:directoryname
buildah from --pull-always --name "mycontainer" myregistry.example.com/imagename
buildah from --tls-verify=false myregistry/myrepository/imagename:imagetag
buildah from --creds=myusername:mypassword --cert-dir ~/auth myregistry/myrepository/imagename:imagetag
buildah from --authfile=/tmp/auths/myauths.json myregistry/myrepository/imagename:imagetag
buildah from --memory 40m --cpu-shares 2 --cpuset-cpus 0,2 --security-opt label=level:s0:c100,c200 myregistry/myrepository/imagename:imagetag
buildah from --ulimit nofile=1024:1028 --cgroup-parent /path/to/cgroup/parent myregistry/myrepository/imagename:imagetag
buildah from --volume /home/test:/myvol:ro,Z myregistry/myrepository/imagename:imagetag
buildah from -v /home/test:/myvol:z,U myregistry/myrepository/imagename:imagetag
buildah from -v /var/lib/yum:/var/lib/yum:O myregistry/myrepository/imagename:imagetag
buildah from --arch=arm --variant v7 myregistry/myrepository/imagename:imagetag
```
## ENVIRONMENT
**BUILD\_REGISTRY\_SOURCES**
BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
lists of registry names under the keys `insecureRegistries`,
`blockedRegistries`, and `allowedRegistries`.
When pulling an image from a registry, if the name of the registry matches any
of the items in the `blockedRegistries` list, the image pull attempt is denied.
If there are registries in the `allowedRegistries` list, and the registry's
name is not in the list, the pull attempt is denied.
**TMPDIR**
The TMPDIR environment variable allows the user to specify where temporary files
are stored while pulling and pushing images. Defaults to '/var/tmp'.
## FILES
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**policy.json** (`/etc/containers/policy.json`)
Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
## SEE ALSO
buildah(1), buildah-pull(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), containers.conf(5), containers-auth.json(5)
## FOOTNOTES
<a name="Footnote1">1</a>: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately.

View File

@@ -1,137 +0,0 @@
# buildah-images "1" "March 2017" "buildah"
## NAME
buildah\-images - List images in local storage.
## SYNOPSIS
**buildah images** [*options*] [*image*]
## DESCRIPTION
Displays locally stored images, their names, sizes, created date and their IDs.
The created date is displayed in the time locale of the local machine.
## OPTIONS
**--all**, **-a**
Show all images, including intermediate images from a build.
**--digests**
Show the image digests.
**--filter**, **-f**=[]
Filter output based on conditions provided (default []).
Filters:
**after,since=image**
Filter on images created since the given image.
**before=image**
Filter on images created before the given image.
**dangling=true|false**
Show dangling images. An image is considered dangling if it has no associated names or tags.
**id=id**
Show image with this specific ID.
**intermediate=true|false**
Show intermediate images. An image is considered intermediate if it is dangling and has no children.
**label=key[=value]**
Filter by images labels key and/or value.
**readonly=true|false**
Show only read-only images or read/write images. The default is to show both. Read-only image stores can be configured by modifying the "additionalimagestores" option in the /etc/containers/storage.conf file.
**reference=reference**
Show images matching the specified reference. Wildcards are supported (e.g., "reference=*fedora:3*").
**--format**="TEMPLATE"
Pretty-print images using a Go template.
Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | -----------------------------------------|
| .Created | Creation date in epoch time |
| .CreatedAt | Creation date Pretty Formatted |
| .CreatedAtRaw | Creation date in raw format |
| .Digest | Image Digest |
| .ID | Image ID |
| .Name | Image Name |
| .ReadOnly | Indicates if image came from a R/O store |
| .Size | Image Size |
| .Tag | Image Tag |
**--history**
Display the image name history.
**--json**
Display the output in JSON format.
**--no-trunc**
Do not truncate output.
**--noheading**, **-n**
Omit the table headings from the listing of images.
**--quiet**, **-q**
Displays only the image IDs.
## EXAMPLE
```
buildah images
buildah images fedora:latest
buildah images --json
buildah images --quiet
buildah images -q --noheading --no-trunc
buildah images --quiet fedora:latest
buildah images --filter dangling=true
buildah images --format "ImageID: {{.ID}}"
```
```
$ buildah images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.access.redhat.com/ubi8 latest 53ce4390f2ad 3 weeks ago 233 MB
docker.io/library/busybox latest 16ea53ea7c65 3 weeks ago 1.46 MB
quay.io/libpod/testimage 20210610 9f9ec7f2fdef 4 months ago 7.99 MB
```
```
# buildah images -a
IMAGE NAME IMAGE TAG IMAGE ID CREATED AT SIZE
registry.access.redhat.com/ubi8 latest 53ce4390f2ad 3 weeks ago 233 MB
<none> <none> 8c6e16890c2b Jun 13, 2018 15:52 4.42 MB
localhost/test latest c0cfe75da054 Jun 13, 2018 15:52 4.42 MB
```
```
# buildah images --format '{{.ID}} {{.CreatedAtRaw}}'
3f53bb00af943dfdf815650be70c0fa7b426e56a66f5e3362b47a129d57d5991 2018-12-20 19:21:30.122610396 -0500 EST
8e09da8f6701d7cde1526d79e3123b0f1109b78d925dfe9f9bac6d59d702a390 2019-01-08 09:22:52.330623532 -0500 EST
```
```
# buildah images --format '{{.ID}} {{.Name}} {{.Digest}} {{.CreatedAt}} {{.Size}} {{.CreatedAtRaw}}'
3f53bb00af943dfdf815650be70c0fa7b426e56a66f5e3362b47a129d57d5991 docker.io/library/alpine sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e Dec 20, 2018 19:21 4.67 MB 2018-12-20 19:21:30.122610396 -0500 EST
8e09da8f6701d7cde1526d79e3123b0f1109b78d925dfe9f9bac6d59d702a390 <none> sha256:894532ec56e0205ce68ca7230b00c18aa3c8ee39fcdb310615c60e813057229c Jan 8, 2019 09:22 4.67 MB 2019-01-08 09:22:52.330623532 -0500 EST
```
## SEE ALSO
buildah(1), containers-storage.conf(5)

View File

@@ -1,73 +0,0 @@
# buildah-info "1" "November 2018" "Buildah"
## NAME
buildah\-info - Display Buildah system information.
## SYNOPSIS
**buildah info** [*options*]
## DESCRIPTION
The information displayed pertains to the host and to current storage statistics, which is useful when reporting issues.
## OPTIONS
**--debug**, **-d**
Show additional information.
**--format** *template*
Use *template* as a Go template when formatting the output.
## EXAMPLE
Run buildah info response:
```
$ buildah info
{
"host": {
"Distribution": {
"distribution": "ubuntu",
"version": "18.04"
},
"MemTotal": 16702980096,
"MemFree": 309428224,
"SwapFree": 2146693120,
"SwapTotal": 2147479552,
"arch": "amd64",
"cpus": 4,
"hostname": "localhost.localdomain",
"kernel": "4.15.0-36-generic",
"os": "linux",
"rootless": false,
"uptime": "91h 30m 59.9s (Approximately 3.79 days)"
},
"store": {
"ContainerStore": {
"number": 2
},
"GraphDriverName": "overlay",
"GraphOptions": [
"overlay.override_kernel_check=true"
],
"GraphRoot": "/var/lib/containers/storage",
"GraphStatus": {
"Backing Filesystem": "extfs",
"Native Overlay Diff": "true",
"Supports d_type": "true"
},
"ImageStore": {
"number": 1
},
"RunRoot": "/run/containers/storage"
}
}
```
Run buildah info and retrieve only the store information:
```
$ buildah info --format={{".store"}}
map[GraphOptions:[overlay.override_kernel_check=true] GraphStatus:map[Backing Filesystem:extfs Supports d_type:true Native Overlay Diff:true] ImageStore:map[number:1] ContainerStore:map[number:2] GraphRoot:/var/lib/containers/storage RunRoot:/run/containers/storage GraphDriverName:overlay]
```
## SEE ALSO
buildah(1)

View File

@@ -1,39 +0,0 @@
# buildah-inspect "1" "May 2017" "buildah"
## NAME
buildah\-inspect - Display information about working containers or images or manifest lists.
## SYNOPSIS
**buildah inspect** [*options*] [**--**] *object*
## DESCRIPTION
Prints the low-level information on Buildah object(s) (e.g. container, images, manifest lists) identified by name or ID. By default, this will render all results in a
JSON array. If a container, image, or manifest list share the same name and no type is specified, the container's JSON is returned. If a format is specified,
the given template will be executed for each result.
## OPTIONS
**--format**, **-f** *template*
Use *template* as a Go template when formatting the output.
Users of this option should be familiar with the [*text/template*
package](https://golang.org/pkg/text/template/) in the Go standard library, and
with the internals of Buildah's implementation.
**--type**, **-t** **container** | **image** | **manifest**
Specify whether *object* is a container, image or a manifest list.
## EXAMPLE
```
buildah inspect containerID
buildah inspect --type container containerID
buildah inspect --type image imageID
buildah inspect --format '{{.OCIv1.Config.Env}}' alpine
```
## SEE ALSO
buildah(1)

View File

@@ -1,114 +0,0 @@
# buildah-login "1" "Apr 2019" "buildah"
## NAME
buildah\-login - Login to a container registry
## SYNOPSIS
**buildah login** [*options*] *registry*
## DESCRIPTION
**buildah login** logs into a specified registry server with the correct username
and password. **buildah login** reads in the username and password from STDIN.
The username and password can also be set using the **username** and **password** flags.
The path of the authentication file can be specified by the user by setting the **authfile**
flag. The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. If XDG_RUNTIME_DIR
is not set, the default is /run/user/$UID/containers/auth.json.
**buildah [GLOBAL OPTIONS]**
**buildah login [GLOBAL OPTIONS]**
**buildah login [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
## OPTIONS
**--authfile**
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--compat-auth-file**=*path*
Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
**--get-login**
Return the logged-in user for the registry. Return error if no login is found.
**--help**, **-h**
Print usage statement
**--password**, **-p**
Password for registry
**--password-stdin**
Take the password from stdin
**--tls-verify**
Require HTTPS and verification of certificates when talking to container registries (default: true). If explicitly set to true,
then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
TLS verification cannot be used when talking to an insecure registry.
**--username**, **-u**
Username for registry
**--verbose**, **-v**
Print detailed information about the credential store
## EXAMPLES
```
$ buildah login quay.io
Username: qiwanredhat
Password:
Login Succeeded!
```
```
$ buildah login -u testuser -p testpassword localhost:5000
Login Succeeded!
```
```
$ buildah login --authfile ./auth.json quay.io
Username: qiwanredhat
Password:
Login Succeeded!
```
```
$ buildah login --tls-verify=false -u test -p test localhost:5000
Login Succeeded!
```
```
$ buildah login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
Login Succeeded!
```
```
$ buildah login -u testuser --password-stdin < pw.txt quay.io
Login Succeeded!
```
```
$ echo $testpassword | buildah login -u testuser --password-stdin quay.io
Login Succeeded!
```
## SEE ALSO
buildah(1), buildah-logout(1), containers-auth.json(5)

View File

@@ -1,60 +0,0 @@
# buildah-logout "1" "Apr 2019" "buildah"
## NAME
buildah\-logout - Logout of a container registry
## SYNOPSIS
**buildah logout** [*options*] *registry*
## DESCRIPTION
**buildah logout** logs out of a specified registry server by deleting the cached credentials
stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. See containers-auth.json(5) for more information.
All the cached credentials can be removed by setting the **all** flag.
**buildah [GLOBAL OPTIONS]**
**buildah logout [GLOBAL OPTIONS]**
**buildah logout [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
## OPTIONS
**--all**, **-a**
Remove the cached credentials for all registries in the auth file
**--authfile**
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--compat-auth-file**=*path*
Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
**--help**, **-h**
Print usage statement
## EXAMPLES
```
$ buildah logout quay.io
Removed login credentials for quay.io
```
```
$ buildah logout --authfile authdir/myauths.json quay.io
Removed login credentials for quay.io
```
```
$ buildah logout --all
Remove login credentials for all registries
```
## SEE ALSO
buildah(1), buildah-login(1), containers-auth.json(5)

View File

@@ -1,170 +0,0 @@
# buildah-manifest-add "1" "September 2019" "buildah"
## NAME
buildah\-manifest\-add - Add an image or artifact to a manifest list or image index.
## SYNOPSIS
**buildah manifest add** [options...] *listNameOrIndexName* *imageOrArtifactName* [...]
## DESCRIPTION
Adds the specified image to the specified manifest list or image index, or
creates an artifact manifest and adds it to the specified image index.
## RETURN VALUE
The list image's ID and the digest of the image's manifest.
## OPTIONS
**--all**
If the image which should be added to the list or index is itself a list or
index, add all of the contents to the local list. By default, only one image
from such a list or index will be added to the list or index. Combining
*--all* with any of the other options described below is NOT recommended.
**--annotation** *annotation=value*
Set an annotation on the entry for the newly-added image or artifact manifest.
**--arch**
Override the architecture which the list or index records as a requirement for
the image. If *imageName* refers to a manifest list or image index, the
architecture information will be retrieved from it. Otherwise, it will be
retrieved from the image's configuration information.
**--artifact**
Create an artifact manifest and add it to the image index. Arguments after the
index name will be interpreted as file names rather than as image references.
In most scenarios, the **--artifact-type** option should also be specified.
**--artifact-annotation** *annotation=value*
When creating an artifact manifest and adding it to the image index, set an
annotation in the artifact manifest.
**--artifact-config** *filename*
When creating an artifact manifest and adding it to the image index, use the
specified file's contents as the configuration blob in the artifact manifest.
In most scenarios, leaving the default value, which signifies an empty
configuration, unchanged, is the preferred option.
**--artifact-config-type** *type*
When creating an artifact manifest and adding it to the image index, use the
specified MIME type as the `mediaType` associated with the configuration blob
in the artifact manifest. In most scenarios, leaving the default value, which
signifies either an empty configuration or the standard OCI configuration type,
unchanged, is the preferred option.
**--artifact-exclude-titles**
When creating an artifact manifest and adding it to the image index, do not
set "org.opencontainers.image.title" annotations equal to the file's basename
for each file added to the artifact manifest. Tools which retrieve artifacts
from a registry may use these values to choose names for files when saving
artifacts to disk, so this option is not recommended unless it is required
for interoperability with a particular registry.
**--artifact-layer-type** *type*
When creating an artifact manifest and adding it to the image index, use the
specified MIME type as the `mediaType` associated with the files' contents. If
not specified, guesses based on either the files' names or their contents will
be made and used, but the option should be specified if certainty is needed.
**--artifact-subject** *imageName*
When creating an artifact manifest and adding it to the image index, set the
*subject* field in the artifact manifest to mark the artifact manifest as being
associated with the specified image in some way. An artifact manifest can only
be associated with, at most, one subject.
**--artifact-type** *type*
When creating an artifact manifest, use the specified MIME type as the
manifest's `artifactType` value instead of the less informative default value.
**--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--features**
Specify the features list which the list or index records as requirements for
the image. This option is rarely used.
**--os**
Override the OS which the list or index records as a requirement for the image.
If *imageName* refers to a manifest list or image index, the OS information
will be retrieved from it. Otherwise, it will be retrieved from the image's
configuration information.
**--os-features**
Specify the OS features list which the list or index records as requirements
for the image. This option is rarely used.
**--os-version**
Specify the OS version which the list or index records as a requirement for the
image. This option is rarely used.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
**--variant**
Specify the variant which the list or index records for the image. This option
is typically used to distinguish between multiple entries which share the same
architecture value, but which expect different versions of its instruction set.
## EXAMPLE
```
buildah manifest add mylist:v1.11 docker://fedora
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
```
```
buildah manifest add --all mylist:v1.11 docker://fedora
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
```
```
buildah manifest add --arch arm64 --variant v8 mylist:v1.11 docker://fedora@sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
```
```
buildah manifest add --artifact --artifact-type application/x-cd-image mylist:v1.11 ./imagefile.iso
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:1768fae728f6f8ff3d0f8c7df409d7f4f0ca5c89b070810bd4aa4a2ed2eca8bb
```
## SEE ALSO
buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1), docker-login(1), containers-auth.json(5)

View File

@@ -1,84 +0,0 @@
# buildah-manifest-annotate "1" "September 2019" "buildah"
## NAME
buildah\-manifest\-annotate - Add and update information about an image or artifact to a manifest list or image index.
## SYNOPSIS
**buildah manifest annotate** [options...] *listNameOrIndexName* *imageManifestDigestOrImageOrArtifactName*
## DESCRIPTION
Adds or updates information about an image or artifact included in a manifest list or image index.
## RETURN VALUE
The list image's ID and the digest of the image's manifest.
## OPTIONS
**--annotation** *annotation=value*
Set an annotation on the entry for the specified image or artifact. If
**--index** is also specified, sets the annotation on the entire image index.
**--arch**
Override the architecture which the list or index records as a requirement for
the image. This is usually automatically retrieved from the image's
configuration information, so it is rarely necessary to use this option.
**--features**
Specify the features list which the list or index records as requirements for
the image. This option is rarely used.
**--index**
Treats arguments to the **--annotation** option as annotation values to be set
on the image index itself rather than on an entry in the image index. Implied
for **--subject**.
**--os**
Override the OS which the list or index records as a requirement for the image.
This is usually automatically retrieved from the image's configuration
information, so it is rarely necessary to use this option.
**--os-features**
Specify the OS features list which the list or index records as requirements
for the image. This option is rarely used.
**--os-version**
Specify the OS version which the list or index records as a requirement for the
image. This option is rarely used.
**--subject** *imageName*
Set the *subject* field in the image index to mark the image index as being
associated with the specified image in some way. An image index can only be
associated with, at most, one subject.
**--variant**
Specify the variant which the list or index records for the image. This option
is typically used to distinguish between multiple entries which share the same
architecture value, but which expect different versions of its instruction set.
## EXAMPLE
```
buildah manifest annotate --arch arm64 --variant v8 mylist:v1.11 sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
```
```
buildah manifest annotate --index --annotation food=yummy mylist:v1.11
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
```
## SEE ALSO
buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)

View File

@@ -1,66 +0,0 @@
# buildah-manifest-create "1" "August 2022" "buildah"
## NAME
buildah\-manifest\-create - Create a manifest list or image index.
## SYNOPSIS
**buildah manifest create** [options...] *listNameOrIndexName* [*imageName* ...]
## DESCRIPTION
Creates a new manifest list and stores it as an image in local storage using
the specified name.
If additional images are specified, they are added to the newly-created list or
index.
## RETURN VALUE
The randomly-generated image ID of the newly-created list or index. The image
can be deleted using the *buildah rmi* command.
## OPTIONS
**--all**
If any of the images which should be added to the new list or index are
themselves lists or indexes, add all of their contents. By default, only one
image from such a list will be added to the newly-created list or index.
**--amend**
If a manifest list named *listNameOrIndexName* already exists, modify the
preexisting list instead of exiting with an error. The contents of
*listNameOrIndexName* are not modified if no *imageName*s are given.
**--annotation** *annotation=value*
Set an annotation on the newly-created image index.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
## EXAMPLE
```
buildah manifest create mylist:v1.11
941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
buildah manifest create --amend mylist:v1.11
941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
```
```
buildah manifest create mylist:v1.11 docker://fedora
941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
```
```
buildah manifest create --all mylist:v1.11 docker://fedora
941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
```
## SEE ALSO
buildah(1), buildah-manifest(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)

View File

@@ -1,40 +0,0 @@
% buildah-manifest-exists(1)
## NAME
buildah\-manifest\-exists - Check if the given manifest list exists in local storage
## SYNOPSIS
**buildah manifest exists** *manifest*
## DESCRIPTION
**buildah manifest exists** checks if a manifest list exists in local storage. Buildah will
return an exit code of `0` when the manifest list is found. A `1` will be returned otherwise.
An exit code of `125` indicates there was another issue.
## OPTIONS
#### **--help**, **-h**
Print usage statement.
## EXAMPLE
Check if a manifest list called `list1` exists (the manifest list does actually exist).
```
$ buildah manifest exists list1
$ echo $?
0
$
```
Check if a manifest list called `mylist` exists (the manifest list does not actually exist).
```
$ buildah manifest exists mylist
$ echo $?
1
$
```
## SEE ALSO
**[buildah(1)](buildah.1.md)**, **[buildah-manifest(1)](buildah-manifest.1.md)**

View File

@@ -1,37 +0,0 @@
# buildah-manifest-inspect "1" "September 2019" "buildah"
## NAME
buildah\-manifest\-inspect - Display a manifest list or image index.
## SYNOPSIS
**buildah manifest inspect** *listNameOrIndexName*
## DESCRIPTION
Displays the manifest list or image index stored using the specified image name.
## RETURN VALUE
A formatted JSON representation of the manifest list or image index.
## OPTIONS
**--authfile** *path*
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
## EXAMPLE
```
buildah manifest inspect mylist:v1.11
```
## SEE ALSO
buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-push(1), buildah-rmi(1)

View File

@@ -1,113 +0,0 @@
# buildah-manifest-push "1" "September 2019" "buildah"
## NAME
buildah\-manifest\-push - Push a manifest list or image index to a registry.
## SYNOPSIS
**buildah manifest push** [options...] *listNameOrIndexName* *transport:details*
## DESCRIPTION
Pushes a manifest list or image index to a registry.
## RETURN VALUE
The list image's ID and the digest of the image's manifest.
## OPTIONS
**--add-compression** *compression*
Ensures that a variant with the requested compression is added to the manifest list for each platform, keeping the original instance
intact in the same manifest list. Supported values are `gzip`, `zstd` and `zstd:chunked`.
Note: this differs from `--compression-format`, which replaces each instance with a variant using the specified compression,
while `--add-compression` adds the compressed variant to the manifest list without modifying the
original instance.
**--all**
Push the images mentioned in the manifest list or image index, in addition to
the list or index itself. (Default true)
**--authfile** *path*
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--compression-format** *format*
Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
**--compression-level** *level*
Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--digestfile** *Digestfile*
After copying the image, write the digest of the resulting image to the file.
**--force-compression**
If set, push uses the specified compression algorithm even if the destination contains a differently-compressed variant already.
Defaults to `true` if `--compression-format` is explicitly specified on the command-line, `false` otherwise.
**--format**, **-f**
Manifest list type (oci or v2s2) to use when pushing the list (default is oci).
**--quiet**, **-q**
Don't output progress information when pushing lists.
**--remove-signatures**
Don't copy signatures when pushing images.
**--retry** *attempts*
Number of times to retry in case of failure when performing push of images to registry.
Defaults to `3`.
**--retry-delay** *duration*
Duration of delay between retry attempts in case of failure when performing push of images to registry.
Defaults to `2s`.
**--rm**
Delete the manifest list or image index from local storage if pushing succeeds.
**--sign-by** *fingerprint*
Sign the pushed images using the GPG key that matches the specified fingerprint.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
## EXAMPLE
```
buildah manifest push mylist:v1.11 registry.example.org/mylist:v1.11
```
## SEE ALSO
buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-rmi(1), docker-login(1)


@@ -1,28 +0,0 @@
# buildah-manifest-remove "1" "September 2019" "buildah"
## NAME
buildah\-manifest\-remove - Remove an image from a manifest list or image index.
## SYNOPSIS
**buildah manifest remove** *listNameOrIndexName* *imageNameOrManifestDigestOrArtifactName*
## DESCRIPTION
Removes the image with the specified name or digest from the specified manifest
list or image index, or the specified artifact from the specified image index.
## RETURN VALUE
The list image's ID and the digest of the removed image's manifest.
## EXAMPLE
```
buildah manifest remove mylist:v1.11 sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
```
## SEE ALSO
buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)


@@ -1,25 +0,0 @@
# buildah-manifest-rm "1" "April 2021" "buildah"
## NAME
buildah\-manifest\-rm - Removes one or more manifest lists.
## SYNOPSIS
**buildah manifest rm** [*listNameOrIndexName* ...]
## DESCRIPTION
Removes one or more locally stored manifest lists.
## EXAMPLE
buildah manifest rm <list>
buildah manifest rm listID1 listID2
## FILES
**storage.conf** (`/etc/containers/storage.conf`)
storage.conf is the storage configuration file for all tools using containers/storage.
The storage configuration file specifies all of the available container storage options for tools using shared container storage.
## SEE ALSO
buildah(1), containers-storage.conf(5), buildah-manifest(1)


@@ -1,77 +0,0 @@
# buildah-manifest "1" "September 2019" "buildah"
## NAME
buildah-manifest - Create and manipulate manifest lists and image indexes.
## SYNOPSIS
buildah manifest COMMAND [OPTIONS] [ARG...]
## DESCRIPTION
The `buildah manifest` command provides subcommands which can be used to:
* Create a working Docker manifest list or OCI image index.
* Add an entry to a manifest list or image index for a specified image.
* Add an entry to an image index for an artifact manifest referring to a file.
* Add or update information about an entry in a manifest list or image index.
* Delete a manifest list or image index.
* Push a manifest list or image index to a registry or other location.
## SUBCOMMANDS
| Command | Man Page | Description |
| ------- | -------------------------------------------------------------- | --------------------------------------------------------------------------- |
| add | [buildah-manifest-add(1)](buildah-manifest-add.1.md) | Add an image or artifact to a manifest list or image index. |
| annotate | [buildah-manifest-annotate(1)](buildah-manifest-annotate.1.md) | Add or update information about an image or artifact in a manifest list or image index. |
| create | [buildah-manifest-create(1)](buildah-manifest-create.1.md) | Create a manifest list or image index. |
| exists | [buildah-manifest-exists(1)](buildah-manifest-exists.1.md) | Check if a manifest list exists in local storage. |
| inspect | [buildah-manifest-inspect(1)](buildah-manifest-inspect.1.md) | Display the contents of a manifest list or image index. |
| push | [buildah-manifest-push(1)](buildah-manifest-push.1.md) | Push a manifest list or image index to a registry or other location. |
| remove | [buildah-manifest-remove(1)](buildah-manifest-remove.1.md) | Remove an image from a manifest list or image index. |
| rm | [buildah-manifest-rm(1)](buildah-manifest-rm.1.md) | Remove manifest list from local storage. |
## EXAMPLES
### Building a multi-arch manifest list from a Containerfile
Assuming the `Containerfile` uses `RUN` instructions, the host needs
a way to execute non-native binaries. Configuring this is beyond
the scope of this example. Building a multi-arch manifest list
`shazam` in parallel across 4-threads can be done like this:
$ platarch=linux/amd64,linux/ppc64le,linux/arm64,linux/s390x
$ buildah build --jobs=4 --platform=$platarch --manifest shazam .
**Note:** The `--jobs` argument is optional, and the `--manifest` option
should be used instead of the `-t` or `--tag` options.
### Assembling a multi-arch manifest from separately built images
Assuming `example.com/example/shazam:$arch` images are built separately
on other hosts and pushed to the `example.com` registry, they may
be combined into a manifest list and pushed using a simple loop:
$ REPO=example.com/example/shazam
$ buildah manifest create $REPO:latest
$ for IMGTAG in amd64 s390x ppc64le arm64; do \
buildah manifest add $REPO:latest docker://$REPO:$IMGTAG; \
done
$ buildah manifest push --all $REPO:latest
**Note:** The `add` instruction argument order is `<manifest>` then `<image>`.
Also, the `--all` push option is required to ensure all contents are
pushed, not just the native platform/arch.
### Removing and tagging a manifest list before pushing
Special care is needed when removing and pushing manifest lists, as opposed
to the contents. You almost always want to use the `manifest rm` and
`manifest push --all` subcommands. For example, a rename and push could
be performed like this:
$ buildah tag localhost/shazam example.com/example/shazam
$ buildah manifest rm localhost/shazam
$ buildah manifest push --all example.com/example/shazam
## SEE ALSO
buildah(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-manifest-rm(1)


@@ -1,86 +0,0 @@
# buildah-mkcw "1" "July 2023" "buildah"
## NAME
buildah\-mkcw - Convert a conventional container image into a confidential workload image.
## SYNOPSIS
**buildah mkcw** [*options*] *source* *destination*
## DESCRIPTION
Converts the contents of a container image into a new container image which is
suitable for use in a trusted execution environment (TEE), typically run using
krun (i.e., crun built with the libkrun feature enabled and invoked as *krun*).
Instead of the conventional contents, the root filesystem of the created image
will contain an encrypted disk image and configuration information for krun.
## source
A container image, stored locally or in a registry
## destination
A container image, stored locally or in a registry
## OPTIONS
**--add-file** *source[:destination]*
Read the contents of the file `source` and add it to the committed image as a
file at `destination`. If `destination` is not specified, the path of `source`
will be used. The new file will be owned by UID 0, GID 0, have 0644
permissions, and be given a current timestamp. This option can be specified
multiple times.
**--attestation-url**, **-u** *url*
The location of a key broker / attestation server.
If a value is specified, the new image's workload ID, along with the passphrase
used to encrypt the disk image, will be registered with the server, and the
server's location will be stored in the container image.
At run-time, krun is expected to contact the server to retrieve the passphrase
using the workload ID, which is also stored in the container image.
If no value is specified, a *passphrase* value *must* be specified.
**--base-image**, **-b** *image*
An alternate image to use as the base for the output image. By default,
the *scratch* non-image is used.
**--cpus**, **-c** *number*
The number of virtual CPUs which the image expects to be run with at run-time.
If not specified, a default value will be supplied.
**--firmware-library**, **-f** *file*
The location of the libkrunfw-sev shared library. If not specified, `buildah`
checks for its presence in a number of hard-coded locations.
**--memory**, **-m** *number*
The amount of memory which the image expects to be run with at run-time, as a
number of megabytes. If not specified, a default value will be supplied.
**--passphrase**, **-p** *text*
The passphrase to use to encrypt the disk image which will be included in the
container image.
If no value is specified, but an *--attestation-url* value is specified, a
randomly-generated passphrase will be used.
The authors recommend setting an *--attestation-url* but not a *--passphrase*.
**--slop**, **-s** *{percentage%|sizeKB|sizeMB|sizeGB}*
Extra space to allocate for the disk image compared to the size of the
container image's contents, expressed either as a percentage (..%) or a size
value (bytes, or larger units if suffixes like KB or MB are present), or a sum
of two or more such specifications. If not specified, `buildah` guesses that
25% more space than the contents will be enough, but this option is provided in
case its guess is wrong. If the specified or computed size is less than 10
megabytes, it will be increased to 10 megabytes.
**--type**, **-t** {SEV|SNP}
The type of trusted execution environment (TEE) which the image should be
marked for use with. Accepted values are "SEV" (AMD Secure Encrypted
Virtualization - Encrypted State) and "SNP" (AMD Secure Encrypted
Virtualization - Secure Nested Paging). If not specified, defaults to "SNP".
**--workload-id**, **-w** *id*
A workload identifier which will be recorded in the container image, to be used
at run-time for retrieving the passphrase which was used to encrypt the disk
image. If not specified, a semi-random value will be derived from the base
image's image ID.
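As a usage sketch (the image names and attestation URL are illustrative, not prescriptive):
```
buildah mkcw --type SEV --attestation-url https://kbs.example.com myimage registry.example.com/myimage-tee
```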
## SEE ALSO
buildah(1)


@@ -1,66 +0,0 @@
# buildah-mount "1" "March 2017" "buildah"
## NAME
buildah\-mount - Mount a working container's root filesystem.
## SYNOPSIS
**buildah mount** [*container* ...]
## DESCRIPTION
Mounts the specified container's root file system in a location which can be
accessed from the host, and returns its location.
If the mount command is invoked without any arguments, the tool will list all of the currently mounted containers.
When running in rootless mode, mount runs in a different namespace so
that the mounted volume might not be accessible from the host when
using a driver different than `vfs`. To be able to access the file
system mounted, you might need to create the mount namespace
separately as part of `buildah unshare`. In the environment created
with `buildah unshare` you can then use `buildah mount` and have
access to the mounted file system.
## RETURN VALUE
The location of the mounted file system. On error an empty string and errno is
returned.
## OPTIONS
**--json**
Output in JSON format.
## EXAMPLE
```
buildah mount working-container
/var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
```
```
buildah mount
working-container /var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
fedora-working-container /var/lib/containers/storage/overlay2/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged
```
```
buildah mount working-container fedora-working-container ubi8-working-container
working-container /var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
fedora-working-container /var/lib/containers/storage/overlay/c3ec418be5bda5b72dca74c4d397e05829fe62ecd577dd7518b5f7fc1ca5f491/merged
ubi8-working-container /var/lib/containers/storage/overlay/03a071f206f70f4fcae5379bd5126be86b5352dc2a0c3449cd6fca01b77ea868/merged
```
If running in rootless mode, you need to do a buildah unshare first to use
the mount point.
```
$ buildah unshare
# buildah mount working-container
/var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
# cp foobar /var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
# buildah unmount working-container
# exit
$ buildah commit working-container newimage
```
## SEE ALSO
buildah(1), buildah-unshare(1), buildah-umount(1)


@@ -1,33 +0,0 @@
# buildah-prune "1" "Jan 2023" "buildah"
## NAME
buildah\-prune - Clean up intermediate images as well as build and mount cache.
## SYNOPSIS
**buildah prune**
## DESCRIPTION
Clean up intermediate images as well as build and mount cache.
## OPTIONS
**--all**, **-a**
All local images that do not have containers using them as a reference image will be removed from the system.
**--force**, **-f**
This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
## EXAMPLE
buildah prune
buildah prune --force
## SEE ALSO
buildah(1), containers-registries.conf(5), containers-storage.conf(5)


@@ -1,162 +0,0 @@
# buildah-pull "1" "July 2018" "buildah"
## NAME
buildah\-pull - Pull an image from a registry.
## SYNOPSIS
**buildah pull** [*options*] *image*
## DESCRIPTION
Pulls an image based upon the specified input. It supports all transports from `containers-transports(5)` (see examples below). If no transport is specified, the input is subject to short-name resolution (see `containers-registries.conf(5)`) and the `docker` (i.e., container registry) transport is used.
### DEPENDENCIES
Buildah resolves the path to the registry to pull from by using the /etc/containers/registries.conf
file, containers-registries.conf(5). If the `buildah pull` command fails with an "image not known" error,
first verify that the registries.conf file is installed and configured appropriately.
## RETURN VALUE
The image ID of the image that was pulled. On error 1 is returned.
## OPTIONS
**--all-tags**, **-a**
All tagged images in the repository will be pulled.
**--arch**="ARCH"
Set the ARCH of the image to be pulled to the provided value instead of using the architecture of the host. (Examples: arm, arm64, 386, amd64, ppc64le, s390x)
**--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--decryption-key** *key[:passphrase]*
The [key[:passphrase]] to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
**--os**="OS"
Set the OS of the image to be pulled instead of using the current operating system of the host.
**--platform**="OS/ARCH[/VARIANT]"
Set the OS/ARCH of the image to be pulled
to the provided value instead of using the current operating system and
architecture of the host (for example `linux/arm`).
OS/ARCH pairs are those used by the Go Programming Language. In several cases
the ARCH value for a platform differs from one produced by other tools such as
the `arch` command. Valid OS and architecture name combinations are listed as
values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environment,
and can also be found by running `go tool dist list`.
**NOTE:** The `--platform` option may not be used in combination with the `--arch`, `--os`, or `--variant` options.
**--policy**=**always**|**missing**|**never**|**newer**
Pull image policy. The default is **missing**.
- **always**: Always pull the image and throw an error if the pull fails.
- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails.
- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found.
- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
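For example, to refresh a local copy only when the registry holds a newer image (the image name is illustrative):
```
buildah pull --policy=newer registry.example.com/myrepo/myimage:latest
```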
**--quiet**, **-q**
If an image needs to be pulled from the registry, suppress progress output.
**--remove-signatures**
Don't copy signatures when pulling images.
**--retry** *attempts*
Number of times to retry if pulling images from the registry fails. Defaults to `3`.
**--retry-delay** *duration*
Delay between retry attempts if pulling images from the registry fails. Defaults to `2s`.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
**--variant**=""
Set the architecture variant of the image to be pulled.
## EXAMPLE
buildah pull imagename
buildah pull docker://myregistry.example.com/imagename
buildah pull docker-daemon:imagename:imagetag
buildah pull docker-archive:filename
buildah pull oci-archive:filename
buildah pull dir:directoryname
buildah pull --tls-verify=false myregistry/myrepository/imagename:imagetag
buildah pull --creds=myusername:mypassword --cert-dir ~/auth myregistry/myrepository/imagename:imagetag
buildah pull --authfile=/tmp/auths/myauths.json myregistry/myrepository/imagename:imagetag
buildah pull --arch=aarch64 myregistry/myrepository/imagename:imagetag
buildah pull --arch=arm --variant=v7 myregistry/myrepository/imagename:imagetag
## ENVIRONMENT
**BUILD\_REGISTRY\_SOURCES**
BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
lists of registry names under the keys `insecureRegistries`,
`blockedRegistries`, and `allowedRegistries`.
When pulling an image from a registry, if the name of the registry matches any
of the items in the `blockedRegistries` list, the image pull attempt is denied.
If there are registries in the `allowedRegistries` list, and the registry's
name is not in the list, the pull attempt is denied.
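A minimal sketch of such a policy, with illustrative registry names:
```
export BUILD_REGISTRY_SOURCES='{"insecureRegistries": [], "blockedRegistries": ["registry.untrusted.example.com"], "allowedRegistries": ["registry.example.com"]}'
buildah pull registry.example.com/myrepo/myimage:latest
```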
**TMPDIR**
The TMPDIR environment variable allows the user to specify where temporary files
are stored while pulling and pushing images. Defaults to '/var/tmp'.
## FILES
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**policy.json** (`/etc/containers/policy.json`)
Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
## SEE ALSO
buildah(1), buildah-from(1), buildah-login(1), docker-login(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5), containers-auth.json(5)


@@ -1,185 +0,0 @@
# buildah-push "1" "June 2017" "buildah"
## NAME
buildah\-push - Push an image, manifest list or image index from local storage to elsewhere.
## SYNOPSIS
**buildah push** [*options*] *image* [*destination*]
## DESCRIPTION
Pushes an image from local storage to a specified destination, decompressing
and recompressing layers as needed.
## imageID
Image stored in local container/storage
## DESTINATION
DESTINATION is the location the container image is pushed to. It supports all transports from `containers-transports(5)` (see examples below). If no transport is specified, the `docker` (i.e., container registry) transport is used.
## OPTIONS
**--all**
If the specified image is a manifest list or image index, push the images in
addition to the list or index itself.
**--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--cert-dir** *path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.
**--compression-format** *format*
Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
`zstd:chunked` is incompatible with encrypting images, and will be treated as `zstd` with a warning in that case.
**--compression-level** *level*
Specifies the compression level to use. The value is specific to the compression algorithm used; for example, zstd accepts values in the range 1-20 (inclusive), while gzip accepts 1-9 (inclusive).
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--digestfile** *Digestfile*
After copying the image, write the digest of the resulting image to the file.
**--disable-compression**, **-D**
Don't compress copies of filesystem layers which will be pushed.
**--encrypt-layer** *layer(s)*
Layer(s) to encrypt: 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified.
**--encryption-key** *key*
The [protocol:keyfile] specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
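For example, pushing with JWE encryption (the key path and destination are illustrative):
```
buildah push --encryption-key jwe:/path/to/key.pem imageID docker://registry.example.com/repository:tag
```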
**--force-compression**
If set, push uses the specified compression algorithm even if the destination contains a differently-compressed variant already.
Defaults to `true` if `--compression-format` is explicitly specified on the command-line, `false` otherwise.
**--format**, **-f**
Manifest Type (oci, v2s2, or v2s1) to use when pushing an image. (default is manifest type of the source image, with fallbacks)
**--quiet**, **-q**
When writing the output image, suppress progress output.
**--remove-signatures**
Don't copy signatures when pushing images.
**--retry** *attempts*
Number of times to retry if pushing images to the registry fails. Defaults to `3`.
**--retry-delay** *duration*
Delay between retry attempts if pushing images to the registry fails. Defaults to `2s`.
**--rm**
When pushing a manifest list or image index, delete them from local storage if pushing succeeds.
**--sign-by** *fingerprint*
Sign the pushed image using the GPG key that matches the specified fingerprint.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
## EXAMPLE
This example pushes the image specified by the imageID to a local directory in docker format.
`# buildah push imageID dir:/path/to/image`
This example pushes the image specified by the imageID to a local directory in oci format.
`# buildah push imageID oci:/path/to/layout:image:tag`
This example pushes the image specified by the imageID to a tar archive in oci format.
`# buildah push imageID oci-archive:/path/to/archive:image:tag`
This example pushes the image specified by the imageID to a container registry named registry.example.com.
`# buildah push imageID docker://registry.example.com/repository:tag`
This example pushes the image specified by the imageID to a container registry named registry.example.com and saves the digest in the specified digestfile.
`# buildah push --digestfile=/tmp/mydigest imageID docker://registry.example.com/repository:tag`
This example works like **docker push**, assuming *registry.example.com/my_image* is a local image.
`# buildah push registry.example.com/my_image`
This example pushes the image specified by the imageID to a private container registry named registry.example.com with authentication from /tmp/auths/myauths.json.
`# buildah push --authfile /tmp/auths/myauths.json imageID docker://registry.example.com/repository:tag`
This example pushes the image specified by the imageID and puts it into the local docker container store.
`# buildah push imageID docker-daemon:image:tag`
This example pushes the image specified by the imageID and puts it into the registry on the localhost while turning off tls verification.
`# buildah push --tls-verify=false imageID localhost:5000/my-imageID`
This example pushes the image specified by the imageID and puts it into the registry on the localhost using credentials and certificates for authentication.
`# buildah push --cert-dir ~/auth --tls-verify=true --creds=username:password imageID localhost:5000/my-imageID`
## ENVIRONMENT
**BUILD\_REGISTRY\_SOURCES**
BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
lists of registry names under the keys `insecureRegistries`,
`blockedRegistries`, and `allowedRegistries`.
When pushing an image to a registry, the portion of the destination image
name that corresponds to a registry is compared to the items in the
`blockedRegistries` list; if it matches any of them, the push attempt is
denied. If there are registries in the `allowedRegistries` list, and the
portion of the name that corresponds to the registry is not in the list, the
push attempt is denied.
**TMPDIR**
The TMPDIR environment variable allows the user to specify where temporary files
are stored while pulling and pushing images. Defaults to '/var/tmp'.
## FILES
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**policy.json** (`/etc/containers/policy.json`)
Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
## SEE ALSO
buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1), containers-transports(5), containers-auth.json(5)


@@ -1,19 +0,0 @@
# buildah-rename "1" "July 2018" "buildah"
## NAME
buildah\-rename - Rename a local container.
## SYNOPSIS
**buildah rename** *container* *new-name*
## DESCRIPTION
Rename a local container.
## EXAMPLE
buildah rename containerName NewName
buildah rename containerID NewName
## SEE ALSO
buildah(1)


@@ -1,27 +0,0 @@
# buildah-rm "1" "March 2017" "buildah"
## NAME
buildah\-rm - Removes one or more working containers.
## SYNOPSIS
**buildah rm** [*container* ...]
## DESCRIPTION
Removes one or more working containers, unmounting them if necessary.
## OPTIONS
**--all**, **-a**
All Buildah containers will be removed. Buildah containers are denoted with an '*' in the 'BUILDER' column listed by the command 'buildah containers'. A container name or id cannot be provided when this option is used.
## EXAMPLE
buildah rm containerID
buildah rm containerID1 containerID2 containerID3
buildah rm --all
## SEE ALSO
buildah(1)


@@ -1,77 +0,0 @@
# buildah-rmi "1" "March 2017" "buildah"
## NAME
buildah\-rmi - Removes one or more images.
## SYNOPSIS
**buildah rmi** [*image* ...]
## DESCRIPTION
Removes one or more locally stored images.
Passing an argument _image_ deletes it, along with any of its dangling (untagged) parent images.
## LIMITATIONS
* If the image was pushed to a directory path using the 'dir:' transport,
the rmi command can not remove the image. Instead, standard file system
commands should be used.
* If _imageID_ is a name but does not include a registry name, buildah will
attempt to find and remove the named image using the registry name _localhost_.
If no such image is found, it will search for the intended image by attempting
to expand the given name using the names of registries provided in the system's
registries configuration file, registries.conf.
* If the _imageID_ refers to a *manifest list* or *image index*, this command
will ***not*** do what you expect! This command will remove the images
associated with the *manifest list* or *index* (not the manifest list/image index
itself). To remove that, use the `buildah manifest rm` subcommand instead.
## OPTIONS
**--all**, **-a**
All local images that do not have containers using them as a reference image will be removed from the system.
An image name or id cannot be provided when this option is used. Read-only images, configured by modifying the "additionalimagestores" option in the /etc/containers/storage.conf file, cannot be removed.
**--force**, **-f**
This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
**--prune**, **-p**
All local images that do not have a tag and do not have a child image pointing to them will be removed from the system.
An image name or id cannot be provided when this option is used.
## EXAMPLE
buildah rmi imageID
buildah rmi --all
buildah rmi --all --force
buildah rmi --prune
buildah rmi --force imageID
buildah rmi imageID1 imageID2 imageID3
## FILES
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**storage.conf** (`/etc/containers/storage.conf`)
storage.conf is the storage configuration file for all tools using containers/storage
The storage configuration file specifies all of the available container storage options for tools using shared container storage.
## SEE ALSO
buildah(1), containers-registries.conf(5), containers-storage.conf(5)


@@ -1,424 +0,0 @@
# buildah-run "1" "March 2017" "buildah"
## NAME
buildah\-run - Run a command inside of the container.
## SYNOPSIS
**buildah run** [*options*] [**--**] *container* *command*
## DESCRIPTION
Launches a container and runs the specified command in that container using the
container's root filesystem as a root filesystem, using configuration settings
inherited from the container's image or as specified using previous calls to
the *buildah config* command. To execute *buildah run* within an
interactive shell, specify the --tty option.
## OPTIONS
**--add-history**
Add an entry to the history which will note what command is being invoked.
Defaults to false.
Note: You can also override the default value of --add-history by setting the
BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
**--cap-add**=*CAP\_xxx*
Add the specified capability to the set of capabilities which will be granted
to the specified command.
Certain capabilities are granted by default; this option can be used to add
more beyond the defaults, which may have been modified by **--cap-add** and
**--cap-drop** options used with the *buildah from* invocation which created
the container.
**--cap-drop**=*CAP\_xxx*
Drop the specified capability from the set of capabilities which will be granted
to the specified command.
The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER,
CAP\_FSETID, CAP\_KILL, CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP,
CAP\_SETGID, CAP\_SETPCAP, and CAP\_SETUID capabilities are
granted by default; this option can be used to remove them from the defaults,
which may have been modified by **--cap-add** and **--cap-drop** options used
with the *buildah from* invocation which created the container. The list of default capabilities is managed in containers.conf(5).
If a capability is specified to both the **--cap-add** and **--cap-drop**
options, it will be dropped, regardless of the order in which the options were
given.
**--cgroupns** *how*
Sets the configuration for the cgroup namespaces for the container.
The configured value can be "" (the empty string) or "private" to indicate
that a new cgroup namespace should be created, or it can be "host" to indicate
that the cgroup namespace in which `buildah` itself is being run should be reused.
**--contextdir** *directory*
Allows setting the context directory for the current RUN invocation. Specifying a
context directory causes the RUN context to treat it as the root directory for any
source specified in a `--mount` of type 'bind'.
**--device**=*device*
Add a host device, or devices under a directory, to the environment in which
the command will be run. The optional *permissions* parameter can be used to
specify device permissions, using any one or more of
**r** for read, **w** for write, and **m** for **mknod**(2).
Example: **--device=/dev/sdc:/dev/xvdc:rwm**.
Note: if _host-device_ is a symbolic link then it will be resolved first.
The container will only store the major and minor numbers of the host device.
The device to share can also be specified using a Container Device Interface
(CDI) specification (https://github.com/cncf-tags/container-device-interface).
Note: if the user only has access rights via a group, accessing the device
from inside a rootless container will fail. The **crun**(1) runtime offers a
workaround for this by adding the option **--annotation run.oci.keep_original_groups=1**.
**--env**, **-e** *env=value*
Temporarily add a value (e.g. env=*value*) to the environment for the running
process. Unlike `buildah config --env`, the environment will not persist to
later calls to `buildah run` or to the built image. Can be used multiple times.
**--hostname**
Set the hostname inside of the running container.
**--ipc** *how*
Sets the configuration for the IPC namespaces for the container.
The configured value can be "" (the empty string) or "private" to indicate
that a new IPC namespace should be created, or it can be "host" to indicate
that the IPC namespace in which `buildah` itself is being run should be reused,
or it can be the path to an IPC namespace which is already in use by another
process.
**--isolation** *type*
Controls what type of isolation is used for running the process. Recognized
types include *oci* (OCI-compatible runtime, the default), *rootless*
(OCI-compatible runtime invoked using a modified configuration, with
*--no-new-keyring* added to its *create* invocation, reusing the host's network
and UTS namespaces, and creating private IPC, PID, mount, and user namespaces;
the default for unprivileged users), and *chroot* (an internal wrapper that
leans more toward chroot(1) than container technology, reusing the host's
control group, network, IPC, and PID namespaces, and creating private mount and
UTS namespaces, and creating user namespaces only when they're required for ID
mapping).
Note: You can also override the default isolation type by setting the
BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
**--mount**=*type=TYPE,TYPE-SPECIFIC-OPTION[,...]*
Attach a filesystem mount to the container.
Current supported mount TYPES are bind, cache, secret and tmpfs. Writes to `bind` and `tmpfs` mounts are discarded after the command finishes, while changes to `cache` mounts persist across uses.
e.g.
type=bind,source=/path/on/host,destination=/path/in/container
type=tmpfs,tmpfs-size=512M,destination=/path/in/container
type=cache,target=/path/in/container
Common Options:
· src, source: mount source spec for bind and cache. Mandatory for bind. If `from` is specified, `src` is the subpath in the `from` field.
· dst, destination, target: location where the command being run should see the content being mounted.
· ro, read-only: (default true for `type=bind`, false for `type=tmpfs`, `type=cache`).
Options specific to bind:
· bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). <sup>[[1]](#Footnote1)</sup>
· bind-nonrecursive: do not set up a recursive bind mount. By default it is recursive.
· from: image name for the root of the source. Defaults to **--contextdir**, mandatory if **--contextdir** was not specified.
· z: Set shared SELinux label on mounted destination. Use if SELinux is enabled on host machine.
· Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
Options specific to tmpfs:
· tmpfs-size: Size of the tmpfs mount in bytes. Unlimited by default in Linux.
· tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
· tmpcopyup: Path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
Options specific to secret:
· id: the identifier for the secret passed into the `buildah bud --secret` or `podman build --secret` command.
Options specific to cache:
· id: Distinguish this cache from other caches using this ID rather than the target mount path.
· mode: File mode for new cache directory in octal. Default 0755.
· ro, readonly: read only cache if set.
· uid: uid for cache directory.
· gid: gid for cache directory.
· from: stage name for the root of the source. Defaults to host cache directory.
· sharing: Whether other users of this cache need to wait for this command to complete (`sharing=locked`) or not (`sharing=shared`, which is the default).
· z: Set shared SELinux label on mounted destination. Enabled by default if SELinux is enabled on the host machine.
· Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
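As a sketch, a cache mount can persist a compiler cache across invocations (the id and target path are illustrative):
```
buildah run --mount type=cache,id=gocache,target=/root/.cache/go-build containerID -- go build ./...
```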
**--network**, **--net**=*mode*
Sets the configuration for the network namespace for the container.
Valid _mode_ values are:
- **none**: no networking. Invalid if using **--dns**, **--dns-opt**, or **--dns-search**;
- **host**: use the host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure;
- **ns:**_path_: path to a network namespace to join;
- **private**: create a new namespace for the container (default)
- **\<network name|ID\>**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users.
- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options; they can also be set with `network_cmd_options` in containers.conf:
- **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false.
- **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`).
- **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
- **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`).
- **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only).
- **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to.
- **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only).
- **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to.
- **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking
stack. \
This is only supported in rootless mode. \
By default, IPv4 and IPv6 addresses and routes, as well as the pod interface
name, are copied from the host. If port forwarding isn't configured, ports
are forwarded dynamically as services are bound on either side (init
namespace or container namespace). Port forwarding preserves the original
source IP address. Options described in pasta(1) can be specified as
comma-separated arguments. \
In terms of pasta(1) options, **--config-net** is given by default, in
order to configure networking when the container is started, and
**--no-map-gw** is also assumed by default, to avoid direct access from
container to host using the gateway address. The latter can be overridden
by passing **--map-gw** in the pasta-specific options (despite not being an
actual pasta(1) option). \
Also, **-t none** and **-u none** are passed to disable
automatic port forwarding based on bound ports. Similarly, **-T none** and
**-U none** are given to disable the same functionality from container to
host. \
Some examples:
- **pasta:--map-gw**: Allow the container to directly reach the host using the
gateway address.
- **pasta:--mtu,1500**: Specify a 1500 bytes MTU for the _tap_ interface in
the container.
- **pasta:--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,-m,1500,--no-ndp,--no-dhcpv6,--no-dhcp**,
equivalent to default slirp4netns(1) options: disable IPv6, assign
`10.0.2.0/24` to the `tap0` interface in the container, with gateway
`10.0.2.2`, enable DNS forwarder reachable at `10.0.2.3`, set MTU to 1500
bytes, disable NDP, DHCPv6 and DHCP support.
- **pasta:-I,tap0,--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,--no-ndp,--no-dhcpv6,--no-dhcp**,
equivalent to default slirp4netns(1) options with Podman overrides: same as
above, but leave the MTU at 65520 bytes
- **pasta:-t,auto,-u,auto,-T,auto,-U,auto**: enable automatic port forwarding
based on observed bound ports from both host and container sides
- **pasta:-T,5201**: enable forwarding of TCP port 5201 from container to
host, using the loopback interface instead of the tap interface for improved
performance
**--no-hostname**
Do not create the _/etc/hostname_ file in the container for RUN instructions.
By default, Buildah manages the _/etc/hostname_ file, adding the container's own hostname. When the **--no-hostname** option is set, the image's _/etc/hostname_ will be preserved unmodified if it exists.
**--no-hosts**
Do not create the _/etc/hosts_ file in the container for RUN instructions.
By default, Buildah manages _/etc/hosts_, adding the container's own IP address.
**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified.
**--no-pivot**
Do not use pivot root to jail the process inside the rootfs. This should be used
whenever the rootfs is on top of a ramdisk.
Note: You can make this option the default by setting the BUILDAH\_NOPIVOT
environment variable. `export BUILDAH_NOPIVOT=true`
**--pid** *how*
Sets the configuration for the PID namespace for the container.
The configured value can be "" (the empty string) or "private" to indicate
that a new PID namespace should be created, or it can be "host" to indicate
that the PID namespace in which `buildah` itself is being run should be reused,
or it can be the path to a PID namespace which is already in use by another
process.
**--runtime** *path*
The *path* to an alternate OCI-compatible runtime. Default is `runc`, or `crun` when the machine is configured to use cgroups v2.
Note: You can also override the default runtime by setting the BUILDAH\_RUNTIME
environment variable. `export BUILDAH_RUNTIME=/usr/bin/crun`
**--runtime-flag** *flag*
Adds global flags for the container runtime. To list the supported flags, please
consult the manpages of the selected container runtime.
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
to buildah run, the option given would be `--runtime-flag log-format=json`.
**--tty**, **--terminal**, **-t**
By default a pseudo-TTY is allocated only when buildah's standard input is
attached to a pseudo-TTY. Setting the `--tty` option to `true` will cause a
pseudo-TTY to be allocated inside the container connecting the user's "terminal"
with the stdin and stdout stream of the container. Setting the `--tty` option to
`false` will prevent the pseudo-TTY from being allocated.
**--user** *user*[:*group*]
Set the *user* to be used for running the command in the container.
The user can be specified as a user name
or UID, optionally followed by a group name or GID, separated by a colon (':').
If names are used, the container should include entries for those names in its
*/etc/passwd* and */etc/group* files.
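For example, to run a command as a user defined in the container's */etc/passwd* (the user name is illustrative):
```
buildah run --user nobody containerID -- id
```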
**--uts** *how*
Sets the configuration for the UTS namespace for the container.
The configured value can be "" (the empty string) or "private" to indicate
that a new UTS namespace should be created, or it can be "host" to indicate
that the UTS namespace in which `buildah` itself is being run should be reused,
or it can be the path to a UTS namespace which is already in use by another
process.
**--volume**, **-v** *source*:*destination*:*options*
Create a bind mount. If you specify `-v /HOST-DIR:/CONTAINER-DIR`, Buildah
bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah
container. The `OPTIONS` are a comma delimited list and can be:
* [rw|ro]
* [U]
* [z|Z]
* [`[r]shared`|`[r]slave`|`[r]private`] <sup>[[1]](#Footnote1)</sup>
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` you supply,
for example `/foo`, to the `CONTAINER-DIR` you specify inside the container.
You can specify multiple **-v** options to mount one or more mounts to a
container.
`Write Protected Volume Mounts`
You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
read-write mode, respectively. By default, the volumes are mounted read-write.
See examples.
`Chowning Volume Mounts`
By default, Buildah does not change the owner and group of source volume directories mounted into containers. If a container is created in a new user namespace, the UID and GID in the container may correspond to another UID and GID on the host.
The `:U` suffix tells Buildah to use the correct host UID and GID based on the UID and GID within the container, to change the owner and group of the source volume.
`Labeling Volume Mounts`
Labeling systems like SELinux require that proper labels are placed on volume
content mounted into a container. Without a label, the security system might
prevent the processes running inside the container from using the content. By
default, Buildah does not change the labels set by the OS.
To change a label in the container context, you can add either of two suffixes
`:z` or `:Z` to the volume mount. These suffixes tell Buildah to relabel file
objects on the shared volumes. The `z` option tells Buildah that two containers
share the volume content. As a result, Buildah labels the content with a shared
content label. Shared volume labels allow all containers to read/write content.
The `Z` option tells Buildah to label the content with a private unshared label.
Only the current container can use a private volume.
By default bind mounted volumes are `private`. That means any mounts done
inside the container will not be visible on the host and vice versa. This behavior can
be changed by specifying a volume mount propagation property.
When the mount propagation policy is set to `shared`, any mounts completed inside
the container on that volume will be visible to both the host and container. When
the mount propagation policy is set to `slave`, one-way mount propagation is enabled
and any mounts completed on the host for that volume will be visible only inside the container.
To control the mount propagation property of the volume use the `:[r]shared`,
`:[r]slave` or `:[r]private` propagation flag. The propagation property can
be specified only for bind mounted volumes and not for internal volumes or
named volumes. For mount propagation to work on the source mount point (the mount point
where source dir is mounted on) it has to have the right propagation properties. For
shared volumes, the source mount point has to be shared. And for slave volumes,
the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
Use `df <source-dir>` to determine the source mount, and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine the propagation
properties of the source mount. If the `findmnt` utility is not available, the source mount point
can be determined by looking at the mount entry in `/proc/self/mountinfo`. Look
at the `optional fields` and see if any propagation properties are specified.
`shared:X` means the mount is `shared`, `master:X` means the mount is `slave`, and if
nothing is there the mount is `private`. <sup>[[1]](#Footnote1)</sup>
To change propagation properties of a mount point use the `mount` command. For
example, to bind mount the source directory `/foo` do
`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
will convert /foo into a `shared` mount point. The propagation properties of the source
mount can be changed directly. For instance if `/` is the source mount for
`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
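A quick inspection sketch, assuming the `/foo` example above:
```
df /foo                              # find the source mount for /foo
findmnt -o TARGET,PROPAGATION /      # check its propagation properties
sudo mount --make-shared /           # convert the source mount to shared, if needed
```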
**--workingdir** *directory*
Temporarily set the working *directory* for the running process. Unlike
`buildah config --workingdir`, the workingdir will not persist to later
calls to `buildah run` or the built image.
NOTE: End parsing of options with the `--` option, so that other
options can be passed to the command inside of the container.
## EXAMPLE
buildah run containerID -- ps -auxw
buildah run --hostname myhost containerID -- ps -auxw
buildah run containerID -- sh -c 'echo $PATH'
buildah run --runtime-flag log-format=json containerID /bin/bash
buildah run --runtime-flag debug containerID /bin/bash
buildah run --tty containerID /bin/bash
buildah run --tty=false containerID ls /
buildah run --volume /path/on/host:/path/in/container:ro,z containerID sh
buildah run -v /path/on/host:/path/in/container:z,U containerID sh
buildah run --mount type=bind,src=/path/on/host,dst=/path/in/container,ro containerID sh
## SEE ALSO
buildah(1), buildah-from(1), buildah-config(1), namespaces(7), pid\_namespaces(7), crun(1), runc(8), containers.conf(5)
## FOOTNOTES
<a name="Footnote1">1</a>: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately.


@@ -1,21 +0,0 @@
# buildah-source-add "1" "March 2021" "buildah"
## NAME
buildah\-source\-add - Add a source artifact to a source image
## SYNOPSIS
**buildah source add** [*options*] *path* *artifact*
## DESCRIPTION
Adds a source artifact to a source image. The artifact will be added as a
gzip-compressed tarball. Add attempts to auto-tar and auto-compress only if
necessary.
Note that the buildah-source command and all its subcommands are experimental
and may be subject to future changes.
## OPTIONS
**--annotation** *key=value*
Add an annotation to the layer descriptor in the source-image manifest. The input format is `key=value`.
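A minimal usage sketch (the paths and annotation are illustrative):
```
buildah source add --annotation provenance=internal ./my-source-image ./mypackage.src.rpm
```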


@@ -1,24 +0,0 @@
# buildah-source-create "1" "March 2021" "buildah"
## NAME
buildah\-source\-create - Create and initialize a source image
## SYNOPSIS
**buildah source create** [*options*] *path*
## DESCRIPTION
Create and initialize a source image. A source image is an OCI artifact; an
OCI image with a custom config media type.
Note that the buildah-source command and all its subcommands are experimental
and may be subject to future changes.
## OPTIONS
**--author** *author*
Set the author of the source image mentioned in the config. By default, no author is set.
**--time-stamp** *bool-value*
Set the created time stamp in the image config. By default, the time stamp is set.
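For example (the author and path are illustrative):
```
buildah source create --author "Jane Doe <jane@example.com>" ./my-source-image
```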


@@ -1,32 +0,0 @@
# buildah-source-pull "1" "March 2021" "buildah"
## NAME
buildah\-source\-pull - Pull a source image from a registry to a specified path
## SYNOPSIS
**buildah source pull** [*options*] *registry* *path*
## DESCRIPTION
Pull a source image from a registry to a specified path. The pull operation
will fail if the image does not comply with a source-image OCI artifact.
Note that the buildah-source command and all its subcommands are experimental
and may be subject to future changes.
## OPTIONS
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--quiet**, **-q**
Suppress the progress output when pulling a source image.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container
registries (defaults to true). TLS verification cannot be used when talking to
an insecure registry.


@@ -1,35 +0,0 @@
# buildah-source-push "1" "March 2021" "buildah"
## NAME
buildah\-source\-push - Push a source image from a specified path to a registry.
## SYNOPSIS
**buildah source push** [*options*] *path* *registry*
## DESCRIPTION
Push a source image from a specified path to a registry.
Note that the buildah-source command and all its subcommands are experimental
and may be subject to future changes.
## OPTIONS
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
**--digestfile** *digestfile*
After copying the image, write the digest of the resulting image to the file.
**--quiet**, **-q**
Suppress the progress output when pushing a source image.
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container
registries (defaults to true). TLS verification cannot be used when talking to
an insecure registry.
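A minimal push sketch (the path, registry, and credentials are illustrative):
```
buildah source push --creds myusername:mypassword ./my-source-image registry.example.com/sources/mypackage:1.0
```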


@@ -1,31 +0,0 @@
# buildah-source "1" "March 2021" "buildah"
## NAME
buildah\-source - Create, push, pull and manage source images and associated source artifacts
## SYNOPSIS
**buildah source** *subcommand*
## DESCRIPTION
Create, push, pull and manage source images and associated source artifacts. A
source image contains all source artifacts an ordinary OCI image has been built
with. Those artifacts can be any kind of source artifact, such as source RPMs,
an entire source tree or text files.
Note that the buildah-source command and all its subcommands are experimental
and may be subject to future changes.
## COMMANDS
| Command | Man Page | Description |
| -------- | ------------------------------------------------------ | ---------------------------------------------------------- |
| add | [buildah-source-add(1)](buildah-source-add.1.md) | Add a source artifact to a source image. |
| create | [buildah-source-create(1)](buildah-source-create.1.md) | Create and initialize a source image. |
| pull | [buildah-source-pull(1)](buildah-source-pull.1.md) | Pull a source image from a registry to a specified path. |
| push | [buildah-source-push(1)](buildah-source-push.1.md) | Push a source image from a specified path to a registry. |
## SEE ALSO
buildah(1)
## HISTORY
June 2021, Originally compiled by Valentin Rothberg <vrothber@redhat.com>


@@ -1,19 +0,0 @@
# buildah-tag "1" "May 2017" "buildah"
## NAME
buildah\-tag - Add additional names to local images.
## SYNOPSIS
**buildah tag** *name* *new-name* ...
## DESCRIPTION
Adds additional names to locally-stored images.
## EXAMPLE
buildah tag imageName firstNewName
buildah tag imageName firstNewName SecondNewName
## SEE ALSO
buildah(1)


@@ -1,27 +0,0 @@
# buildah-umount "1" "March 2017" "buildah"
## NAME
buildah\-umount - Unmount the root file system on the specified working containers.
## SYNOPSIS
**buildah umount** [*options*] [*container* ...]
## DESCRIPTION
Unmounts the root file system on the specified working containers.
## OPTIONS
**--all**, **-a**
All of the currently mounted containers will be unmounted.
## EXAMPLE
buildah umount containerID
buildah umount containerID1 containerID2 containerID3
buildah umount --all
## SEE ALSO
buildah(1), buildah-mount(1)


@@ -1,63 +0,0 @@
# buildah-unshare "1" "June 2018" "buildah"
## NAME
buildah\-unshare - Run a command inside of a modified user namespace.
## SYNOPSIS
**buildah unshare** [*options*] [**--**] [*command*]
## DESCRIPTION
Launches a process (by default, *$SHELL*) in a new user namespace. The user
namespace is configured so that the invoking user's UID and primary GID appear
to be UID 0 and GID 0, respectively. Any ranges which match that user and
group in /etc/subuid and /etc/subgid are also mapped in as themselves with the
help of the *newuidmap(1)* and *newgidmap(1)* helpers.
buildah unshare is useful for troubleshooting unprivileged operations and for
manually clearing storage and other data related to images and containers.
It is also useful if you want to use the `buildah mount` command. If an unprivileged user wants to mount and work with a container, then they need to execute
buildah unshare. Executing `buildah mount` fails for unprivileged users unless the user is running inside a `buildah unshare` session.
## OPTIONS
**--mount**, **-m** [*VARIABLE=]containerNameOrID*
Mount the *containerNameOrID* container while running *command*, and set the
environment variable *VARIABLE* to the path of the mountpoint. If *VARIABLE*
is not specified, it defaults to *containerNameOrID*, which may not be a valid
name for an environment variable.
## EXAMPLE
buildah unshare id
buildah unshare pwd
buildah unshare cat /proc/self/uid\_map /proc/self/gid\_map
buildah unshare rm -fr $HOME/.local/share/containers/storage /run/user/\`id -u\`/run
buildah unshare --mount containerID sh -c 'cat ${containerID}/etc/os-release'
If you want to use buildah with a mount command then you can create a script that looks something like:
```
cat buildah-script.sh << _EOF
#!/bin/sh
ctr=$(buildah from scratch)
mnt=$(buildah mount $ctr)
dnf -y install --installroot=$mnt PACKAGES
dnf -y clean all --installroot=$mnt
buildah config --entrypoint="/bin/PACKAGE" --env "FOO=BAR" $ctr
buildah commit $ctr imagename
buildah unmount $ctr
_EOF
```
Then execute it with:
```
buildah unshare buildah-script.sh
```
## SEE ALSO
buildah(1), buildah-mount(1), namespaces(7), newuidmap(1), newgidmap(1), user\_namespaces(7)

View File

@@ -1,31 +0,0 @@
# buildah-version "1" "June 2017" "Buildah"
## NAME
buildah\-version - Display the Buildah Version Information.
## SYNOPSIS
**buildah version** [*options*]
## DESCRIPTION
Shows the following information: Version, Go Version, Image Spec, Runtime Spec, CNI Spec, libcni Version, Git Commit, Build Time, OS, and Architecture.
## OPTIONS
**--help, -h**
Print usage statement
**--json**
Output in JSON format.
## EXAMPLE
buildah version
buildah version --help
buildah version -h
## SEE ALSO
buildah(1)

View File

@@ -1,207 +0,0 @@
# buildah "1" "March 2017" "buildah"
## NAME
Buildah - A command line tool that facilitates building OCI container images.
## SYNOPSIS
buildah [OPTIONS] COMMAND [ARG...]
## DESCRIPTION
The Buildah package provides a command line tool which can be used to:
* Create a working container, either from scratch or using an image as a starting point.
* Mount a working container's root filesystem for manipulation.
* Unmount a working container's root filesystem.
* Use the updated contents of a container's root filesystem as a filesystem layer to create a new image.
* Delete a working container or an image.
* Rename a local container.
## OPTIONS
**--cgroup-manager**=*manager*
The CGroup manager to use for container cgroups. Supported values are cgroupfs or systemd. Default is systemd unless overridden in the containers.conf file.
Note: Setting this flag can cause certain commands to break when called on containers previously created by the other CGroup manager type.
Note: CGroup manager is not supported in rootless mode when using CGroups Version V1.
**--log-level** **value**
The log level to be used. Either "trace", "debug", "info", "warn", "error", "fatal", or "panic", defaulting to "warn".
**--help, -h**
Show help
**--registries-conf** *path*
Pathname of the configuration file which specifies which container registries should be
consulted when completing image names which do not include a registry or domain
portion. It is not recommended that this option be used, as the default
behavior of using the system-wide configuration
(*/etc/containers/registries.conf*) is most often preferred.
**--registries-conf-dir** *path*
Pathname of the directory which contains configuration snippets which specify
registries which should be consulted when completing image names which do not
include a registry or domain portion. It is not recommended that this option
be used, as the default behavior of using the system-wide configuration
(*/etc/containers/registries.d*) is most often preferred.
**--root** **value**
Storage root dir (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users)
Default root dir is configured in /etc/containers/storage.conf
**--runroot** **value**
Storage state dir (default: "/run/containers/storage" for UID 0, "/run/user/$UID" for other users)
Default state dir is configured in /etc/containers/storage.conf
**--short-name-alias-conf** *path*
Pathname of the file which contains cached mappings between short image names
and their corresponding fully-qualified names. It is used for mapping from
names of images specified using short names like "ubi8" which don't
include a registry component and a corresponding fully-specified name which
includes a registry and any other components, such as
"registry.access.redhat.com/ubi8". It is not recommended that this option be
used, as the default behavior of using the system-wide cache
(*/var/cache/containers/short-name-aliases.conf*) or per-user cache
(*$HOME/.cache/containers/short-name-aliases.conf*) to supplement system-wide
defaults is most often preferred.
**--storage-driver** **value**
Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for other users. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
Examples: "overlay", "vfs"
Overriding this option will cause the *storage-opt* settings in /etc/containers/storage.conf to be ignored. The user must
specify additional options via the `--storage-opt` flag.
**--storage-opt** **value**
Storage driver option. Default storage driver options are configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode). The `STORAGE_OPTS` environment variable overrides the default. The --storage-opt specified options override all.
**--userns-gid-map** *mapping*
Directly specifies a GID mapping which should be used to set ownership, at the
filesystem level, on the working container's contents.
Commands run when handling `RUN` instructions will default to being run in
their own user namespaces, configured using the UID and GID maps.
Entries in this map take the form of one or more colon-separated triples of a starting
in-container GID, a corresponding starting host-level GID, and the number of
consecutive IDs which the map entry represents.
This option overrides the *remap-gids* setting in the *options* section of
/etc/containers/storage.conf.
If this option is not specified, but a global --userns-gid-map setting is
supplied, settings from the global option will be used.
If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-gid-map
are specified, but --userns-uid-map is specified, the GID map will be set to
use the same numeric values as the UID map.
**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
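For example, a hypothetical mapping of `--userns-gid-map 0:100000:65536` maps in-container GID 0 to host GID 100000 for 65536 consecutive IDs; the same triple format applies to `--userns-uid-map` below.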
**--userns-uid-map** *mapping*
Directly specifies a UID mapping which should be used to set ownership, at the
filesystem level, on the working container's contents.
Commands run when handling `RUN` instructions will default to being run in
their own user namespaces, configured using the UID and GID maps.
Entries in this map take the form of one or more colon-separated triples of a starting
in-container UID, a corresponding starting host-level UID, and the number of
consecutive IDs which the map entry represents.
This option overrides the *remap-uids* setting in the *options* section of
/etc/containers/storage.conf.
If this option is not specified, but a global --userns-uid-map setting is
supplied, settings from the global option will be used.
If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-uid-map
are specified, but --userns-gid-map is specified, the UID map will be set to
use the same numeric values as the GID map.
**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
**--version**, **-v**
Print the version
## Environment Variables
Buildah can set up environment variables from the env entry in the [engine] table in the containers.conf(5). These variables can be overridden by passing environment variables before the `buildah` commands.
## COMMANDS
| Command | Man Page | Description |
| ---------- | ------------------------------------------------ | ---------------------------------------------------------------------------------------------------- |
| add | [buildah-add(1)](buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. |
| build | [buildah-build(1)](buildah-build.1.md) | Builds an OCI image using instructions in one or more Containerfiles. |
| commit | [buildah-commit(1)](buildah-commit.1.md) | Create an image from a working container. |
| config | [buildah-config(1)](buildah-config.1.md) | Update image configuration settings. |
| containers | [buildah-containers(1)](buildah-containers.1.md) | List the working containers and their base images. |
| copy | [buildah-copy(1)](buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| from | [buildah-from(1)](buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| images | [buildah-images(1)](buildah-images.1.md) | List images in local storage. |
| info | [buildah-info(1)](buildah-info.1.md) | Display Buildah system information. |
| inspect | [buildah-inspect(1)](buildah-inspect.1.md) | Inspects the configuration of a container or image |
| login | [buildah-login(1)](buildah-login.1.md) | Login to a container registry. |
| logout | [buildah-logout(1)](buildah-logout.1.md) | Logout of a container registry |
| manifest | [buildah-manifest(1)](buildah-manifest.1.md) | Create and manipulate manifest lists and image indexes. |
| mkcw | [buildah-mkcw(1)](buildah-mkcw.1.md) | Convert a conventional container image into a confidential workload image. |
| mount | [buildah-mount(1)](buildah-mount.1.md) | Mount the working container's root filesystem. |
| prune | [buildah-prune(1)](buildah-prune.1.md) | Cleanup intermediate images as well as build and mount cache. |
| pull | [buildah-pull(1)](buildah-pull.1.md) | Pull an image from the specified location. |
| push | [buildah-push(1)](buildah-push.1.md) | Push an image from local storage to elsewhere. |
| rename | [buildah-rename(1)](buildah-rename.1.md) | Rename a local container. |
| rm | [buildah-rm(1)](buildah-rm.1.md) | Removes one or more working containers. |
| rmi | [buildah-rmi(1)](buildah-rmi.1.md) | Removes one or more images. |
| run | [buildah-run(1)](buildah-run.1.md) | Run a command inside of the container. |
| source | [buildah-source(1)](buildah-source.1.md) | Create, push, pull and manage source images and associated source artifacts. |
| tag | [buildah-tag(1)](buildah-tag.1.md) | Add an additional name to a local image. |
| umount | [buildah-umount(1)](buildah-umount.1.md) | Unmount a working container's root file system. |
| unshare | [buildah-unshare(1)](buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. |
| version | [buildah-version(1)](buildah-version.1.md) | Display the Buildah Version Information |
## Files
**storage.conf** (`/etc/containers/storage.conf`)
storage.conf is the storage configuration file for all tools using containers/storage
The storage configuration file specifies all of the available container storage options for tools using shared container storage.
**mounts.conf** (`/usr/share/containers/mounts.conf` and optionally `/etc/containers/mounts.conf`)
The mounts.conf files specify volume mount files or directories that are automatically mounted inside containers when executing the `buildah run` or `buildah build` commands. Container processes can then use this content. The volume mount content does not get committed to the final image.
Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories.
For example, given a mounts.conf entry "`/usr/share/rhel/secrets:/run/secrets`", the contents of the `/usr/share/rhel/secrets` directory are mounted on `/run/secrets` inside the container. This mountpoint allows Red Hat Enterprise Linux subscriptions from the host to be used within the container. The destination may be omitted if it is equal to the source path; for example, specifying `/var/lib/secrets` will mount that directory at the same path `/var/lib/secrets` inside the container.
Note this is not a volume mount. The content of the volumes is copied into container storage, not bind mounted directly from the host.
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
**registries.d** (`/etc/containers/registries.d`)
Directory which contains configuration snippets which specify registries which should be consulted when completing image names which do not include a registry or domain portion.
## SEE ALSO
containers.conf(5), containers-mounts.conf(5), newuidmap(1), newgidmap(1), containers-registries.conf(5), containers-storage.conf(5)
## HISTORY
December 2017, Originally compiled by Tom Sweeney <tsweeney@redhat.com>

View File

@@ -1,851 +0,0 @@
use crate::process::CommandResult;
use crate::virt::buildah::{execute_buildah_command, BuildahError, Image, thread_local_debug, set_thread_local_debug};
use std::collections::HashMap;
/// Builder struct for buildah operations
#[derive(Clone)]
pub struct Builder {
/// Name of the container
name: String,
/// Container ID
container_id: Option<String>,
/// Base image
image: String,
/// Debug mode
debug: bool,
}
impl Builder {
/// Create a new builder with a container from the specified image
///
/// # Arguments
///
/// * `name` - Name for the container
/// * `image` - Image to create the container from
///
/// # Returns
///
/// * `Result<Self, BuildahError>` - Builder instance or error
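///
/// # Example
///
/// A minimal usage sketch (illustrative names; assumes buildah is installed):
///
/// ```no_run
/// use sal::virt::buildah::Builder;
///
/// let builder = Builder::new("my-container", "alpine:latest")
///     .expect("failed to create builder");
/// ```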
pub fn new(name: &str, image: &str) -> Result<Self, BuildahError> {
// Try to create a new container
let result = execute_buildah_command(&["from", "--name", name, image]);
match result {
Ok(success_result) => {
// Container created successfully
let container_id = success_result.stdout.trim().to_string();
Ok(Self {
name: name.to_string(),
container_id: Some(container_id),
image: image.to_string(),
debug: false,
})
},
Err(BuildahError::CommandFailed(error_msg)) => {
// Check if the error is because the container already exists
if error_msg.contains("that name is already in use") {
// Extract the container ID from the error message
// Error format: "the container name "name" is already in use by container_id. You have to remove that container to be able to reuse that name: that name is already in use"
let container_id = error_msg
.split("already in use by ")
.nth(1)
.and_then(|s| s.split('.').next())
.unwrap_or("")
.trim()
.to_string();
if !container_id.is_empty() {
// Container already exists, continue with it
Ok(Self {
name: name.to_string(),
container_id: Some(container_id),
image: image.to_string(),
debug: false,
})
} else {
// Couldn't extract container ID
Err(BuildahError::Other("Failed to extract container ID from error message".to_string()))
}
} else {
// Other command failure
Err(BuildahError::CommandFailed(error_msg))
}
},
Err(e) => {
// Other error
Err(e)
}
}
}
/// Get the container ID
pub fn container_id(&self) -> Option<&String> {
self.container_id.as_ref()
}
/// Get the container name
pub fn name(&self) -> &str {
&self.name
}
/// Get the debug mode
pub fn debug(&self) -> bool {
self.debug
}
/// Set the debug mode
pub fn set_debug(&mut self, debug: bool) -> &mut Self {
self.debug = debug;
self
}
/// Get the base image
pub fn image(&self) -> &str {
&self.image
}
/// Run a command in the container
///
/// # Arguments
///
/// * `command` - The command to run
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
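///
/// # Example
///
/// A sketch; `builder` is a previously created `Builder` (names are illustrative):
///
/// ```no_run
/// # use sal::virt::buildah::Builder;
/// # let builder = Builder::new("my-container", "alpine:latest").unwrap();
/// let result = builder.run("echo hello").expect("run failed");
/// println!("{}", result.stdout);
/// ```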
pub fn run(&self, command: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["run", container_id, "sh", "-c", command]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Run a command in the container with specified isolation
///
/// # Arguments
///
/// * `command` - The command to run
/// * `isolation` - Isolation method (e.g., "chroot", "rootless", "oci")
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
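///
/// # Example
///
/// A sketch running under chroot isolation (names are illustrative):
///
/// ```no_run
/// # use sal::virt::buildah::Builder;
/// # let builder = Builder::new("my-container", "alpine:latest").unwrap();
/// builder.run_with_isolation("echo hello", "chroot").expect("run failed");
/// ```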
pub fn run_with_isolation(&self, command: &str, isolation: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["run", "--isolation", isolation, container_id, "sh", "-c", command]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Copy files into the container
///
/// # Arguments
///
/// * `source` - Source path
/// * `dest` - Destination path in the container
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn copy(&self, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["copy", container_id, source, dest]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Add files into the container
///
/// # Arguments
///
/// * `source` - Source path
/// * `dest` - Destination path in the container
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn add(&self, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["add", container_id, source, dest]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Commit the container to an image
///
/// # Arguments
///
/// * `image_name` - Name for the new image
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn commit(&self, image_name: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["commit", container_id, image_name]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Remove the container
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn remove(&self) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["rm", container_id]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Reset the builder by removing the container and clearing the container_id
///
/// # Returns
///
/// * `Result<(), BuildahError>` - Success or error
pub fn reset(&mut self) -> Result<(), BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Try to remove the container
let result = execute_buildah_command(&["rm", container_id]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
// Clear the container_id regardless of whether the removal succeeded
self.container_id = None;
// Return the result of the removal operation
match result {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
} else {
// No container to remove
Ok(())
}
}
/// Configure container metadata
///
/// # Arguments
///
/// * `options` - Map of configuration options
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
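///
/// # Example
///
/// A sketch; map keys become `--<key>` flags, here the real buildah flags `--author` and `--label`:
///
/// ```no_run
/// # use std::collections::HashMap;
/// # use sal::virt::buildah::Builder;
/// # let builder = Builder::new("my-container", "alpine:latest").unwrap();
/// let mut options = HashMap::new();
/// options.insert("author".to_string(), "Jane Doe".to_string());
/// options.insert("label".to_string(), "version=1.0".to_string());
/// builder.config(options).expect("config failed");
/// ```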
pub fn config(&self, options: HashMap<String, String>) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
let mut args_owned: Vec<String> = Vec::new();
args_owned.push("config".to_string());
// Process options map
for (key, value) in options.iter() {
let option_name = format!("--{}", key);
args_owned.push(option_name);
args_owned.push(value.clone());
}
args_owned.push(container_id.clone());
// Convert Vec<String> to Vec<&str> for execute_buildah_command
let args: Vec<&str> = args_owned.iter().map(|s| s.as_str()).collect();
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&args);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Set the entrypoint for the container
///
/// # Arguments
///
/// * `entrypoint` - The entrypoint command
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn set_entrypoint(&self, entrypoint: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["config", "--entrypoint", entrypoint, container_id]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// Set the default command for the container
///
/// # Arguments
///
/// * `cmd` - The default command
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn set_cmd(&self, cmd: &str) -> Result<CommandResult, BuildahError> {
if let Some(container_id) = &self.container_id {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag from the Builder's debug flag
set_thread_local_debug(self.debug);
// Execute the command
let result = execute_buildah_command(&["config", "--cmd", cmd, container_id]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
} else {
Err(BuildahError::Other("No container ID available".to_string()))
}
}
/// List images in local storage
///
/// # Returns
///
/// * `Result<Vec<Image>, BuildahError>` - List of images or error
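///
/// # Example
///
/// A sketch listing locally stored images:
///
/// ```no_run
/// # use sal::virt::buildah::Builder;
/// for image in Builder::images().expect("failed to list images") {
///     println!("{} {:?}", image.id, image.names);
/// }
/// ```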
pub fn images() -> Result<Vec<Image>, BuildahError> {
// Use default debug value (false) for static method
let result = execute_buildah_command(&["images", "--json"])?;
// Try to parse the JSON output
match serde_json::from_str::<serde_json::Value>(&result.stdout) {
Ok(json) => {
if let serde_json::Value::Array(images_json) = json {
let mut images = Vec::new();
for image_json in images_json {
// Extract image ID
let id = match image_json.get("id").and_then(|v| v.as_str()) {
Some(id) => id.to_string(),
None => return Err(BuildahError::ConversionError("Missing image ID".to_string())),
};
// Extract image names
let names = match image_json.get("names").and_then(|v| v.as_array()) {
Some(names_array) => {
let mut names_vec = Vec::new();
for name_value in names_array {
if let Some(name_str) = name_value.as_str() {
names_vec.push(name_str.to_string());
}
}
names_vec
},
None => Vec::new(), // Empty vector if no names found
};
// Extract image size
let size = match image_json.get("size").and_then(|v| v.as_str()) {
Some(size) => size.to_string(),
None => "Unknown".to_string(), // Default value if size not found
};
// Extract creation timestamp
let created = match image_json.get("created").and_then(|v| v.as_str()) {
Some(created) => created.to_string(),
None => "Unknown".to_string(), // Default value if created not found
};
// Create Image struct and add to vector
images.push(Image {
id,
names,
size,
created,
});
}
Ok(images)
} else {
Err(BuildahError::JsonParseError("Expected JSON array".to_string()))
}
},
Err(e) => {
Err(BuildahError::JsonParseError(format!("Failed to parse image list JSON: {}", e)))
}
}
}
/// Remove an image
///
/// # Arguments
///
/// * `image` - Image ID or name
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_remove(image: &str) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
execute_buildah_command(&["rmi", image])
}
/// Remove an image with debug output
///
/// # Arguments
///
/// * `image` - Image ID or name
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_remove_with_debug(image: &str, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
// Execute the command
let result = execute_buildah_command(&["rmi", image]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
/// Pull an image from a registry
///
/// # Arguments
///
/// * `image` - Image name
/// * `tls_verify` - Whether to verify TLS
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
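///
/// # Example
///
/// A sketch pulling a public image (name is illustrative):
///
/// ```no_run
/// # use sal::virt::buildah::Builder;
/// Builder::image_pull("docker.io/library/alpine:latest", true).expect("pull failed");
/// ```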
pub fn image_pull(image: &str, tls_verify: bool) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
let mut args = vec!["pull"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
execute_buildah_command(&args)
}
/// Pull an image from a registry with debug output
///
/// # Arguments
///
/// * `image` - Image name
/// * `tls_verify` - Whether to verify TLS
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_pull_with_debug(image: &str, tls_verify: bool, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
let mut args = vec!["pull"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
// Execute the command
let result = execute_buildah_command(&args);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
/// Push an image to a registry
///
/// # Arguments
///
/// * `image` - Image name
/// * `destination` - Destination registry
/// * `tls_verify` - Whether to verify TLS
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_push(image: &str, destination: &str, tls_verify: bool) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
let mut args = vec!["push"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
args.push(destination);
execute_buildah_command(&args)
}
/// Push an image to a registry with debug output
///
/// # Arguments
///
/// * `image` - Image name
/// * `destination` - Destination registry
/// * `tls_verify` - Whether to verify TLS
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_push_with_debug(image: &str, destination: &str, tls_verify: bool, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
let mut args = vec!["push"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
args.push(destination);
// Execute the command
let result = execute_buildah_command(&args);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
/// Tag an image
///
/// # Arguments
///
/// * `image` - Image ID or name
/// * `new_name` - New tag for the image
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_tag(image: &str, new_name: &str) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
execute_buildah_command(&["tag", image, new_name])
}
/// Tag an image with debug output
///
/// # Arguments
///
/// * `image` - Image ID or name
/// * `new_name` - New tag for the image
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_tag_with_debug(image: &str, new_name: &str, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
// Execute the command
let result = execute_buildah_command(&["tag", image, new_name]);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
/// Commit a container to an image with advanced options
///
/// # Arguments
///
/// * `container` - Container ID or name
/// * `image_name` - Name for the new image
/// * `format` - Optional format (oci or docker)
/// * `squash` - Whether to squash layers
/// * `rm` - Whether to remove the container after commit
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_commit(container: &str, image_name: &str, format: Option<&str>, squash: bool, rm: bool) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
let mut args = vec!["commit"];
if let Some(format_str) = format {
args.push("--format");
args.push(format_str);
}
if squash {
args.push("--squash");
}
if rm {
args.push("--rm");
}
args.push(container);
args.push(image_name);
execute_buildah_command(&args)
}
/// Commit a container to an image with advanced options and debug output
///
/// # Arguments
///
/// * `container` - Container ID or name
/// * `image_name` - Name for the new image
/// * `format` - Optional format (oci or docker)
/// * `squash` - Whether to squash layers
/// * `rm` - Whether to remove the container after commit
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn image_commit_with_debug(container: &str, image_name: &str, format: Option<&str>, squash: bool, rm: bool, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
let mut args = vec!["commit"];
if let Some(format_str) = format {
args.push("--format");
args.push(format_str);
}
if squash {
args.push("--squash");
}
if rm {
args.push("--rm");
}
args.push(container);
args.push(image_name);
// Execute the command
let result = execute_buildah_command(&args);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
/// Build an image from a Containerfile/Dockerfile
///
/// # Arguments
///
/// * `tag` - Optional tag for the image
/// * `context_dir` - Directory containing the Containerfile/Dockerfile
/// * `file` - Path to the Containerfile/Dockerfile
/// * `isolation` - Optional isolation method
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
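///
/// # Example
///
/// A sketch building from a Containerfile in the current directory (tag is illustrative):
///
/// ```no_run
/// # use sal::virt::buildah::Builder;
/// Builder::build(Some("my-app:latest"), ".", "Dockerfile", Some("chroot"))
///     .expect("build failed");
/// ```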
pub fn build(tag: Option<&str>, context_dir: &str, file: &str, isolation: Option<&str>) -> Result<CommandResult, BuildahError> {
// Use default debug value (false) for static method
let mut args = Vec::new();
args.push("build");
if let Some(tag_value) = tag {
args.push("-t");
args.push(tag_value);
}
if let Some(isolation_value) = isolation {
args.push("--isolation");
args.push(isolation_value);
}
args.push("-f");
args.push(file);
args.push(context_dir);
execute_buildah_command(&args)
}
/// Build an image from a Containerfile/Dockerfile with debug output
///
/// # Arguments
///
/// * `tag` - Optional tag for the image
/// * `context_dir` - Directory containing the Containerfile/Dockerfile
/// * `file` - Path to the Containerfile/Dockerfile
/// * `isolation` - Optional isolation method
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
pub fn build_with_debug(tag: Option<&str>, context_dir: &str, file: &str, isolation: Option<&str>, debug: bool) -> Result<CommandResult, BuildahError> {
// Save the current debug flag
let previous_debug = thread_local_debug();
// Set the thread-local debug flag
set_thread_local_debug(debug);
let mut args = Vec::new();
args.push("build");
if let Some(tag_value) = tag {
args.push("-t");
args.push(tag_value);
}
if let Some(isolation_value) = isolation {
args.push("--isolation");
args.push(isolation_value);
}
args.push("-f");
args.push(file);
args.push(context_dir);
// Execute the command
let result = execute_buildah_command(&args);
// Restore the previous debug flag
set_thread_local_debug(previous_debug);
result
}
}

View File

@@ -1,95 +0,0 @@
// Basic buildah operations for container management
use std::process::Command;
use crate::process::CommandResult;
use super::BuildahError;
/// Execute a buildah command and return the result
///
/// # Arguments
///
/// * `args` - The command arguments
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
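///
/// # Example
///
/// A sketch (assumes the `buildah` binary is on PATH):
///
/// ```no_run
/// # use sal::virt::buildah::execute_buildah_command;
/// let result = execute_buildah_command(&["images", "--json"]).expect("buildah failed");
/// println!("{}", result.stdout);
/// ```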
pub fn execute_buildah_command(args: &[&str]) -> Result<CommandResult, BuildahError> {
// Get the debug flag from thread-local storage
let debug = thread_local_debug();
if debug {
println!("Executing buildah command: buildah {}", args.join(" "));
}
let output = Command::new("buildah")
.args(args)
.output();
match output {
Ok(output) => {
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
let result = CommandResult {
stdout,
stderr,
success: output.status.success(),
code: output.status.code().unwrap_or(-1),
};
// Always output stdout/stderr when debug is true
if debug {
if !result.stdout.is_empty() {
println!("Command stdout: {}", result.stdout);
}
if !result.stderr.is_empty() {
println!("Command stderr: {}", result.stderr);
}
if result.success {
println!("Command succeeded with code {}", result.code);
} else {
println!("Command failed with code {}", result.code);
}
}
if result.success {
Ok(result)
} else {
// If command failed and debug is false, output stderr
if !debug {
println!("Command failed with code {}: {}", result.code, result.stderr.trim());
}
Err(BuildahError::CommandFailed(format!("Command failed with code {}: {}",
result.code, result.stderr.trim())))
}
},
Err(e) => {
// Always output error information
println!("Command execution failed: {}", e);
Err(BuildahError::CommandExecutionFailed(e))
}
}
}
// Thread-local storage for debug flag
thread_local! {
static DEBUG: std::cell::RefCell<bool> = std::cell::RefCell::new(false);
}
/// Set the debug flag for the current thread
pub fn set_thread_local_debug(debug: bool) {
DEBUG.with(|cell| {
*cell.borrow_mut() = debug;
});
}
/// Get the debug flag for the current thread
pub fn thread_local_debug() -> bool {
DEBUG.with(|cell| {
*cell.borrow()
})
}
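// Example: temporarily enabling debug output around a single command (a sketch
// of the save/set/restore pattern used throughout the Builder methods):
//
//     let previous = thread_local_debug();
//     set_thread_local_debug(true);
//     let _ = execute_buildah_command(&["images"]);
//     set_thread_local_debug(previous);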
// This function is no longer needed as the debug functionality is now integrated into execute_buildah_command

View File

@@ -1,84 +0,0 @@
use crate::virt::buildah::execute_buildah_command;
use crate::process::CommandResult;
use super::BuildahError;
/// Create a container from an image
pub fn from(image: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["from", image])
}
/// Run a command in a container
///
/// # Arguments
///
/// * `container` - The container ID or name
/// * `command` - The command to run
pub fn run(container: &str, command: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["run", container, "sh", "-c", command])
}
/// Run a command in a container with specified isolation
///
/// # Arguments
///
/// * `container` - The container ID or name
/// * `command` - The command to run
/// * `isolation` - Isolation method (e.g., "chroot", "rootless", "oci")
pub fn bah_run_with_isolation(container: &str, command: &str, isolation: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["run", "--isolation", isolation, container, "sh", "-c", command])
}
/// Copy files into a container
pub fn bah_copy(container: &str, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["copy", container, source, dest])
}
pub fn bah_add(container: &str, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["add", container, source, dest])
}
/// Commit a container to an image
pub fn bah_commit(container: &str, image_name: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["commit", container, image_name])
}
/// Remove a container
pub fn bah_remove(container: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["rm", container])
}
/// List containers
pub fn bah_list() -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["containers"])
}
/// Build an image from a Containerfile/Dockerfile
///
/// # Arguments
///
/// * `tag` - Optional tag for the image (e.g., "my-app:latest")
/// * `context_dir` - The directory containing the Containerfile/Dockerfile (usually ".")
/// * `file` - Optional path to a specific Containerfile/Dockerfile
/// * `isolation` - Optional isolation method (e.g., "chroot", "rootless", "oci")
pub fn bah_build(tag: Option<&str>, context_dir: &str, file: &str, isolation: Option<&str>) -> Result<CommandResult, BuildahError> {
let mut args = Vec::new();
args.push("build");
if let Some(tag_value) = tag {
args.push("-t");
args.push(tag_value);
}
if let Some(isolation_value) = isolation {
args.push("--isolation");
args.push(isolation_value);
}
args.push("-f");
args.push(file);
args.push(context_dir);
execute_buildah_command(&args)
}

View File

@@ -1,276 +0,0 @@
#[cfg(test)]
mod tests {
use crate::process::CommandResult;
use crate::virt::buildah::BuildahError;
use std::sync::Mutex;
use lazy_static::lazy_static;
// Create a test-specific implementation of the containers module functions
// that we can use to verify the correct arguments are passed
lazy_static! {
static ref LAST_COMMAND: Mutex<Vec<String>> = Mutex::new(Vec::new());
static ref SHOULD_FAIL: Mutex<bool> = Mutex::new(false);
static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); // Add a mutex for test synchronization
}
fn reset_test_state() {
let mut cmd = LAST_COMMAND.lock().unwrap();
cmd.clear();
let mut fail = SHOULD_FAIL.lock().unwrap();
*fail = false;
}
fn set_should_fail(fail: bool) {
let mut should_fail = SHOULD_FAIL.lock().unwrap();
*should_fail = fail;
}
fn get_last_command() -> Vec<String> {
let cmd = LAST_COMMAND.lock().unwrap();
cmd.clone()
}
// Test-specific implementation of execute_buildah_command
fn test_execute_buildah_command(args: &[&str]) -> Result<CommandResult, BuildahError> {
// Record the command
{
let mut cmd = LAST_COMMAND.lock().unwrap();
cmd.clear();
for arg in args {
cmd.push(arg.to_string());
}
}
// Check if we should fail
let should_fail = {
let fail = SHOULD_FAIL.lock().unwrap();
*fail
};
if should_fail {
Err(BuildahError::CommandFailed("Command failed".to_string()))
} else {
Ok(CommandResult {
stdout: "mock stdout".to_string(),
stderr: "".to_string(),
success: true,
code: 0,
})
}
}
// Test implementations of the container functions
fn test_from(image: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["from", image])
}
fn test_run(container: &str, command: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["run", container, "sh", "-c", command])
}
fn test_bah_run_with_isolation(container: &str, command: &str, isolation: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["run", "--isolation", isolation, container, "sh", "-c", command])
}
fn test_bah_copy(container: &str, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["copy", container, source, dest])
}
fn test_bah_add(container: &str, source: &str, dest: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["add", container, source, dest])
}
fn test_bah_commit(container: &str, image_name: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["commit", container, image_name])
}
fn test_bah_remove(container: &str) -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["rm", container])
}
fn test_bah_list() -> Result<CommandResult, BuildahError> {
test_execute_buildah_command(&["containers"])
}
fn test_bah_build(tag: Option<&str>, context_dir: &str, file: &str, isolation: Option<&str>) -> Result<CommandResult, BuildahError> {
let mut args = Vec::new();
args.push("build");
if let Some(tag_value) = tag {
args.push("-t");
args.push(tag_value);
}
if let Some(isolation_value) = isolation {
args.push("--isolation");
args.push(isolation_value);
}
args.push("-f");
args.push(file);
args.push(context_dir);
test_execute_buildah_command(&args)
}
// Tests for each function
#[test]
fn test_from_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let image = "alpine:latest";
let result = test_from(image);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["from", "alpine:latest"]);
}
#[test]
fn test_run_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let command = "echo hello";
// Test without isolation
let result = test_run(container, command);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["run", "my-container", "sh", "-c", "echo hello"]);
}
#[test]
fn test_bah_run_with_isolation_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let command = "echo hello";
let isolation = "chroot";
let result = test_bah_run_with_isolation(container, command, isolation);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["run", "--isolation", "chroot", "my-container", "sh", "-c", "echo hello"]);
}
#[test]
fn test_bah_copy_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let source = "/local/path";
let dest = "/container/path";
let result = test_bah_copy(container, source, dest);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["copy", "my-container", "/local/path", "/container/path"]);
}
#[test]
fn test_bah_add_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let source = "/local/path";
let dest = "/container/path";
let result = test_bah_add(container, source, dest);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["add", "my-container", "/local/path", "/container/path"]);
}
#[test]
fn test_bah_commit_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let image_name = "my-image:latest";
let result = test_bah_commit(container, image_name);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["commit", "my-container", "my-image:latest"]);
}
#[test]
fn test_bah_remove_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let container = "my-container";
let result = test_bah_remove(container);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["rm", "my-container"]);
}
#[test]
fn test_bah_list_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
let result = test_bah_list();
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["containers"]);
}
#[test]
fn test_bah_build_function() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
// Test with tag, context directory, file, and no isolation
let result = test_bah_build(Some("my-app:latest"), ".", "Dockerfile", None);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["build", "-t", "my-app:latest", "-f", "Dockerfile", "."]);
reset_test_state(); // Reset state between sub-tests
// Test with tag, context directory, file, and isolation
let result = test_bah_build(Some("my-app:latest"), ".", "Dockerfile.custom", Some("chroot"));
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["build", "-t", "my-app:latest", "--isolation", "chroot", "-f", "Dockerfile.custom", "."]);
reset_test_state(); // Reset state between sub-tests
// Test with just context directory and file
let result = test_bah_build(None, ".", "Dockerfile", None);
assert!(result.is_ok());
let cmd = get_last_command();
assert_eq!(cmd, vec!["build", "-f", "Dockerfile", "."]);
}
#[test]
fn test_error_handling() {
let _lock = TEST_MUTEX.lock().unwrap(); // Acquire lock for test
reset_test_state();
set_should_fail(true);
let image = "alpine:latest";
let result = test_from(image);
assert!(result.is_err());
match result {
Err(BuildahError::CommandFailed(msg)) => {
assert_eq!(msg, "Command failed");
},
_ => panic!("Expected CommandFailed error"),
}
}
}

View File

@@ -1,82 +0,0 @@
use crate::process::CommandResult;
use crate::virt::buildah::{execute_buildah_command, BuildahError};
use std::fs::File;
use std::io::{Read, Write};
use tempfile::NamedTempFile;
/// Functions for working with file content in buildah containers
pub struct ContentOperations;
impl ContentOperations {
/// Write content to a file in the container
///
/// # Arguments
///
/// * `container_id` - The container ID
/// * `content` - The content to write
/// * `dest_path` - Destination path in the container
///
/// # Returns
///
/// * `Result<CommandResult, BuildahError>` - Command result or error
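///
/// # Example
///
/// A sketch with a hypothetical container ID:
///
/// ```no_run
/// # use sal::virt::buildah::ContentOperations;
/// ContentOperations::write_content("my-container-id", "hello\n", "/tmp/hello.txt")
///     .expect("write failed");
/// ```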
pub fn write_content(container_id: &str, content: &str, dest_path: &str) -> Result<CommandResult, BuildahError> {
// Create a temporary file
let mut temp_file = NamedTempFile::new()
.map_err(|e| BuildahError::Other(format!("Failed to create temporary file: {}", e)))?;
// Write content to the temporary file
temp_file.write_all(content.as_bytes())
.map_err(|e| BuildahError::Other(format!("Failed to write to temporary file: {}", e)))?;
// Flush the file to ensure content is written
temp_file.flush()
.map_err(|e| BuildahError::Other(format!("Failed to flush temporary file: {}", e)))?;
// Copy the temporary file to the container
let temp_path = temp_file.path().to_string_lossy().to_string();
// Use add instead of copy for better handling of paths
execute_buildah_command(&["add", container_id, &temp_path, dest_path])
}
/// Read content from a file in the container
///
/// # Arguments
///
/// * `container_id` - The container ID
/// * `source_path` - Source path in the container
///
/// # Returns
///
/// * `Result<String, BuildahError>` - File content or error
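///
/// # Example
///
/// A sketch with a hypothetical container ID:
///
/// ```no_run
/// # use sal::virt::buildah::ContentOperations;
/// let content = ContentOperations::read_content("my-container-id", "/etc/os-release")
///     .expect("read failed");
/// println!("{}", content);
/// ```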
pub fn read_content(container_id: &str, source_path: &str) -> Result<String, BuildahError> {
// Create a temporary file
let temp_file = NamedTempFile::new()
.map_err(|e| BuildahError::Other(format!("Failed to create temporary file: {}", e)))?;
let temp_path = temp_file.path().to_string_lossy().to_string();
// Copy the file from the container to the temporary file
// Use mount to access the container's filesystem
let mount_result = execute_buildah_command(&["mount", container_id])?;
let mount_point = mount_result.stdout.trim();
// Construct the full path to the file in the container
let full_source_path = format!("{}{}", mount_point, source_path);
// Copy the file from the mounted container filesystem to the host temporary file.
// Note: `buildah copy` copies files *into* a container, so a host-side copy is used here.
std::fs::copy(&full_source_path, &temp_path)
.map_err(|e| BuildahError::Other(format!("Failed to copy file from container: {}", e)))?;
// Unmount the container
execute_buildah_command(&["umount", container_id])?;
// Read the content from the temporary file
let mut file = File::open(temp_file.path())
.map_err(|e| BuildahError::Other(format!("Failed to open temporary file: {}", e)))?;
let mut content = String::new();
file.read_to_string(&mut content)
.map_err(|e| BuildahError::Other(format!("Failed to read from temporary file: {}", e)))?;
Ok(content)
}
}

View File

@@ -1,210 +0,0 @@
use std::collections::HashMap;
use crate::virt::buildah::execute_buildah_command;
use crate::process::CommandResult;
use super::BuildahError;
use serde_json::{self, Value};
use serde::{Deserialize, Serialize};
/// Represents a container image
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Image {
/// Image ID
pub id: String,
/// Image names/tags
pub names: Vec<String>,
/// Image size
pub size: String,
/// Creation timestamp
pub created: String,
}
/// List images in local storage
///
/// # Returns
/// * Result with array of Image objects on success or error details
pub fn images() -> Result<Vec<Image>, BuildahError> {
let result = execute_buildah_command(&["images", "--json"])?;
// Try to parse the JSON output
match serde_json::from_str::<serde_json::Value>(&result.stdout) {
Ok(json) => {
if let Value::Array(images_json) = json {
let mut images = Vec::new();
for image_json in images_json {
// Extract image ID
let id = match image_json.get("id").and_then(|v| v.as_str()) {
Some(id) => id.to_string(),
None => return Err(BuildahError::ConversionError("Missing image ID".to_string())),
};
// Extract image names
let names = match image_json.get("names").and_then(|v| v.as_array()) {
Some(names_array) => {
let mut names_vec = Vec::new();
for name_value in names_array {
if let Some(name_str) = name_value.as_str() {
names_vec.push(name_str.to_string());
}
}
names_vec
},
None => Vec::new(), // Empty vector if no names found
};
// Extract image size
let size = match image_json.get("size").and_then(|v| v.as_str()) {
Some(size) => size.to_string(),
None => "Unknown".to_string(), // Default value if size not found
};
// Extract creation timestamp
let created = match image_json.get("created").and_then(|v| v.as_str()) {
Some(created) => created.to_string(),
None => "Unknown".to_string(), // Default value if created not found
};
// Create Image struct and add to vector
images.push(Image {
id,
names,
size,
created,
});
}
Ok(images)
} else {
Err(BuildahError::JsonParseError("Expected JSON array".to_string()))
}
},
Err(e) => {
Err(BuildahError::JsonParseError(format!("Failed to parse image list JSON: {}", e)))
}
}
}
/// Remove one or more images
///
/// # Arguments
/// * `image` - Image ID or name
///
/// # Returns
/// * Result with command output or error
pub fn image_remove(image: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["rmi", image])
}
/// Push an image to a registry
///
/// # Arguments
/// * `image` - Image name
/// * `destination` - Destination (e.g., "docker://registry.example.com/myimage:latest")
/// * `tls_verify` - Whether to verify TLS (default: true)
///
/// # Returns
/// * Result with command output or error
pub fn image_push(image: &str, destination: &str, tls_verify: bool) -> Result<CommandResult, BuildahError> {
let mut args = vec!["push"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
args.push(destination);
execute_buildah_command(&args)
}
/// Add an additional name to a local image
///
/// # Arguments
/// * `image` - Image ID or name
/// * `new_name` - New name for the image
///
/// # Returns
/// * Result with command output or error
pub fn image_tag(image: &str, new_name: &str) -> Result<CommandResult, BuildahError> {
execute_buildah_command(&["tag", image, new_name])
}
/// Pull an image from a registry
///
/// # Arguments
/// * `image` - Image name
/// * `tls_verify` - Whether to verify TLS (default: true)
///
/// # Returns
/// * Result with command output or error
pub fn image_pull(image: &str, tls_verify: bool) -> Result<CommandResult, BuildahError> {
let mut args = vec!["pull"];
if !tls_verify {
args.push("--tls-verify=false");
}
args.push(image);
execute_buildah_command(&args)
}
/// Commit a container to an image
///
/// # Arguments
/// * `container` - Container ID or name
/// * `image_name` - New name for the image
/// * `format` - Optional, format to use for the image (oci or docker)
/// * `squash` - Whether to squash layers
/// * `rm` - Whether to remove the container after commit
///
/// # Returns
/// * Result with command output or error
pub fn image_commit(container: &str, image_name: &str, format: Option<&str>, squash: bool, rm: bool) -> Result<CommandResult, BuildahError> {
let mut args = vec!["commit"];
if let Some(format_str) = format {
args.push("--format");
args.push(format_str);
}
if squash {
args.push("--squash");
}
if rm {
args.push("--rm");
}
args.push(container);
args.push(image_name);
execute_buildah_command(&args)
}
/// Container configuration options
///
/// # Arguments
/// * `container` - Container ID or name
/// * `options` - Map of configuration options
///
/// # Returns
/// * Result with command output or error
pub fn bah_config(container: &str, options: HashMap<String, String>) -> Result<CommandResult, BuildahError> {
let mut args_owned: Vec<String> = Vec::new();
args_owned.push("config".to_string());
// Process options map
for (key, value) in options.iter() {
let option_name = format!("--{}", key);
args_owned.push(option_name);
args_owned.push(value.clone());
}
args_owned.push(container.to_string());
// Convert Vec<String> to Vec<&str> for execute_buildah_command
let args: Vec<&str> = args_owned.iter().map(|s| s.as_str()).collect();
execute_buildah_command(&args)
}

View File

@@ -1,57 +0,0 @@
mod containers;
mod images;
mod cmd;
mod builder;
mod content;
#[cfg(test)]
mod containers_test;
use std::fmt;
use std::error::Error;
use std::io;
/// Error type for buildah operations
#[derive(Debug)]
pub enum BuildahError {
/// The buildah command failed to execute
CommandExecutionFailed(io::Error),
/// The buildah command executed but returned an error
CommandFailed(String),
/// Failed to parse JSON output
JsonParseError(String),
/// Failed to convert data
ConversionError(String),
/// Generic error
Other(String),
}
impl fmt::Display for BuildahError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BuildahError::CommandExecutionFailed(e) => write!(f, "Failed to execute buildah command: {}", e),
BuildahError::CommandFailed(e) => write!(f, "Buildah command failed: {}", e),
BuildahError::JsonParseError(e) => write!(f, "Failed to parse JSON: {}", e),
BuildahError::ConversionError(e) => write!(f, "Conversion error: {}", e),
BuildahError::Other(e) => write!(f, "{}", e),
}
}
}
impl Error for BuildahError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
BuildahError::CommandExecutionFailed(e) => Some(e),
_ => None,
}
}
}
// Re-export the Builder
pub use builder::Builder;
// Re-export existing functions for backward compatibility
#[deprecated(since = "0.2.0", note = "Use Builder::new() instead")]
pub use containers::*;
#[deprecated(since = "0.2.0", note = "Use Builder methods instead")]
pub use images::*;
pub use cmd::*;
pub use content::ContentOperations;

View File

@@ -1,3 +0,0 @@
pub mod buildah;
pub mod nerdctl;
pub mod rfs;

View File

@@ -1,223 +0,0 @@
# SAL `nerdctl` Module (`sal::virt::nerdctl`)
## Overview
The `sal::virt::nerdctl` module provides a comprehensive Rust interface for interacting with `nerdctl`, a command-line tool for `containerd`.
It allows for managing container lifecycles, images, and other `nerdctl` functionalities programmatically from Rust and through Rhai scripts via `herodo`.
This module offers two primary ways to interact with `nerdctl`:
1. A fluent **`Container` builder pattern** for defining, creating, and managing containers with detailed configurations.
2. **Direct static functions** that wrap common `nerdctl` commands for quick operations on containers and images.
## Core Components
### 1. `NerdctlError` (in `mod.rs`)
An enum defining specific error types for `nerdctl` operations:
- `CommandExecutionFailed(io::Error)`: `nerdctl` command failed to start (e.g., not found).
- `CommandFailed(String)`: `nerdctl` command executed but returned an error.
- `JsonParseError(String)`: Failure to parse JSON output from `nerdctl`.
- `ConversionError(String)`: Error during data type conversions.
- `Other(String)`: Generic errors.
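Callers can match on these variants to produce user-facing messages; a minimal sketch (the import path follows this module's `sal::virt::nerdctl` location):

```rust
use sal::virt::nerdctl::NerdctlError;

fn describe(err: &NerdctlError) -> String {
    match err {
        NerdctlError::CommandExecutionFailed(e) => format!("nerdctl could not be executed: {}", e),
        NerdctlError::CommandFailed(msg) => format!("nerdctl reported an error: {}", msg),
        NerdctlError::JsonParseError(msg) => format!("unparseable JSON from nerdctl: {}", msg),
        NerdctlError::ConversionError(msg) => format!("conversion error: {}", msg),
        NerdctlError::Other(msg) => msg.clone(),
    }
}
```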
### 2. `execute_nerdctl_command` (in `cmd.rs`)
The core function for executing `nerdctl` commands. It takes an array of string arguments, runs the command, and returns a `CommandResult` or `NerdctlError`.
```rust
// Example (internal usage)
// use sal::virt::nerdctl::execute_nerdctl_command;
// let result = execute_nerdctl_command(&["ps", "-a"]);
```
### 3. `Container` Struct (defined in `container_types.rs`, builder in `container_builder.rs`, operations in `container_operations.rs`)
Represents a `nerdctl` container and is the centerpiece of the builder pattern.
**Fields (Configuration):**
- `name: String`: Name of the container.
- `container_id: Option<String>`: ID of the container (populated after creation).
- `image: Option<String>`: Base image for the container.
- `ports: Vec<String>`: Port mappings (e.g., `"8080:80"`).
- `volumes: Vec<String>`: Volume mounts (e.g., `"/host/path:/container/path"`).
- `env_vars: HashMap<String, String>`: Environment variables.
- `network: Option<String>`: Network to connect to.
- `network_aliases: Vec<String>`: Network aliases.
- `cpu_limit: Option<String>`, `memory_limit: Option<String>`, `memory_swap_limit: Option<String>`, `cpu_shares: Option<String>`: Resource limits.
- `restart_policy: Option<String>`: Restart policy (e.g., `"always"`).
- `health_check: Option<HealthCheck>`: Health check configuration.
- `detach: bool`: Whether to run in detached mode (defaults to `false`; the Rhai `container_build` helper typically enables it).
- `snapshotter: Option<String>`: Snapshotter to use.
**Builder Methods (Fluent Interface - `impl Container` in `container_builder.rs`):**
These methods configure the `Container` object and return `Self` for chaining.
- `Container::new(name: &str)` and `Container::from_image(name: &str, image: &str)`: Constructors, both returning `Result<Self, NerdctlError>` (the Rhai bindings `nerdctl_container_new(name)` and `nerdctl_container_from_image(name, image)` call them).
- `reset()`: Resets configuration, stops/removes existing container with the same name.
- `with_port(port: &str)`, `with_ports(ports: &[&str])`
- `with_volume(volume: &str)`, `with_volumes(volumes: &[&str])`
- `with_env(key: &str, value: &str)`, `with_envs(env_map: &HashMap<&str, &str>)`
- `with_network(network: &str)`
- `with_network_alias(alias: &str)`, `with_network_aliases(aliases: &[&str])`
- `with_cpu_limit(cpus: &str)`
- `with_memory_limit(memory: &str)`
- `with_memory_swap_limit(memory_swap: &str)`
- `with_cpu_shares(shares: &str)`
- `with_restart_policy(policy: &str)`
- `with_health_check(cmd: &str)`
- `with_health_check_options(cmd, interval, timeout, retries, start_period)`
- `with_snapshotter(snapshotter: &str)`
- `with_detach(detach: bool)`
**Action Methods (on `Container` instances):**
- `build()` (in `container_builder.rs`): Assembles and executes `nerdctl run` with all configured options. Populates `container_id` on success.
- `start()` (in `container_operations.rs`): Starts the container. If not yet built, it attempts to pull the image and build the container first. Verifies the container is running and provides detailed logs/status on failure.
- `stop()` (in `container_operations.rs`): Stops the container.
- `remove()` (in `container_operations.rs`): Removes the container.
- `exec(command: &str)` (in `container_operations.rs`): Executes a command in the container.
- `copy(source: &str, dest: &str)` (in `container_operations.rs`): Copies files/folders. `source`/`dest` must be formatted like `container_name_or_id:/path` or `/local/path`.
- `status()` (in `container_operations.rs`): Returns `ContainerStatus` by parsing `nerdctl inspect`.
- `health_status()` (in `container_operations.rs`): Returns the health status string from `nerdctl inspect`.
- `logs()` (in `container_operations.rs`): Fetches container logs.
- `resources()` (in `container_operations.rs`): Returns `ResourceUsage` by parsing `nerdctl stats`.
- `commit(image_name: &str)` (in `container_operations.rs`): Commits the container to a new image.
- `export(path: &str)` (in `container_operations.rs`): Exports the container's filesystem as a tarball.
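A short sketch combining the builder with these operations (image and paths are placeholders; error handling elided):

```rust
use sal::virt::nerdctl::{Container, NerdctlError};

fn inspect_webroot() -> Result<(), NerdctlError> {
    let c = Container::from_image("web", "nginx:latest")?
        .with_detach(true)
        .build()?;                                   // populates container_id
    c.exec("ls /usr/share/nginx/html")?;             // runs via `sh -c`
    c.copy("web:/etc/nginx/nginx.conf", "/tmp/nginx.conf")?; // container -> host
    c.export("/tmp/web.tar")?;                       // filesystem tarball
    c.stop()?;
    c.remove()?;
    Ok(())
}
```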
### 4. `HealthCheck` Struct (in `container_types.rs`)
Defines health check parameters:
- `cmd: String`: Command to execute.
- `interval: Option<String>`
- `timeout: Option<String>`
- `retries: Option<u32>`
- `start_period: Option<String>`
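These fields are normally set through the `Container` builder, but `HealthCheck` also has its own fluent constructors (defined in `health_check.rs`, shown later in this diff); a sketch, assuming the type is re-exported from the nerdctl module:

```rust
use sal::virt::nerdctl::HealthCheck; // re-export path is an assumption

// Probe nginx every 30s, allow 10s per probe, mark unhealthy after 3
// consecutive failures, and grant a 5s startup grace period.
let check = HealthCheck::new("curl -f http://localhost/ || exit 1")
    .with_interval("30s")
    .with_timeout("10s")
    .with_retries(3)
    .with_start_period("5s");
```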
### 5. `prepare_health_check_command` (in `health_check_script.rs`)
A helper that takes a health check command string. If the command is a single line, it is returned unchanged; if it is a multi-line script, it is saved as an executable file at `/root/hero/var/containers/healthcheck_<container_name>.sh` and that path is returned. Because `/root/hero/var/containers` is a host path, a multi-line script must also be reachable at the same location inside the target container for the health check to run.
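A sketch of the expected behavior (the module path is an assumption; the helper itself appears later in this diff):

```rust
use sal::virt::nerdctl::health_check_script::prepare_health_check_command;

fn main() {
    // Single-line commands pass through unchanged.
    let one_liner = prepare_health_check_command("curl -f http://localhost/", "web");
    assert_eq!(one_liner, "curl -f http://localhost/");

    // Multi-line scripts are written to
    // /root/hero/var/containers/healthcheck_web.sh (made executable) and the
    // script path is returned; on I/O failure the raw command is returned instead.
    let script_cmd = prepare_health_check_command("#!/bin/sh\nset -e\ncurl -f http://localhost/\n", "web");
    println!("health cmd: {}", script_cmd);
}
```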
### 6. `Image` Struct (in `images.rs`)
Represents a `nerdctl` image, typically from `nerdctl images` output.
- `id: String`
- `repository: String`
- `tag: String`
- `size: String`
- `created: String`
### 7. Static Image Functions (in `images.rs`)
These functions operate on images:
- `images() -> Result<CommandResult, NerdctlError>`: Lists images (`nerdctl images`).
- `image_remove(image: &str)`: Removes an image (`nerdctl rmi`).
- `image_push(image: &str, destination: &str)`: Pushes an image (`nerdctl push`).
- `image_tag(image: &str, new_name: &str)`: Tags an image (`nerdctl tag`).
- `image_pull(image: &str)`: Pulls an image (`nerdctl pull`).
- `image_commit(container: &str, image_name: &str)`: Commits a container to an image (`nerdctl commit`).
- `image_build(tag: &str, context_path: &str)`: Builds an image from a Dockerfile (`nerdctl build -t <tag> <context_path>`).
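For quick one-off operations these can be called directly; a sketch (module path and registry name are assumptions; error handling elided):

```rust
use sal::virt::nerdctl::images::{image_pull, image_tag, images};
use sal::virt::nerdctl::NerdctlError;

fn refresh_alpine() -> Result<(), NerdctlError> {
    image_pull("alpine:latest")?;                                 // nerdctl pull
    image_tag("alpine:latest", "registry.local/alpine:latest")?;  // nerdctl tag
    let listing = images()?;                                      // nerdctl images
    println!("{}", listing.stdout);
    Ok(())
}
```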
### 8. Static Container Functions (in `container_functions.rs`)
Direct wrappers for `nerdctl` commands, an alternative to the builder pattern:
- `run(image: &str, name: Option<&str>, detach: bool, ports: Option<&[&str]>, snapshotter: Option<&str>)`: Runs a container.
- `exec(container: &str, command: &str)`: Executes a command in a running container.
- `copy(source: &str, dest: &str)`: Copies files.
- `stop(container: &str)`: Stops a container.
- `remove(container: &str)`: Removes a container.
- `list(all: bool)`: Lists containers (`nerdctl ps`).
- `logs(container: &str)`: Fetches logs for a container.
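A sketch of this direct style (module path is an assumption; error handling elided):

```rust
use sal::virt::nerdctl::container_functions::{exec, remove, run, stop};
use sal::virt::nerdctl::NerdctlError;

fn run_nginx_once() -> Result<(), NerdctlError> {
    // Detached nginx with one port mapping; snapshotter left at its default.
    run("nginx:latest", Some("demo-nginx"), true, Some(&["8080:80"]), None)?;
    exec("demo-nginx", "nginx -v")?;
    stop("demo-nginx")?;
    remove("demo-nginx")?;
    Ok(())
}
```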
### 9. `ContainerStatus` and `ResourceUsage` Structs (in `container_types.rs`)
- `ContainerStatus`: Holds parsed data from `nerdctl inspect` (state, status, created, started, health info).
- `ResourceUsage`: Holds parsed data from `nerdctl stats` (CPU, memory, network, block I/O, PIDs).
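A sketch of reading both structures for a built container (error handling elided):

```rust
use sal::virt::nerdctl::{Container, NerdctlError};

fn report(c: &Container) -> Result<(), NerdctlError> {
    let s = c.status()?;    // parsed from `nerdctl inspect <id>`
    let r = c.resources()?; // parsed from `nerdctl stats --no-stream <id>`
    println!("state={} health={:?}", s.state, s.health_status);
    println!("cpu={} mem={}/{} pids={}", r.cpu_usage, r.memory_usage, r.memory_limit, r.pids);
    Ok(())
}
```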
## Usage Examples
### Rust Example (Builder Pattern)
```rust
use sal::virt::nerdctl::{Container, NerdctlError};
use std::collections::HashMap;
fn main() -> Result<(), NerdctlError> {
let mut envs = HashMap::new();
envs.insert("MY_VAR", "my_value");
let container_config = Container::from_image("my_nginx_container", "nginx:latest")?
.with_port("8080:80")
.with_envs(&envs)
.with_detach(true)
.with_restart_policy("always");
// Build (create and run) the container
let built_container = container_config.build()?;
println!("Container {} created with ID: {:?}", built_container.name, built_container.container_id);
// Perform operations
let status = built_container.status()?;
println!("Status: {}, State: {}", status.status, status.state);
// Stop and remove
built_container.stop()?;
built_container.remove()?;
println!("Container stopped and removed.");
Ok(())
}
```
*Note: `Container::new(name)` creates a container reference by name only, while `Container::from_image(name, image)` also sets the base image; both return `Result<Self, NerdctlError>`, hence the `?` above. The Rhai bindings `nerdctl_container_new` and `nerdctl_container_from_image` wrap these constructors. After configuration, `build()` is the primary way to create and run the container.*
### Rhai Script Example (using `herodo`)
```rhai
// Create and configure a container using the builder pattern
let c = nerdctl_container_from_image("my_redis", "redis:alpine")
.with_port("6379:6379")
.with_restart_policy("unless-stopped");
// Build and run the container
let running_container = c.build();
if running_container.is_ok() {
print(`Container ${running_container.name} ID: ${running_container.container_id}`);
// Get status
let status = running_container.status();
if status.is_ok() {
print(`Status: ${status.state}, Health: ${status.health_status}`);
}
// Stop the container (example, might need a mutable borrow or re-fetch)
// running_container.stop(); // Assuming stop is available and works on the result
// running_container.remove();
} else {
print(`Error building container: ${running_container.error()}`);
}
// Direct command example
let images = nerdctl_images();
print(images.stdout);
nerdctl_image_pull("alpine:latest");
```
## Key Design Points
- **Fluent Builder**: The `Container` struct uses a builder pattern, allowing for clear and chainable configuration of container parameters before execution.
- **Comprehensive Operations**: Covers most common `nerdctl` functionalities for containers and images.
- **Error Handling**: `NerdctlError` provides typed errors. The Rhai layer adds more descriptive error messages for common scenarios.
- **Dual API**: Offers both a detailed builder pattern and simpler static functions for flexibility.
- **Health Check Scripting**: Supports multi-line shell scripts for health checks by saving them to a file, though care must be taken regarding the script's accessibility from within the target container.
- **Resource Parsing**: Includes parsing for `nerdctl inspect` (JSON) and `nerdctl stats` (tabular text) to provide structured information.
## File Structure
- `src/virt/nerdctl/mod.rs`: Main module file, error definitions, sub-module declarations.
- `src/virt/nerdctl/cmd.rs`: Core `execute_nerdctl_command` function.
- `src/virt/nerdctl/container_types.rs`: Definitions for `Container`, `HealthCheck`, `ContainerStatus`, `ResourceUsage`.
- `src/virt/nerdctl/container_builder.rs`: Implements the builder pattern methods for the `Container` struct.
- `src/virt/nerdctl/container_operations.rs`: Implements instance methods on `Container` (start, stop, status, etc.).
- `src/virt/nerdctl/images.rs`: `Image` struct and static functions for image management.
- `src/virt/nerdctl/container_functions.rs`: Static functions for direct container commands.
- `src/virt/nerdctl/health_check_script.rs`: Logic for `prepare_health_check_command`.
- `src/rhai/nerdctl.rs`: Rhai script bindings for `herodo`.

View File

@@ -1,37 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/cmd.rs
// Basic nerdctl operations for container management
use std::process::Command;
use crate::process::CommandResult;
use super::NerdctlError;
/// Execute a nerdctl command and return the result
pub fn execute_nerdctl_command(args: &[&str]) -> Result<CommandResult, NerdctlError> {
let output = Command::new("nerdctl")
.args(args)
.output();
match output {
Ok(output) => {
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
let result = CommandResult {
stdout,
stderr,
success: output.status.success(),
code: output.status.code().unwrap_or(-1),
};
if result.success {
Ok(result)
} else {
Err(NerdctlError::CommandFailed(format!("Command failed with code {}: {}",
result.code, result.stderr.trim())))
}
},
Err(e) => {
Err(NerdctlError::CommandExecutionFailed(e))
}
}
}

View File

@@ -1,82 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container.rs
use super::container_types::Container;
use crate::virt::nerdctl::{execute_nerdctl_command, NerdctlError};
use sal_os as os;
use std::collections::HashMap;
impl Container {
/// Create a new container reference with the given name
///
/// # Arguments
///
/// * `name` - Name for the container
///
/// # Returns
///
/// * `Result<Self, NerdctlError>` - Container instance or error
pub fn new(name: &str) -> Result<Self, NerdctlError> {
// Check if required commands exist
match os::cmd_ensure_exists("nerdctl,runc,buildah") {
Err(e) => {
return Err(NerdctlError::CommandExecutionFailed(std::io::Error::new(
std::io::ErrorKind::NotFound,
format!("Required commands not found: {}", e),
)))
}
_ => {}
}
// Check if container exists
let result = execute_nerdctl_command(&["ps", "-a", "--format", "{{.Names}} {{.ID}}"])?;
// Look for the container name in the output
let container_id = result
.stdout
.lines()
.filter_map(|line| {
if line.starts_with(&format!("{} ", name)) {
Some(line.split_whitespace().nth(1)?.to_string())
} else {
None
}
})
.next();
Ok(Self {
name: name.to_string(),
container_id,
image: None,
config: HashMap::new(),
ports: Vec::new(),
volumes: Vec::new(),
env_vars: HashMap::new(),
network: None,
network_aliases: Vec::new(),
cpu_limit: None,
memory_limit: None,
memory_swap_limit: None,
cpu_shares: None,
restart_policy: None,
health_check: None,
detach: false,
snapshotter: None,
})
}
/// Create a container from an image
///
/// # Arguments
///
/// * `name` - Name for the container
/// * `image` - Image to create the container from
///
/// # Returns
///
/// * `Result<Self, NerdctlError>` - Container instance or error
pub fn from_image(name: &str, image: &str) -> Result<Self, NerdctlError> {
let mut container = Self::new(name)?;
container.image = Some(image.to_string());
Ok(container)
}
}

View File

@@ -1,517 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container_builder.rs
use super::container_types::{Container, HealthCheck};
use super::health_check_script::prepare_health_check_command;
use crate::virt::nerdctl::{execute_nerdctl_command, NerdctlError};
use std::collections::HashMap;
impl Container {
/// Reset the container configuration to defaults while keeping the name and image
/// If the container exists, it will be stopped and removed.
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn reset(self) -> Self {
let name = self.name;
let image = self.image.clone();
// If container exists, stop and remove it
if let Some(container_id) = &self.container_id {
println!(
"Container exists. Stopping and removing container '{}'...",
name
);
// Try to stop the container
let _ = execute_nerdctl_command(&["stop", container_id]);
// Try to remove the container
let _ = execute_nerdctl_command(&["rm", container_id]);
}
// Create a new container with just the name and image, but no container_id
Self {
name,
container_id: None, // Reset container_id to None since we removed the container
image,
config: std::collections::HashMap::new(),
ports: Vec::new(),
volumes: Vec::new(),
env_vars: std::collections::HashMap::new(),
network: None,
network_aliases: Vec::new(),
cpu_limit: None,
memory_limit: None,
memory_swap_limit: None,
cpu_shares: None,
restart_policy: None,
health_check: None,
detach: false,
snapshotter: None,
}
}
/// Add a port mapping
///
/// # Arguments
///
/// * `port` - Port mapping (e.g., "8080:80")
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_port(mut self, port: &str) -> Self {
self.ports.push(port.to_string());
self
}
/// Add multiple port mappings
///
/// # Arguments
///
/// * `ports` - Array of port mappings (e.g., ["8080:80", "8443:443"])
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_ports(mut self, ports: &[&str]) -> Self {
for port in ports {
self.ports.push(port.to_string());
}
self
}
/// Add a volume mount
///
/// # Arguments
///
/// * `volume` - Volume mount (e.g., "/host/path:/container/path")
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_volume(mut self, volume: &str) -> Self {
self.volumes.push(volume.to_string());
self
}
/// Add multiple volume mounts
///
/// # Arguments
///
/// * `volumes` - Array of volume mounts (e.g., ["/host/path1:/container/path1", "/host/path2:/container/path2"])
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_volumes(mut self, volumes: &[&str]) -> Self {
for volume in volumes {
self.volumes.push(volume.to_string());
}
self
}
/// Add an environment variable
///
/// # Arguments
///
/// * `key` - Environment variable name
/// * `value` - Environment variable value
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_env(mut self, key: &str, value: &str) -> Self {
self.env_vars.insert(key.to_string(), value.to_string());
self
}
/// Add multiple environment variables
///
/// # Arguments
///
/// * `env_map` - Map of environment variable names to values
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_envs(mut self, env_map: &HashMap<&str, &str>) -> Self {
for (key, value) in env_map {
self.env_vars.insert(key.to_string(), value.to_string());
}
self
}
/// Set the network for the container
///
/// # Arguments
///
/// * `network` - Network name
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_network(mut self, network: &str) -> Self {
self.network = Some(network.to_string());
self
}
/// Add a network alias for the container
///
/// # Arguments
///
/// * `alias` - Network alias
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_network_alias(mut self, alias: &str) -> Self {
self.network_aliases.push(alias.to_string());
self
}
/// Add multiple network aliases for the container
///
/// # Arguments
///
/// * `aliases` - Array of network aliases
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_network_aliases(mut self, aliases: &[&str]) -> Self {
for alias in aliases {
self.network_aliases.push(alias.to_string());
}
self
}
/// Set CPU limit for the container
///
/// # Arguments
///
/// * `cpus` - CPU limit (e.g., "0.5" for half a CPU, "2" for 2 CPUs)
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_cpu_limit(mut self, cpus: &str) -> Self {
self.cpu_limit = Some(cpus.to_string());
self
}
/// Set memory limit for the container
///
/// # Arguments
///
/// * `memory` - Memory limit (e.g., "512m" for 512MB, "1g" for 1GB)
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_memory_limit(mut self, memory: &str) -> Self {
self.memory_limit = Some(memory.to_string());
self
}
/// Set memory swap limit for the container
///
/// # Arguments
///
/// * `memory_swap` - Memory swap limit (e.g., "1g" for 1GB)
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_memory_swap_limit(mut self, memory_swap: &str) -> Self {
self.memory_swap_limit = Some(memory_swap.to_string());
self
}
/// Set CPU shares for the container (relative weight)
///
/// # Arguments
///
/// * `shares` - CPU shares (e.g., "1024" for default, "512" for half)
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_cpu_shares(mut self, shares: &str) -> Self {
self.cpu_shares = Some(shares.to_string());
self
}
/// Set restart policy for the container
///
/// # Arguments
///
/// * `policy` - Restart policy (e.g., "no", "always", "on-failure", "unless-stopped")
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_restart_policy(mut self, policy: &str) -> Self {
self.restart_policy = Some(policy.to_string());
self
}
/// Set a simple health check for the container
///
/// # Arguments
///
/// * `cmd` - Command to run for health check (e.g., "curl -f http://localhost/ || exit 1")
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_health_check(mut self, cmd: &str) -> Self {
// Use the health check script module to prepare the command
let prepared_cmd = prepare_health_check_command(cmd, &self.name);
self.health_check = Some(HealthCheck {
cmd: prepared_cmd,
interval: None,
timeout: None,
retries: None,
start_period: None,
});
self
}
/// Set a health check with custom options for the container
///
/// # Arguments
///
/// * `cmd` - Command to run for health check
/// * `interval` - Optional time between running the check (e.g., "30s", "1m")
/// * `timeout` - Optional maximum time to wait for a check to complete (e.g., "30s", "1m")
/// * `retries` - Optional number of consecutive failures needed to consider unhealthy
/// * `start_period` - Optional start period for the container to initialize before counting retries (e.g., "30s", "1m")
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_health_check_options(
mut self,
cmd: &str,
interval: Option<&str>,
timeout: Option<&str>,
retries: Option<u32>,
start_period: Option<&str>,
) -> Self {
// Use the health check script module to prepare the command
let prepared_cmd = prepare_health_check_command(cmd, &self.name);
let mut health_check = HealthCheck {
cmd: prepared_cmd,
interval: None,
timeout: None,
retries: None,
start_period: None,
};
if let Some(interval_value) = interval {
health_check.interval = Some(interval_value.to_string());
}
if let Some(timeout_value) = timeout {
health_check.timeout = Some(timeout_value.to_string());
}
if let Some(retries_value) = retries {
health_check.retries = Some(retries_value);
}
if let Some(start_period_value) = start_period {
health_check.start_period = Some(start_period_value.to_string());
}
self.health_check = Some(health_check);
self
}
/// Set the snapshotter
///
/// # Arguments
///
/// * `snapshotter` - Snapshotter to use
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_snapshotter(mut self, snapshotter: &str) -> Self {
self.snapshotter = Some(snapshotter.to_string());
self
}
/// Set whether to run in detached mode
///
/// # Arguments
///
/// * `detach` - Whether to run in detached mode
///
/// # Returns
///
/// * `Self` - The container instance for method chaining
pub fn with_detach(mut self, detach: bool) -> Self {
self.detach = detach;
self
}
/// Build the container
///
/// # Returns
///
/// * `Result<Self, NerdctlError>` - Container instance or error
pub fn build(self) -> Result<Self, NerdctlError> {
// If container already exists, return it
if self.container_id.is_some() {
return Ok(self);
}
// If no image is specified, return an error
let image = match &self.image {
Some(img) => img,
None => {
return Err(NerdctlError::Other(
"No image specified for container creation".to_string(),
))
}
};
// Build the command arguments as strings
let mut args_strings = Vec::new();
args_strings.push("run".to_string());
if self.detach {
args_strings.push("-d".to_string());
}
args_strings.push("--name".to_string());
args_strings.push(self.name.clone());
// Add port mappings
for port in &self.ports {
args_strings.push("-p".to_string());
args_strings.push(port.clone());
}
// Add volume mounts
for volume in &self.volumes {
args_strings.push("-v".to_string());
args_strings.push(volume.clone());
}
// Add environment variables
for (key, value) in &self.env_vars {
args_strings.push("-e".to_string());
args_strings.push(format!("{}={}", key, value));
}
// Add network configuration
if let Some(network) = &self.network {
args_strings.push("--network".to_string());
args_strings.push(network.clone());
}
// Add network aliases
for alias in &self.network_aliases {
args_strings.push("--network-alias".to_string());
args_strings.push(alias.clone());
}
// Add resource limits
if let Some(cpu_limit) = &self.cpu_limit {
args_strings.push("--cpus".to_string());
args_strings.push(cpu_limit.clone());
}
if let Some(memory_limit) = &self.memory_limit {
args_strings.push("--memory".to_string());
args_strings.push(memory_limit.clone());
}
if let Some(memory_swap_limit) = &self.memory_swap_limit {
args_strings.push("--memory-swap".to_string());
args_strings.push(memory_swap_limit.clone());
}
if let Some(cpu_shares) = &self.cpu_shares {
args_strings.push("--cpu-shares".to_string());
args_strings.push(cpu_shares.clone());
}
// Add restart policy
if let Some(restart_policy) = &self.restart_policy {
args_strings.push("--restart".to_string());
args_strings.push(restart_policy.clone());
}
// Add health check
if let Some(health_check) = &self.health_check {
args_strings.push("--health-cmd".to_string());
args_strings.push(health_check.cmd.clone());
if let Some(interval) = &health_check.interval {
args_strings.push("--health-interval".to_string());
args_strings.push(interval.clone());
}
if let Some(timeout) = &health_check.timeout {
args_strings.push("--health-timeout".to_string());
args_strings.push(timeout.clone());
}
if let Some(retries) = &health_check.retries {
args_strings.push("--health-retries".to_string());
args_strings.push(retries.to_string());
}
if let Some(start_period) = &health_check.start_period {
args_strings.push("--health-start-period".to_string());
args_strings.push(start_period.clone());
}
}
if let Some(snapshotter_value) = &self.snapshotter {
args_strings.push("--snapshotter".to_string());
args_strings.push(snapshotter_value.clone());
}
// Add flags to avoid BPF issues
args_strings.push("--cgroup-manager=cgroupfs".to_string());
args_strings.push(image.clone());
// Convert to string slices for the command
let args: Vec<&str> = args_strings.iter().map(|s| s.as_str()).collect();
// Execute the command
let result = execute_nerdctl_command(&args)?;
// Get the container ID from the output
let container_id = result.stdout.trim().to_string();
Ok(Self {
name: self.name,
container_id: Some(container_id),
image: self.image,
config: self.config,
ports: self.ports,
volumes: self.volumes,
env_vars: self.env_vars,
network: self.network,
network_aliases: self.network_aliases,
cpu_limit: self.cpu_limit,
memory_limit: self.memory_limit,
memory_swap_limit: self.memory_swap_limit,
cpu_shares: self.cpu_shares,
restart_policy: self.restart_policy,
health_check: self.health_check,
detach: self.detach,
snapshotter: self.snapshotter,
})
}
}

View File

@@ -1,141 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container_functions.rs
use crate::process::CommandResult;
use crate::virt::nerdctl::{execute_nerdctl_command, NerdctlError};
/// Run a container from an image
///
/// # Arguments
///
/// * `image` - Image to run
/// * `name` - Optional name for the container
/// * `detach` - Whether to run in detached mode
/// * `ports` - Optional port mappings
/// * `snapshotter` - Optional snapshotter to use
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn run(
image: &str,
name: Option<&str>,
detach: bool,
ports: Option<&[&str]>,
snapshotter: Option<&str>,
) -> Result<CommandResult, NerdctlError> {
let mut args = vec!["run"];
if detach {
args.push("-d");
}
if let Some(name_value) = name {
args.push("--name");
args.push(name_value);
}
if let Some(ports_value) = ports {
for port in ports_value {
args.push("-p");
args.push(port);
}
}
if let Some(snapshotter_value) = snapshotter {
args.push("--snapshotter");
args.push(snapshotter_value);
}
// Add flags to avoid BPF issues
args.push("--cgroup-manager=cgroupfs");
args.push(image);
execute_nerdctl_command(&args)
}
/// Execute a command in a container
///
/// # Arguments
///
/// * `container` - Container name or ID
/// * `command` - Command to execute
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn exec(container: &str, command: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["exec", container, "sh", "-c", command])
}
/// Copy files between container and local filesystem
///
/// # Arguments
///
/// * `source` - Source path (can be container:path or local path)
/// * `dest` - Destination path (can be container:path or local path)
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn copy(source: &str, dest: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["cp", source, dest])
}
/// Stop a container
///
/// # Arguments
///
/// * `container` - Container name or ID
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn stop(container: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["stop", container])
}
/// Remove a container
///
/// # Arguments
///
/// * `container` - Container name or ID
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn remove(container: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["rm", container])
}
/// List containers
///
/// # Arguments
///
/// * `all` - Whether to list all containers (including stopped ones)
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn list(all: bool) -> Result<CommandResult, NerdctlError> {
let mut args = vec!["ps"];
if all {
args.push("-a");
}
execute_nerdctl_command(&args)
}
/// Get container logs
///
/// # Arguments
///
/// * `container` - Container name or ID
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn logs(container: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["logs", container])
}

View File

@@ -1,448 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container_operations.rs
use crate::process::CommandResult;
use crate::virt::nerdctl::{execute_nerdctl_command, NerdctlError};
use super::container_types::{Container, ContainerStatus, ResourceUsage};
use serde_json;
impl Container {
/// Start the container and verify it's running
/// If the container hasn't been created yet, it will be created automatically.
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error with detailed information
pub fn start(&self) -> Result<CommandResult, NerdctlError> {
// If container_id is None, we need to create the container first
let container = if self.container_id.is_none() {
// Check if we have an image specified
if self.image.is_none() {
return Err(NerdctlError::Other("No image specified for container creation".to_string()));
}
// Clone self and create the container
println!("Container not created yet. Creating container from image...");
// First, try to pull the image if it doesn't exist locally
let image = self.image.as_ref().unwrap();
match execute_nerdctl_command(&["image", "inspect", image]) {
Err(_) => {
println!("Image '{}' not found locally. Pulling image...", image);
if let Err(e) = execute_nerdctl_command(&["pull", image]) {
return Err(NerdctlError::CommandFailed(
format!("Failed to pull image '{}': {}", image, e)
));
}
println!("Image '{}' pulled successfully.", image);
},
Ok(_) => {
println!("Image '{}' found locally.", image);
}
}
// Now create the container
match self.clone().build() {
Ok(built) => built,
Err(e) => {
return Err(NerdctlError::CommandFailed(
format!("Failed to create container from image '{}': {}", image, e)
));
}
}
} else {
// Container already has an ID, use it as is
self.clone()
};
if let Some(container_id) = &container.container_id {
// First, try to start the container
let start_result = execute_nerdctl_command(&["start", container_id]);
// If the start command failed, return the error with details
if let Err(err) = &start_result {
return Err(NerdctlError::CommandFailed(
format!("Failed to start container {}: {}", container_id, err)
));
}
// Verify the container is actually running
match container.verify_running() {
Ok(true) => start_result,
Ok(false) => {
// Container started but isn't running - get detailed information
let mut error_message = format!("Container {} started but is not running.", container_id);
// Get container status
if let Ok(status) = container.status() {
error_message.push_str(&format!("\nStatus: {}, State: {}, Health: {}",
status.status,
status.state,
status.health_status.unwrap_or_else(|| "N/A".to_string())
));
}
// Get container logs
if let Ok(logs) = execute_nerdctl_command(&["logs", container_id]) {
if !logs.stdout.trim().is_empty() {
error_message.push_str(&format!("\nContainer logs (stdout):\n{}", logs.stdout.trim()));
}
if !logs.stderr.trim().is_empty() {
error_message.push_str(&format!("\nContainer logs (stderr):\n{}", logs.stderr.trim()));
}
}
// Get container exit code if available
if let Ok(inspect_result) = execute_nerdctl_command(&["inspect", "--format", "{{.State.ExitCode}}", container_id]) {
let exit_code = inspect_result.stdout.trim();
if !exit_code.is_empty() && exit_code != "0" {
error_message.push_str(&format!("\nContainer exit code: {}", exit_code));
}
}
Err(NerdctlError::CommandFailed(error_message))
},
Err(err) => {
// Failed to verify if container is running
Err(NerdctlError::CommandFailed(
format!("Container {} may have started, but verification failed: {}",
container_id, err
)
))
}
}
} else {
Err(NerdctlError::Other("Failed to create container. No container ID available.".to_string()))
}
}
/// Verify if the container is running
///
/// # Returns
///
/// * `Result<bool, NerdctlError>` - True if running, false if not running, error if verification failed
fn verify_running(&self) -> Result<bool, NerdctlError> {
if let Some(container_id) = &self.container_id {
// Use inspect to check if the container is running
let inspect_result = execute_nerdctl_command(&["inspect", "--format", "{{.State.Running}}", container_id]);
match inspect_result {
Ok(result) => {
let running = result.stdout.trim().to_lowercase() == "true";
Ok(running)
},
Err(err) => Err(err)
}
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Stop the container
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn stop(&self) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["stop", container_id])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Remove the container
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn remove(&self) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["rm", container_id])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Execute a command in the container
///
/// # Arguments
///
/// * `command` - The command to run
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn exec(&self, command: &str) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["exec", container_id, "sh", "-c", command])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Copy files between container and local filesystem
///
/// # Arguments
///
/// * `source` - Source path (can be container:path or local path)
/// * `dest` - Destination path (can be container:path or local path)
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn copy(&self, source: &str, dest: &str) -> Result<CommandResult, NerdctlError> {
if self.container_id.is_some() {
execute_nerdctl_command(&["cp", source, dest])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Export the container to a tarball
///
/// # Arguments
///
/// * `path` - Path to save the tarball
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn export(&self, path: &str) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["export", "-o", path, container_id])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Commit the container to an image
///
/// # Arguments
///
/// * `image_name` - Name for the new image
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn commit(&self, image_name: &str) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["commit", container_id, image_name])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Get container status
///
/// # Returns
///
/// * `Result<ContainerStatus, NerdctlError>` - Container status or error
pub fn status(&self) -> Result<ContainerStatus, NerdctlError> {
if let Some(container_id) = &self.container_id {
let result = execute_nerdctl_command(&["inspect", container_id])?;
// Parse the JSON output
match serde_json::from_str::<serde_json::Value>(&result.stdout) {
Ok(json) => {
if let Some(container_json) = json.as_array().and_then(|arr| arr.first()) {
let state = container_json
.get("State")
.and_then(|state| state.get("Status"))
.and_then(|status| status.as_str())
.unwrap_or("unknown")
.to_string();
let status = container_json
.get("State")
.and_then(|state| state.get("Running"))
.and_then(|running| {
if running.as_bool().unwrap_or(false) {
Some("running")
} else {
Some("stopped")
}
})
.unwrap_or("unknown")
.to_string();
let created = container_json
.get("Created")
.and_then(|created| created.as_str())
.unwrap_or("unknown")
.to_string();
let started = container_json
.get("State")
.and_then(|state| state.get("StartedAt"))
.and_then(|started| started.as_str())
.unwrap_or("unknown")
.to_string();
// Get health status if available
let health_status = container_json
.get("State")
.and_then(|state| state.get("Health"))
.and_then(|health| health.get("Status"))
.and_then(|status| status.as_str())
.map(|s| s.to_string());
// Get health check output if available
let health_output = container_json
.get("State")
.and_then(|state| state.get("Health"))
.and_then(|health| health.get("Log"))
.and_then(|log| log.as_array())
.and_then(|log_array| log_array.last())
.and_then(|last_log| last_log.get("Output"))
.and_then(|output| output.as_str())
.map(|s| s.to_string());
Ok(ContainerStatus {
state,
status,
created,
started,
health_status,
health_output,
})
} else {
Err(NerdctlError::JsonParseError("Invalid container inspect JSON".to_string()))
}
},
Err(e) => {
Err(NerdctlError::JsonParseError(format!("Failed to parse container inspect JSON: {}", e)))
}
}
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Get the health status of the container
///
/// # Returns
///
/// * `Result<String, NerdctlError>` - Health status or error
pub fn health_status(&self) -> Result<String, NerdctlError> {
if let Some(container_id) = &self.container_id {
let result = execute_nerdctl_command(&["inspect", "--format", "{{.State.Health.Status}}", container_id])?;
Ok(result.stdout.trim().to_string())
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Get container logs
///
/// # Returns
///
/// * `Result<CommandResult, NerdctlError>` - Command result or error
pub fn logs(&self) -> Result<CommandResult, NerdctlError> {
if let Some(container_id) = &self.container_id {
execute_nerdctl_command(&["logs", container_id])
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
/// Get container resource usage
///
/// # Returns
///
/// * `Result<ResourceUsage, NerdctlError>` - Resource usage or error
pub fn resources(&self) -> Result<ResourceUsage, NerdctlError> {
if let Some(container_id) = &self.container_id {
let result = execute_nerdctl_command(&["stats", "--no-stream", container_id])?;
// Parse the output
let lines: Vec<&str> = result.stdout.lines().collect();
if lines.len() >= 2 {
let headers = lines[0];
let values = lines[1];
let headers_vec: Vec<&str> = headers.split_whitespace().collect();
let values_vec: Vec<&str> = values.split_whitespace().collect();
// Find indices for each metric
let cpu_index = headers_vec.iter().position(|&h| h.contains("CPU")).unwrap_or(0);
let mem_index = headers_vec.iter().position(|&h| h.contains("MEM")).unwrap_or(0);
let mem_perc_index = headers_vec.iter().position(|&h| h.contains("MEM%")).unwrap_or(0);
let net_in_index = headers_vec.iter().position(|&h| h.contains("NET")).unwrap_or(0);
let net_out_index = if net_in_index > 0 { net_in_index + 1 } else { 0 };
let block_in_index = headers_vec.iter().position(|&h| h.contains("BLOCK")).unwrap_or(0);
let block_out_index = if block_in_index > 0 { block_in_index + 1 } else { 0 };
let pids_index = headers_vec.iter().position(|&h| h.contains("PIDS")).unwrap_or(0);
let cpu_usage = if cpu_index < values_vec.len() {
values_vec[cpu_index].to_string()
} else {
"unknown".to_string()
};
let memory_usage = if mem_index < values_vec.len() {
values_vec[mem_index].to_string()
} else {
"unknown".to_string()
};
let memory_limit = if mem_index + 1 < values_vec.len() {
values_vec[mem_index + 1].to_string()
} else {
"unknown".to_string()
};
let memory_percentage = if mem_perc_index < values_vec.len() {
values_vec[mem_perc_index].to_string()
} else {
"unknown".to_string()
};
let network_input = if net_in_index < values_vec.len() {
values_vec[net_in_index].to_string()
} else {
"unknown".to_string()
};
let network_output = if net_out_index < values_vec.len() {
values_vec[net_out_index].to_string()
} else {
"unknown".to_string()
};
let block_input = if block_in_index < values_vec.len() {
values_vec[block_in_index].to_string()
} else {
"unknown".to_string()
};
let block_output = if block_out_index < values_vec.len() {
values_vec[block_out_index].to_string()
} else {
"unknown".to_string()
};
let pids = if pids_index < values_vec.len() {
values_vec[pids_index].to_string()
} else {
"unknown".to_string()
};
Ok(ResourceUsage {
cpu_usage,
memory_usage,
memory_limit,
memory_percentage,
network_input,
network_output,
block_input,
block_output,
pids,
})
} else {
Err(NerdctlError::ConversionError("Failed to parse stats output".to_string()))
}
} else {
Err(NerdctlError::Other("No container ID available".to_string()))
}
}
}

View File

@@ -1,307 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container_test.rs
#[cfg(test)]
mod tests {
use super::super::container_types::Container;
use std::process::Command;
use std::thread;
use std::time::Duration;
// Helper function to check if nerdctl is available
fn is_nerdctl_available() -> bool {
match Command::new("which").arg("nerdctl").output() {
Ok(output) => output.status.success(),
Err(_) => false,
}
}
#[test]
fn test_container_builder_pattern() {
// Skip test if nerdctl is not available
if !is_nerdctl_available() {
println!("Skipping test: nerdctl is not available");
return;
}
// Create a container with builder pattern
let container = Container::new("test-container")
.unwrap()
.with_port("8080:80")
.with_volume("/tmp:/data")
.with_env("TEST_ENV", "test_value")
.with_detach(true);
// Verify container properties
assert_eq!(container.name, "test-container");
assert_eq!(container.ports.len(), 1);
assert_eq!(container.ports[0], "8080:80");
assert_eq!(container.volumes.len(), 1);
assert_eq!(container.volumes[0], "/tmp:/data");
assert_eq!(container.env_vars.len(), 1);
assert_eq!(container.env_vars.get("TEST_ENV").unwrap(), "test_value");
assert_eq!(container.detach, true);
}
#[test]
fn test_container_from_image() {
// Skip test if nerdctl is not available
if !is_nerdctl_available() {
println!("Skipping test: nerdctl is not available");
return;
}
// Create a container from image
let container = Container::from_image("test-container", "alpine:latest").unwrap();
// Verify container properties
assert_eq!(container.name, "test-container");
assert_eq!(container.image.as_ref().unwrap(), "alpine:latest");
}
#[test]
fn test_container_health_check() {
// Skip test if nerdctl is not available
if !is_nerdctl_available() {
println!("Skipping test: nerdctl is not available");
return;
}
// Create a container with health check
let container = Container::new("test-container")
.unwrap()
.with_health_check("curl -f http://localhost/ || exit 1");
// Verify health check
assert!(container.health_check.is_some());
let health_check = container.health_check.unwrap();
assert_eq!(health_check.cmd, "curl -f http://localhost/ || exit 1");
assert!(health_check.interval.is_none());
assert!(health_check.timeout.is_none());
assert!(health_check.retries.is_none());
assert!(health_check.start_period.is_none());
}
#[test]
fn test_container_health_check_options() {
// Skip test if nerdctl is not available
if !is_nerdctl_available() {
println!("Skipping test: nerdctl is not available");
return;
}
// Create a container with health check options
let container = Container::new("test-container")
.unwrap()
.with_health_check_options(
"curl -f http://localhost/ || exit 1",
Some("30s"),
Some("10s"),
Some(3),
Some("5s"),
);
// Verify health check options
assert!(container.health_check.is_some());
let health_check = container.health_check.unwrap();
assert_eq!(health_check.cmd, "curl -f http://localhost/ || exit 1");
assert_eq!(health_check.interval.as_ref().unwrap(), "30s");
assert_eq!(health_check.timeout.as_ref().unwrap(), "10s");
assert_eq!(health_check.retries.unwrap(), 3);
assert_eq!(health_check.start_period.as_ref().unwrap(), "5s");
}
#[test]
#[ignore] // Ignore by default as it requires nerdctl to be installed and running
fn test_container_runtime_and_resources() {
// Check if nerdctl is available and properly configured
let nerdctl_check = super::super::execute_nerdctl_command(&["info"]);
if nerdctl_check.is_err() {
println!("Skipping test: nerdctl is not available or properly configured");
println!("Error: {:?}", nerdctl_check.err());
return;
}
// Create a unique container name for this test
let container_name = format!(
"test-runtime-{}",
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs()
);
// Create and build a container that will use resources
// Use a simple container with a basic command to avoid dependency on external images
let container_result = Container::from_image(&container_name, "busybox:latest")
.unwrap()
.with_detach(true)
.build();
// Check if the build was successful
if container_result.is_err() {
println!("Failed to build container: {:?}", container_result.err());
return;
}
let container = container_result.unwrap();
println!("Container created successfully: {}", container_name);
// Run a simple workload inside the (already running, detached) container
let start_result =
container.exec("sh -c 'for i in $(seq 1 10); do echo $i; sleep 1; done'");
if start_result.is_err() {
println!("Failed to start container: {:?}", start_result.err());
// Try to clean up
let _ = container.remove();
return;
}
println!("Container started successfully");
// Wait for the container to start and consume resources
thread::sleep(Duration::from_secs(3));
// Check container status
let status_result = container.status();
if status_result.is_err() {
println!("Failed to get container status: {:?}", status_result.err());
// Try to clean up
let _ = container.stop();
let _ = container.remove();
return;
}
let status = status_result.unwrap();
println!("Container status: {:?}", status);
// Verify the container is running
if status.status != "running" {
println!("Container is not running, status: {}", status.status);
// Try to clean up
let _ = container.remove();
return;
}
// Check resource usage
let resources_result = container.resources();
if resources_result.is_err() {
println!("Failed to get resource usage: {:?}", resources_result.err());
// Try to clean up
let _ = container.stop();
let _ = container.remove();
return;
}
let resources = resources_result.unwrap();
println!("Container resources: {:?}", resources);
// Verify the container is using memory (if we can get the information)
if resources.memory_usage == "0B" || resources.memory_usage == "unknown" {
println!(
"Warning: Container memory usage is {}",
resources.memory_usage
);
} else {
println!("Container is using memory: {}", resources.memory_usage);
}
// Clean up - stop and remove the container
println!("Stopping container...");
let stop_result = container.stop();
if stop_result.is_err() {
println!("Warning: Failed to stop container: {:?}", stop_result.err());
}
println!("Removing container...");
let remove_result = container.remove();
if remove_result.is_err() {
println!(
"Warning: Failed to remove container: {:?}",
remove_result.err()
);
}
println!("Test completed successfully");
}
#[test]
fn test_container_with_custom_command() {
// Skip test if nerdctl is not available
if !is_nerdctl_available() {
println!("Skipping test: nerdctl is not available");
return;
}
// Create a container with a custom command
let container = Container::new("test-command-container")
.unwrap()
.with_port("8080:80")
.with_volume("/tmp:/data")
.with_env("TEST_ENV", "test_value")
.with_detach(true);
// Verify container properties
assert_eq!(container.name, "test-command-container");
assert_eq!(container.ports.len(), 1);
assert_eq!(container.ports[0], "8080:80");
assert_eq!(container.volumes.len(), 1);
assert_eq!(container.volumes[0], "/tmp:/data");
assert_eq!(container.env_vars.len(), 1);
assert_eq!(container.env_vars.get("TEST_ENV").unwrap(), "test_value");
assert_eq!(container.detach, true);
// Convert the container to a command string that would be used to run it
let command_args = container_to_command_args(&container);
// Verify the command arguments contain all the expected options
assert!(command_args.contains(&"--name".to_string()));
assert!(command_args.contains(&"test-command-container".to_string()));
assert!(command_args.contains(&"-p".to_string()));
assert!(command_args.contains(&"8080:80".to_string()));
assert!(command_args.contains(&"-v".to_string()));
assert!(command_args.contains(&"/tmp:/data".to_string()));
assert!(command_args.contains(&"-e".to_string()));
assert!(command_args.contains(&"TEST_ENV=test_value".to_string()));
assert!(command_args.contains(&"-d".to_string()));
println!("Command args: {:?}", command_args);
}
// Helper function to convert a container to command arguments
fn container_to_command_args(container: &Container) -> Vec<String> {
let mut args = Vec::new();
args.push("run".to_string());
if container.detach {
args.push("-d".to_string());
}
args.push("--name".to_string());
args.push(container.name.clone());
// Add port mappings
for port in &container.ports {
args.push("-p".to_string());
args.push(port.clone());
}
// Add volume mounts
for volume in &container.volumes {
args.push("-v".to_string());
args.push(volume.clone());
}
// Add environment variables
for (key, value) in &container.env_vars {
args.push("-e".to_string());
args.push(format!("{}={}", key, value));
}
// Add image if available
if let Some(image) = &container.image {
args.push(image.clone());
}
args
}
}

View File

@@ -1,97 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/container_types.rs
use std::collections::HashMap;
/// Container struct for nerdctl operations
#[derive(Clone)]
pub struct Container {
/// Name of the container
pub name: String,
/// Container ID
pub container_id: Option<String>,
/// Base image (if created from an image)
pub image: Option<String>,
/// Configuration options
pub config: HashMap<String, String>,
/// Port mappings
pub ports: Vec<String>,
/// Volume mounts
pub volumes: Vec<String>,
/// Environment variables
pub env_vars: HashMap<String, String>,
/// Network to connect to
pub network: Option<String>,
/// Network aliases
pub network_aliases: Vec<String>,
/// CPU limit
pub cpu_limit: Option<String>,
/// Memory limit
pub memory_limit: Option<String>,
/// Memory swap limit
pub memory_swap_limit: Option<String>,
/// CPU shares
pub cpu_shares: Option<String>,
/// Restart policy
pub restart_policy: Option<String>,
/// Health check
pub health_check: Option<HealthCheck>,
/// Whether to run in detached mode
pub detach: bool,
/// Snapshotter to use
pub snapshotter: Option<String>,
}
/// Health check configuration for a container
#[derive(Debug, Clone)]
pub struct HealthCheck {
/// Command to run for health check
pub cmd: String,
/// Time between running the check (default: 30s)
pub interval: Option<String>,
/// Maximum time to wait for a check to complete (default: 30s)
pub timeout: Option<String>,
/// Number of consecutive failures needed to consider unhealthy (default: 3)
pub retries: Option<u32>,
/// Start period for the container to initialize before counting retries (default: 0s)
pub start_period: Option<String>,
}
/// Container status information
#[derive(Debug, Clone)]
pub struct ContainerStatus {
/// Container state (e.g., running, stopped)
pub state: String,
/// Container status
pub status: String,
/// Creation time
pub created: String,
/// Start time
pub started: String,
/// Health status (if health check is configured)
pub health_status: Option<String>,
/// Health check output (if health check is configured)
pub health_output: Option<String>,
}
/// Container resource usage information
#[derive(Debug, Clone)]
pub struct ResourceUsage {
/// CPU usage percentage
pub cpu_usage: String,
/// Memory usage
pub memory_usage: String,
/// Memory limit
pub memory_limit: String,
/// Memory usage percentage
pub memory_percentage: String,
/// Network input
pub network_input: String,
/// Network output
pub network_output: String,
/// Block input
pub block_input: String,
/// Block output
pub block_output: String,
/// PIDs
pub pids: String,
}

View File

@@ -1,40 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/health_check.rs
use super::container_types::HealthCheck;
impl HealthCheck {
/// Create a new health check with the given command
pub fn new(cmd: &str) -> Self {
Self {
cmd: cmd.to_string(),
interval: None,
timeout: None,
retries: None,
start_period: None,
}
}
/// Set the interval between health checks
pub fn with_interval(mut self, interval: &str) -> Self {
self.interval = Some(interval.to_string());
self
}
/// Set the timeout for health checks
pub fn with_timeout(mut self, timeout: &str) -> Self {
self.timeout = Some(timeout.to_string());
self
}
/// Set the number of retries for health checks
pub fn with_retries(mut self, retries: u32) -> Self {
self.retries = Some(retries);
self
}
/// Set the start period for health checks
pub fn with_start_period(mut self, start_period: &str) -> Self {
self.start_period = Some(start_period.to_string());
self
}
}

View File

@@ -1,79 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/health_check_script.rs
use std::fs;
use std::path::Path;
use std::os::unix::fs::PermissionsExt;
/// Handles health check scripts for containers
///
/// This module provides functionality to create and manage health check scripts
/// for containers, allowing for more complex health checks than simple commands.
/// Converts a health check command or script to a usable command
///
/// If the input is a single-line command, it is returned as is.
/// If the input is a multi-line script, it is written to a file in the
/// /root/hero/var/containers directory and the path to that file is returned.
///
/// # Arguments
///
/// * `cmd` - The command or script to convert
/// * `container_name` - The name of the container, used to create a unique script name
///
/// # Returns
///
/// * `String` - The command to use for the health check
pub fn prepare_health_check_command(cmd: &str, container_name: &str) -> String {
// If the command is a multiline script, write it to a file
if cmd.contains("\n") {
// Create the directory if it doesn't exist
let dir_path = "/root/hero/var/containers";
if fs::create_dir_all(dir_path).is_err() {
// If we can't create the directory, just use the command as is
return cmd.to_string();
}
// Create a unique filename based on container name
let script_path = format!("{}/healthcheck_{}.sh", dir_path, container_name);
// Write the script to the file
if fs::write(&script_path, cmd).is_err() {
// If we can't write the file, just use the command as is
return cmd.to_string();
}
// Make the script executable
if let Ok(metadata) = fs::metadata(&script_path) {
let mut perms = metadata.permissions();
perms.set_mode(0o755);
if fs::set_permissions(&script_path, perms).is_err() {
// If we can't set permissions, just use the script path with sh
return format!("sh {}", script_path);
}
} else {
// If we can't get metadata, just use the script path with sh
return format!("sh {}", script_path);
}
// Use the script path as the command
script_path
} else {
// If it's a single line command, use it as is
cmd.to_string()
}
}
/// Cleans up health check scripts for a container
///
/// # Arguments
///
/// * `container_name` - The name of the container whose health check scripts should be cleaned up
pub fn cleanup_health_check_scripts(container_name: &str) {
let dir_path = "/root/hero/var/containers";
let script_path = format!("{}/healthcheck_{}.sh", dir_path, container_name);
// Try to remove the script file if it exists
if Path::new(&script_path).exists() {
let _ = fs::remove_file(script_path);
}
}
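
A minimal sketch of how these two functions fit together (the container name and commands are illustrative):

```rust
// A single-line command passes through unchanged.
let single = prepare_health_check_command("curl -f http://localhost/health", "web");
assert_eq!(single, "curl -f http://localhost/health");

// A multi-line script is persisted under /root/hero/var/containers
// and the resulting script path is returned instead.
let script = "#!/bin/sh\ncurl -f http://localhost/health || exit 1";
let _cmd = prepare_health_check_command(script, "web");

// Remove the generated script once the container is gone.
cleanup_health_check_scripts("web");
```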

View File

@@ -1,84 +0,0 @@
// File: /root/code/git.threefold.info/herocode/sal/src/virt/nerdctl/images.rs
use super::NerdctlError;
use crate::process::CommandResult;
use crate::virt::nerdctl::execute_nerdctl_command;
use serde::{Deserialize, Serialize};
/// Represents a container image
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Image {
/// Image ID
pub id: String,
/// Image repository
pub repository: String,
/// Image tag
pub tag: String,
/// Image size
pub size: String,
/// Creation timestamp
pub created: String,
}
/// List images in local storage
pub fn images() -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["images"])
}
/// Remove one or more images
///
/// # Arguments
///
/// * `image` - Image ID or name
pub fn image_remove(image: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["rmi", image])
}
/// Push an image to a registry
///
/// # Arguments
///
/// * `image` - Image name
/// * `destination` - Destination registry URL
pub fn image_push(image: &str, destination: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["push", image, destination])
}
/// Add an additional name to a local image
///
/// # Arguments
///
/// * `image` - Image ID or name
/// * `new_name` - New name for the image
pub fn image_tag(image: &str, new_name: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["tag", image, new_name])
}
/// Pull an image from a registry
///
/// # Arguments
///
/// * `image` - Image name
pub fn image_pull(image: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["pull", image])
}
/// Commit a container to an image
///
/// # Arguments
///
/// * `container` - Container ID or name
/// * `image_name` - New name for the image
pub fn image_commit(container: &str, image_name: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["commit", container, image_name])
}
/// Build an image using a Dockerfile
///
/// # Arguments
///
/// * `tag` - Tag for the new image
/// * `context_path` - Path to the build context
pub fn image_build(tag: &str, context_path: &str) -> Result<CommandResult, NerdctlError> {
execute_nerdctl_command(&["build", "-t", tag, context_path])
}
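
A minimal sketch of the functions above in a caller that propagates `NerdctlError` (the image and registry names are illustrative):

```rust
fn publish_alpine() -> Result<(), NerdctlError> {
    image_pull("alpine:latest")?;
    image_tag("alpine:latest", "registry.example.com/alpine:latest")?;
    image_push("registry.example.com/alpine:latest", "registry.example.com")?;
    images()?; // list what is now in local storage
    Ok(())
}
```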

View File

@@ -1,57 +0,0 @@
mod images;
mod cmd;
mod container_types;
mod container;
mod container_builder;
mod health_check;
mod health_check_script;
mod container_operations;
mod container_functions;
#[cfg(test)]
mod container_test;
use std::fmt;
use std::error::Error;
use std::io;
/// Error type for nerdctl operations
#[derive(Debug)]
pub enum NerdctlError {
/// The nerdctl command failed to execute
CommandExecutionFailed(io::Error),
/// The nerdctl command executed but returned an error
CommandFailed(String),
/// Failed to parse JSON output
JsonParseError(String),
/// Failed to convert data
ConversionError(String),
/// Generic error
Other(String),
}
impl fmt::Display for NerdctlError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NerdctlError::CommandExecutionFailed(e) => write!(f, "Failed to execute nerdctl command: {}", e),
NerdctlError::CommandFailed(e) => write!(f, "Nerdctl command failed: {}", e),
NerdctlError::JsonParseError(e) => write!(f, "Failed to parse JSON: {}", e),
NerdctlError::ConversionError(e) => write!(f, "Conversion error: {}", e),
NerdctlError::Other(e) => write!(f, "{}", e),
}
}
}
impl Error for NerdctlError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
NerdctlError::CommandExecutionFailed(e) => Some(e),
_ => None,
}
}
}
pub use images::*;
pub use cmd::*;
pub use container_types::{Container, HealthCheck, ContainerStatus, ResourceUsage};
pub use container_functions::*;
pub use health_check_script::*;
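
A minimal sketch of handling `NerdctlError` at a call site, distinguishing the variant that carries a source error:

```rust
match images() {
    Ok(_result) => println!("images listed successfully"),
    Err(NerdctlError::CommandExecutionFailed(e)) => {
        eprintln!("could not execute nerdctl: {}", e);
    }
    Err(e) => eprintln!("nerdctl operation failed: {}", e),
}
```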

View File

@@ -1,503 +0,0 @@
# nerdctl Essentials
This guide provides a comprehensive overview of essential nerdctl functionality to help you get started quickly. nerdctl is a Docker-compatible CLI for containerd, with additional features specifically designed for containerd environments.
## Introduction
nerdctl is a Docker-compatible CLI for containerd. It provides the same user experience as the Docker CLI (`docker`) but leverages the more efficient containerd container runtime. Key differences and advantages include:
- Direct integration with containerd (no extra daemon required)
- Support for containerd-specific features
- First-class support for rootless mode
- Compatibility with Docker commands
- Additional nerdctl-specific commands
## Basic Configuration
nerdctl can be configured using the `nerdctl.toml` configuration file:
- Rootful mode: `/etc/nerdctl/nerdctl.toml`
- Rootless mode: `~/.config/nerdctl/nerdctl.toml`
Example configuration:
```toml
debug = false
debug_full = false
address = "unix:///run/containerd/containerd.sock"
namespace = "default"
snapshotter = "overlayfs"
cgroup_manager = "systemd"
hosts_dir = ["/etc/containerd/certs.d", "/etc/docker/certs.d"]
```
Common configuration properties:
| Property | CLI Flag | Description |
|---------------------|-----------------------------------|----------------------------|
| `address` | `--address`, `--host`, `-a`, `-H` | containerd address |
| `namespace` | `--namespace`, `-n` | containerd namespace |
| `snapshotter` | `--snapshotter` | containerd snapshotter |
| `cni_path` | `--cni-path` | CNI binary directory |
| `data_root` | `--data-root` | Persistent state directory |
| `insecure_registry` | `--insecure-registry` | Allow insecure registry |
## Container Management
### Running Containers
**Run a container**:
```
nerdctl run [OPTIONS] IMAGE [COMMAND] [ARG...]
```
Common options:
- `-i, --interactive`: Keep STDIN open
- `-t, --tty`: Allocate a pseudo-TTY
- `-d, --detach`: Run container in background
- `--name`: Assign a name to the container
- `-p, --publish`: Publish container's port to the host
- `-v, --volume`: Bind mount a volume
- `-e, --env`: Set environment variables
- `--rm`: Automatically remove the container when it exits
- `--restart=(no|always|on-failure|unless-stopped)`: Restart policy
- `--net, --network`: Connect container to a network
Examples:
```bash
# Run an interactive container and automatically remove it when it exits
nerdctl run -it --rm alpine sh
# Run a detached container with port mapping
nerdctl run -d --name nginx -p 8080:80 nginx
# Run a container with a volume mount
nerdctl run -it --rm -v $(pwd):/data alpine ls /data
```
### Managing Containers
**List containers**:
```
nerdctl ps [OPTIONS]
```
Options:
- `-a, --all`: Show all containers (default shows just running)
- `-q, --quiet`: Only display container IDs
- `-s, --size`: Display total file sizes
**Stop a container**:
```
nerdctl stop [OPTIONS] CONTAINER [CONTAINER...]
```
**Start a container**:
```
nerdctl start [OPTIONS] CONTAINER [CONTAINER...]
```
**Remove a container**:
```
nerdctl rm [OPTIONS] CONTAINER [CONTAINER...]
```
Options:
- `-f, --force`: Force removal of running container
- `-v, --volumes`: Remove anonymous volumes
**View container logs**:
```
nerdctl logs [OPTIONS] CONTAINER
```
Options:
- `-f, --follow`: Follow log output
- `--since`: Show logs since timestamp
- `-t, --timestamps`: Show timestamps
- `-n, --tail`: Number of lines to show from the end of logs
**Execute a command in a running container**:
```
nerdctl exec [OPTIONS] CONTAINER COMMAND [ARG...]
```
Options:
- `-i, --interactive`: Keep STDIN open
- `-t, --tty`: Allocate a pseudo-TTY
- `-d, --detach`: Detached mode
- `-w, --workdir`: Working directory
- `-e, --env`: Set environment variables
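Examples (using the `nginx` container started above):
```bash
# Follow the last 100 log lines of the container
nerdctl logs -f -n 100 nginx
# Open an interactive shell inside the running container
nerdctl exec -it nginx sh
```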
## Image Management
### Working with Images
**List images**:
```
nerdctl images [OPTIONS]
```
Options:
- `-a, --all`: Show all images
- `-q, --quiet`: Only show numeric IDs
- `--digests`: Show digests
**Pull an image**:
```
nerdctl pull [OPTIONS] NAME[:TAG|@DIGEST]
```
Options:
- `--platform=(amd64|arm64|...)`: Pull content for specific platform
- `-q, --quiet`: Suppress verbose output
**Push an image**:
```
nerdctl push [OPTIONS] NAME[:TAG]
```
**Build an image**:
```
nerdctl build [OPTIONS] PATH
```
Options:
- `-t, --tag`: Name and optionally tag the image
- `-f, --file`: Name of the Dockerfile
- `--build-arg`: Set build-time variables
- `--no-cache`: Do not use cache when building
**Remove an image**:
```
nerdctl rmi [OPTIONS] IMAGE [IMAGE...]
```
Options:
- `-f, --force`: Force removal
**Tag an image**:
```
nerdctl tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]
```
**Save an image to a tar archive**:
```
nerdctl save [OPTIONS] IMAGE [IMAGE...]
```
Options:
- `-o, --output`: Write to a file
**Load an image from a tar archive**:
```
nerdctl load [OPTIONS]
```
Options:
- `-i, --input`: Read from a tar archive file
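Example round trip through a tar archive:
```bash
# Save an image to a tar archive, then load it back
nerdctl save -o nginx.tar nginx:latest
nerdctl load -i nginx.tar
```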
## Network Management
### Working with Networks
**List networks**:
```
nerdctl network ls [OPTIONS]
```
**Create a network**:
```
nerdctl network create [OPTIONS] NETWORK
```
Common options:
- `-d, --driver=(bridge|macvlan|ipvlan)`: Driver to manage the network
- `--subnet`: Subnet in CIDR format (e.g., "10.5.0.0/16")
- `--gateway`: Gateway for the subnet
- `--ipam-driver=(default|host-local|dhcp)`: IP address management driver
**Remove a network**:
```
nerdctl network rm NETWORK [NETWORK...]
```
**Inspect a network**:
```
nerdctl network inspect [OPTIONS] NETWORK [NETWORK...]
```
**Prune networks**:
```
nerdctl network prune [OPTIONS]
```
### Network Types
nerdctl supports the following network types:
- `bridge` (default on Linux): Creates a bridge interface on the host
- `host`: Uses the host's network stack
- `none`: No networking
- `macvlan`: Connects container interfaces directly to host interfaces
- `ipvlan`: Similar to macvlan but shares host's IP address
Example creating a macvlan network:
```bash
nerdctl network create macnet --driver macvlan \
--subnet=192.168.5.0/24 \
--gateway=192.168.5.1 \
-o parent=eth0
```
## Volume Management
### Working with Volumes
**List volumes**:
```
nerdctl volume ls [OPTIONS]
```
**Create a volume**:
```
nerdctl volume create [OPTIONS] [VOLUME]
```
**Remove a volume**:
```
nerdctl volume rm [OPTIONS] VOLUME [VOLUME...]
```
**Inspect a volume**:
```
nerdctl volume inspect [OPTIONS] VOLUME [VOLUME...]
```
**Prune volumes**:
```
nerdctl volume prune [OPTIONS]
```
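Example lifecycle of a named volume (the volume name is illustrative):
```bash
# Create a volume, write to it from a container, then inspect it
nerdctl volume create mydata
nerdctl run -it --rm -v mydata:/data alpine sh -c 'echo hello > /data/file'
nerdctl volume inspect mydata
```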
### Volume Flags for Containers
Volume-related flags when running containers:
- `-v, --volume`: Bind mount a volume (format: `SRC:DST[:OPTIONS]`)
- `--mount`: Attach a filesystem mount to the container
- `--tmpfs`: Mount a tmpfs directory
Volume options:
- `rw`: Read/write (default)
- `ro`: Read-only
- `rro`: Recursive read-only (kernel >= 5.12)
- `shared`, `slave`, `private`: Non-recursive propagation
- `rshared`, `rslave`, `rprivate`: Recursive propagation
Examples:
```bash
# Mount a host directory
nerdctl run -it --rm -v /host/path:/container/path:ro alpine ls /container/path
# Use tmpfs
nerdctl run -it --rm --tmpfs /tmp:size=64m,exec alpine ls /tmp
```
## Compose
nerdctl includes Docker Compose compatibility, allowing you to define and run multi-container applications.
**Run Compose applications**:
```
nerdctl compose up [OPTIONS]
```
Options:
- `-d, --detach`: Run containers in the background
- `--build`: Build images before starting containers
- `--no-build`: Don't build images, even if they're missing
- `--force-recreate`: Force recreation of containers
**Stop Compose applications**:
```
nerdctl compose down [OPTIONS]
```
Options:
- `-v, --volumes`: Remove named volumes and anonymous volumes
**View Compose logs**:
```
nerdctl compose logs [OPTIONS] [SERVICE...]
```
Other Compose commands:
- `nerdctl compose build`: Build service images
- `nerdctl compose ps`: List containers
- `nerdctl compose pull`: Pull service images
- `nerdctl compose exec`: Execute a command in a running container
- `nerdctl compose restart`: Restart services
Example `compose.yml`:
```yaml
version: "3.8"
services:
web:
image: nginx
ports:
- "8080:80"
volumes:
- ./html:/usr/share/nginx/html
db:
image: postgres
environment:
POSTGRES_PASSWORD: example
volumes:
- db-data:/var/lib/postgresql/data
volumes:
db-data:
```
## Rootless Mode
nerdctl supports rootless containers, allowing unprivileged users to create and manage containers. This provides better security isolation compared to running everything as root.
### Setup Rootless Mode
1. Install required dependencies (see https://rootlesscontaine.rs/getting-started/common/)
2. Set up rootless containerd:
```
containerd-rootless-setuptool.sh install
```
3. Enable lingering for your user (to keep services running after logout):
```
sudo loginctl enable-linger $(whoami)
```
4. For building images, install BuildKit in rootless mode:
```
containerd-rootless-setuptool.sh install-buildkit
```
When running in rootless mode, nerdctl automatically uses the appropriate socket and configuration.
### Limitations and Considerations
- Resource limits require cgroup v2 and systemd
- By default, ports below 1024 cannot be published (use slirp4netns port driver or configure capabilities)
- Some file system operations might be restricted
- Network performance can be slower (consider using bypass4netns to improve performance)
## Registry Authentication
nerdctl uses the same authentication configuration as Docker, located in `${DOCKER_CONFIG}/config.json` (default: `$HOME/.docker/config.json`).
**Log in to a registry**:
```
nerdctl login [OPTIONS] [SERVER]
```
Options:
- `-u, --username`: Username
- `-p, --password`: Password
- `--password-stdin`: Take the password from stdin
**Log out from a registry**:
```
nerdctl logout [SERVER]
```
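Example non-interactive login (the registry host is illustrative):
```bash
# Read the password from stdin to avoid exposing it in the shell history
echo "$REGISTRY_PASSWORD" | nerdctl login -u username --password-stdin registry.example.com
```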
### Registry Certificates
For private registries with custom certificates, place certificates in:
- Rootful: `/etc/containerd/certs.d/<HOST:PORT>/` or `/etc/docker/certs.d/<HOST:PORT>/`
- Rootless: `~/.config/containerd/certs.d/<HOST:PORT>/` or `~/.config/docker/certs.d/<HOST:PORT>/`
## Advanced Features
### GPU Support
nerdctl supports NVIDIA GPU passthrough to containers:
```
nerdctl run -it --rm --gpus all nvidia/cuda:12.3.1-base-ubuntu20.04 nvidia-smi
```
Options for `--gpus`:
- `all`: Use all available GPUs
- Custom configuration: `--gpus '"capabilities=utility,compute",device=GPU-UUID'`
### BuildKit Integration
BuildKit provides advanced image building capabilities:
1. Set up BuildKit (different for rootful and rootless):
```
# Rootless with containerd worker
CONTAINERD_NAMESPACE=default containerd-rootless-setuptool.sh install-buildkit-containerd
```
2. Use advanced build features:
```
nerdctl build --output=type=local,dest=./output --platform=linux/amd64,linux/arm64 .
```
### Namespace Management
**Create a namespace**:
```
nerdctl namespace create NAMESPACE
```
**List namespaces**:
```
nerdctl namespace ls
```
**Remove a namespace**:
```
nerdctl namespace remove NAMESPACE
```
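Example of working in a dedicated namespace (names are illustrative):
```bash
# Create a namespace and run a container inside it
nerdctl namespace create test
nerdctl --namespace test run -d --name web nginx
nerdctl --namespace test ps
```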
### Security Features
nerdctl supports various security features:
- `--security-opt seccomp=profile.json`: Apply a seccomp profile
- `--security-opt apparmor=profile`: Apply an AppArmor profile
- `--cap-add`/`--cap-drop`: Add or drop Linux capabilities
- `--privileged`: Give extended privileges to the container
## Typical Workflow Example
```bash
# Create a container from an existing image
container=$(nerdctl run -d --name my-nginx nginx:latest)
# Execute a command in the container
nerdctl exec $container apt-get update
nerdctl exec $container apt-get install -y curl
# Copy local configuration files to the container
nerdctl cp ./nginx.conf $container:/etc/nginx/nginx.conf
# Commit the container to create a new image
nerdctl commit $container my-custom-nginx:latest
# Stop and remove the container
nerdctl stop $container
nerdctl rm $container
# Create a new container from our custom image
nerdctl run -d --name nginx-custom -p 8080:80 my-custom-nginx:latest
# Build an image using a Dockerfile
nerdctl build -t my-app:latest .
# Tag and push the image to a registry
nerdctl tag my-custom-nginx:latest docker.io/username/my-custom-nginx:latest
nerdctl push docker.io/username/my-custom-nginx:latest
# List images
nerdctl images
# List containers
nerdctl ps -a
```

View File

@@ -1,103 +0,0 @@
# Setting up `nerdctl build` with BuildKit
`nerdctl build` (and `nerdctl compose build`) relies on [BuildKit](https://github.com/moby/buildkit).
To use it, you need to set up BuildKit.
BuildKit has 2 types of backends.
- **containerd worker**: BuildKit relies on containerd to manage containers and images, etc. containerd needs to be up-and-running on the host.
- **OCI worker**: BuildKit manages containers and images, etc. containerd isn't needed. This worker relies on runc for container execution.
You need to set up BuildKit with either of the above workers.
Note that OCI worker cannot access base images (`FROM` images in Dockerfiles) managed by containerd.
Thus you cannot let `nerdctl build` use containerd-managed images as the base image.
They include images previously built using `nerdctl build`.
For example, the following build `bar` fails with OCI worker because it tries to use the previously built and containerd-managed image `foo`.
```console
$ mkdir -p /tmp/ctx && cat <<EOF > /tmp/ctx/Dockerfile
FROM ghcr.io/stargz-containers/ubuntu:20.04-org
RUN echo hello
EOF
$ nerdctl build -t foo /tmp/ctx
$ cat <<EOF > /tmp/ctx/Dockerfile
FROM foo
RUN echo bar
EOF
$ nerdctl build -t bar /tmp/ctx
```
This limitation can be avoided using containerd worker as mentioned later.
## Setting up BuildKit with containerd worker
### Rootless
| :zap: Requirement | nerdctl >= 0.18, BuildKit >= 0.10 |
|-------------------|-----------------------------------|
```
$ CONTAINERD_NAMESPACE=default containerd-rootless-setuptool.sh install-buildkit-containerd
```
`containerd-rootless-setuptool.sh` is aware of `CONTAINERD_NAMESPACE` and `CONTAINERD_SNAPSHOTTER` envvars.
It installs buildkitd to the specified containerd namespace.
This allows BuildKit to use containerd-managed images in that namespace as the base image.
Note that BuildKit can't use images in other namespaces as of now.
If the `CONTAINERD_NAMESPACE` envvar is not specified, this script configures buildkitd to use the "buildkit" namespace (not the "default" namespace).
You can install an additional buildkitd process in a different namespace by executing this script with the namespace specified via `CONTAINERD_NAMESPACE`.
BuildKit will expose the socket at `$XDG_RUNTIME_DIR/buildkit-$CONTAINERD_NAMESPACE/buildkitd.sock` if `CONTAINERD_NAMESPACE` is specified.
If `CONTAINERD_NAMESPACE` is not specified, that location will be `$XDG_RUNTIME_DIR/buildkit/buildkitd.sock`.
### Rootful
```
$ sudo systemctl enable --now buildkit
```
Then add the following configuration to `/etc/buildkit/buildkitd.toml` to enable containerd worker.
```toml
[worker.oci]
enabled = false
[worker.containerd]
enabled = true
# namespace should be "k8s.io" for Kubernetes (including Rancher Desktop)
namespace = "default"
```
## Setting up BuildKit with OCI worker
### Rootless
```
$ containerd-rootless-setuptool.sh install-buildkit
```
As mentioned above, BuildKit with this configuration cannot use images managed by containerd.
They include images previously built with `nerdctl build`.
BuildKit will expose the socket at `$XDG_RUNTIME_DIR/buildkit/buildkitd.sock`.
### Rootful
```
$ sudo systemctl enable --now buildkit
```
## Which BuildKit socket will nerdctl use?
You can specify BuildKit address for `nerdctl build` using `--buildkit-host` flag or `BUILDKIT_HOST` envvar.
When the BuildKit address isn't specified, nerdctl tries the following default BuildKit addresses in order and uses the first available one.
- `<runtime directory>/buildkit-<current namespace>/buildkitd.sock`
- `<runtime directory>/buildkit-default/buildkitd.sock`
- `<runtime directory>/buildkit/buildkitd.sock`
For example, if you run rootless nerdctl with the `test` containerd namespace, it tries to use `$XDG_RUNTIME_DIR/buildkit-test/buildkitd.sock` by default, then falls back to `$XDG_RUNTIME_DIR/buildkit-default/buildkitd.sock` and `$XDG_RUNTIME_DIR/buildkit/buildkitd.sock`.
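For example, to point `nerdctl build` at one of these sockets explicitly:
```bash
# Equivalent to setting the BUILDKIT_HOST envvar
nerdctl build --buildkit-host=unix://$XDG_RUNTIME_DIR/buildkit-default/buildkitd.sock -t myimage .
```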

View File

@@ -1,166 +0,0 @@
# Using CNI with nerdctl
nerdctl uses CNI plugins for its container networking; you can select a network with
either the `--network` or `--net` option.
## Basic networks
nerdctl supports some basic types of CNI plugins without any configuration
needed (the CNI plugins themselves must be installed). On Linux the basic
CNI plugin types are `bridge`, `portmap`, `firewall`, and `tuning`; on Windows
only the `nat` plugin type is supported.
If you don't set any network options, the default network is `bridge` on Linux
and `nat` on Windows.
Configuration of the default `bridge` network on Linux:
```json
{
"cniVersion": "1.0.0",
"name": "bridge",
"plugins": [
{
"type": "bridge",
"bridge": "nerdctl0",
"isGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"routes": [{ "dst": "0.0.0.0/0" }],
"ranges": [
[
{
"subnet": "10.4.0.0/24",
"gateway": "10.4.0.1"
}
]
]
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
},
{
"type": "firewall",
"ingressPolicy": "same-bridge"
},
{
"type": "tuning"
}
]
}
```
## Bridge isolation
nerdctl >= 0.18 sets the `ingressPolicy` to `same-bridge` when `firewall` plugin >= 1.1.0 is installed.
This `ingressPolicy` replaces the CNI `isolation` plugin used in nerdctl <= 0.17.
When `firewall` plugin >= 1.1.0 is not found, nerdctl does not enable the bridge isolation.
This means a container in `--net=foo` can connect to a container in `--net=bar`.
## macvlan/IPvlan networks
nerdctl also supports the macvlan and IPvlan network drivers.
To create a `macvlan` network that bridges with a given physical network interface, use `--driver macvlan` with the
`nerdctl network create` command.
```
# nerdctl network create mac0 --driver macvlan \
--subnet=192.168.5.0/24
--gateway=192.168.5.2
-o parent=eth0
```
You can specify the `parent`, which is the host interface the traffic will physically go through;
it defaults to the default route interface.
The `subnet` should be within the same network as that interface.
An easier way is to use DHCP to assign the IP:
```
# nerdctl network create mac0 --driver macvlan --ipam-driver=dhcp
```
Using `--driver ipvlan` creates an `ipvlan` network; the default mode for IPvlan is `l2`.
## DHCP host-name and other DHCP options
nerdctl automatically sets the DHCP host-name option to the hostname value of the container.
Furthermore, on network creation, nerdctl supports the ability to set other DHCP options through `--ipam-options`.
Currently, the following options are supported by the DHCP plugin:
```
dhcp-client-identifier
subnet-mask
routers
user-class
vendor-class-identifier
```
For example:
```
# nerdctl network create --driver macvlan \
--ipam-driver dhcp \
--ipam-opt 'vendor-class-identifier={"type": "provide", "value": "Hey! Its me!"}' \
my-dhcp-net
```
## Custom networks
You can also customize your CNI network by providing configuration files.
When rootful, the expected root location is `/etc/cni/net.d`.
For rootless, the expected root location is `~/.config/cni/net.d/`.
Configuration files (like `10-mynet.conf`) can be placed either in the root location,
or under a subfolder.
If in the root location, this network will be available to all nerdctl namespaces.
If placed in a subfolder, it will be available only to the identically named namespace.
For example, suppose you have one configuration file (`/etc/cni/net.d/10-mynet.conf`)
for a `bridge` network:
```json
{
"cniVersion": "1.0.0",
"name": "mynet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "172.19.0.0/24",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}
```
This will configure a new CNI network with the name `mynet`, and you can use
this network to create a container in any namespace:
```console
# nerdctl run -it --net mynet --rm alpine ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
3: eth0@if6120: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 5e:5b:3f:0c:36:56 brd ff:ff:ff:ff:ff:ff
inet 172.19.0.51/24 brd 172.19.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5c5b:3fff:fe0c:3656/64 scope link tentative
valid_lft forever preferred_lft forever
```

File diff suppressed because it is too large

View File

@@ -1,44 +0,0 @@
# nerdctl compose
| :zap: Requirement | nerdctl >= 0.8 |
|-------------------|----------------|
## Usage
The `nerdctl compose` CLI is designed to be compatible with `docker-compose`.
```console
$ nerdctl compose up -d
$ nerdctl compose down
```
See the Command Reference in [`../README.md`](../README.md).
## Spec conformance
`nerdctl compose` implements [The Compose Specification](https://github.com/compose-spec/compose-spec),
which was derived from [Docker Compose file version 3 specification](https://docs.docker.com/compose/compose-file/compose-file-v3/).
### Unimplemented YAML fields
- Fields that correspond to unimplemented `docker run` flags, e.g., `services.<SERVICE>.links` (corresponds to `docker run --link`)
- Fields that correspond to unimplemented `docker build` flags, e.g., `services.<SERVICE>.build.extra_hosts` (corresponds to `docker build --add-host`)
- `services.<SERVICE>.credential_spec`
- `services.<SERVICE>.deploy.update_config`
- `services.<SERVICE>.deploy.rollback_config`
- `services.<SERVICE>.deploy.resources.reservations`
- `services.<SERVICE>.deploy.placement`
- `services.<SERVICE>.deploy.endpoint_mode`
- `services.<SERVICE>.healthcheck`
- `services.<SERVICE>.stop_grace_period`
- `services.<SERVICE>.stop_signal`
- `configs.<CONFIG>.external`
- `secrets.<SECRET>.external`
### Incompatibility
#### `services.<SERVICE>.build.context`
- The value must be a local directory path, not a URL.
#### `services.<SERVICE>.secrets`, `services.<SERVICE>.configs`
- `uid`, `gid`: Cannot be specified. The default value is not propagated from `USER` instruction of Dockerfile.
The file owner corresponds to the original file on the host.
- `mode`: Cannot be specified. The file is mounted as read-only, with permission bits that correspond to the original file on the host.

View File

@@ -1,62 +0,0 @@
# Configuring nerdctl with `nerdctl.toml`
| :zap: Requirement | nerdctl >= 0.16 |
|-------------------|-----------------|
This document describes the configuration file of nerdctl (`nerdctl.toml`).
This file is unrelated to the configuration file of containerd (`config.toml`) .
## File path
- Rootful mode: `/etc/nerdctl/nerdctl.toml`
- Rootless mode: `~/.config/nerdctl/nerdctl.toml`
The path can be overridden with `$NERDCTL_TOML`.
## Example
```toml
# This is an example of /etc/nerdctl/nerdctl.toml .
# Unrelated to the daemon's /etc/containerd/config.toml .
debug = false
debug_full = false
address = "unix:///run/k3s/containerd/containerd.sock"
namespace = "k8s.io"
snapshotter = "stargz"
cgroup_manager = "cgroupfs"
hosts_dir = ["/etc/containerd/certs.d", "/etc/docker/certs.d"]
experimental = true
```
## Properties
| TOML property | CLI flag | Env var | Description | Availability \*1 |
|---------------------|------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------|
| `debug` | `--debug` | | Debug mode | Since 0.16.0 |
| `debug_full` | `--debug-full` | | Debug mode (with full output) | Since 0.16.0 |
| `address` | `--address`,`--host`,`-a`,`-H` | `$CONTAINERD_ADDRESS` | containerd address | Since 0.16.0 |
| `namespace` | `--namespace`,`-n` | `$CONTAINERD_NAMESPACE` | containerd namespace | Since 0.16.0 |
| `snapshotter` | `--snapshotter`,`--storage-driver` | `$CONTAINERD_SNAPSHOTTER` | containerd snapshotter | Since 0.16.0 |
| `cni_path` | `--cni-path` | `$CNI_PATH` | CNI binary directory | Since 0.16.0 |
| `cni_netconfpath` | `--cni-netconfpath` | `$NETCONFPATH` | CNI config directory | Since 0.16.0 |
| `data_root` | `--data-root` | | Persistent state directory | Since 0.16.0 |
| `cgroup_manager` | `--cgroup-manager` | | cgroup manager | Since 0.16.0 |
| `insecure_registry` | `--insecure-registry` | | Allow insecure registry | Since 0.16.0 |
| `hosts_dir` | `--hosts-dir` | | `certs.d` directory | Since 0.16.0 |
| `experimental` | `--experimental` | `NERDCTL_EXPERIMENTAL` | Enable [experimental features](experimental.md) | Since 0.22.3 |
| `host_gateway_ip` | `--host-gateway-ip` | `NERDCTL_HOST_GATEWAY_IP` | IP address that the special 'host-gateway' string in --add-host resolves to. Defaults to the IP address of the host. It has no effect without setting --add-host | Since 1.3.0 |
| `bridge_ip` | `--bridge-ip` | `NERDCTL_BRIDGE_IP` | IP address for the default nerdctl bridge network, e.g., 10.1.100.1/24 | Since 2.0.1 |
| `kube_hide_dupe` | `--kube-hide-dupe` | | Deduplicate images for Kubernetes with namespace k8s.io, no more redundant <none> ones are displayed | Since 2.0.3 |
The properties are parsed in the following precedence:
1. CLI flag
2. Env var
3. TOML property
4. Built-in default value (Run `nerdctl --help` to see the default values)
\*1: Availability of the TOML properties
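For example, with `namespace = "k8s.io"` set in `nerdctl.toml`, the CLI flag still takes precedence:
```console
$ nerdctl --namespace default images
```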
## See also
- [`registry.md`](registry.md)
- [`faq.md`](faq.md)
- https://github.com/containerd/containerd/blob/main/docs/ops.md#base-configuration (`/etc/containerd/config.toml`)

View File

@@ -1,214 +0,0 @@
# Container Image Sign and Verify with cosign tool
| :zap: Requirement | nerdctl >= 0.15 |
|-------------------|-----------------|
[cosign](https://github.com/sigstore/cosign) is a tool that allows you to sign and verify container images with
public/private key pairs, or without them by providing
[Keyless support](https://github.com/sigstore/cosign/blob/main/KEYLESS.md).
Keyless uses ephemeral keys and certificates, which are signed automatically by
the [fulcio](https://github.com/sigstore/fulcio) root CA. Signatures are stored in
the [rekor](https://github.com/sigstore/rekor) transparency log, which automatically provides an attestation as to when
the signature was created.
Cosign normally prompts for confirmation of the statement below during `sign`. nerdctl adds `--yes` to the cosign command, which answers yes and suppresses this prompt.
Using `nerdctl push` with cosign signing therefore means that you agree to the statement.
```
Note that there may be personally identifiable information associated with this signed artifact.
This may include the email address associated with the account with which you authenticate.
This information will be used for signing this artifact and will be stored in public transparency logs and cannot be removed later.
By typing 'y', you attest that you grant (or have permission to grant) and agree to have this information stored permanently in transparency logs.
```
You can enable container signing and verifying features with `push` and `pull` commands of `nerdctl` by using `cosign`
under the hood with make use of flags `--sign` while pushing the container image, and `--verify` while pulling the
container image.
> * Ensure the cosign executable is in your `$PATH`.
> * You can install cosign by following this page: https://docs.sigstore.dev/cosign/installation
Prepare your environment:
```shell
# Create a sample Dockerfile
$ cat <<EOF | tee Dockerfile.dummy
FROM alpine:latest
CMD [ "echo", "Hello World" ]
EOF
```
> Please keep in mind that we won't be validating the base image (`alpine:latest` in this case) that the container image was built on;
> we'll only verify the container image itself once we sign it.
```shell
# Build the image
$ nerdctl build -t devopps/hello-world -f Dockerfile.dummy .
# Generate a key-pair: cosign.key and cosign.pub
$ cosign generate-key-pair
# Export your COSIGN_PASSWORD to prevent CLI prompting
$ export COSIGN_PASSWORD=$COSIGN_PASSWORD
```
Sign the container image while pushing:
```
# Sign the image with Keyless mode
$ nerdctl push --sign=cosign devopps/hello-world
# Sign the image with your key pair and store the signature in the registry
$ nerdctl push --sign=cosign --cosign-key cosign.key devopps/hello-world
```
Verify the container image while pulling:
> REMINDER: The image won't be pulled if there are no matching signatures when you pass the `--verify` flag.
> REMINDER: For keyless flows to work, you need to set either --cosign-certificate-identity or --cosign-certificate-identity-regexp, and either --cosign-certificate-oidc-issuer or --cosign-certificate-oidc-issuer-regexp. The OIDC issuer expected in a valid Fulcio certificate for --verify=cosign, e.g. https://token.actions.githubusercontent.com or https://oauth2.sigstore.dev/auth.
```shell
# Verify the image with Keyless mode
$ nerdctl pull --verify=cosign --cosign-certificate-identity=name@example.com --cosign-certificate-oidc-issuer=https://accounts.example.com devopps/hello-world
INFO[0004] cosign:
INFO[0004] cosign: [{"critical":{"identity":...}]
docker.io/devopps/nginx-new:latest: resolved |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:0910d404e58dd320c3c0c7ea31bf5fbfe7544b26905c5eccaf87c3af7bcf9b88: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:1de1c4fb5122ac8650e349e018fba189c51300cf8800d619e92e595d6ddda40e: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 1.4 s total: 1.3 Ki (928.0 B/s)
# You can not verify the image if it is not signed
$ nerdctl pull --verify=cosign --cosign-key cosign.pub devopps/hello-world-bad
INFO[0003] cosign: Error: no matching signatures:
INFO[0003] cosign: failed to verify signature
INFO[0003] cosign: main.go:46: error during command execution: no matching signatures:
INFO[0003] cosign: failed to verify signature
```
## Cosign in Compose
> Cosign support in Compose is also experimental and implemented based on Compose's [extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension) capability.
cosign is supported in `nerdctl compose up|run|push|pull`. You can use cosign in Compose by adding the following fields in your compose yaml. These fields are _per service_, and you can enable only `verify` or only `sign` (or both).
```yaml
# only put cosign related fields under the service you want to sign/verify.
services:
svc0:
build: .
image: ${REGISTRY}/svc0_image # replace with your registry
# `x-nerdctl-verify` and `x-nerdctl-cosign-public-key` are for verify
# required for `nerdctl compose up|run|pull`
x-nerdctl-verify: cosign
x-nerdctl-cosign-public-key: /path/to/cosign.pub
# `x-nerdctl-sign` and `x-nerdctl-cosign-private-key` are for sign
# required for `nerdctl compose push`
x-nerdctl-sign: cosign
x-nerdctl-cosign-private-key: /path/to/cosign.key
ports:
- 8080:80
svc1:
build: .
image: ${REGISTRY}/svc1_image # replace with your registry
ports:
- 8081:80
```
Following the cosign tutorial above, first set up environment and prepare cosign key pair:
```shell
# Generate a key-pair: cosign.key and cosign.pub
$ cosign generate-key-pair
# Export your COSIGN_PASSWORD to prevent CLI prompting
$ export COSIGN_PASSWORD=$COSIGN_PASSWORD
```
We'll use the following `Dockerfile` and `docker-compose.yaml`:
```shell
$ cat Dockerfile
FROM nginx:1.19-alpine
RUN uname -m > /usr/share/nginx/html/index.html
$ cat docker-compose.yml
services:
svc0:
build: .
image: ${REGISTRY}/svc1_image # replace with your registry
x-nerdctl-verify: cosign
x-nerdctl-cosign-public-key: ./cosign.pub
x-nerdctl-sign: cosign
x-nerdctl-cosign-private-key: ./cosign.key
ports:
- 8080:80
svc1:
build: .
image: ${REGISTRY}/svc1_image # replace with your registry
ports:
- 8081:80
```
For keyless mode, the `docker-compose.yaml` will be:
```
$ cat docker-compose.yml
services:
svc0:
build: .
image: ${REGISTRY}/svc1_image # replace with your registry
x-nerdctl-verify: cosign
x-nerdctl-sign: cosign
x-nerdctl-cosign-certificate-identity: name@example.com # or x-nerdctl-cosign-certificate-identity-regexp
x-nerdctl-cosign-certificate-oidc-issuer: https://accounts.example.com # or x-nerdctl-cosign-certificate-oidc-issuer-regexp
ports:
- 8080:80
svc1:
build: .
image: ${REGISTRY}/svc1_image # replace with your registry
ports:
- 8081:80
```
> The `env "COSIGN_PASSWORD="$COSIGN_PASSWORD""` part in the below commands is a walkaround to use rootful nerdctl and make the env variable visible to root (in sudo). You don't need this part if (1) you're using rootless, or (2) your `COSIGN_PASSWORD` is visible in root.
First let's `build` and `push` the two services:
```shell
$ sudo nerdctl compose build
INFO[0000] Building image xxxxx/svc0_image
...
INFO[0000] Building image xxxxx/svc1_image
[+] Building 0.2s (6/6) FINISHED
$ sudo env "COSIGN_PASSWORD="$COSIGN_PASSWORD"" nerdctl compose --experimental=true push
INFO[0000] Pushing image xxxxx/svc1_image
...
INFO[0000] Pushing image xxxxx/svc0_image
INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest.v2+json, sha256:4329abc3143b1545835de17e1302c8313a9417798b836022f4c8c8dc8b10a3e9)
INFO[0000] cosign: WARNING: Image reference xxxxx/svc0_image uses a tag, not a digest, to identify the image to sign.
INFO[0000] cosign:
INFO[0000] cosign: This can lead you to sign a different image than the intended one. Please use a
INFO[0000] cosign: digest (example.com/ubuntu@sha256:abc123...) rather than tag
INFO[0000] cosign: (example.com/ubuntu:latest) for the input to cosign. The ability to refer to
INFO[0000] cosign: images by tag will be removed in a future release.
INFO[0000] cosign: Pushing signature to: xxxxx/svc0_image
```
Then we can `pull` and `up` services (`run` is similar to up):
```shell
# ensure built images are removed and pull is performed.
$ sudo nerdctl compose down
$ sudo env "COSIGN_PASSWORD="$COSIGN_PASSWORD"" nerdctl compose --experimental=true pull
$ sudo env "COSIGN_PASSWORD="$COSIGN_PASSWORD"" nerdctl compose --experimental=true up
$ sudo env "COSIGN_PASSWORD="$COSIGN_PASSWORD"" nerdctl compose --experimental=true run svc0 -- echo "hello"
# clean up compose resources.
$ sudo nerdctl compose down
```
Check your logs to confirm that svc0 is verified by cosign (it has cosign logs) and svc1 is not. You can also change the public key in `docker-compose.yaml` to a random value to see that verification failure stops the container from being `pull|up|run`.

View File

@@ -1,89 +0,0 @@
# Lazy-pulling using CernVM-FS Snapshotter
CernVM-FS Snapshotter is a containerd snapshotter plugin. It is a specialized component responsible for assembling
all the layers of container images into a stacked file system that containerd can use. The snapshotter takes as input the list
of required layers and outputs a directory containing the final file system. It is also responsible for cleaning up the output
directory when containers using it are stopped.
See the official [documentation](https://cvmfs.readthedocs.io/en/latest/cpt-containers.html#how-to-use-the-cernvm-fs-snapshotter) for further information.
## Prerequisites
- Install containerd remote snapshotter plugin (`cvmfs-snapshotter`) from [here](https://github.com/cvmfs/cvmfs/tree/devel/snapshotter).
- Add the following to `/etc/containerd/config.toml`:
```toml
# Ask containerd to use this particular snapshotter
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "cvmfs-snapshotter"
disable_snapshot_annotations = false
# Set the communication endpoint between containerd and the snapshotter
[proxy_plugins]
[proxy_plugins.cvmfs]
type = "snapshot"
address = "/run/containerd-cvmfs-grpc/containerd-cvmfs-grpc.sock"
```
- The default CernVM-FS repository hosting the flat root filesystems of the container images is `unpacked.cern.ch`.
The container images are unpacked into the CernVM-FS repository by the [DUCC](https://cvmfs.readthedocs.io/en/latest/cpt-ducc.html)
(Daemon that Unpacks Container Images into CernVM-FS) tool.
You can change the repository by adding the following line to `/etc/containerd-cvmfs-grpc/config.toml`:
```toml
repository = "myrepo.mydomain"
```
- Launch `containerd` and `cvmfs-snapshotter`:
```console
$ systemctl start containerd cvmfs-snapshotter
```
## Enable CernVM-FS Snapshotter for `nerdctl run` and `nerdctl pull`
| :zap: Requirement | nerdctl >= 1.6.3 |
| ----------------- | ---------------- |
- Run `nerdctl` with `--snapshotter cvmfs-snapshotter` as in the example below:
```console
$ nerdctl run -it --rm --snapshotter cvmfs-snapshotter clelange/cms-higgs-4l-full:latest
```
- You can also only pull the image with CernVM-FS Snapshotter without running the container:
```console
$ nerdctl pull --snapshotter cvmfs-snapshotter clelange/cms-higgs-4l-full:latest
```
The speedup for pulling this 9 GB (4.3 GB compressed) image is shown below:
- #### with the snapshotter:
```console
$ nerdctl --snapshotter cvmfs-snapshotter pull clelange/cms-higgs-4l-full:latest
docker.io/clelange/cms-higgs-4l-full:latest: resolved |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:b8acbe80629dd28d213c03cf1ffd3d46d39e573f54215a281fabce7494b3d546: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:89ef54b6c4fbbedeeeb29b1df2b9916b6d157c87cf1878ea882bff86a3093b5c: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 4.7 s total: 19.8 K (4.2 KiB/s)
$ nerdctl images
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
clelange/cms-higgs-4l-full latest b8acbe80629d 20 seconds ago linux/amd64 0.0 B 4.3 GiB
```
- #### without the snapshotter:
```console
$ nerdctl pull clelange/cms-higgs-4l-full:latest
docker.io/clelange/cms-higgs-4l-full:latest: resolved |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:b8acbe80629dd28d213c03cf1ffd3d46d39e573f54215a281fabce7494b3d546: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:89ef54b6c4fbbedeeeb29b1df2b9916b6d157c87cf1878ea882bff86a3093b5c: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:e8114d4b0d10b33aaaa4fbc3c6da22bbbcf6f0ef0291170837e7c8092b73840a: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:a3eda0944a81e87c7a44b117b1c2e707bc8d18e9b7b478e21698c11ce3e8b819: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:8f3160776e8e8736ea9e3f6c870d14cd104143824bbcabe78697315daca0b9ad: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:22a5c05baa9db0aa7bba56ffdb2dd21246b9cf3ce938fc6d7bf20e92a067060e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:bfcf9d498f92b72426c9d5b73663504d87249d6783c6b58d71fbafc275349ab9: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:0563e1549926b9c8beac62407bc6a420fa35bcf6f9844e5d8beeb9165325a872: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:6fff5fd7fb4eeb79a1399d9508614a84191d05e53f094832062d689245599640: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:25c39bfa66e1157415236703abc512d06cc1db31bd00fe8c3030c6d6d249dc4e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:3cc0a0eb55eb3fb7ef0760c6bf1e567dfc56933ba5f11b5415f89228af751b72: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:a8850244786303e508b94bb31c8569310765e678c9c73bf1199310729209b803: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:32cdf5fc12485ac061347eb8b5c3b4a28505ce8564a7f3f83ac4241f03911176: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 181.8s total: 4.3 Gi (24.2 MiB/s)
$ nerdctl images
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
clelange/cms-higgs-4l-full latest b8acbe80629d 4 minutes ago linux/amd64 9.0 GiB 4.3 GiB
```

View File

@@ -1,73 +0,0 @@
# nerdctl directory layout
## Config
**Default**: `/etc/nerdctl/nerdctl.toml` (rootful), `~/.config/nerdctl/nerdctl.toml` (rootless)
The configuration file of nerdctl. See [`config.md`](./config.md).
Can be overridden with environment variable `$NERDCTL_TOML`.
This file is unrelated to the daemon config file `/etc/containerd/config.toml`.
## Data
### `<DATAROOT>`
**Default**: `/var/lib/nerdctl` (rootful), `~/.local/share/nerdctl` (rootless)
Can be overridden with `nerdctl --data-root=<DATAROOT>` flag.
The directory is solely managed by nerdctl, not by containerd.
The directory has nothing to do with containerd data root `/var/lib/containerd`.
### `<DATAROOT>/<ADDRHASH>`
e.g. `/var/lib/nerdctl/1935db59`
`1935db59` is from `$(echo -n "/run/containerd/containerd.sock" | sha256sum | cut -c1-8)`
This directory is also called "data store" in the implementation.
### `<DATAROOT>/<ADDRHASH>/containers/<NAMESPACE>/<CID>`
e.g. `/var/lib/nerdctl/1935db59/containers/default/c4ed811cc361d26faffdee8d696ddbc45a9d93c571b5b3c54d3da01cb29caeb1`
Files:
- `resolv.conf`: mounted to the container as `/etc/resolv.conf`
- `hostname`: mounted to the container as `/etc/hostname`
- `log-config.json`: used for storing the `--log-opts` map of `nerdctl run`
- `<CID>-json.log`: used by `nerdctl logs`
- `oci-hook.*.log`: logs of the OCI hook
- `lifecycle.json`: used to store stateful information about the container that can only be retrieved through OCI hooks
### `<DATAROOT>/<ADDRHASH>/names/<NAMESPACE>`
e.g. `/var/lib/nerdctl/1935db59/names/default`
Files:
- `<NAME>`: contains the container ID (CID). Represents that the name is taken by that container.
Files must be operated with a `LOCK_EX` lock against the `<DATAROOT>/<ADDRHASH>/names/<NAMESPACE>` directory.
### `<DATAROOT>/<ADDRHASH>/etchosts/<NAMESPACE>/<CID>`
e.g. `/var/lib/nerdctl/1935db59/etchosts/default/c4ed811cc361d26faffdee8d696ddbc45a9d93c571b5b3c54d3da01cb29caeb1`
Files:
- `hosts`: mounted to the container as `/etc/hosts`
- `meta.json`: metadata
Files must be operated with a `LOCK_EX` lock against the `<DATAROOT>/<ADDRHASH>/etchosts` directory.
### `<DATAROOT>/<ADDRHASH>/volumes/<NAMESPACE>/<VOLNAME>/_data`
e.g. `/var/lib/nerdctl/1935db59/volumes/default/foo/_data`
Data volume
## CNI
### `<NETCONFPATH>`
**Default**: `/etc/cni/net.d` (rootful), `~/.config/cni/net.d` (rootless)
Can be overridden with `nerdctl --cni-netconfpath=<NETCONFPATH>` flag and environment variable `$NETCONFPATH`.
At the top level of `<NETCONFPATH>`, network (files) are shared across all namespaces.
Sub-folders inside `<NETCONFPATH>` are only available to the namespace bearing the same name,
and their network definitions are private.
Files:
- `nerdctl-<NWNAME>.conflist`: CNI conf list created by nerdctl

View File

@@ -1,85 +0,0 @@
# Using GPUs inside containers
| :zap: Requirement | nerdctl >= 0.9 |
|-------------------|----------------|
nerdctl provides docker-compatible NVIDIA GPU support.
## Prerequisites
- NVIDIA Drivers
- Same requirement as when you use GPUs on Docker. For details, please refer to [the doc by NVIDIA](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#pre-requisites).
- `nvidia-container-cli`
- containerd relies on this CLI for setting up GPUs inside container. You can install this via [`libnvidia-container` package](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/arch-overview.html#libnvidia-container).
## Options for `nerdctl run --gpus`
`nerdctl run --gpus` is compatible to [`docker run --gpus`](https://docs.docker.com/engine/reference/commandline/run/#access-an-nvidia-gpu).
You can specify number of GPUs to use via `--gpus` option.
The following example exposes all available GPUs.
```
nerdctl run -it --rm --gpus all nvidia/cuda:12.3.1-base-ubuntu20.04 nvidia-smi
```
You can also pass detailed configuration to `--gpus` option as a list of key-value pairs. The following options are provided.
- `count`: number of GPUs to use. `all` exposes all available GPUs.
- `device`: IDs of GPUs to use. UUID or numbers of GPUs can be specified.
- `capabilities`: [Driver capabilities](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities). If unset, use default driver `utility`, `compute`.
The following example exposes a specific GPU to the container.
```
nerdctl run -it --rm --gpus '"capabilities=utility,compute",device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' nvidia/cuda:12.3.1-base-ubuntu20.04 nvidia-smi
```
## Fields for `nerdctl compose`
`nerdctl compose` also supports GPUs following [compose-spec](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#devices).
You can use GPUs on compose when you specify some of the following `capabilities` in `services.demo.deploy.resources.reservations.devices`.
- `gpu`
- `nvidia`
- all allowed capabilities for `nerdctl run --gpus`
Available fields are the same as `nerdctl run --gpus`.
The following exposes all available GPUs to the container.
```
version: "3.8"
services:
demo:
image: nvidia/cuda:12.3.1-base-ubuntu20.04
command: nvidia-smi
deploy:
resources:
reservations:
devices:
- capabilities: ["utility"]
count: all
```
## Troubleshooting
### `nerdctl run --gpus` fails when using the Nvidia gpu-operator
If the NVIDIA driver is installed by the [gpu-operator](https://github.com/NVIDIA/gpu-operator), `nerdctl run` will fail with the error message `(FATA[0000] exec: "nvidia-container-cli": executable file not found in $PATH)`.
The `nvidia-container-cli` therefore needs to be added to the `PATH` environment variable.
You can do this by adding the following line to your $HOME/.profile or /etc/profile (for a system-wide installation):
```
export PATH=$PATH:/usr/local/nvidia/toolkit
```
The shared libraries also need to be added to the system.
```
echo "/run/nvidia/driver/usr/lib/x86_64-linux-gnu" > /etc/ld.so.conf.d/nvidia.conf
ldconfig
```
After that, `nerdctl run --gpus` can run successfully.

View File

@@ -1,292 +0,0 @@
# Distribute Container Images on IPFS (Experimental)
| :zap: Requirement | nerdctl >= 0.14 |
|-------------------|-----------------|
You can distribute container images without registries, using IPFS.
IPFS support is completely optional. Your host is NOT connected to any P2P network, unless you opt in to [install and run IPFS daemon](https://docs.ipfs.io/install/).
## Prerequisites
### ipfs daemon
Make sure an IPFS daemon such as [Kubo](https://github.com/ipfs/kubo) (former go-ipfs) is running on your host.
For example, you can run Kubo using the following command.
```
ipfs daemon
```
In rootless mode, you need to install the ipfs daemon using `containerd-rootless-setuptool.sh`.
```
containerd-rootless-setuptool.sh -- install-ipfs --init
```
> NOTE: correctly set IPFS_PATH as described in the output of the above command.
:information_source: If you want to expose some ports of ipfs daemon (e.g. 4001), you can install rootless containerd using `containerd-rootless-setuptool.sh install` with `CONTAINERD_ROOTLESS_ROOTLESSKIT_FLAGS="--publish=0.0.0.0:4001:4001/tcp"` environment variable.
:information_source: If you don't want IPFS to communicate with nodes on the internet, you can run IPFS daemon in offline mode using `--offline` flag or you can create a private IPFS network as described [here](https://github.com/containerd/stargz-snapshotter/blob/main/docs/ipfs.md#appendix-1-creating-ipfs-private-network).
:information_source: Instead of locally launching IPFS daemon, you can specify the address of the IPFS API using `--ipfs-address` flag.
## IPFS-enabled image and OCI Compatibility
Image distribution on IPFS is achieved via the OCI-compatible *IPFS-enabled image format*.
nerdctl automatically converts an image to the IPFS-enabled format when necessary.
For example, when nerdctl pushes an image to IPFS and that image isn't already IPFS-enabled, it converts the image to the IPFS-enabled format first.
Please see [the doc in stargz-snapshotter project](https://github.com/containerd/stargz-snapshotter/blob/v0.10.0/docs/ipfs.md) for details about IPFS-enabled image format.
## Using nerdctl with IPFS
nerdctl supports an image name prefix `ipfs://` to handle images on IPFS.
### `nerdctl push ipfs://<image-name>`
For `nerdctl push`, you can specify `ipfs://` prefix for arbitrary image names stored in containerd.
When this prefix is specified, nerdctl pushes that image to IPFS.
```console
> nerdctl push ipfs://ubuntu:20.04
INFO[0000] pushing image "ubuntu:20.04" to IPFS
INFO[0000] ensuring image contents
bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze
```
The last line of the output prints the IPFS CID of the pushed image.
You can use this CID to pull this image from IPFS.
You can also specify `--estargz` option to enable [eStargz-based lazy pulling](https://github.com/containerd/stargz-snapshotter/blob/v0.10.0/docs/ipfs.md) on IPFS.
Please see the later section for details.
```console
> nerdctl push --estargz ipfs://fedora:36
INFO[0000] pushing image "fedora:36" to IPFS
INFO[0000] ensuring image contents
INFO[0011] converted "application/vnd.docker.image.rootfs.diff.tar.gzip" to sha256:cd4be969f12ef45dee7270f3643f796364045edf94cfa9ef6744d91d5cdf2208
bafkreibp2ncujcia663uum25ustwvmyoguxqyzjnxnlhebhsgk2zowscye
```
### `nerdctl pull ipfs://<CID>` and `nerdctl run ipfs://<CID>`
You can pull an image from IPFS by specifying `ipfs://<CID>` where `CID` is the CID of the image.
```console
> nerdctl pull ipfs://bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze
bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:28bfa1fc6d491d3bee91bab451cab29c747e72917efacb0adc4e73faffe1f51c: done |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:f6eed19a2880f1000be1d46fb5d114d094a59e350f9d025580f7297c8d9527d5: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:ba6acccedd2923aee4c2acc6a23780b14ed4b8a5fa4e14e252a23b846df9b6c1: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:7b1a6ab2e44dbac178598dabe7cff59bd67233dba0b27e4fbd1f9d4b3c877a54: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 1.2 s total: 27.2 M (22.7 MiB/s)
```
`nerdctl run` also supports the same image name syntax.
When specified, this command pulls the image from IPFS.
```console
> nerdctl run --rm -it ipfs://bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze echo hello
hello
```
You can also push that image to the container registry.
```
nerdctl tag ipfs://bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze ghcr.io/ktock/ubuntu:20.04-ipfs
nerdctl push ghcr.io/ktock/ubuntu:20.04-ipfs
```
The pushed image can run on other (IPFS-agnostic) runtimes.
```console
> docker run --rm -it ghcr.io/ktock/ubuntu:20.04-ipfs echo hello
hello
```
:information_source: Note that though the IPFS-enabled image is OCI compatible, some runtimes including [containerd](https://github.com/containerd/containerd/pull/6221) and [podman](https://github.com/containers/image/pull/1403) had bugs and failed to pull that image. Containerd fixed this since v1.5.8, podman fixed this since commit [`b55fb86c28b7d743cf59701332cd78d4294c7c54`](https://github.com/containers/image/commit/b55fb86c28b7d743cf59701332cd78d4294c7c54).
### `nerdctl build` and `localhost:5050/ipfs/<CID>` image reference
You can build images using base images on IPFS.
BuildKit >= v0.9.3 is needed.
In Dockerfile, instead of `ipfs://` prefix, you need to use the following image reference to point to an image on IPFS.
```
localhost:5050/ipfs/<CID>
```
Here, `CID` is the IPFS CID of the image.
:information_source: In future versions of nerdctl and BuildKit, the `ipfs://` prefix should be supported in Dockerfiles.
Using this image reference, you can build an image on IPFS.
```dockerfile
FROM localhost:5050/ipfs/bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze
RUN echo hello > /hello
```
Make sure that `nerdctl ipfs registry serve` is running.
This allows `nerdctl build` to pull images from IPFS.
```
$ nerdctl ipfs registry serve &
```
Then you can build this Dockerfile using `nerdctl build`.
```console
> nerdctl build -t hello .
[+] Building 5.3s (6/6) FINISHED
=> [internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 146B 0.0s
=> [internal] load .dockerignore 0.0s
=> => transferring context: 2B 0.0s
=> [internal] load metadata for localhost:5050/ipfs/bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze:latest 0.1s
=> [1/2] FROM localhost:5050/ipfs/bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze@sha256:28bfa1fc6d491d3bee91bab451cab29c747e72917e 3.8s
=> => resolve localhost:5050/ipfs/bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze@sha256:28bfa1fc6d491d3bee91bab451cab29c747e72917e 0.0s
=> => sha256:7b1a6ab2e44dbac178598dabe7cff59bd67233dba0b27e4fbd1f9d4b3c877a54 28.57MB / 28.57MB 2.1s
=> => extracting sha256:7b1a6ab2e44dbac178598dabe7cff59bd67233dba0b27e4fbd1f9d4b3c877a54 1.7s
=> [2/2] RUN echo hello > /hello 0.6s
=> exporting to oci image format 0.6s
=> => exporting layers 0.1s
=> => exporting manifest sha256:b96d490d134221ab121af91a42b13195dd8c5bf941012d7bfe07eabcf5259eda 0.0s
=> => exporting config sha256:bd706574eab19009585b98826b06e63cf6eacf8d7193504dae75caa760332ca2 0.0s
=> => sending tarball 0.5s
unpacking docker.io/library/hello:latest (sha256:b96d490d134221ab121af91a42b13195dd8c5bf941012d7bfe07eabcf5259eda)...done
> nerdctl run --rm -it hello cat /hello
hello
```
> NOTE: `--ipfs` flag has been removed since v1.2.0. You need to launch the localhost registry by yourself using `nerdctl ipfs registry serve`.
#### Details about `localhost:5050/ipfs/<CID>` and `nerdctl ipfs registry`
As of now, BuildKit doesn't support the `ipfs://` prefix, so nerdctl achieves builds on IPFS by providing a read-only local registry backed by IPFS.
This registry converts registry API requests into IPFS operations,
so IPFS-agnostic tools can pull images from IPFS via this registry.
This registry is provided as the subcommand `nerdctl ipfs registry`.
This command starts the registry backed by the IPFS repo of the current `$IPFS_PATH`.
By default, nerdctl exposes the registry at `localhost:5050` (configurable via flags).
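For example, while `nerdctl ipfs registry serve` is running, the image pushed earlier can be pulled through this registry reference (a sketch reusing the CID from the push example above; depending on your client configuration, you may need to allow the plain-HTTP localhost registry):
```console
> nerdctl pull localhost:5050/ipfs/bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze
```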
<details>
<summary>Creating systemd unit file for `nerdctl ipfs registry`</summary>
Optionally, you can create a systemd unit file for `nerdctl ipfs registry serve`.
An example systemd unit file is shown below.
`nerdctl ipfs registry serve` reads environment variables for configuring its behaviour (e.g. the listening port), so you can use `EnvironmentFile` to configure it.
```
[Unit]
Description=nerdctl ipfs registry serve
[Service]
EnvironmentFile=-/run/nerdctl-ipfs-registry-serve/env
ExecStart=nerdctl ipfs registry serve
[Install]
WantedBy=default.target
```
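For example, to install and start it as a user unit (a sketch; the unit file name and location are assumptions):
```
# assuming the unit was saved as ~/.config/systemd/user/nerdctl-ipfs-registry-serve.service
systemctl --user daemon-reload
systemctl --user enable --now nerdctl-ipfs-registry-serve.service
```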
</details>
The following example starts the registry on `localhost:5555` instead of `localhost:5050`.
```
nerdctl ipfs registry serve --listen-registry=localhost:5555
```
> NOTE: You'll also need to restart the registry when you change `$IPFS_PATH` to use.
> NOTE: `nerdctl ipfs registry [up|down]` has been removed since v1.2.0. You need to launch the localhost registry using `nerdctl ipfs registry serve` instead.
### Compose on IPFS
`nerdctl compose` supports the same image name syntax to pull images from IPFS.
```yaml
version: "3.8"
services:
ubuntu:
image: ipfs://bafkreicq4dg6nkef5ju422ptedcwfz6kcvpvvhuqeykfrwq5krazf3muze
command: echo hello
```
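For example, assuming the file above is saved as `docker-compose.yaml` in the current directory, the service can be started with:
```
nerdctl compose up
```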
When you build images using base images on IPFS, you can use `localhost:5050/ipfs/<CID>` image reference in Dockerfile as mentioned above.
```
nerdctl compose up --build
```
```
nerdctl compose build
```
> NOTE: `--ipfs` flag has been removed since v1.2.0. You need to launch the localhost registry by yourself using `nerdctl ipfs registry serve`.
### Encryption
You can distribute [encrypted images](./ocicrypt.md) on IPFS using OCIcrypt.
Please see [`/docs/ocicrypt.md`](./ocicrypt.md) for details about how to encrypt and decrypt an image.
As with normal images, an encrypted image can be pushed to IPFS using the `ipfs://` prefix.
```console
> nerdctl image encrypt --recipient=jwe:mypubkey.pem ubuntu:20.04 ubuntu:20.04-encrypted
sha256:a5c57411f3d11bb058b584934def0710c6c5b5a4a2d7e9b78f5480ecfc450740
> nerdctl push ipfs://ubuntu:20.04-encrypted
INFO[0000] pushing image "ubuntu:20.04-encrypted" to IPFS
INFO[0000] ensuring image contents
bafkreifajsysbvhtgd7fdgrfesszexdq6v5zbj5y2jnjfwxdjyqws2s3s4
```
You can pull the encrypted image from IPFS using `ipfs://` prefix and can decrypt it in the same way as described in [`/docs/ocicrypt.md`](./ocicrypt.md).
```console
> nerdctl pull --unpack=false ipfs://bafkreifajsysbvhtgd7fdgrfesszexdq6v5zbj5y2jnjfwxdjyqws2s3s4
bafkreifajsysbvhtgd7fdgrfesszexdq6v5zbj5y2jnjfwxdjyqws2s3s4: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:73334fee83139d1d8dbf488b28ad100767c38428b2a62504c758905c475c1d6c: done |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:8855ae825902045ea2b27940634673ba410b61885f91b9f038f6b3303f48727c: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:ba6acccedd2923aee4c2acc6a23780b14ed4b8a5fa4e14e252a23b846df9b6c1: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:e74a9a7749e808e4ad1e90d5a81ce3146ce270de0fbdf22429cd465df8f10a13: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 0.3 s total: 22.0 M (73.2 MiB/s)
> nerdctl image decrypt --key=mykey.pem ipfs://bafkreifajsysbvhtgd7fdgrfesszexdq6v5zbj5y2jnjfwxdjyqws2s3s4 ubuntu:20.04-decrypted
sha256:b0ccaddb7e7e4e702420de126468eab263eb0f3c25abf0b957ce8adcd1e82105
> nerdctl run --rm -it ubuntu:20.04-decrypted echo hello
hello
```
## Running containers on IPFS with eStargz-based lazy pulling
nerdctl supports running eStargz images on IPFS with lazy pulling using Stargz Snapshotter.
In this configuration, Stargz Snapshotter mounts the eStargz image from IPFS onto the container's rootfs using FUSE with lazy pulling support.
Thus the container can start up without waiting for the entire image contents to become locally available, which results in faster container cold-starts.
To use this feature, you need to enable Stargz Snapshotter following [`/docs/stargz.md`](./stargz.md).
You also need to add the following configuration to `config.toml` of Stargz Snapshotter (typically located at `/etc/containerd-stargz-grpc/config.toml`).
```toml
ipfs = true
```
You can push an arbitrary image to IPFS while converting it to eStargz by using the `--estargz` option.
```
nerdctl push --estargz ipfs://fedora:36
```
You can pull and run that eStargz image with lazy pulling.
```
nerdctl run --rm -it ipfs://bafkreibp2ncujcia663uum25ustwvmyoguxqyzjnxnlhebhsgk2zowscye echo hello
```
- See [the doc in stargz-snapshotter project](https://github.com/containerd/stargz-snapshotter/blob/v0.10.0/docs/ipfs.md) for details about lazy pulling on IPFS.
- See [`/docs/stargz.md`](./stargz.md) for details about the configuration of nerdctl for Stargz Snapshotter.

View File

@@ -1,72 +0,0 @@
# Multi-platform
| :zap: Requirement | nerdctl >= 0.13 |
|-------------------|-----------------|
nerdctl can execute non-native container images using QEMU, e.g., ARM on Intel and vice versa.
## Preparation: Register QEMU to `/proc/sys/fs/binfmt_misc`
```console
$ sudo systemctl start containerd
$ sudo nerdctl run --privileged --rm tonistiigi/binfmt:master --install all
$ ls -1 /proc/sys/fs/binfmt_misc/qemu*
/proc/sys/fs/binfmt_misc/qemu-aarch64
/proc/sys/fs/binfmt_misc/qemu-arm
/proc/sys/fs/binfmt_misc/qemu-mips64
/proc/sys/fs/binfmt_misc/qemu-mips64el
/proc/sys/fs/binfmt_misc/qemu-ppc64le
/proc/sys/fs/binfmt_misc/qemu-riscv64
/proc/sys/fs/binfmt_misc/qemu-s390x
```
The `tonistiigi/binfmt` container must be executed with `--privileged` and in rootful mode (`sudo`).
This container is not a daemon, and exits immediately after registering QEMU to `/proc/sys/fs/binfmt_misc`.
Run `ls -1 /proc/sys/fs/binfmt_misc/qemu*` to confirm registration.
See also https://github.com/tonistiigi/binfmt
## Usage
### Pull & Run
```console
$ nerdctl pull --platform=arm64,s390x alpine
$ nerdctl run --rm --platform=arm64 alpine uname -a
Linux e6227935cf12 5.13.0-19-generic #19-Ubuntu SMP Thu Oct 7 21:58:00 UTC 2021 aarch64 Linux
$ nerdctl run --rm --platform=s390x alpine uname -a
Linux b39da08fbdbf 5.13.0-19-generic #19-Ubuntu SMP Thu Oct 7 21:58:00 UTC 2021 s390x Linux
```
### Build & Push
```console
$ nerdctl build --platform=amd64,arm64 --output type=image,name=example.com/foo:latest,push=true .
```
Or
```console
$ nerdctl build --platform=amd64,arm64 -t example.com/foo:latest .
$ nerdctl push --all-platforms example.com/foo:latest
```
### Compose
See [`../examples/compose-multi-platform`](../examples/compose-multi-platform)
## macOS + Lima
As of 2025-03-01, qemu seems to be broken in most Apple-silicon setups.
This might be due to qemu's handling of host vs. guest page sizes
(unconfirmed; see https://github.com/containerd/nerdctl/issues/3948 for more information).
It should also be noted that Linux 6.11 introduced a change to the VDSO (on ARM)
that breaks Rosetta.
The take-away is that, presumably, your only shot at running non-native binaries
on Apple silicon is to use an older kernel (<6.11) for your guest, typically as shipped by Debian stable,
and to use VZ+Rosetta rather than qemu (e.g. `limactl create --vm-type=vz --rosetta`).

View File

@@ -1,81 +0,0 @@
# Container Image Sign and Verify with notation tool
| :zap: Requirement | nerdctl >= 1.3.0 |
|-------------------|------------------|
[notation](https://github.com/notaryproject/notation) is a project to add signatures as standard items in the registry ecosystem, and to build a set of simple tooling for signing and verifying these signatures.
You can enable container image signing and verification for the `push` and `pull` commands of `nerdctl` by using `notation`
under the hood: pass the `--sign` flag while pushing the container image and the `--verify` flag while pulling the
container image.
* Ensure the notation executable is in your `$PATH`.
* You can install notation by following this page: https://notaryproject.dev/docs/user-guides/installation/cli/
* Notation follows the RC of OCI spec v1.1.0. Follow the [instructions](https://notaryproject.dev/docs/quickstart/#create-an-oci-compatible-registry) to set up a compliant local registry for testing purposes.
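Before proceeding, you can quickly confirm the prerequisite (a minimal check; `notation version` just prints version information):
```shell
# Ensure the notation executable is reachable in $PATH
notation version
```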
Prepare your environment:
```shell
# Create a sample Dockerfile
$ cat <<EOF | tee Dockerfile.dummy
FROM alpine:latest
CMD [ "echo", "Hello World" ]
EOF
```
> Please do not forget: we won't be validating the base image (`alpine:latest` in this case) that the container image was built on;
> we'll only verify the container image itself once we sign it.
```shell
# Build the image
$ nerdctl build -t localhost:5000/my-test -f Dockerfile.dummy .
# Generate a key-pair in notation's key store and trust store
$ notation cert generate-test --default "test"
# Confirm the signing key is correctly configured. Key name with a * prefix is the default key.
$ notation key ls
# Confirm the certificate is stored in the trust store.
$ notation cert ls
```
Sign the container image while pushing:
```
# Sign the image and store the signature in the registry
$ nerdctl push --sign=notation --notation-key-name test localhost:5000/my-test
```
Verify the container image while pulling:
> REMINDER: Image won't be pulled if there are no matching signatures with the cert in the [trust policy](https://github.com/notaryproject/specifications/blob/main/specs/trust-store-trust-policy.md#trust-policy) in case you passed `--verify` flag.
```shell
# Create `trustpolicy.json` under $XDG_CONFIG_HOME/notation (XDG_CONFIG_HOME is ~/.config below)
cat <<EOF | tee ~/.config/notation/trustpolicy.json
{
"version": "1.0",
"trustPolicies": [
{
"name": "test-images",
"registryScopes": [ "*" ],
"signatureVerification": {
"level" : "strict"
},
"trustStores": [ "ca:test" ],
"trustedIdentities": [
"*"
]
}
]
}
EOF
# Verify the image
$ nerdctl pull --verify=notation localhost:5000/my-test
# You cannot verify the image if it is not signed by the cert in the trust policy
$ nerdctl pull --verify=notation localhost:5000/my-test-bad
```

View File

@@ -1,37 +0,0 @@
# Lazy-pulling using Nydus Snapshotter
| :zap: Requirement | nerdctl >= 0.22 |
| ----------------- | --------------- |
Nydus snapshotter is a remote snapshotter plugin of containerd for the [Nydus](https://github.com/dragonflyoss/image-service) image service, which implements a chunk-based content-addressable filesystem that improves on the current OCI image specification in terms of container launch speed, image space, and network bandwidth efficiency, as well as data integrity, with several runtime backends: FUSE, virtiofs, and in-kernel EROFS (Linux kernel 5.19+).
## Enable lazy-pulling for `nerdctl run`
- Install containerd remote snapshotter plugin (`containerd-nydus-grpc`) from https://github.com/containerd/nydus-snapshotter
- Add the following to `/etc/containerd/config.toml`:
```toml
[proxy_plugins]
[proxy_plugins.nydus]
type = "snapshot"
address = "/run/containerd-nydus-grpc/containerd-nydus-grpc.sock"
```
- Launch `containerd` and `containerd-nydus-grpc`
- Run `nerdctl` with `--snapshotter=nydus`
```console
# nerdctl --snapshotter=nydus run -it --rm ghcr.io/dragonflyoss/image-service/ubuntu:nydus-nightly-v5
```
For the list of pre-converted Nydus images, see https://github.com/orgs/dragonflyoss/packages?page=1&repo_name=image-service
## Build Nydus image using `nerdctl image convert`
nerdctl supports converting an OCI image or Docker format v2 image into a Nydus image using the `nerdctl image convert` command.
Before the conversion, you should have the `nydus-image` binary installed, which is contained in the ["nydus static package"](https://github.com/dragonflyoss/image-service/releases). You can run a command like `nerdctl image convert --nydus --oci --nydus-builder-path <the_path_of_nydus_image_binary> <source_image> <target_image>` to convert `<source_image>` into a Nydus image tagged `<target_image>`.
For now, the converted Nydus image cannot be run directly. It should be unpacked into the nydus snapshotter before `nerdctl run`, which is part of the processing flow of `nerdctl image pull`. So you need to push the converted image to a registry after the conversion, and use `nerdctl --snapshotter nydus image pull` to unpack it into the nydus snapshotter before running the image.
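A sketch of the full flow described above (the image names and the `nydus-image` path are placeholders):
```console
# nerdctl image convert --nydus --oci --nydus-builder-path /usr/local/bin/nydus-image example.com/foo:latest example.com/foo:nydus
# nerdctl push example.com/foo:nydus
# nerdctl --snapshotter nydus image pull example.com/foo:nydus
# nerdctl --snapshotter nydus run -it --rm example.com/foo:nydus
```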
Optionally, you can use the nydusify conversion tool to check whether the format of the converted Nydus image is valid. For more details about Nydus image validation and how to build Nydus images, please refer to [nydusify](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md) and [acceld](https://github.com/goharbor/acceleration-service).

View File

@@ -1,90 +0,0 @@
# OCIcrypt
| :zap: Requirement | nerdctl >= 0.7 |
|-------------------|----------------|
nerdctl supports encryption and decryption using [OCIcrypt](https://github.com/containers/ocicrypt)
(aka [imgcrypt](https://github.com/containerd/imgcrypt) for containerd).
## JWE mode
### Encryption
Use `openssl` to create a private key (`mykey.pem`) and the corresponding public key (`mypubkey.pem`):
```bash
openssl genrsa -out mykey.pem
openssl rsa -in mykey.pem -pubout -out mypubkey.pem
```
Use `nerdctl image encrypt` to create an encrypted image:
```bash
nerdctl image encrypt --recipient=jwe:mypubkey.pem --platform=linux/amd64,linux/arm64 foo example.com/foo:encrypted
nerdctl push example.com/foo:encrypted
```
:warning: CAUTION: This command only encrypts image layers, but does NOT encrypt [container configuration such as `Env` and `Cmd`](https://github.com/opencontainers/image-spec/blob/v1.0.1/config.md#example).
To see non-encrypted information, run `nerdctl image inspect --mode=native --platform=PLATFORM example.com/foo:encrypted` .
### Decryption
#### Configuration
Put the private key files to `/etc/containerd/ocicrypt/keys` (for rootless `~/.config/containerd/ocicrypt/keys`).
<details>
<summary>Extra step for containerd 1.4 and older</summary>
<p>
containerd 1.4 and older requires adding the following configuration to `/etc/containerd/config.toml`
(for rootless `~/.config/containerd/config.toml`):
```toml
version = 2
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
path = "ctd-decoder"
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
returns = "application/vnd.oci.image.layer.v1.tar"
path = "ctd-decoder"
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
# NOTE: On rootless, ~/.config/containerd is mounted as /etc/containerd in the namespace.
```
</p>
</details>
#### Running nerdctl
No flag is needed for running encrypted images with `nerdctl run`, as long as the private key is stored
in `/etc/containerd/ocicrypt/keys` (for rootless `~/.config/containerd/ocicrypt/keys`).
Just run `nerdctl run example.com/encrypted-image`.
To decrypt an image without running a container, use `nerdctl image decrypt` command:
```bash
nerdctl pull --unpack=false example.com/foo:encrypted
nerdctl image decrypt --key=mykey.pem example.com/foo:encrypted foo:decrypted
```
## PGP (GPG) mode
(Undocumented yet)
## PKCS7 mode
(Undocumented yet)
## PKCS11 mode
(Undocumented yet)
## More information
- https://github.com/containerd/imgcrypt (High-level library for containerd, using `containers/ocicrypt`)
- https://github.com/containers/ocicrypt (Low-level library, used by `containerd/imgcrypt`)
- https://github.com/opencontainers/image-spec/pull/775 (Proposal for OCI Image Spec)
- https://github.com/containerd/containerd/blob/main/docs/cri/decryption.md (configuration guide)
- The `plugins."io.containerd.grpc.v1.cri"` section does not apply to nerdctl, as nerdctl does not use CRI

View File

@@ -1,35 +0,0 @@
# Lazy-pulling using OverlayBD Snapshotter
| :zap: Requirement | nerdctl >= 0.15.0 |
| ----------------- | --------------- |
OverlayBD is a remote container image format based on block devices; it is an open-source implementation of the paper ["DADI: Block-Level Image Service for Agile and Elastic Application Deployment. USENIX ATC'20"](https://www.usenix.org/conference/atc20/presentation/li-huiba).
See https://github.com/containerd/accelerated-container-image for further information.
## Enable lazy-pulling for `nerdctl run`
- Install containerd remote snapshotter plugin (`overlaybd`) from https://github.com/containerd/accelerated-container-image/blob/main/docs/BUILDING.md
- Add the following to `/etc/containerd/config.toml`:
```toml
[proxy_plugins]
[proxy_plugins.overlaybd]
type = "snapshot"
address = "/run/overlaybd-snapshotter/overlaybd.sock"
```
- Launch `containerd` and `overlaybd-snapshotter`
- Run `nerdctl` with `--snapshotter=overlaybd`
```console
nerdctl run --net host -it --rm --snapshotter=overlaybd registry.hub.docker.com/overlaybd/redis:6.2.1_obd
```
For more details about how to build overlaybd image, please refer to [accelerated-container-image](https://github.com/containerd/accelerated-container-image/blob/main/docs/IMAGE_CONVERTOR.md) conversion tool.
## Build OverlayBD image using `nerdctl image convert`
nerdctl supports converting an OCI image or Docker format v2 image into an OverlayBD image using the `nerdctl image convert` command.
Before the conversion, you should have the `overlaybd-snapshotter` binary installed, which is built from [accelerated-container-image](https://github.com/containerd/accelerated-container-image). You can run a command like `nerdctl image convert --overlaybd --oci <source_image> <target_image>` to convert `<source_image>` into an OverlayBD image tagged `<target_image>`.
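A sketch of the conversion flow (image names are placeholders):
```console
nerdctl image convert --overlaybd --oci example.com/foo:latest example.com/foo:obd
nerdctl push example.com/foo:obd
nerdctl run -it --rm --snapshotter=overlaybd example.com/foo:obd
```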

View File

@@ -1,471 +0,0 @@
# Registry authentication
nerdctl uses `${DOCKER_CONFIG}/config.json` for the authentication with image registries.
`$DOCKER_CONFIG` defaults to `$HOME/.docker`.
## Using insecure registry
If you face `http: server gave HTTP response to HTTPS client` and you cannot configure TLS for the registry, try `--insecure-registry` flag:
e.g.,
```console
$ nerdctl --insecure-registry run --rm 192.168.12.34:5000/foo
```
## Specifying certificates
| :zap: Requirement | nerdctl >= 0.16 |
|-------------------|-----------------|
Create `~/.config/containerd/certs.d/<HOST:PORT>/hosts.toml` (or `/etc/containerd/certs.d/...` for rootful) to specify `ca` certificates.
```toml
# An example of ~/.config/containerd/certs.d/192.168.12.34:5000/hosts.toml
# (The path is "/etc/containerd/certs.d/192.168.12.34:5000/hosts.toml" for rootful)
server = "https://192.168.12.34:5000"
[host."https://192.168.12.34:5000"]
ca = "/path/to/ca.crt"
```
See https://github.com/containerd/containerd/blob/main/docs/hosts.md for the syntax of `hosts.toml` .
Docker-style directories are also supported.
The path is `~/.config/docker/certs.d` for rootless, `/etc/docker/certs.d` for rootful.
## Accessing 127.0.0.1 from rootless nerdctl
Currently, rootless nerdctl cannot pull images from 127.0.0.1, because
the pull operation occurs in RootlessKit's network namespace.
See https://github.com/containerd/nerdctl/issues/86 for the discussion about workarounds.
- - -
# Using managed registry services
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Amazon Elastic Container Registry (ECR)](#amazon-elastic-container-registry-ecr)
- [Logging in](#logging-in)
- [Creating a repo](#creating-a-repo)
- [Pushing an image](#pushing-an-image)
- [Azure Container Registry (ACR)](#azure-container-registry-acr)
- [Creating a registry](#creating-a-registry)
- [Logging in](#logging-in-1)
- [Creating a repo](#creating-a-repo-1)
- [Pushing an image](#pushing-an-image-1)
- [Docker Hub](#docker-hub)
- [Logging in](#logging-in-2)
- [Creating a repo](#creating-a-repo-2)
- [Pushing an image](#pushing-an-image-2)
- [GitHub Container Registry (GHCR)](#github-container-registry-ghcr)
- [Logging in](#logging-in-3)
- [Creating a repo](#creating-a-repo-3)
- [Pushing an image](#pushing-an-image-3)
- [GitLab Container Registry](#gitlab-container-registry)
- [Logging in](#logging-in-4)
- [Creating a repo](#creating-a-repo-4)
- [Pushing an image](#pushing-an-image-4)
- [Google Artifact Registry (pkg.dev)](#google-artifact-registry-pkgdev)
- [Logging in](#logging-in-5)
- [Creating a repo](#creating-a-repo-5)
- [Pushing an image](#pushing-an-image-5)
- [Google Container Registry (GCR) [DEPRECATED]](#google-container-registry-gcr-deprecated)
- [Logging in](#logging-in-6)
- [Creating a repo](#creating-a-repo-6)
- [Pushing an image](#pushing-an-image-6)
- [JFrog Artifactory (Cloud/On-Prem)](#jfrog-artifactory-cloudon-prem)
- [Logging in](#logging-in-7)
- [Creating a repo](#creating-a-repo-7)
- [Pushing an image](#pushing-an-image-7)
- [Quay.io](#quayio)
- [Logging in](#logging-in-8)
- [Creating a repo](#creating-a-repo-8)
- [Pushing an image](#pushing-an-image-8)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Amazon Elastic Container Registry (ECR)
See also https://aws.amazon.com/ecr
### Logging in
```console
$ aws ecr get-login-password --region <REGION> | nerdctl login --username AWS --password-stdin <AWS_ACCOUNT_ID>.dkr.ecr.<REGION>.amazonaws.com
Login Succeeded
```
<details>
<summary>Alternative method: <code>docker-credential-ecr-login</code></summary>
This method is more secure but needs an external dependency.
<p>
Install `docker-credential-ecr-login` from https://github.com/awslabs/amazon-ecr-credential-helper , and create the following files:
`~/.docker/config.json`:
```json
{
"credHelpers": {
"public.ecr.aws": "ecr-login",
"<AWS_ACCOUNT_ID>.dkr.ecr.<REGION>.amazonaws.com": "ecr-login"
}
}
```
`~/.aws/credentials`:
```
[default]
aws_access_key_id = ...
aws_secret_access_key = ...
```
> **Note**: If you are running nerdctl inside a VM (including Lima, Colima, Rancher Desktop, and WSL2), `docker-credential-ecr-login` has to be installed inside the guest, not the host.
> Same applies to the path of `~/.docker/config.json` and `~/.aws/credentials`, too.
</p>
</details>
### Creating a repo
You have to create a repository via https://console.aws.amazon.com/ecr/home/ .
### Pushing an image
```console
$ nerdctl tag hello-world <AWS_ACCOUNT_ID>.dkr.ecr.<REGION>.amazonaws.com/<REPO>
$ nerdctl push <AWS_ACCOUNT_ID>.dkr.ecr.<REGION>.amazonaws.com/<REPO>
```
The pushed image appears in the repository you manually created in the previous step.
## Azure Container Registry (ACR)
See also https://azure.microsoft.com/en-us/services/container-registry/#overview
### Creating a registry
You have to create a "Container registry" resource manually via [the Azure portal](https://portal.azure.com/).
### Logging in
```console
$ nerdctl login -u <USERNAME> <REGISTRY>.azurecr.io
Enter Password: ********[Enter]
Login Succeeded
```
The login credentials can be found as "Access keys" in [the Azure portal](https://portal.azure.com/).
See also https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication .
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
You do not need to create a repo explicitly.
### Pushing an image
```console
$ nerdctl tag hello-world <REGISTRY>.azurecr.io/hello-world
$ nerdctl push <REGISTRY>.azurecr.io/hello-world
```
The pushed image appears in [the Azure portal](https://portal.azure.com/).
Private by default.
## Docker Hub
See also https://hub.docker.com/
### Logging in
```console
$ nerdctl login -u <USERNAME>
Enter Password: ********[Enter]
Login Succeeded
```
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
You do not need to create a repo explicitly, for public images.
To create a private repo, see https://hub.docker.com/repositories .
### Pushing an image
```console
$ nerdctl tag hello-world <USERNAME>/hello-world
$ nerdctl push <USERNAME>/hello-world
```
The pushed image appears in https://hub.docker.com/repositories .
**Public** by default.
## GitHub Container Registry (GHCR)
See also https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry
### Logging in
```console
$ nerdctl login ghcr.io -u <USERNAME>
Enter Password: ********[Enter]
Login Succeeded
```
The `<USERNAME>` is your GitHub username, in lowercase.
The "Password" here is a [GitHub Personal access token](https://github.com/settings/tokens), with `read:packages` and `write:packages` scopes.
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
You do not need to create a repo explicitly.
### Pushing an image
```console
$ nerdctl tag hello-world ghcr.io/<USERNAME>/hello-world
$ nerdctl push ghcr.io/<USERNAME>/hello-world
```
The pushed image appears in the "Packages" tab of your GitHub profile.
Private by default.
## GitLab Container Registry
See also https://docs.gitlab.com/ee/user/packages/container_registry/
### Logging in
```console
$ nerdctl login registry.gitlab.com -u <USERNAME>
Enter Password: ********[Enter]
Login Succeeded
```
The `<USERNAME>` is your GitLab username.
The "Password" here is either a [GitLab Personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) or a [GitLab Deploy token](https://docs.gitlab.com/ee/user/project/deploy_tokens/index.html). Both options require minimum scope of `read_registry` for pull access and both `write_registry` and `read_registry` scopes for push access.
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
Container registries in GitLab are created at the project level. A project must exist in GitLab before you can begin working with its container registry.
### Pushing an image
In this example we have created a GitLab project named `myproject`.
```console
$ nerdctl tag hello-world registry.gitlab.com/<USERNAME>/myproject/hello-world:latest
$ nerdctl push registry.gitlab.com/<USERNAME>/myproject/hello-world:latest
```
The pushed image appears under the "Packages & Registries -> Container Registry" tab of your project on GitLab.
## Google Artifact Registry (pkg.dev)
See also https://cloud.google.com/artifact-registry/docs/docker/quickstart
### Logging in
Create a [GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating), grant
`Artifact Registry Reader` and `Artifact Registry Writer` roles, and download the key as a JSON file.
Then run the following command:
```console
$ cat <GCP_SERVICE_ACCOUNT_KEY_JSON> | nerdctl login -u _json_key --password-stdin https://<REGION>-docker.pkg.dev
WARNING! Your password will be stored unencrypted in /home/<USERNAME>/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
```
See also https://cloud.google.com/artifact-registry/docs/docker/authentication
<details>
<summary>Alternative method: <code>docker-credential-gcloud</code> (<code>gcloud auth configure-docker</code>)</summary>
This method is more secure but needs an external dependency.
<p>
Run `gcloud auth configure-docker <REGION>-docker.pkg.dev`, e.g.,
```console
$ gcloud auth configure-docker asia-northeast1-docker.pkg.dev
Adding credentials for: asia-northeast1-docker.pkg.dev
After update, the following will be written to your Docker config file located at [/home/<USERNAME>/.docker/config.json]:
{
"credHelpers": {
"asia-northeast1-docker.pkg.dev": "gcloud"
}
}
Do you want to continue (Y/n)? y
Docker configuration file updated.
```
Google Cloud SDK (`gcloud`, `docker-credential-gcloud`) has to be installed, see https://cloud.google.com/sdk/docs/quickstart .
> **Note**: If you are running nerdctl inside a VM (including Lima, Colima, Rancher Desktop, and WSL2), the Google Cloud SDK has to be installed inside the guest, not the host.
</p>
</details>
### Creating a repo
You have to create a repository via https://console.cloud.google.com/artifacts .
Choose "Docker" as the repository format.
### Pushing an image
```console
$ nerdctl tag hello-world <REGION>-docker.pkg.dev/<GCP_PROJECT_ID>/<REPO>/hello-world
$ nerdctl push <REGION>-docker.pkg.dev/<GCP_PROJECT_ID>/<REPO>/hello-world
```
The pushed image appears in the repository you manually created in the previous step.
## Google Container Registry (GCR) [DEPRECATED]
See also https://cloud.google.com/container-registry/docs/advanced-authentication
### Logging in
Create a [GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating), grant
`Storage Object Admin` role, and download the key as a JSON file.
Then run the following command:
```console
$ cat <GCP_SERVICE_ACCOUNT_KEY_JSON> | nerdctl login -u _json_key --password-stdin https://asia.gcr.io
WARNING! Your password will be stored unencrypted in /home/<USERNAME>/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
```
See also https://cloud.google.com/container-registry/docs/advanced-authentication
<details>
<summary>Alternative method: <code>docker-credential-gcloud</code> (<code>gcloud auth configure-docker</code>)</summary>
This method is more secure but needs an external dependency.
<p>
```console
$ gcloud auth configure-docker
Adding credentials for all GCR repositories.
WARNING: A long list of credential helpers may cause delays running 'docker build'. We recommend passing the registry name to configure only the registry you are using.
After update, the following will be written to your Docker config file located at [/home/<USERNAME>/.docker/config.json]:
{
"credHelpers": {
"gcr.io": "gcloud",
"us.gcr.io": "gcloud",
"eu.gcr.io": "gcloud",
"asia.gcr.io": "gcloud",
"staging-k8s.gcr.io": "gcloud",
"marketplace.gcr.io": "gcloud"
}
}
Do you want to continue (Y/n)? y
Docker configuration file updated.
```
Google Cloud SDK (`gcloud`, `docker-credential-gcloud`) has to be installed, see https://cloud.google.com/sdk/docs/quickstart .
> **Note**: If you are running nerdctl inside a VM (including Lima, Colima, Rancher Desktop, and WSL2), the Google Cloud SDK has to be installed inside the guest, not the host.
</p>
</details>
### Creating a repo
You do not need to create a repo explicitly.
### Pushing an image
```console
$ nerdctl tag hello-world asia.gcr.io/<GCP_PROJECT_ID>/hello-world
$ nerdctl push asia.gcr.io/<GCP_PROJECT_ID>/hello-world
```
The pushed image appears in https://console.cloud.google.com/gcr/ .
Private by default.
## JFrog Artifactory (Cloud/On-Prem)
See also https://www.jfrog.com/confluence/display/JFROG/Getting+Started+with+Artifactory+as+a+Docker+Registry
### Logging in
```console
$ nerdctl login <SERVER_NAME>.jfrog.io -u <USERNAME>
Enter Password: ********[Enter]
Login Succeeded
```
Log in using the default username (`admin`) and password (`password`) for the on-prem installation, or the credentials provided to you by email for the cloud installation.
The JFrog Platform is integrated with OAuth, allowing you to delegate authentication requests to external providers (the supported provider types are Google, OpenID Connect, GitHub Enterprise, and Cloud Foundry UAA).
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
1. Add local Docker repository
1. Add a new Local Repository with the Docker package type via `https://<server-name>.jfrog.io/ui/admin/repositories/local/new`.
2. Add virtual Docker repository
1. Add a new virtual repository with the Docker package type via `https://<server-name>.jfrog.io/ui/admin/repositories/virtual/new`.
2. Add the local docker repository you created in Steps 1 (move it from Available Repositories to Selected Repositories using the arrow buttons).
3. Set local repository as a default local deployment repository.
### Pushing an image
```console
$ nerdctl tag hello-world <SERVER_NAME>.jfrog.io/<VIRTUAL_REPO_NAME>/hello-world
$ nerdctl push <SERVER_NAME>.jfrog.io/<VIRTUAL_REPO_NAME>/hello-world
```
The `SERVER_NAME` is the first part of the URL given to you for your environment: `https://<SERVER_NAME>.jfrog.io`.
The `VIRTUAL_REPO_NAME` is the name (e.g. `docker`) that you assigned to your virtual repository in step 2.i.
The pushed image appears in `https://<SERVER_NAME>.jfrog.io/ui/repos/tree/General/<VIRTUAL_REPO_NAME>`.
Private by default.
## Quay.io
See also https://docs.quay.io/solution/getting-started.html
### Logging in
```console
$ nerdctl login quay.io -u <USERNAME>
Enter Password: ********[Enter]
Login Succeeded
```
> **Note**: nerdctl prior to v0.16.1 had a bug that required pressing the Enter key twice.
### Creating a repo
You do not need to create a repo explicitly.
### Pushing an image
```console
$ nerdctl tag hello-world quay.io/<USERNAME>/hello-world
$ nerdctl push quay.io/<USERNAME>/hello-world
```
The pushed image appears in https://quay.io/repository/ .
Private by default.

View File

@@ -1,193 +0,0 @@
# Rootless mode
See https://rootlesscontaine.rs/getting-started/common/ for the prerequisites.
## Daemon (containerd)
Use [`containerd-rootless-setuptool.sh`](../extras/rootless) to set up rootless containerd.
```console
$ containerd-rootless-setuptool.sh install
[INFO] Checking RootlessKit functionality
[INFO] Checking cgroup v2
[INFO] Checking overlayfs
[INFO] Creating /home/testuser/.config/systemd/user/containerd.service
...
[INFO] Installed containerd.service successfully.
[INFO] To control containerd.service, run: `systemctl --user (start|stop|restart) containerd.service`
[INFO] To run containerd.service on system startup, run: `sudo loginctl enable-linger testuser`
[INFO] Use `nerdctl` to connect to the rootless containerd.
[INFO] You do NOT need to specify $CONTAINERD_ADDRESS explicitly.
```
The usage of `containerd-rootless-setuptool.sh` is almost the same as that of [`dockerd-rootless-setuptool.sh`](https://rootlesscontaine.rs/getting-started/docker/).
Resource limitation flags such as `nerdctl run --memory` require systemd and cgroup v2: https://rootlesscontaine.rs/getting-started/common/cgroup2/
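For example, on a host with systemd and cgroup v2 (a sketch; the limit value is arbitrary):
```console
$ nerdctl run --rm --memory 64m alpine echo hi
```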
### AppArmor Profile for Ubuntu 24.04+
Configuring AppArmor is needed only on Ubuntu 24.04+, with RootlessKit installed under a non-standard path: https://rootlesscontaine.rs/getting-started/common/apparmor/
## Client (nerdctl)
Just execute `nerdctl`. No need to specify the socket address manually.
```console
$ nerdctl run -it --rm alpine
```
Depending on your kernel version, you may need to enable FUSE-OverlayFS or set `export CONTAINERD_SNAPSHOTTER=native`.
(See below.)
## Add-ons
### BuildKit
To enable BuildKit, run the following command:
```console
$ containerd-rootless-setuptool.sh install-buildkit
```
## Snapshotters
### OverlayFS
The default `overlayfs` snapshotter only works on the following hosts:
- Any distro, with kernel >= 5.13
- Non-SELinux distro, with kernel >= 5.11
- Ubuntu since 2015
For other hosts, [`fuse-overlayfs` snapshotter](https://github.com/containerd/fuse-overlayfs-snapshotter) needs to be used instead.
### FUSE-OverlayFS
To enable `fuse-overlayfs` snapshotter, run the following command:
```console
$ containerd-rootless-setuptool.sh install-fuse-overlayfs
```
Then, add the following config to `~/.config/containerd/config.toml`, and run `systemctl --user restart containerd.service`:
```toml
[proxy_plugins]
[proxy_plugins."fuse-overlayfs"]
type = "snapshot"
# NOTE: replace "1000" with your actual UID
address = "/run/user/1000/containerd-fuse-overlayfs.sock"
```
The snapshotter can be specified as `$CONTAINERD_SNAPSHOTTER`.
```console
$ export CONTAINERD_SNAPSHOTTER=fuse-overlayfs
$ nerdctl run -it --rm alpine
```
If `fuse-overlayfs` does not work, try `export CONTAINERD_SNAPSHOTTER=native`.
### Stargz Snapshotter
[Stargz Snapshotter](./stargz.md) enables lazy-pulling of images.
To enable Stargz snapshotter, run the following command:
```console
$ containerd-rootless-setuptool.sh install-stargz
```
Then, add the following config to `~/.config/containerd/config.toml` and run `systemctl --user restart containerd.service`:
```toml
[proxy_plugins]
[proxy_plugins."stargz"]
type = "snapshot"
# NOTE: replace "1000" with your actual UID
address = "/run/user/1000/containerd-stargz-grpc/containerd-stargz-grpc.sock"
```
The snapshotter can be specified as `$CONTAINERD_SNAPSHOTTER`.
```console
$ export CONTAINERD_SNAPSHOTTER=stargz
$ nerdctl run -it --rm ghcr.io/stargz-containers/alpine:3.10.2-esgz
```
See https://github.com/containerd/stargz-snapshotter/blob/main/docs/pre-converted-images.md for the image list.
## bypass4netns
| :zap: Requirement | nerdctl >= 0.17 |
|-------------------|-----------------|
[bypass4netns](https://github.com/rootless-containers/bypass4netns) is an accelerator for rootless networking.
This improves **outgoing or incoming (with --publish option) networking performance.**
The performance benchmark with iperf3 on Ubuntu 21.10 on Hyper-V VM is shown below.
| iperf3 benchmark | without bypass4netns | with bypass4netns |
| ----------------- | -------------------- | ----------------- |
| container -> host | 0.398 Gbps | **42.2 Gbps** |
| host -> container | 20.6 Gbps | **47.4 Gbps** |
This benchmark can be reproduced with [https://github.com/rootless-containers/bypass4netns/blob/f009d96139e9e38ce69a2ea8a9a746349bad273c/Vagrantfile](https://github.com/rootless-containers/bypass4netns/blob/f009d96139e9e38ce69a2ea8a9a746349bad273c/Vagrantfile)
Acceleration with bypass4netns is available with:
- `--annotation nerdctl/bypass4netns=true` (for nerdctl v2.0 and later)
- `--label nerdctl/bypass4netns=true` (deprecated form, used in nerdctl prior to v2.0).
You also need to have `bypass4netnsd` (bypass4netns daemon) to be running.
Example
```console
$ containerd-rootless-setuptool.sh install-bypass4netnsd
$ nerdctl run -it --rm -p 8080:80 --annotation nerdctl/bypass4netns=true alpine
```
More detail is available at [https://github.com/rootless-containers/bypass4netns/blob/master/README.md](https://github.com/rootless-containers/bypass4netns/blob/master/README.md)
## Configuring RootlessKit
Rootless containerd recognizes the following environment variables to configure the behavior of [RootlessKit](https://github.com/rootless-containers/rootlesskit):
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_STATE_DIR=DIR`: the rootlesskit state dir. Defaults to `$XDG_RUNTIME_DIR/containerd-rootless`.
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic)`: the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_MTU=NUM`: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns)`: the rootlesskit port driver. Defaults to "builtin" (this driver does not propagate the container's source IP address and always uses 127.0.0.1. Please check [Port Drivers](https://github.com/rootless-containers/rootlesskit/blob/master/docs/port.md#port-drivers) for more details).
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false)`: whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false)`: whether to protect slirp4netns with seccomp. Defaults to "auto".
* `CONTAINERD_ROOTLESS_ROOTLESSKIT_DETACH_NETNS=(auto|true|false)`: whether to launch rootlesskit with the "detach-netns" mode.
Defaults to "auto", which is resolved to "true" if RootlessKit >= 2.0 is installed.
The "detached-netns" mode accelerates `nerdctl (pull|push|build)` and enables `nerdctl run --net=host`,
however, there is a relatively minor drawback with BuildKit prior to v0.13:
the host loopback IP address (127.0.0.1) and abstract sockets are exposed to Dockerfile's "RUN" instructions during `nerdctl build` (not `nerdctl run`).
The drawback is fixed in BuildKit v0.13. Upgrading from a prior version of BuildKit needs removing the old systemd unit:
`containerd-rootless-setuptool.sh uninstall-buildkit && rm -f ~/.config/buildkit/buildkitd.toml`
To set these variables, create `~/.config/systemd/user/containerd.service.d/override.conf` as follows:
```ini
[Service]
Environment=CONTAINERD_ROOTLESS_ROOTLESSKIT_DETACH_NETNS="false"
```
And then run the following commands:
```bash
systemctl --user daemon-reload
systemctl --user restart containerd
```
## Troubleshooting
### Hint to Fedora users
- If SELinux is enabled on your host and your kernel is older than 5.13, you need to use [`fuse-overlayfs` instead of `overlayfs`](#fuse-overlayfs).
## RootlessKit Network Design
In `detach-netns` mode:
- Network namespace is detached and stored in `$ROOTLESSKIT_STATE_DIR/netns`.
- The child command executes within the host's network namespace, allowing actions like `pull` and `push` to happen in the host network namespace.
- For creating and configuring the container's network namespace, the child command switches temporarily to the relevant namespace located in `$ROOTLESSKIT_STATE_DIR/netns`. This ensures necessary network setup while maintaining isolation in the host namespace.
![rootlessKit-network-design.png](images/rootlessKit-network-design.png)
- The RootlessKit Parent NetNS and Child NetNS are already configured by the startup script [containerd-rootless.sh](https://github.com/containerd/nerdctl/blob/main/extras/rootless/containerd-rootless.sh)
- The RootlessKit Parent NetNS is the host network namespace
- step1: `nerdctl` calls `containerd` in the host network namespace.
- step2: `containerd` calls `runc` in the host network namespace.
- step3: `runc` creates the container with dedicated namespaces (e.g. the network namespace) in the Parent netns.
- step4: `runc` enters (nsenter) the RootlessKit Child NetNS before triggering the nerdctl OCI hook.
- step5: the `nerdctl` OCI hook module leverages CNI.
- step6: CNI configures the container's network namespace: it creates the network interfaces `eth0` -> `veth0` -> `nerdctl0`.

View File

@@ -1,47 +0,0 @@
# Lazy-pulling using SOCI Snapshotter
SOCI Snapshotter is a containerd snapshotter plugin. It enables standard OCI images to be lazily loaded without requiring a build-time conversion step. "SOCI" is short for "Seekable OCI", and is pronounced "so-CHEE".
See https://github.com/awslabs/soci-snapshotter for further information.
## Prerequisites
- Install containerd remote snapshotter plugin (`soci-snapshotter-grpc`) from https://github.com/awslabs/soci-snapshotter/blob/main/docs/getting-started.md
- Add the following to `/etc/containerd/config.toml`:
```toml
[proxy_plugins]
[proxy_plugins.soci]
type = "snapshot"
address = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock"
```
- Launch `containerd` and `soci-snapshotter-grpc`
## Enable SOCI for `nerdctl run` and `nerdctl pull`
| :zap: Requirement | nerdctl >= 1.5.0 |
| ----------------- | ---------------- |
- Run `nerdctl` with `--snapshotter=soci`
```console
nerdctl run -it --rm --snapshotter=soci public.ecr.aws/soci-workshop-examples/ffmpeg:latest
```
- You can also only pull the image with SOCI without running the container.
```console
nerdctl pull --snapshotter=soci public.ecr.aws/soci-workshop-examples/ffmpeg:latest
```
For images that already have SOCI indices, see https://gallery.ecr.aws/soci-workshop-examples
## Enable SOCI for `nerdctl push`
| :zap: Requirement | nerdctl >= 1.6.0 |
| ----------------- | ---------------- |
- Push the image with a SOCI index. By adding the `--snapshotter=soci` arg to `nerdctl push`, `nerdctl` will create the SOCI index and push the index to the same destination as the image.
```console
nerdctl push --snapshotter=soci --soci-span-size=2097152 --soci-min-layer-size=20971520 public.ecr.aws/my-registry/my-repo:latest
```
`--soci-span-size` and `--soci-min-layer-size` are two properties to customize the SOCI index. See [Command Reference](https://github.com/containerd/nerdctl/blob/377b2077bb616194a8ef1e19ccde32aa1ffd6c84/docs/command-reference.md?plain=1#L773) for further details.

View File

@@ -1,187 +0,0 @@
# Lazy-pulling using Stargz Snapshotter
| :zap: Requirement | nerdctl >= 0.0.1 |
|-------------------|------------------|
Lazy-pulling is a technique for running containers before the image pull completes.
See https://github.com/containerd/stargz-snapshotter for further information.
[![asciicast](https://asciinema.org/a/378377.svg)](https://asciinema.org/a/378377)
## Enable lazy-pulling for `nerdctl run`
> **NOTE**
> For rootless installation, see [`rootless.md`](./rootless.md#stargz-snapshotter)
- Install Stargz plugin (`containerd-stargz-grpc`) from https://github.com/containerd/stargz-snapshotter
- Add the following to `/etc/containerd/config.toml`:
```toml
[proxy_plugins]
[proxy_plugins.stargz]
type = "snapshot"
address = "/run/containerd-stargz-grpc/containerd-stargz-grpc.sock"
```
- Launch `containerd` and `containerd-stargz-grpc`
- Run `nerdctl` with `--snapshotter=stargz`
```console
# nerdctl --snapshotter=stargz run -it --rm ghcr.io/stargz-containers/fedora:30-esgz
```
For the list of pre-converted Stargz images, see https://github.com/containerd/stargz-snapshotter/blob/main/docs/pre-converted-images.md
### Benchmark result (Dec 9, 2020)
For running `python3 -c 'print("hi")'`, eStargz with Stargz Snapshotter is 3-4 times faster than the legacy OCI image with the overlayfs snapshotter.
Legacy OCI with overlayfs snapshotter:
```console
# time nerdctl --snapshotter=overlayfs run -it --rm ghcr.io/stargz-containers/python:3.7-org python3 -c 'print("hi")'
ghcr.io/stargz-containers/python:3.7-org: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:6008006c63b0a6043a11ac151cee572e0c8676b4ba3130ff23deff5f5d711237: done |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:48eafda05f80010a6677294473d51a530e8f15375b6447195b6fb04dc2a30ce7: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:f860607a6cd9751ac8db2f33cbc3ce1777a44eb3c04853e116763441a304fbf6: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:96b2c1e36db5f5910f58da2ca4f9311b0690810c7107fb055ee1541498b5061f: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:c495e8de12d26c9843a7a2bf8c68de1e5652e66d80d9bc869279f9af6f86736a: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:33382189822a108b249cf3ccd234d04c3a8dfe7d593df19c751dcfab3675d5f2: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:94c9a318e47ab8a318582e2712bb495f92f17a7c1e50f13cc8a3e362c1b09290: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:6eaa0b6b8562fb4a02e140ae53b3910fc4d0db6e68660390eaef993f42e21102: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:adbdcbacafe93bf0791e49c8d3689bb78d9e60d02d384d4e14433aedae39f52c: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:756975cb9c7e7933d824af9319b512dd72a50894232761d06ef3be59981df838: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:d77915b4e630d47296770ce4cf481894885978072432456615172af463433cc5: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:5f37a0a41b6b03489dd7de0aa2a79e369fd8b219bbc36b52f3f9790dc128e74b: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 41.9s total: 321.3 (7.7 MiB/s)
hi
real 0m51.754s
user 0m2.687s
sys 0m5.533s
```
eStargz with Stargz Snapshotter:
```console
# time nerdctl --snapshotter=stargz run -it --rm ghcr.io/stargz-containers/python:3.7-esgz python3 -c 'print("hi")'
fetching sha256:2ea0dd96... application/vnd.oci.image.index.v1+json
fetching sha256:9612ff73... application/vnd.docker.distribution.manifest.v2+json
fetching sha256:34e5920e... application/vnd.docker.container.image.v1+json
hi
real 0m13.589s
user 0m0.132s
sys 0m0.158s
```
## Enable lazy-pulling for pulling base images during `nerdctl build`
- Launch `buildkitd` with `--oci-worker-snapshotter=stargz` (or `--containerd-worker-snapshotter=stargz` if you use the containerd worker)
- Launch `nerdctl build`. No need to specify `--snapshotter` for `nerdctl`; see the sketch below.
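A minimal sketch of this flow (assuming an eStargz base image in the Dockerfile and the stargz snapshotter set up as described above):
```console
$ buildkitd --oci-worker-snapshotter=stargz &
$ nerdctl build -t example.com/foo .
```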
## Building stargz images using `nerdctl build`
```console
$ nerdctl build -t example.com/foo .
$ nerdctl image convert --estargz --oci example.com/foo example.com/foo:estargz
$ nerdctl push example.com/foo:estargz
```
NOTE: `--estargz` should be specified in conjunction with `--oci`.
Stargz Snapshotter is not needed for building stargz images.
## Tips for image conversion
### Tips 1: Creating smaller eStargz images
`nerdctl image convert` allows the following flags for optionally creating a smaller eStargz image.
The result image requires stargz-snapshotter >= v0.13.0 for lazy pulling.
- `--estargz-min-chunk-size`: The minimum number of bytes of data that must be written in one gzip stream. If it is > 0, multiple files and chunks can be written into one gzip stream, so fewer gzip headers and a smaller result blob can be expected. `--estargz-min-chunk-size=0` produces normal eStargz.
- `--estargz-external-toc`: Separates the TOC JSON metadata into another image (called the "TOC image"). The resulting eStargz doesn't contain the TOC, so we can expect a smaller size than normal eStargz. This is an [experimental](./experimental.md) feature.
#### `--estargz-min-chunk-size` usage
conversion:
```console
# nerdctl image convert --oci --estargz --estargz-min-chunk-size=50000 ghcr.io/stargz-containers/ubuntu:22.04 registry2:5000/ubuntu:22.04-chunk50000
# nerdctl image ls
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
ghcr.io/stargz-containers/ubuntu 22.04 20fa2d7bb4de 14 seconds ago linux/amd64 83.4 MiB 29.0 MiB
registry2:5000/ubuntu 22.04-chunk50000 562e09e1b3c1 2 seconds ago linux/amd64 0.0 B 29.2 MiB
# nerdctl push --insecure-registry registry2:5000/ubuntu:22.04-chunk50000
```
Pull it lazily:
```console
# nerdctl pull --snapshotter=stargz --insecure-registry registry2:5000/ubuntu:22.04-chunk50000
# mount | grep "stargz on"
stargz on /var/lib/containerd-stargz-grpc/snapshotter/snapshots/1/fs type fuse.rawBridge (rw,nodev,relatime,user_id=0,group_id=0,allow_other)
```
#### `--estargz-external-toc` usage
convert:
```console
# nerdctl image convert --oci --estargz --estargz-external-toc ghcr.io/stargz-containers/ubuntu:22.04 registry2:5000/ubuntu:22.04-ex
INFO[0005] Extra image(0) registry2:5000/ubuntu:22.04-ex-esgztoc
sha256:3059dd5d9c404344e0b7c43d9782de8cae908531897262b7772103a0b585bbee
# nerdctl images
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
ghcr.io/stargz-containers/ubuntu 22.04 20fa2d7bb4de 9 seconds ago linux/amd64 83.4 MiB 29.0 MiB
registry2:5000/ubuntu 22.04-ex 3059dd5d9c40 1 second ago linux/amd64 0.0 B 30.8 MiB
registry2:5000/ubuntu 22.04-ex-esgztoc 18c042b6eb8b 1 second ago linux 0.0 B 151.3 KiB
```
Then push eStargz(`registry2:5000/ubuntu:22.04-ex`) and TOC image(`registry2:5000/ubuntu:22.04-ex-esgztoc`) to the same registry (`registry2` is used in this example but you can use arbitrary registries):
```console
# nerdctl push --insecure-registry registry2:5000/ubuntu:22.04-ex
# nerdctl push --insecure-registry registry2:5000/ubuntu:22.04-ex-esgztoc
```
Pull it lazily:
```console
# nerdctl pull --insecure-registry --snapshotter=stargz registry2:5000/ubuntu:22.04-ex
```
Stargz Snapshotter automatically refers to the TOC image on the same registry.
##### optional `--estargz-keep-diff-id` flag for conversion without changing layer diffID
`nerdctl image convert` supports optional flag `--estargz-keep-diff-id` specified with `--estargz-external-toc`.
This converts an image to eStargz without changing the diffID (uncompressed digest), so even an eStargz-agnostic gzip decompressor (e.g. gunzip) can restore the original tar blob.
```console
# nerdctl image convert --oci --estargz --estargz-external-toc --estargz-keep-diff-id ghcr.io/stargz-containers/ubuntu:22.04 registry2:5000/ubuntu:22.04-ex-keepdiff
# nerdctl push --insecure-registry registry2:5000/ubuntu:22.04-ex-keepdiff
# nerdctl push --insecure-registry registry2:5000/ubuntu:22.04-ex-keepdiff-esgztoc
# crane --insecure blob registry2:5000/ubuntu:22.04-ex-keepdiff@sha256:2dc39ba059dcd42ade30aae30147b5692777ba9ff0779a62ad93a74de02e3e1f | jq -r '.rootfs.diff_ids[]'
sha256:7f5cbd8cc787c8d628630756bcc7240e6c96b876c2882e6fc980a8b60cdfa274
# crane blob ghcr.io/stargz-containers/ubuntu:22.04@sha256:2dc39ba059dcd42ade30aae30147b5692777ba9ff0779a62ad93a74de02e3e1f | jq -r '.rootfs.diff_ids[]'
sha256:7f5cbd8cc787c8d628630756bcc7240e6c96b876c2882e6fc980a8b60cdfa274
```
### Tips 2: Using zstd instead of gzip (a.k.a. zstd:chunked)
You can use zstd compression with lazy pulling support (a.k.a zstd:chunked) instead of gzip.
- Pros
  - [Faster](https://github.com/facebook/zstd/tree/v1.5.2#benchmarks) compression/decompression.
- Cons
  - Older tools might not support it, and some tools do not support it yet.
    - zstd support in the OCI Image Specification is still a release candidate (as of 2022/11); it will be added to [v1.1.0](https://github.com/opencontainers/image-spec/commit/1a29e8675a64a5cdd2d93b6fa879a82d9a4d926a).
    - zstd is supported by [docker >= v23.0.0](https://github.com/moby/moby/releases/tag/v23.0.0).
    - zstd is supported by [containerd >= v1.5](https://github.com/containerd/containerd/releases/tag/v1.5.0).
  - `min-chunk-size` and `external-toc` (described in Tips 1) are not supported yet.
```console
$ nerdctl build -t example.com/foo .
$ nerdctl image convert --zstdchunked --oci example.com/foo example.com/foo:zstdchunked
$ nerdctl push example.com/foo:zstdchunked
```

View File

@@ -1,165 +0,0 @@
# SAL RFS (Remote File System) Module (`sal::virt::rfs`)
## Overview
The `sal::virt::rfs` module provides a Rust interface for interacting with an underlying `rfs` command-line tool. This tool facilitates mounting various types of remote and local filesystems and managing packed filesystem layers.
The module allows Rust applications and `herodo` Rhai scripts to:
- Mount and unmount filesystems from different sources (e.g., local paths, SSH, S3, WebDAV).
- List currently mounted filesystems and retrieve information about specific mounts.
- Pack directories into filesystem layers, potentially using specified storage backends.
- Unpack, list contents of, and verify these filesystem layers.
All operations are performed by invoking the `rfs` CLI tool and parsing its output.
## Key Design Points
- **CLI Wrapper**: This module acts as a wrapper around an external `rfs` command-line utility. The actual filesystem operations and layer management are delegated to this tool.
- **Asynchronous Operations (Implicit)**: While the Rust functions themselves might be synchronous, the underlying `execute_rfs_command` (presumably from `super::cmd`) likely handles command execution, which could be asynchronous or blocking depending on its implementation.
- **Filesystem Abstraction**: Supports mounting diverse filesystem types such as `local`, `ssh`, `s3`, and `webdav` through the `rfs` tool's capabilities.
- **Layer Management**: Provides functionalities to `pack` directories into portable layers, `unpack` them, `list_contents`, and `verify` their integrity. This is useful for creating and managing reproducible filesystem snapshots or components.
- **Store Specifications (`StoreSpec`)**: The packing functionality allows specifying `StoreSpec` types, suggesting that packed layers can be stored or referenced using different backend mechanisms (e.g., local files, S3 buckets). This enables flexible storage and retrieval of filesystem layers.
- **Builder Pattern**: Uses `RfsBuilder` for constructing mount commands with various options and `PackBuilder` for packing operations, providing a fluent interface for complex configurations (see the Rust sketch just after this list).
- **Rhai Scriptability**: Most functionalities are exposed to Rhai scripts via `herodo` through the `sal::rhai::rfs` bridge, enabling automation of filesystem and layer management tasks.
- **Structured Error Handling**: Defines `RfsError` for specific error conditions encountered during `rfs` command execution or output parsing.
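For orientation, the following is a minimal Rust sketch of the two builders and `StoreSpec` described above. It is illustrative only: it assumes the module is reachable as `sal_virt::rfs` (the crate path this commit introduces) and that an `rfs` binary is available on `PATH`; the paths and options are placeholders.
```rust
use sal_virt::rfs::{MountType, PackBuilder, RfsBuilder, StoreSpec};

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    // Fluent mount construction: source, target, mount type, then options.
    let mount = RfsBuilder::new("user@host:/remote/path", "/mnt/remote", MountType::SSH)
        .with_option("read_only", "true")
        .mount()?;
    println!("mounted {} at {} (id {})", mount.source, mount.target, mount.id);

    // Pack a directory into a layer backed by a file store.
    let spec = StoreSpec::new("file").with_option("path", "/tmp/rfs_store");
    PackBuilder::new("/tmp/pack_this_dir", "/tmp/my_layer.pack")
        .with_store_spec(spec)
        .pack()?;

    Ok(())
}
```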
## Rhai Scripting with `herodo`
The `sal::virt::rfs` module is scriptable via `herodo`. The following functions are available in Rhai, prefixed with `rfs_`:
### Mount Operations
- `rfs_mount(source: String, target: String, mount_type: String, options: Map) -> Map`
- Mounts a filesystem.
- `source`: The source path or URL (e.g., `/path/to/local_dir`, `ssh://user@host:/remote/path`, `s3://bucket/key`).
- `target`: The local path where the filesystem will be mounted.
- `mount_type`: A string specifying the type of filesystem (e.g., "local", "ssh", "s3", "webdav").
- `options`: A Rhai map of additional mount options (e.g., `#{ "read_only": true, "uid": 1000 }`).
- Returns a map containing details of the mount (id, source, target, fs_type, options) on success.
- `rfs_unmount(target: String) -> ()`
- Unmounts the filesystem at the specified target path.
- `rfs_list_mounts() -> Array`
- Lists all currently mounted filesystems managed by `rfs`.
- Returns an array of maps, each representing a mount with its details.
- `rfs_unmount_all() -> ()`
- Unmounts all filesystems currently managed by `rfs`.
- `rfs_get_mount_info(target: String) -> Map`
- Retrieves information about a specific mounted filesystem.
- Returns a map with mount details if found.
### Pack/Layer Operations
- `rfs_pack(directory: String, output: String, store_specs: String) -> ()`
- Packs the contents of a `directory` into an `output` file (layer).
- `store_specs`: A comma-separated string defining storage specifications for the layer (e.g., `"file:path=/path/to/local_store,s3:bucket=my-archive,region=us-west-1"`). Each spec is `type:key=value,key2=value2`.
- `rfs_unpack(input: String, directory: String) -> ()`
- Unpacks an `input` layer file into the specified `directory`.
- `rfs_list_contents(input: String) -> String`
- Lists the contents of an `input` layer file.
- Returns a string containing the file listing (raw output from the `rfs` tool).
- `rfs_verify(input: String) -> bool`
- Verifies the integrity of an `input` layer file.
- Returns `true` if the layer is valid, `false` otherwise.
### Rhai Example
```rhai
// Example: Mounting a local directory (ensure /mnt/my_local_mount exists)
let source_dir = "/tmp/my_data_source"; // Create this directory first
let target_mount = "/mnt/my_local_mount";
// Create source_dir if it doesn't exist for the example to run.
// In a real script, you might use sal::os::dir_create or ensure it exists.
// For this example, assume it's manually created or use: os_run_command(`mkdir -p ${source_dir}`);
print(`Mounting ${source_dir} to ${target_mount}...`);
let mounted = false;
try {
    let mount_info = rfs_mount(source_dir, target_mount, "local", #{});
    mounted = true;
    print(`Mount successful: ${mount_info}`);
} catch (err) {
    print(`Mount failed: ${err}`);
}
// List mounts (errors raised by rfs_* functions are caught with try/catch)
print("\nCurrent mounts:");
try {
    for m in rfs_list_mounts() {
        print(`  Target: ${m.target}, Source: ${m.source}, Type: ${m.fs_type}`);
    }
} catch (err) {
    print(`Error listing mounts: ${err}`);
}
// Example: Packing a directory
let dir_to_pack = "/tmp/pack_this_dir"; // Create and populate this directory
let packed_file = "/tmp/my_layer.pack";
// os_run_command(`mkdir -p ${dir_to_pack}`);
// os_run_command(`echo 'hello' > ${dir_to_pack}/file1.txt`);
print(`\nPacking ${dir_to_pack} to ${packed_file}...`);
// Using a file-based store spec for simplicity
let pack_store_specs = "file:path=/tmp/rfs_store";
// os_run_command(`mkdir -p /tmp/rfs_store`);
try {
    rfs_pack(dir_to_pack, packed_file, pack_store_specs);
    print("Packing successful.");
    // List contents of the packed file
    print(`\nContents of ${packed_file}:`);
    print(rfs_list_contents(packed_file));
    // Verify the packed file
    print(`\nVerifying ${packed_file}...`);
    if rfs_verify(packed_file) {
        print("Verification successful: Layer is valid.");
    } else {
        print("Verification failed: Layer is not valid.");
    }
    // Example: Unpacking
    let unpack_dir = "/tmp/unpacked_layer_here";
    // os_run_command(`mkdir -p ${unpack_dir}`);
    print(`\nUnpacking ${packed_file} to ${unpack_dir}...`);
    rfs_unpack(packed_file, unpack_dir);
    print("Unpacking successful.");
    // You would typically check contents of unpack_dir here
    // os_run_command(`ls -la ${unpack_dir}`);
} catch (err) {
    print(`Pack/unpack error: ${err}`);
}
// Cleanup: Unmount the local mount
if mounted {
    print(`\nUnmounting ${target_mount}...`);
    rfs_unmount(target_mount);
}
// To run this example, ensure the 'rfs' command-line tool is installed and configured,
// and that the necessary directories (/tmp/my_data_source, /mnt/my_local_mount, etc.)
// exist and have correct permissions.
// You might need to run herodo with sudo for mount/unmount operations.
print("\nRFS Rhai script finished.");
```
This module provides a flexible way to manage diverse filesystems and filesystem layers, making it a powerful tool for system automation and deployment tasks within the SAL ecosystem.

View File

@@ -1,291 +0,0 @@
use super::{
cmd::execute_rfs_command,
error::RfsError,
types::{Mount, MountType, StoreSpec},
};
use std::collections::HashMap;
/// Builder for RFS mount operations
#[derive(Clone)]
pub struct RfsBuilder {
/// Source path or URL
source: String,
/// Target mount point
target: String,
/// Mount type
mount_type: MountType,
/// Mount options
options: HashMap<String, String>,
/// Mount ID
#[allow(dead_code)]
mount_id: Option<String>,
/// Debug mode
debug: bool,
}
impl RfsBuilder {
/// Create a new RFS builder
///
/// # Arguments
///
/// * `source` - Source path or URL
/// * `target` - Target mount point
/// * `mount_type` - Mount type
///
/// # Returns
///
/// * `Self` - New RFS builder
pub fn new(source: &str, target: &str, mount_type: MountType) -> Self {
Self {
source: source.to_string(),
target: target.to_string(),
mount_type,
options: HashMap::new(),
mount_id: None,
debug: false,
}
}
/// Add a mount option
///
/// # Arguments
///
/// * `key` - Option key
/// * `value` - Option value
///
/// # Returns
///
/// * `Self` - Updated RFS builder for method chaining
pub fn with_option(mut self, key: &str, value: &str) -> Self {
self.options.insert(key.to_string(), value.to_string());
self
}
/// Add multiple mount options
///
/// # Arguments
///
/// * `options` - Map of option keys to values
///
/// # Returns
///
/// * `Self` - Updated RFS builder for method chaining
pub fn with_options(mut self, options: HashMap<&str, &str>) -> Self {
for (key, value) in options {
self.options.insert(key.to_string(), value.to_string());
}
self
}
/// Set debug mode
///
/// # Arguments
///
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Self` - Updated RFS builder for method chaining
pub fn with_debug(mut self, debug: bool) -> Self {
self.debug = debug;
self
}
/// Mount the filesystem
///
/// # Returns
///
/// * `Result<Mount, RfsError>` - Mount information or error
pub fn mount(self) -> Result<Mount, RfsError> {
// Build the command string
let mut cmd = String::from("mount -t ");
cmd.push_str(&self.mount_type.to_string());
// Add options if any
if !self.options.is_empty() {
cmd.push_str(" -o ");
let mut first = true;
for (key, value) in &self.options {
if !first {
cmd.push_str(",");
}
cmd.push_str(key);
cmd.push_str("=");
cmd.push_str(value);
first = false;
}
}
// Add source and target
cmd.push_str(" ");
cmd.push_str(&self.source);
cmd.push_str(" ");
cmd.push_str(&self.target);
// Split the command into arguments
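// (Note: naive whitespace splitting, so sources, targets, or option values containing spaces are not supported.)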
let args: Vec<&str> = cmd.split_whitespace().collect();
// Execute the command
let result = execute_rfs_command(&args)?;
// Parse the output to get the mount ID
let mount_id = result.stdout.trim().to_string();
if mount_id.is_empty() {
return Err(RfsError::MountFailed("Failed to get mount ID".to_string()));
}
// Create and return the Mount struct
Ok(Mount {
id: mount_id,
source: self.source,
target: self.target,
fs_type: self.mount_type.to_string(),
options: self
.options
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect(),
})
}
/// Unmount the filesystem
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn unmount(&self) -> Result<(), RfsError> {
// Execute the unmount command
let result = execute_rfs_command(&["unmount", &self.target])?;
// Check for errors
if !result.success {
return Err(RfsError::UnmountFailed(format!(
"Failed to unmount {}: {}",
self.target, result.stderr
)));
}
Ok(())
}
}
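// Illustrative usage (a sketch; assumes an `rfs` binary on PATH and an existing target directory):
//
//     let mount = RfsBuilder::new("/data", "/mnt/data", MountType::Local)
//         .with_option("read_only", "true")
//         .mount()?;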
/// Builder for RFS pack operations
#[derive(Clone)]
pub struct PackBuilder {
/// Directory to pack
directory: String,
/// Output file
output: String,
/// Store specifications
store_specs: Vec<StoreSpec>,
/// Debug mode
debug: bool,
}
impl PackBuilder {
/// Create a new pack builder
///
/// # Arguments
///
/// * `directory` - Directory to pack
/// * `output` - Output file
///
/// # Returns
///
/// * `Self` - New pack builder
pub fn new(directory: &str, output: &str) -> Self {
Self {
directory: directory.to_string(),
output: output.to_string(),
store_specs: Vec::new(),
debug: false,
}
}
/// Add a store specification
///
/// # Arguments
///
/// * `store_spec` - Store specification
///
/// # Returns
///
/// * `Self` - Updated pack builder for method chaining
pub fn with_store_spec(mut self, store_spec: StoreSpec) -> Self {
self.store_specs.push(store_spec);
self
}
/// Add multiple store specifications
///
/// # Arguments
///
/// * `store_specs` - Store specifications
///
/// # Returns
///
/// * `Self` - Updated pack builder for method chaining
pub fn with_store_specs(mut self, store_specs: Vec<StoreSpec>) -> Self {
self.store_specs.extend(store_specs);
self
}
/// Set debug mode
///
/// # Arguments
///
/// * `debug` - Whether to enable debug output
///
/// # Returns
///
/// * `Self` - Updated pack builder for method chaining
pub fn with_debug(mut self, debug: bool) -> Self {
self.debug = debug;
self
}
/// Pack the directory
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn pack(self) -> Result<(), RfsError> {
// Build the command string
let mut cmd = String::from("pack -m ");
cmd.push_str(&self.output);
// Add store specs if any
if !self.store_specs.is_empty() {
cmd.push_str(" -s ");
let mut first = true;
for spec in &self.store_specs {
if !first {
cmd.push_str(",");
}
let spec_str = spec.to_string();
cmd.push_str(&spec_str);
first = false;
}
}
// Add directory
cmd.push_str(" ");
cmd.push_str(&self.directory);
// Split the command into arguments
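// (Note: as in mount(), whitespace splitting breaks on paths containing spaces.)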
let args: Vec<&str> = cmd.split_whitespace().collect();
// Execute the command
let result = execute_rfs_command(&args)?;
// Check for errors
if !result.success {
return Err(RfsError::PackFailed(format!(
"Failed to pack {}: {}",
self.directory, result.stderr
)));
}
Ok(())
}
}
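// Illustrative usage (a sketch; the store spec string mirrors StoreSpec::to_string()):
//
//     PackBuilder::new("/tmp/pack_this_dir", "/tmp/my_layer.pack")
//         .with_store_spec(StoreSpec::new("file").with_option("path", "/tmp/rfs_store"))
//         .pack()?;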

View File

@@ -1,61 +0,0 @@
use super::error::RfsError;
use crate::process::{run_command, CommandResult};
use std::cell::RefCell;
use std::thread_local;
// Thread-local storage for debug flag
thread_local! {
static DEBUG: RefCell<bool> = RefCell::new(false);
}
/// Set the thread-local debug flag
#[allow(dead_code)]
pub fn set_thread_local_debug(debug: bool) {
DEBUG.with(|d| {
*d.borrow_mut() = debug;
});
}
/// Get the current thread-local debug flag
pub fn thread_local_debug() -> bool {
DEBUG.with(|d| *d.borrow())
}
/// Execute an RFS command with the given arguments
///
/// # Arguments
///
/// * `args` - Command arguments
///
/// # Returns
///
/// * `Result<CommandResult, RfsError>` - Command result or error
pub fn execute_rfs_command(args: &[&str]) -> Result<CommandResult, RfsError> {
let debug = thread_local_debug();
// Construct the command string
let mut cmd = String::from("rfs");
for arg in args {
cmd.push(' ');
cmd.push_str(arg);
}
if debug {
println!("Executing RFS command: {}", cmd);
}
// Execute the command
let result = run_command(&cmd)
.map_err(|e| RfsError::CommandFailed(format!("Failed to execute RFS command: {}", e)))?;
if debug {
println!("RFS command result: {:?}", result);
}
// Check if the command was successful
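// (Note: a failed command with empty stderr is still returned as Ok; callers must inspect result.success themselves.)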
if !result.success && !result.stderr.is_empty() {
return Err(RfsError::CommandFailed(result.stderr));
}
Ok(result)
}

View File

@@ -1,43 +0,0 @@
use std::fmt;
use std::error::Error;
/// Error types for RFS operations
#[derive(Debug)]
pub enum RfsError {
/// Command execution failed
CommandFailed(String),
/// Invalid argument provided
InvalidArgument(String),
/// Mount operation failed
MountFailed(String),
/// Unmount operation failed
UnmountFailed(String),
/// List operation failed
ListFailed(String),
/// Pack operation failed
PackFailed(String),
/// Other error
Other(String),
}
impl fmt::Display for RfsError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RfsError::CommandFailed(msg) => write!(f, "RFS command failed: {}", msg),
RfsError::InvalidArgument(msg) => write!(f, "Invalid argument: {}", msg),
RfsError::MountFailed(msg) => write!(f, "Mount failed: {}", msg),
RfsError::UnmountFailed(msg) => write!(f, "Unmount failed: {}", msg),
RfsError::ListFailed(msg) => write!(f, "List failed: {}", msg),
RfsError::PackFailed(msg) => write!(f, "Pack failed: {}", msg),
RfsError::Other(msg) => write!(f, "Other error: {}", msg),
}
}
}
impl Error for RfsError {}
impl From<std::io::Error> for RfsError {
fn from(error: std::io::Error) -> Self {
RfsError::Other(format!("IO error: {}", error))
}
}

View File

@@ -1,14 +0,0 @@
mod cmd;
mod error;
mod mount;
mod pack;
mod builder;
mod types;
pub use error::RfsError;
pub use builder::{RfsBuilder, PackBuilder};
pub use types::{Mount, MountType, StoreSpec};
pub use mount::{list_mounts, unmount_all, unmount, get_mount_info};
pub use pack::{pack_directory, unpack, list_contents, verify};
// Re-export the execute_rfs_command function for use in other modules
pub use cmd::execute_rfs_command;

View File

@@ -1,142 +0,0 @@
use super::{
error::RfsError,
cmd::execute_rfs_command,
types::Mount,
};
/// List all mounted filesystems
///
/// # Returns
///
/// * `Result<Vec<Mount>, RfsError>` - List of mounts or error
pub fn list_mounts() -> Result<Vec<Mount>, RfsError> {
// Execute the list command
let result = execute_rfs_command(&["list", "--json"])?;
// Parse the JSON output
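// Expected shape (inferred from the fields read below):
// [{"id": "...", "source": "...", "target": "...", "type": "...", "options": ["..."]}]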
match serde_json::from_str::<serde_json::Value>(&result.stdout) {
Ok(json) => {
if let serde_json::Value::Array(mounts_json) = json {
let mut mounts = Vec::new();
for mount_json in mounts_json {
// Extract mount ID
let id = match mount_json.get("id").and_then(|v| v.as_str()) {
Some(id) => id.to_string(),
None => return Err(RfsError::ListFailed("Missing mount ID".to_string())),
};
// Extract source
let source = match mount_json.get("source").and_then(|v| v.as_str()) {
Some(source) => source.to_string(),
None => return Err(RfsError::ListFailed("Missing source".to_string())),
};
// Extract target
let target = match mount_json.get("target").and_then(|v| v.as_str()) {
Some(target) => target.to_string(),
None => return Err(RfsError::ListFailed("Missing target".to_string())),
};
// Extract filesystem type
let fs_type = match mount_json.get("type").and_then(|v| v.as_str()) {
Some(fs_type) => fs_type.to_string(),
None => return Err(RfsError::ListFailed("Missing filesystem type".to_string())),
};
// Extract options
let options = match mount_json.get("options").and_then(|v| v.as_array()) {
Some(options_array) => {
let mut options_vec = Vec::new();
for option_value in options_array {
if let Some(option_str) = option_value.as_str() {
options_vec.push(option_str.to_string());
}
}
options_vec
},
None => Vec::new(), // Empty vector if no options found
};
// Create Mount struct and add to vector
mounts.push(Mount {
id,
source,
target,
fs_type,
options,
});
}
Ok(mounts)
} else {
Err(RfsError::ListFailed("Expected JSON array".to_string()))
}
},
Err(e) => {
Err(RfsError::ListFailed(format!("Failed to parse mount list JSON: {}", e)))
}
}
}
/// Unmount a filesystem by target path
///
/// # Arguments
///
/// * `target` - Target mount point
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn unmount(target: &str) -> Result<(), RfsError> {
// Execute the unmount command
let result = execute_rfs_command(&["unmount", target])?;
// Check for errors
if !result.success {
return Err(RfsError::UnmountFailed(format!("Failed to unmount {}: {}", target, result.stderr)));
}
Ok(())
}
/// Unmount all filesystems
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn unmount_all() -> Result<(), RfsError> {
// Execute the unmount all command
let result = execute_rfs_command(&["unmount", "--all"])?;
// Check for errors
if !result.success {
return Err(RfsError::UnmountFailed(format!("Failed to unmount all filesystems: {}", result.stderr)));
}
Ok(())
}
/// Get information about a mounted filesystem
///
/// # Arguments
///
/// * `target` - Target mount point
///
/// # Returns
///
/// * `Result<Mount, RfsError>` - Mount information or error
pub fn get_mount_info(target: &str) -> Result<Mount, RfsError> {
// Get all mounts
let mounts = list_mounts()?;
// Find the mount with the specified target
for mount in mounts {
if mount.target == target {
return Ok(mount);
}
}
// Mount not found
Err(RfsError::Other(format!("No mount found at {}", target)))
}

View File

@@ -1,100 +0,0 @@
use super::{
error::RfsError,
cmd::execute_rfs_command,
types::StoreSpec,
builder::PackBuilder,
};
/// Pack a directory into a filesystem layer
///
/// # Arguments
///
/// * `directory` - Directory to pack
/// * `output` - Output file
/// * `store_specs` - Store specifications
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn pack_directory(directory: &str, output: &str, store_specs: &[StoreSpec]) -> Result<(), RfsError> {
// Create a new pack builder
let mut builder = PackBuilder::new(directory, output);
// Add store specs
for spec in store_specs {
builder = builder.with_store_spec(spec.clone());
}
// Pack the directory
builder.pack()
}
/// Unpack a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
/// * `directory` - Directory to unpack to
///
/// # Returns
///
/// * `Result<(), RfsError>` - Success or error
pub fn unpack(input: &str, directory: &str) -> Result<(), RfsError> {
// Execute the unpack command
let result = execute_rfs_command(&["unpack", "-m", input, directory])?;
// Check for errors
if !result.success {
return Err(RfsError::Other(format!("Failed to unpack {}: {}", input, result.stderr)));
}
Ok(())
}
/// List the contents of a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
///
/// # Returns
///
/// * `Result<String, RfsError>` - File listing or error
pub fn list_contents(input: &str) -> Result<String, RfsError> {
// Execute the list command
let result = execute_rfs_command(&["list", "-m", input])?;
// Check for errors
if !result.success {
return Err(RfsError::Other(format!("Failed to list contents of {}: {}", input, result.stderr)));
}
Ok(result.stdout)
}
/// Verify a filesystem layer
///
/// # Arguments
///
/// * `input` - Input file
///
/// # Returns
///
/// * `Result<bool, RfsError>` - Whether the layer is valid or error
pub fn verify(input: &str) -> Result<bool, RfsError> {
// Execute the verify command
let result = execute_rfs_command(&["verify", "-m", input])?;
// Check for errors
if !result.success {
// If the command failed but returned a specific error about verification,
// return false instead of an error
if result.stderr.contains("verification failed") {
return Ok(false);
}
return Err(RfsError::Other(format!("Failed to verify {}: {}", input, result.stderr)));
}
Ok(true)
}

View File

@@ -1,117 +0,0 @@
use std::collections::HashMap;
/// Represents a mounted filesystem
#[derive(Debug, Clone)]
pub struct Mount {
/// Mount ID
pub id: String,
/// Source path or URL
pub source: String,
/// Target mount point
pub target: String,
/// Filesystem type
pub fs_type: String,
/// Mount options
pub options: Vec<String>,
}
/// Types of mounts supported by RFS
#[derive(Debug, Clone)]
pub enum MountType {
/// Local filesystem
Local,
/// SSH remote filesystem
SSH,
/// S3 object storage
S3,
/// WebDAV remote filesystem
WebDAV,
/// Custom mount type
Custom(String),
}
impl MountType {
/// Convert mount type to string representation
pub fn to_string(&self) -> String {
match self {
MountType::Local => "local".to_string(),
MountType::SSH => "ssh".to_string(),
MountType::S3 => "s3".to_string(),
MountType::WebDAV => "webdav".to_string(),
MountType::Custom(s) => s.clone(),
}
}
/// Create a MountType from a string
pub fn from_string(s: &str) -> Self {
match s.to_lowercase().as_str() {
"local" => MountType::Local,
"ssh" => MountType::SSH,
"s3" => MountType::S3,
"webdav" => MountType::WebDAV,
_ => MountType::Custom(s.to_string()),
}
}
}
/// Store specification for packing operations
#[derive(Debug, Clone)]
pub struct StoreSpec {
/// Store type (e.g., "file", "s3")
pub spec_type: String,
/// Store options
pub options: HashMap<String, String>,
}
impl StoreSpec {
/// Create a new store specification
///
/// # Arguments
///
/// * `spec_type` - Store type (e.g., "file", "s3")
///
/// # Returns
///
/// * `Self` - New store specification
pub fn new(spec_type: &str) -> Self {
Self {
spec_type: spec_type.to_string(),
options: HashMap::new(),
}
}
/// Add an option to the store specification
///
/// # Arguments
///
/// * `key` - Option key
/// * `value` - Option value
///
/// # Returns
///
/// * `Self` - Updated store specification for method chaining
pub fn with_option(mut self, key: &str, value: &str) -> Self {
self.options.insert(key.to_string(), value.to_string());
self
}
/// Convert the store specification to a string
///
/// # Returns
///
/// * `String` - String representation of the store specification
pub fn to_string(&self) -> String {
let mut result = self.spec_type.clone();
if !self.options.is_empty() {
result.push_str(":");
let options: Vec<String> = self.options
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect();
result.push_str(&options.join(","));
}
result
}
}
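// Example (sketch): a single-option spec renders deterministically:
//     StoreSpec::new("s3").with_option("bucket", "archive").to_string() == "s3:bucket=archive"
// With multiple options, ordering follows HashMap iteration and is unspecified.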