cloud-hypervisor SAL + rhai test script for it
packages/system/virt/src/cloudhv/mod.rs (new file, 459 lines)
@@ -0,0 +1,459 @@
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::fmt;
use std::fs;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;

use sal_os;
use sal_process;

/// Error type for Cloud Hypervisor operations
#[derive(Debug)]
pub enum CloudHvError {
    CommandFailed(String),
    IoError(String),
    JsonError(String),
    DependencyMissing(String),
    InvalidSpec(String),
    NotFound(String),
}

impl fmt::Display for CloudHvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CloudHvError::CommandFailed(e) => write!(f, "{}", e),
            CloudHvError::IoError(e) => write!(f, "IO error: {}", e),
            CloudHvError::JsonError(e) => write!(f, "JSON error: {}", e),
            CloudHvError::DependencyMissing(e) => write!(f, "Dependency missing: {}", e),
            CloudHvError::InvalidSpec(e) => write!(f, "Invalid spec: {}", e),
            CloudHvError::NotFound(e) => write!(f, "{}", e),
        }
    }
}

impl Error for CloudHvError {}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmSpec {
    pub id: String,
    /// Optional for firmware boot; required for direct kernel boot
    pub kernel_path: Option<String>,
    /// Optional for direct kernel boot; required for firmware boot
    pub firmware_path: Option<String>,
    /// Disk image path (qcow2 or raw)
    pub disk_path: String,
    /// API socket path for ch-remote and management
    pub api_socket: String,
    /// vCPUs to boot with
    pub vcpus: u32,
    /// Memory in MB
    pub memory_mb: u32,
    /// Kernel cmdline (only used for direct kernel boot)
    pub cmdline: Option<String>,
    /// Extra args (raw) if you need to extend; keep minimal for Phase 2
    pub extra_args: Option<Vec<String>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRuntime {
    /// PID of cloud-hypervisor process if running
    pub pid: Option<i64>,
    /// Last known status: "stopped" | "running"
    pub status: String,
    /// Console log file path
    pub log_file: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRecord {
    pub spec: VmSpec,
    pub runtime: VmRuntime,
}
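
// Boot-method notes (illustrative, not part of the original commit): vm_create()
// requires at least one of firmware_path / kernel_path to point at an existing
// file, and vm_start() prefers firmware boot when both are set. For direct
// kernel boot, cmdline defaults to "console=ttyS0 reboot=k panic=1".
//
//   firmware boot: firmware_path = Some("/path/to/hypervisor-fw"), kernel_path = None
//   kernel boot:   kernel_path = Some("/path/to/vmlinux"), cmdline = Some("console=ttyS0 ...")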

fn ensure_deps() -> Result<(), CloudHvError> {
    if sal_process::which("cloud-hypervisor-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "cloud-hypervisor-static not found on PATH. Install Cloud Hypervisor static binary.".into(),
        ));
    }
    if sal_process::which("ch-remote-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "ch-remote-static not found on PATH. Install Cloud Hypervisor tools (static).".into(),
        ));
    }
    Ok(())
}

fn hero_vm_root() -> PathBuf {
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
    Path::new(&home).join("hero/virt/vms")
}

fn vm_dir(id: &str) -> PathBuf {
    hero_vm_root().join(id)
}

fn vm_json_path(id: &str) -> PathBuf {
    vm_dir(id).join("vm.json")
}

fn vm_log_path(id: &str) -> PathBuf {
    vm_dir(id).join("logs/console.log")
}

fn vm_pid_path(id: &str) -> PathBuf {
    vm_dir(id).join("pid")
}
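
// On-disk layout produced by these helpers (illustrative), rooted at
// $HOME/hero/virt/vms (falling back to /tmp/hero/virt/vms when HOME is unset):
//
//   <root>/<id>/vm.json            persisted VmRecord (spec + runtime)
//   <root>/<id>/logs/console.log   captured cloud-hypervisor output
//   <root>/<id>/pid                PID written by the start script
//   <root>/<id>/api.sock           default API socket when spec.api_socket is empty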

fn write_json(path: &Path, value: &serde_json::Value) -> Result<(), CloudHvError> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    }
    let s = serde_json::to_string_pretty(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    fs::write(path, s).map_err(|e| CloudHvError::IoError(e.to_string()))
}

fn read_json(path: &Path) -> Result<serde_json::Value, CloudHvError> {
    let content = fs::read_to_string(path).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    serde_json::from_str(&content).map_err(|e| CloudHvError::JsonError(e.to_string()))
}

fn proc_exists(pid: i64) -> bool {
    #[cfg(target_os = "linux")]
    {
        Path::new(&format!("/proc/{}", pid)).exists()
    }
    #[cfg(not(target_os = "linux"))]
    {
        // Minimal check for non-Linux; try a kill -0 style command
        let res = sal_process::run(&format!("kill -0 {}", pid))
            .die(false)
            .silent(true)
            .execute();
        res.map(|r| r.success).unwrap_or(false)
    }
}

/// Create and persist a VM spec
pub fn vm_create(spec: &VmSpec) -> Result<String, CloudHvError> {
    // Validate inputs minimally
    if spec.id.trim().is_empty() {
        return Err(CloudHvError::InvalidSpec("spec.id must not be empty".into()));
    }
    // Validate boot method: either firmware_path exists or kernel_path exists
    let has_fw = spec
        .firmware_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);
    let has_kernel = spec
        .kernel_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);

    if !(has_fw || has_kernel) {
        return Err(CloudHvError::InvalidSpec(
            "either firmware_path or kernel_path must be set to an existing file".into(),
        ));
    }

    if !Path::new(&spec.disk_path).exists() {
        return Err(CloudHvError::InvalidSpec(format!(
            "disk_path not found: {}",
            &spec.disk_path
        )));
    }
    if spec.vcpus == 0 {
        return Err(CloudHvError::InvalidSpec("vcpus must be >= 1".into()));
    }
    if spec.memory_mb == 0 {
        return Err(CloudHvError::InvalidSpec("memory_mb must be >= 128".into()));
    }

    // Prepare directory layout
    let dir = vm_dir(&spec.id);
    sal_os::mkdir(
        dir.to_str()
            .unwrap_or_else(|| "/tmp/hero/virt/vms/__invalid__"),
    )
    .map_err(|e| CloudHvError::IoError(e.to_string()))?;
    let log_dir = dir.join("logs");
    sal_os::mkdir(log_dir.to_str().unwrap()).map_err(|e| CloudHvError::IoError(e.to_string()))?;

    // Persist initial record
    let rec = VmRecord {
        spec: spec.clone(),
        runtime: VmRuntime {
            pid: None,
            status: "stopped".into(),
            log_file: vm_log_path(&spec.id).to_string_lossy().into_owned(),
        },
    };
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(&spec.id), &value)?;

    Ok(spec.id.clone())
}
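
// Illustrative vm.json persisted by vm_create() (placeholder values; exact
// formatting and field order depend on serde_json):
//
//   {
//     "spec": {
//       "id": "demo",
//       "kernel_path": null,
//       "firmware_path": "/tmp/hypervisor-fw",
//       "disk_path": "/tmp/disk.raw",
//       "api_socket": "",
//       "vcpus": 1,
//       "memory_mb": 512,
//       "cmdline": null,
//       "extra_args": null
//     },
//     "runtime": { "pid": null, "status": "stopped", "log_file": "<root>/demo/logs/console.log" }
//   }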

/// Start a VM using cloud-hypervisor
pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
    ensure_deps()?;

    // Load record
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Prepare invocation
    let api_socket = if rec.spec.api_socket.trim().is_empty() {
        vm_dir(id).join("api.sock").to_string_lossy().into_owned()
    } else {
        rec.spec.api_socket.clone()
    };
    let log_file = vm_log_path(id).to_string_lossy().into_owned();

    // Build command (minimal args for Phase 2)
    // We redirect all output to log_file via shell and keep process in background with nohup

    // CH CLI flags (very common subset)
    // --disk path=... uses virtio-blk by default
    let mut parts: Vec<String> = vec![
        "cloud-hypervisor-static".into(),
        "--api-socket".into(),
        api_socket.clone(),
    ];

    if let Some(fw) = rec.spec.firmware_path.clone() {
        // Firmware boot path
        parts.push("--firmware".into());
        parts.push(fw);
    } else if let Some(kpath) = rec.spec.kernel_path.clone() {
        // Direct kernel boot path
        let cmdline = rec
            .spec
            .cmdline
            .clone()
            .unwrap_or_else(|| "console=ttyS0 reboot=k panic=1".to_string());
        parts.push("--kernel".into());
        parts.push(kpath);
        parts.push("--cmdline".into());
        parts.push(cmdline);
    } else {
        return Err(CloudHvError::InvalidSpec(
            "neither firmware_path nor kernel_path set at start time".into(),
        ));
    }

    parts.push("--disk".into());
    parts.push(format!("path={}", rec.spec.disk_path));
    parts.push("--cpus".into());
    parts.push(format!("boot={}", rec.spec.vcpus));
    parts.push("--memory".into());
    parts.push(format!("size={}M", rec.spec.memory_mb));
    parts.push("--serial".into());
    parts.push("tty".into());
    parts.push("--console".into());
    parts.push("off".into());

    if let Some(extra) = rec.spec.extra_args.clone() {
        for e in extra {
            parts.push(e);
        }
    }

    let args_str = shell_join(&parts);
    let script = format!(
        "#!/bin/bash -e
nohup {} > '{}' 2>&1 &
echo $! > '{}'
",
        args_str,
        log_file,
        vm_pid_path(id).to_string_lossy()
    );
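
    // Illustrative only: for a kernel-boot spec the generated script looks
    // roughly like the following (paths are placeholders; shell_join() quotes
    // any token containing spaces, e.g. the cmdline; shown wrapped here for
    // readability, the real command is a single line):
    //
    //   #!/bin/bash -e
    //   nohup cloud-hypervisor-static --api-socket <root>/demo/api.sock \
    //       --kernel /tmp/vmlinux --cmdline 'console=ttyS0 reboot=k panic=1' \
    //       --disk path=/tmp/disk.raw --cpus boot=1 --memory size=512M \
    //       --serial tty --console off > '<root>/demo/logs/console.log' 2>&1 &
    //   echo $! > '<root>/demo/pid'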

    // Execute script; this will background cloud-hypervisor and return
    let result = sal_process::run(&script).execute();
    match result {
        Ok(res) => {
            if !res.success {
                return Err(CloudHvError::CommandFailed(format!(
                    "Failed to start VM '{}': {}",
                    id, res.stderr
                )));
            }
        }
        Err(e) => {
            return Err(CloudHvError::CommandFailed(format!(
                "Failed to start VM '{}': {}",
                id, e
            )))
        }
    }

    // Read PID back
    let pid = match fs::read_to_string(vm_pid_path(id)) {
        Ok(s) => s.trim().parse::<i64>().ok(),
        Err(_) => None,
    };

    // Update state
    rec.runtime.pid = pid;
    rec.runtime.status = if pid.is_some() {
        "running".into()
    } else {
        "stopped".into()
    };
    rec.runtime.log_file = log_file;
    rec.spec.api_socket = api_socket.clone();

    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    Ok(())
}

/// Return VM record info (spec + runtime) by id
pub fn vm_info(id: &str) -> Result<VmRecord, CloudHvError> {
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    Ok(rec)
}

/// Stop a VM via ch-remote (graceful), optionally force kill
pub fn vm_stop(id: &str, force: bool) -> Result<(), CloudHvError> {
    ensure_deps().ok(); // best-effort; we might still force-kill

    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Attempt graceful shutdown if api socket known
    if !rec.spec.api_socket.trim().is_empty() {
        let cmd = format!("ch-remote-static --api-socket {} shutdown", rec.spec.api_socket);
        let _ = sal_process::run(&cmd).die(false).silent(true).execute();
    }

    // Wait a bit for process to exit
    if let Some(pid) = rec.runtime.pid {
        for _ in 0..20 {
            if !proc_exists(pid) {
                break;
            }
            thread::sleep(Duration::from_millis(200));
        }
        // If still alive and force, kill -9
        if proc_exists(pid) && force {
            let _ = sal_process::run(&format!("kill -9 {}", pid))
                .die(false)
                .silent(true)
                .execute();
        }
    }

    // Update state
    rec.runtime.status = "stopped".into();
    rec.runtime.pid = None;
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    // Remove pid file
    let _ = fs::remove_file(vm_pid_path(id));

    Ok(())
}
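
// Note (descriptive, added for clarity): vm_stop() gives the guest up to ~4 s
// (20 polls x 200 ms) to exit after the ch-remote shutdown request; `kill -9`
// is only issued when `force` is true and the process is still alive.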

/// Delete a VM definition; optionally delete disks.
pub fn vm_delete(id: &str, delete_disks: bool) -> Result<(), CloudHvError> {
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let rec: VmRecord = serde_json::from_value(read_json(&p)?)
        .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Refuse to delete if still running
    if let Some(pid) = rec.runtime.pid {
        if proc_exists(pid) {
            return Err(CloudHvError::CommandFailed(
                "VM appears to be running; stop it first".into(),
            ));
        }
    }

    if delete_disks {
        let _ = fs::remove_file(&rec.spec.disk_path);
    }

    let d = vm_dir(id);
    fs::remove_dir_all(&d).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    Ok(())
}

/// List all VMs
pub fn vm_list() -> Result<Vec<VmRecord>, CloudHvError> {
    let root = hero_vm_root();
    if !root.exists() {
        return Ok(vec![]);
    }
    let mut out = vec![];
    for entry in fs::read_dir(&root).map_err(|e| CloudHvError::IoError(e.to_string()))? {
        let entry = entry.map_err(|e| CloudHvError::IoError(e.to_string()))?;
        let p = entry.path();
        if !p.is_dir() {
            continue;
        }
        let vm_json = p.join("vm.json");
        if !vm_json.exists() {
            continue;
        }
        let rec: VmRecord = serde_json::from_value(read_json(&vm_json)?)
            .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

        out.push(rec);
    }
    Ok(out)
}

/// Render a shell-safe command string from vector of tokens
fn shell_join(parts: &Vec<String>) -> String {
    let mut s = String::new();
    for (i, p) in parts.iter().enumerate() {
        if i > 0 {
            s.push(' ');
        }
        s.push_str(&shell_escape(p));
    }
    s
}

fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".into();
    }
    if s
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
    {
        return s.into();
    }
    // single-quote wrap, escape existing quotes
    let mut out = String::from("'");
    for ch in s.chars() {
        if ch == '\'' {
            out.push_str("'\"'\"'");
        } else {
            out.push(ch);
        }
    }
    out.push('\'');
    out
}
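
// Illustrative only (not part of the original commit): an ignored smoke test
// sketching the intended create -> start -> info -> stop -> delete flow. The
// firmware/disk paths are placeholders that must exist locally, and the
// cloud-hypervisor-static / ch-remote-static binaries must be on PATH.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[ignore] // requires a local Cloud Hypervisor setup and real images
    fn vm_lifecycle_smoke() {
        let spec = VmSpec {
            id: "cloudhv-smoke".into(),
            kernel_path: None,
            firmware_path: Some("/tmp/hypervisor-fw".into()),
            disk_path: "/tmp/disk.raw".into(),
            api_socket: String::new(), // vm_start() falls back to <vm_dir>/api.sock
            vcpus: 1,
            memory_mb: 512,
            cmdline: None,
            extra_args: None,
        };
        let id = vm_create(&spec).expect("create");
        vm_start(&id).expect("start");
        let rec = vm_info(&id).expect("info");
        assert_eq!(rec.runtime.status, "running");
        vm_stop(&id, true).expect("stop");
        vm_delete(&id, false).expect("delete");
    }
}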