networking VMs (WIP)
packages/system/virt/tests/rhai/05_cloudhv_diag.rhai (new file, +148)
@@ -0,0 +1,148 @@
// Cloud Hypervisor diagnostic script
// Creates a VM, starts CH, verifies PID, API socket, ch-remote info, and tails logs.
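// Relies on the SAL script builtins used below (which, run_silent, exist, sleep, exit)
// and the cloudhv_* VM functions.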

print("=== CloudHV Diagnostic ===");

// Dependency check
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let ch_missing = (chs == () || chs == "");
let chr_missing = (chrs == () || chrs == "");
if ch_missing || chr_missing {
    print("cloud-hypervisor-static and/or ch-remote-static not available - aborting.");
    exit();
}

// Inputs
let firmware_path = "/tmp/virt_images/hypervisor-fw";
let disk_path = "/tmp/virt_images/noble-server-cloudimg-amd64.img";

if !exist(firmware_path) {
    print(`Firmware not found: ${firmware_path}`);
    exit();
}
if !exist(disk_path) {
    print(`Disk image not found: ${disk_path}`);
    exit();
}
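// If the inputs are missing, they can be fetched manually beforehand, e.g.
// (assumed upstream URLs; not something this script does itself):
//   mkdir -p /tmp/virt_images
//   curl -Lo /tmp/virt_images/hypervisor-fw \
//     https://github.com/cloud-hypervisor/rust-hypervisor-firmware/releases/latest/download/hypervisor-fw
//   curl -Lo /tmp/virt_images/noble-server-cloudimg-amd64.img \
//     https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img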

// Unique ID
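// Nanosecond timestamp keeps IDs unique across runs; fall back to a fixed suffix if `date` fails.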
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `diagvm_${suffix}`;

// Socket path will be obtained from VM info (SAL populates spec.api_socket after start)

// Build minimal spec; let SAL decide the api_socket under the VM dir
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "vcpus": 1,
    "memory_mb": 512
};
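// Attach the firmware path separately to keep the base spec minimal.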
spec.firmware_path = firmware_path;

fn pid_alive(p) {
    if p == () { return false; }
    // Use /proc to avoid noisy "kill: No such process" messages from kill -0
    return exist(`/proc/${p}`);
}

fn tail_log(p, n) {
    if exist(p) {
        let r = run_silent(`tail -n ${n} ${p}`);
        if r.success { print(r.stdout); } else { print(r.stderr); }
    } else {
        print(`Log file not found: ${p}`);
    }
}

try {
    print("--- Create VM spec ---");
    let created = cloudhv_vm_create(spec);
    print(`created: ${created}`);
} catch (err) {
    print(`create failed: ${err}`);
    exit();
}

// Read back info to get SAL-resolved log_file path
let info0 = cloudhv_vm_info(vm_id);
let log_file = info0.runtime.log_file;

// Rely on SAL to handle socket directory creation and stale-socket cleanup

print("--- Start VM ---");
try {
    cloudhv_vm_start(vm_id);
    print("start invoked");
} catch (err) {
    print(`start failed: ${err}`);
    tail_log(log_file, 200);
    exit();
}

// Fetch PID and discover API socket path from updated spec
let info1 = cloudhv_vm_info(vm_id);
let pid = info1.runtime.pid;
let api_sock = info1.spec.api_socket;
print(`pid=${pid}`);
print(`api_sock_from_sal=${api_sock}`);

// Wait for socket file
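// Poll for up to ~50s; the socket should appear once CH's API server is up.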
let sock_ok = false;
for x in 0..50 {
    if exist(api_sock) { sock_ok = true; break; }
    sleep(1);
}
print(`api_sock_exists=${sock_ok} path=${api_sock}`);

// Probe ch-remote info
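// Retry for up to ~20s and keep the last error so the failure path can report it.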
let info_ok = false;
let last_err = "";
if sock_ok {
    for x in 0..20 {
        let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
        if r.success {
            info_ok = true;
            print("ch-remote info OK");
            break;
        } else {
            last_err = if r.stderr != "" { r.stderr } else { r.stdout };
            sleep(1);
        }
    }
}
if !info_ok {
    print("ch-remote info FAILED");
    if last_err != "" { print(last_err); }
    let alive = pid_alive(pid);
    print(`pid_alive=${alive}`);
    print("--- Last 200 lines of CH log ---");
    tail_log(log_file, 200);
    print("--- End of log ---");
} else {
    print("--- Stop via SAL (force) ---");
    try {
        cloudhv_vm_stop(vm_id, true);
        print("SAL stop invoked (force)");
    } catch (err) {
        print(`stop failed: ${err}`);
    }
    // wait for exit (check original PID)
    for x in 0..30 {
        if !pid_alive(pid) { break; }
        sleep(1);
    }
    print(`pid_alive_after_stop=${pid_alive(pid)}`);
}

print("--- Cleanup ---");
try {
    cloudhv_vm_delete(vm_id, false);
    print("vm deleted");
} catch (err) {
    print(`delete failed: ${err}`);
}

print("=== Diagnostic done ===");