diff --git a/Justfile b/Justfile
index fe3deb3..f7052ab 100644
--- a/Justfile
+++ b/Justfile
@@ -25,10 +25,10 @@ test-integration *ARGS: build pull-test-images
     #!/usr/bin/env bash
     set -euo pipefail
     export BCVK_PATH=$(pwd)/target/release/bcvk
-    
+
     # Clean up any leftover containers before starting
     cargo run --release --bin test-cleanup -p integration-tests 2>/dev/null || true
-    
+
     # Run the tests
     if command -v cargo-nextest &> /dev/null; then
        cargo nextest run --release -P integration -p integration-tests {{ ARGS }}
@@ -37,12 +37,16 @@ test-integration *ARGS: build pull-test-images
        cargo test --release -p integration-tests -- {{ ARGS }}
        TEST_EXIT_CODE=$?
     fi
-    
+
     # Clean up containers after tests complete
     cargo run --release --bin test-cleanup -p integration-tests 2>/dev/null || true
-    
+
     exit $TEST_EXIT_CODE
 
+# Run integration tests with qemu:///system (tests rootless podman + system libvirt)
+test-integration-system *ARGS: build pull-test-images
+    env LIBVIRT_DEFAULT_URI=qemu:///system just test-integration {{ARGS}}
+
 # Clean up integration test containers
 test-cleanup:
     cargo run --release --bin test-cleanup -p integration-tests
diff --git a/crates/integration-tests/src/main.rs b/crates/integration-tests/src/main.rs
index 033c0f8..bd0c978 100644
--- a/crates/integration-tests/src/main.rs
+++ b/crates/integration-tests/src/main.rs
@@ -64,6 +64,18 @@ pub(crate) fn get_alternative_test_image() -> String {
     }
 }
 
+/// Get libvirt connection arguments for CLI commands
+///
+/// Returns ["--connect", "URI"] if LIBVIRT_DEFAULT_URI is set, otherwise empty vec.
+/// This uses the standard libvirt environment variable.
+pub(crate) fn get_libvirt_connect_args() -> Vec<String> {
+    if let Some(uri) = std::env::var("LIBVIRT_DEFAULT_URI").ok() {
+        vec!["--connect".to_string(), uri]
+    } else {
+        vec![]
+    }
+}
+
 /// Captured output from a command with decoded stdout/stderr strings
 pub(crate) struct CapturedOutput {
     pub output: Output,
diff --git a/crates/integration-tests/src/tests/libvirt_base_disks.rs b/crates/integration-tests/src/tests/libvirt_base_disks.rs
index 47b9173..9a8e854 100644
--- a/crates/integration-tests/src/tests/libvirt_base_disks.rs
+++ b/crates/integration-tests/src/tests/libvirt_base_disks.rs
@@ -8,12 +8,13 @@
 use std::process::Command;
 
-use crate::{get_bck_command, get_test_image};
+use crate::{get_bck_command, get_libvirt_connect_args, get_test_image};
 
 /// Test that base disk is created and reused for multiple VMs
 pub fn test_base_disk_creation_and_reuse() {
     let bck = get_bck_command().unwrap();
     let test_image = get_test_image();
+    let connect_args = get_libvirt_connect_args();
 
     // Generate unique names for test VMs
     let timestamp = std::time::SystemTime::now()
@@ -33,20 +34,12 @@ pub fn test_base_disk_creation_and_reuse() {
 
     // Create first VM - this should create a new base disk
     println!("Creating first VM (should create base disk)...");
-    let vm1_output = Command::new("timeout")
-        .args([
-            "300s",
-            &bck,
-            "libvirt",
-            "run",
-            "--name",
-            &vm1_name,
-            "--filesystem",
-            "ext4",
-            &test_image,
-        ])
-        .output()
-        .expect("Failed to create first VM");
+    let mut vm1_cmd = Command::new("timeout");
+    vm1_cmd.args(["300s", &bck, "libvirt"]);
+    vm1_cmd.args(&connect_args);
+    vm1_cmd.arg("run");
+    vm1_cmd.args(["--name", &vm1_name, "--filesystem", "ext4", &test_image]);
+    let vm1_output = vm1_cmd.output().expect("Failed to create first VM");
 
     let vm1_stdout = String::from_utf8_lossy(&vm1_output.stdout);
     let vm1_stderr = String::from_utf8_lossy(&vm1_output.stderr);
@@ -69,20 +62,12 @@ pub fn test_base_disk_creation_and_reuse() {
 
     // Create second VM - this should reuse the base disk
     println!("Creating second VM (should reuse base disk)...");
-    let vm2_output = Command::new("timeout")
-        .args([
-            "300s",
-            &bck,
-            "libvirt",
-            "run",
-            "--name",
-            &vm2_name,
-            "--filesystem",
-            "ext4",
-            &test_image,
-        ])
-        .output()
-        .expect("Failed to create second VM");
+    let mut vm2_cmd = Command::new("timeout");
+    vm2_cmd.args(["300s", &bck, "libvirt"]);
+    vm2_cmd.args(&connect_args);
+    vm2_cmd.arg("run");
+    vm2_cmd.args(["--name", &vm2_name, "--filesystem", "ext4", &test_image]);
+    let vm2_output = vm2_cmd.output().expect("Failed to create second VM");
 
     let vm2_stdout = String::from_utf8_lossy(&vm2_output.stdout);
     let vm2_stderr = String::from_utf8_lossy(&vm2_output.stderr);
@@ -110,13 +95,15 @@ pub fn test_base_disk_creation_and_reuse() {
 /// Test base-disks list command
 pub fn test_base_disks_list_command() {
     let bck = get_bck_command().unwrap();
+    let connect_args = get_libvirt_connect_args();
 
     println!("Testing base-disks list command");
 
-    let output = Command::new(&bck)
-        .args(["libvirt", "base-disks", "list"])
-        .output()
-        .expect("Failed to run base-disks list");
+    let mut cmd = Command::new(&bck);
+    cmd.arg("libvirt");
+    cmd.args(&connect_args);
+    cmd.args(["base-disks", "list"]);
+    let output = cmd.output().expect("Failed to run base-disks list");
 
     let stdout = String::from_utf8_lossy(&output.stdout);
     let stderr = String::from_utf8_lossy(&output.stderr);
@@ -149,11 +136,15 @@ pub fn test_base_disks_list_command() {
 /// Test base-disks prune command with dry-run
 pub fn test_base_disks_prune_dry_run() {
     let bck = get_bck_command().unwrap();
+    let connect_args = get_libvirt_connect_args();
 
     println!("Testing base-disks prune --dry-run command");
 
-    let output = Command::new(&bck)
-        .args(["libvirt", "base-disks", "prune", "--dry-run"])
+    let mut cmd = Command::new(&bck);
+    cmd.arg("libvirt");
+    cmd.args(&connect_args);
+    cmd.args(["base-disks", "prune", "--dry-run"]);
+    let output = cmd
         .output()
         .expect("Failed to run base-disks prune --dry-run");
 
@@ -185,6 +176,7 @@ pub fn test_base_disks_prune_dry_run() {
 pub fn test_vm_disk_references_base() {
     let bck = get_bck_command().unwrap();
     let test_image = get_test_image();
+    let connect_args = get_libvirt_connect_args();
 
     let timestamp = std::time::SystemTime::now()
         .duration_since(std::time::UNIX_EPOCH)
@@ -197,20 +189,12 @@ pub fn test_vm_disk_references_base() {
     cleanup_domain(&vm_name);
 
     // Create VM
-    let output = Command::new("timeout")
-        .args([
-            "300s",
-            &bck,
-            "libvirt",
-            "run",
-            "--name",
-            &vm_name,
-            "--filesystem",
-            "ext4",
-            &test_image,
-        ])
-        .output()
-        .expect("Failed to create VM");
+    let mut vm_cmd = Command::new("timeout");
+    vm_cmd.args(["300s", &bck, "libvirt"]);
+    vm_cmd.args(&connect_args);
+    vm_cmd.arg("run");
+    vm_cmd.args(["--name", &vm_name, "--filesystem", "ext4", &test_image]);
+    let output = vm_cmd.output().expect("Failed to create VM");
 
     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
@@ -220,10 +204,12 @@ pub fn test_vm_disk_references_base() {
     }
 
     // Get VM disk path from domain XML
-    let dumpxml_output = Command::new("virsh")
-        .args(&["dumpxml", &vm_name])
-        .output()
-        .expect("Failed to dump domain XML");
+    let mut dumpxml_cmd = Command::new("virsh");
+    for arg in &connect_args {
+        dumpxml_cmd.arg(arg);
+    }
+    dumpxml_cmd.args(&["dumpxml", &vm_name]);
+    let dumpxml_output = dumpxml_cmd.output().expect("Failed to dump domain XML");
 
     if !dumpxml_output.status.success() {
         cleanup_domain(&vm_name);
@@ -261,18 +247,24 @@ pub fn test_vm_disk_references_base() {
 
 /// Helper function to cleanup domain and its disk
 fn cleanup_domain(domain_name: &str) {
+    let connect_args = get_libvirt_connect_args();
     println!("Cleaning up domain: {}", domain_name);
 
     // Stop domain if running
-    let _ = Command::new("virsh")
-        .args(&["destroy", domain_name])
-        .output();
+    let mut destroy_cmd = Command::new("virsh");
+    for arg in &connect_args {
+        destroy_cmd.arg(arg);
+    }
+    destroy_cmd.args(&["destroy", domain_name]);
+    let _ = destroy_cmd.output();
 
     // Use bcvk libvirt rm for proper cleanup
     let bck = get_bck_command().unwrap();
-    let cleanup_output = Command::new(&bck)
-        .args(&["libvirt", "rm", domain_name, "--force", "--stop"])
-        .output();
+    let mut cleanup_cmd = Command::new(&bck);
+    cleanup_cmd.arg("libvirt");
+    cleanup_cmd.args(&connect_args);
+    cleanup_cmd.args(&["rm", domain_name, "--force", "--stop"]);
+    let cleanup_output = cleanup_cmd.output();
 
     if let Ok(output) = cleanup_output {
         if output.status.success() {
diff --git a/crates/kit/src/domain_list.rs b/crates/kit/src/domain_list.rs
index 68b562f..0d25d20 100644
--- a/crates/kit/src/domain_list.rs
+++ b/crates/kit/src/domain_list.rs
@@ -135,24 +135,8 @@ impl DomainLister {
 
     /// Get domain XML metadata as parsed DOM
     pub fn get_domain_xml(&self, domain_name: &str) -> Result {
-        let output = self
-            .virsh_command()
-            .args(&["dumpxml", domain_name])
-            .output()
-            .with_context(|| format!("Failed to dump XML for domain '{}'", domain_name))?;
-
-        if !output.status.success() {
-            let stderr = String::from_utf8_lossy(&output.stderr);
-            return Err(color_eyre::eyre::eyre!(
-                "Failed to get XML for domain '{}': {}",
-                domain_name,
-                stderr
-            ));
-        }
-
-        let xml = String::from_utf8(output.stdout)?;
-        xml_utils::parse_xml_dom(&xml)
-            .with_context(|| format!("Failed to parse XML for domain '{}'", domain_name))
+        crate::libvirt::run::run_virsh_xml(self.connect_uri.as_deref(), &["dumpxml", domain_name])
+            .context(format!("Failed to get XML for domain '{}'", domain_name))
     }
 
     /// Extract podman-bootc metadata from parsed domain XML
diff --git a/crates/kit/src/libvirt/base_disks.rs b/crates/kit/src/libvirt/base_disks.rs
index 7cfbf26..8dfbc49 100644
--- a/crates/kit/src/libvirt/base_disks.rs
+++ b/crates/kit/src/libvirt/base_disks.rs
@@ -12,6 +12,15 @@ use color_eyre::Result;
 use std::fs;
 use tracing::{debug, info};
 
+/// Check if we have write access to a directory
+/// Returns true if we can create files in this directory
+fn can_write_to_directory(path: &Utf8Path) -> bool {
+    use rustix::fs::{access, Access};
+
+    // Check if we have write access to the directory
+    access(path.as_str(), Access::WRITE_OK).is_ok()
+}
+
 /// Find or create a base disk for the given parameters
 pub fn find_or_create_base_disk(
     source_image: &str,
@@ -37,25 +46,42 @@ pub fn find_or_create_base_disk(
     let pool_path = super::run::get_libvirt_storage_pool_path(connect_uri)?;
     let base_disk_path = pool_path.join(&base_disk_name);
 
-    // Check if base disk already exists with valid metadata
-    if base_disk_path.exists() {
-        debug!("Checking existing base disk: {:?}", base_disk_path);
-
-        if crate::cache_metadata::check_cached_disk(
-            base_disk_path.as_std_path(),
-            image_digest,
-            install_options,
-            kernel_args,
-        )?
-        .is_ok()
-        {
-            info!("Found cached base disk: {:?}", base_disk_path);
-            return Ok(base_disk_path);
+    // Check if base disk already exists using virsh (works even without direct file access)
+    let mut vol_info_cmd = super::run::virsh_command(connect_uri)?;
+    vol_info_cmd.args(&["vol-info", "--pool", "default", &base_disk_name]);
+    let vol_exists = vol_info_cmd
+        .output()
+        .map(|o| o.status.success())
+        .unwrap_or(false);
+
+    if vol_exists {
+        debug!("Base disk volume exists in pool: {:?}", base_disk_name);
+
+        // Try to validate metadata if we have direct file access
+        // If we don't have access, we'll just use the existing disk
+        if can_write_to_directory(&pool_path) {
+            if let Ok(Ok(())) = crate::cache_metadata::check_cached_disk(
+                base_disk_path.as_std_path(),
+                image_digest,
+                install_options,
+                kernel_args,
+            ) {
+                info!("Found cached base disk: {:?}", base_disk_path);
+                return Ok(base_disk_path);
+            } else {
+                info!("Base disk exists but metadata doesn't match, will recreate");
+                // Delete via virsh since we might not have direct file access
+                let mut del_cmd = super::run::virsh_command(connect_uri)?;
+                del_cmd.args(&["vol-delete", "--pool", "default", &base_disk_name]);
+                let _ = del_cmd.output();
+            }
         } else {
-            info!("Base disk exists but metadata doesn't match, will recreate");
-            fs::remove_file(&base_disk_path).with_context(|| {
-                format!("Failed to remove stale base disk: {:?}", base_disk_path)
-            })?;
+            // Can't validate metadata without direct access, assume it's valid
+            info!(
+                "Found existing base disk (assuming valid): {:?}",
+                base_disk_path
+            );
+            return Ok(base_disk_path);
+        }
     }
 
@@ -87,18 +113,37 @@ fn create_base_disk(
     use crate::run_ephemeral::CommonVmOpts;
     use crate::to_disk::{Format, ToDiskAdditionalOpts, ToDiskOpts};
 
+    let pool_path = base_disk_path.parent().unwrap();
+
+    // Check if we have direct write access to the pool directory
+    // This is important for rootless podman + qemu:///system scenarios
+    let can_write_directly = can_write_to_directory(pool_path);
+
+    if !can_write_directly {
+        info!(
+            "No direct write access to pool directory {:?}, will create disk in temporary location and upload",
+            pool_path
+        );
+        return create_base_disk_via_upload(
+            base_disk_path,
+            source_image,
+            image_digest,
+            install_options,
+            kernel_args,
+            connect_uri,
+        );
+    }
+
+    // Fast path: direct creation in pool directory
+    debug!("Creating base disk directly in pool directory");
+
     // Use a unique temporary file to avoid conflicts when multiple processes
     // race to create the same base disk
     let temp_file = tempfile::Builder::new()
         .prefix(&format!("{}.", base_disk_path.file_stem().unwrap()))
         .suffix(".tmp.qcow2")
-        .tempfile_in(base_disk_path.parent().unwrap())
-        .with_context(|| {
-            format!(
-                "Failed to create temp file in {:?}",
-                base_disk_path.parent()
-            )
-        })?;
+        .tempfile_in(pool_path)
+        .with_context(|| format!("Failed to create temp file in {:?}", pool_path))?;
 
     let temp_disk_path = Utf8PathBuf::from(temp_file.path().to_str().unwrap());
 
@@ -190,6 +235,149 @@ fn create_base_disk(
     }
 }
 
+/// Create a base disk via temporary location and virsh vol-upload
+/// This is used when we don't have direct write access to the pool directory,
+/// such as when using rootless podman with qemu:///system
+fn create_base_disk_via_upload(
+    base_disk_path: &Utf8Path,
+    source_image: &str,
+    image_digest: &str,
+    install_options: &InstallOptions,
+    kernel_args: &[String],
+    connect_uri: Option<&str>,
+) -> Result<()> {
+    use crate::run_ephemeral::CommonVmOpts;
+    use crate::to_disk::{Format, ToDiskAdditionalOpts, ToDiskOpts};
+
+    // Create a unique temp file path for rootless podman
+    // Use ~/.cache/bcvk for temp disk storage because:
+    // - Rootless podman can reliably access user's home directory
+    // - It's on disk (not RAM-backed like /run)
+    // - /var/tmp may not be accessible to rootless podman depending on system config
+    let home = std::env::var("HOME").with_context(|| "HOME environment variable not set")?;
+    let temp_dir = Utf8PathBuf::from(home).join(".cache/bcvk");
+
+    // Ensure the directory exists
+    fs::create_dir_all(&temp_dir)
+        .with_context(|| format!("Failed to create temp directory: {:?}", temp_dir))?;
+
+    let temp_file = tempfile::Builder::new()
+        .prefix("bcvk-base-disk-")
+        .suffix(".qcow2")
+        .tempfile_in(temp_dir.as_std_path())
+        .with_context(|| format!("Failed to create temp file in {:?}", temp_dir))?;
+    let temp_disk_path = Utf8PathBuf::from_path_buf(temp_file.path().to_path_buf())
+        .map_err(|p| eyre!("temp path is not UTF-8: {:?}", p))?;
+
+    info!(
+        "Creating base disk in temporary location: {:?}",
+        temp_disk_path
+    );
+
+    // Create the disk using to_disk at temporary location
+    let to_disk_opts = ToDiskOpts {
+        source_image: source_image.to_string(),
+        target_disk: temp_disk_path.clone(),
+        install: install_options.clone(),
+        additional: ToDiskAdditionalOpts {
+            disk_size: install_options
+                .root_size
+                .clone()
+                .or(Some(super::LIBVIRT_DEFAULT_DISK_SIZE.to_string())),
+            format: Format::Qcow2,
+            common: CommonVmOpts {
+                memory: crate::common_opts::MemoryOpts {
+                    memory: super::LIBVIRT_DEFAULT_MEMORY.to_string(),
+                },
+                vcpus: Some(super::LIBVIRT_DEFAULT_VCPUS),
+                ssh_keygen: false,
+                ..Default::default()
+            },
+            ..Default::default()
+        },
+    };
+
+    // Run bootc install
+    crate::to_disk::run(to_disk_opts)
+        .with_context(|| format!("Failed to install bootc to base disk: {:?}", temp_disk_path))?;
+
+    // Verify metadata was written
+    let metadata_valid = crate::cache_metadata::check_cached_disk(
+        temp_disk_path.as_std_path(),
+        image_digest,
+        install_options,
+        kernel_args,
+    )
+    .context("Querying cached disk")?;
+
+    metadata_valid.map_err(|e| eyre!("Generated disk metadata validation failed: {e}"))?;
+
+    // Get disk size for volume creation
+    let metadata = fs::metadata(&temp_disk_path)
+        .with_context(|| format!("Failed to get disk metadata: {:?}", temp_disk_path))?;
+    let disk_size = metadata.len();
+
+    info!("Uploading base disk to libvirt pool: {:?}", base_disk_path);
+
+    let base_disk_name = base_disk_path
+        .file_name()
+        .ok_or_else(|| eyre!("Base disk path has no filename: {:?}", base_disk_path))?;
+
+    // Delete existing volume if present
+    let mut cmd = super::run::virsh_command(connect_uri)?;
+    cmd.args(&["vol-delete", "--pool", "default", base_disk_name]);
+    let _ = cmd.output(); // Ignore errors if volume doesn't exist
+
+    // Create empty volume in the pool
+    let mut cmd = super::run::virsh_command(connect_uri)?;
+    cmd.args(&[
+        "vol-create-as",
+        "default",
+        base_disk_name,
+        &disk_size.to_string(),
+        "--format",
+        "qcow2",
+    ]);
+
+    let output = cmd
+        .output()
+        .with_context(|| "Failed to run virsh vol-create-as")?;
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        return Err(eyre!("Failed to create volume in pool: {}", stderr));
+    }
+
+    // Upload the disk content to the volume
+    let mut cmd = super::run::virsh_command(connect_uri)?;
+    cmd.args(&[
+        "vol-upload",
+        base_disk_name,
+        temp_disk_path.as_str(),
+        "--pool",
+        "default",
+    ]);
+
+    let output = cmd
+        .output()
+        .with_context(|| "Failed to run virsh vol-upload")?;
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        // Try to clean up the empty volume
+        let mut del_cmd = super::run::virsh_command(connect_uri)?;
+        del_cmd.args(&["vol-delete", "--pool", "default", base_disk_name]);
+        let _ = del_cmd.output();
+        return Err(eyre!("Failed to upload disk to pool: {}", stderr));
+    }
+
+    info!(
+        "Successfully created and uploaded base disk: {:?}",
+        base_disk_path
+    );
+    Ok(())
+}
+
 /// Clone a base disk to create a VM-specific disk
 ///
 /// Uses predictable disk name: `{vm_name}.qcow2`
@@ -250,16 +438,33 @@ pub fn clone_from_base(
         base_disk_path, vm_disk_path
     );
 
-    // Get the virtual size of the base disk to use for the new volume
-    let info = crate::qemu_img::info(base_disk_path)?;
-    let virtual_size = info.virtual_size;
-
-    // Create volume with backing file using vol-create-as
-    // This creates a qcow2 image with the base disk as backing file (proper CoW)
+    // Get the virtual size of the base disk using virsh vol-dumpxml
+    // We use virsh instead of qemu-img to avoid permission issues with qemu:///system
     let base_disk_filename = base_disk_path.file_name().ok_or_else(|| {
         color_eyre::eyre::eyre!("Base disk path has no filename: {:?}", base_disk_path)
     })?;
 
+    let dom = super::run::run_virsh_xml(
+        connect_uri,
+        &["vol-dumpxml", "--pool", "default", base_disk_filename],
+    )
+    .context(format!(
+        "Failed to get base disk info for {}",
+        base_disk_filename
+    ))?;
+
+    let capacity_node = dom
+        .find("capacity")
+        .ok_or_else(|| eyre!("Failed to find capacity element in vol-dumpxml"))?;
+
+    let virtual_size: u64 = capacity_node
+        .text_content()
+        .trim()
+        .parse()
+        .with_context(|| format!("Failed to parse capacity: {}", capacity_node.text_content()))?;
+
+    // Create volume with backing file using vol-create-as
+    // This creates a qcow2 image with the base disk as backing file (proper CoW)
     let mut cmd = super::run::virsh_command(connect_uri)?;
     cmd.args(&[
         "vol-create-as",
diff --git a/crates/kit/src/libvirt/list_volumes.rs b/crates/kit/src/libvirt/list_volumes.rs
index d79e55f..5bc3ff5 100644
--- a/crates/kit/src/libvirt/list_volumes.rs
+++ b/crates/kit/src/libvirt/list_volumes.rs
@@ -3,7 +3,6 @@
 //! This module provides functionality to discover and list bootc volumes
 //! with their container image metadata and creation information.
 
-use crate::xml_utils;
 use clap::Parser;
 use color_eyre::{eyre::eyre, Result};
 use comfy_table::{presets::UTF8_FULL, Table};
@@ -177,21 +176,15 @@ impl LibvirtListVolumesOpts {
             }
 
             // Get metadata from volume XML
-            let xml_output = self
-                .virsh_command(global_opts)
-                .args(&["vol-dumpxml", volume_name, "--pool", &self.pool])
-                .output()?;
-
             let mut source_image = None;
             let mut source_digest = None;
             let mut created = None;
 
-            if xml_output.status.success() {
-                let xml = String::from_utf8(xml_output.stdout)?;
-                debug!("Volume XML for {}: {}", volume_name, xml);
-
-                // Parse XML once and search for metadata
-                let dom = xml_utils::parse_xml_dom(&xml)?;
+            if let Ok(dom) = super::run::run_virsh_xml(
+                global_opts.connect.as_deref(),
+                &["vol-dumpxml", volume_name, "--pool", &self.pool],
+            ) {
+                debug!("Volume XML retrieved for {}", volume_name);
 
                 // First try to extract metadata from description field (new format)
                 if let Some(description_node) = dom.find("description") {
diff --git a/crates/kit/src/libvirt/run.rs b/crates/kit/src/libvirt/run.rs
index 3380258..02f0b15 100644
--- a/crates/kit/src/libvirt/run.rs
+++ b/crates/kit/src/libvirt/run.rs
@@ -26,6 +26,36 @@ pub(super) fn virsh_command(connect_uri: Option<&str>) -> Result
 
+/// Run a virsh command and parse its XML output
+pub(crate) fn run_virsh_xml(connect_uri: Option<&str>, args: &[&str]) -> Result {
+    let mut cmd = virsh_command(connect_uri)?;
+    cmd.args(args);
+
+    let output = cmd.output().context("Failed to run virsh")?;
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        return Err(eyre::eyre!("virsh command failed: {}", stderr));
+    }
+
+    // Parse XML directly from bytes
+    let xml = std::str::from_utf8(&output.stdout).context("Invalid UTF-8 in virsh output")?;
+
+    xml_utils::parse_xml_dom(xml).context("Failed to parse XML")
+}
+
 /// Firmware type for virtual machines
 #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
 #[clap(rename_all = "kebab-case")]
@@ -343,24 +373,8 @@ pub fn get_libvirt_storage_pool_path(connect_uri: Option<&str>) -> Result
 Result {
-        let output = global_opts
-            .virsh_command()
-            .args(&["dumpxml", &self.domain_name])
-            .output()?;
-
-        if !output.status.success() {
-            return Err(eyre!("Failed to get domain XML for '{}'", self.domain_name));
-        }
-
-        let xml = String::from_utf8(output.stdout)?;
-        debug!("Domain XML for SSH extraction: {}", xml);
-
-        // Parse XML once for all metadata extraction
-        let dom = xml_utils::parse_xml_dom(&xml)
-            .map_err(|e| eyre!("Failed to parse domain XML: {}", e))?;
+        let dom = super::run::run_virsh_xml(
+            global_opts.connect.as_deref(),
+            &["dumpxml", &self.domain_name],
+        )
+        .context(format!(
+            "Failed to get domain XML for '{}'",
+            self.domain_name
+        ))?;
+        debug!("Domain XML retrieved for SSH extraction");
 
         // Extract SSH metadata from bootc:container section
         // First try the new base64 encoded format
@@ -382,6 +378,7 @@ pub fn run_ssh_impl(
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::xml_utils;
 
     #[test]
     fn test_ssh_metadata_extraction() {
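
Usage note (not part of the patch): the new test-integration-system Justfile recipe works by exporting LIBVIRT_DEFAULT_URI, which get_libvirt_connect_args() then turns into an explicit --connect argument for every bcvk libvirt invocation in the tests. A minimal sketch of the intended pattern for a new integration test, using only helpers and subcommands that appear in this diff (the assertion is illustrative, not taken from the repository):

    use std::process::Command;

    // Build `bcvk libvirt [--connect URI] base-disks list`,
    // honoring LIBVIRT_DEFAULT_URI when it is set.
    let bck = get_bck_command().unwrap();
    let connect_args = get_libvirt_connect_args();
    let mut cmd = Command::new(&bck);
    cmd.arg("libvirt");
    cmd.args(&connect_args); // empty vec when LIBVIRT_DEFAULT_URI is unset
    cmd.args(["base-disks", "list"]);
    let output = cmd.output().expect("Failed to run bcvk libvirt base-disks list");
    assert!(output.status.success());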