ci: Extend VFIO test with memory hotplug

One more time, we're extending the VFIO integration test, now to verify that
a VFIO device passed through to a VM is still usable after new memory has
been hotplugged into that VM.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Sebastien Boeuf 2020-03-26 17:19:41 +01:00
parent 9e18177654
commit 0c29c2ec49
2 changed files with 33 additions and 4 deletions
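
In practice the new check boils down to three commands. The sketch below is assembled from the commands visible in the diffs that follow (the /mnt/ch-remote path, the /tmp/ch_api.sock socket and the 1 GiB resize value are the ones this test uses, not general defaults):

    # inside the L2 guest: let hotplugged memory blocks come online automatically
    sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'
    # from the L1 guest: grow the L2 guest's memory to 1 GiB through the API socket
    sudo /mnt/ch-remote --api-socket=/tmp/ch_api.sock resize --memory=1073741824
    # inside the L2 guest: MemTotal (reported in kB) should now reflect roughly 1 GiB,
    # and reaching it over SSH exercises the VFIO-assigned virtio-net device
    grep MemTotal /proc/meminfo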

@@ -74,7 +74,7 @@ write_files:
  bash -c "echo 1af4 1041 > /sys/bus/pci/drivers/vfio-pci/new_id"
  bash -c "echo 0000:00:06.0 > /sys/bus/pci/devices/0000\:00\:06.0/driver/unbind"
  bash -c "echo 1af4 1041 > /sys/bus/pci/drivers/vfio-pci/new_id"
- # 512M ram requires 256 pages
- echo 256 | sudo tee /proc/sys/vm/nr_hugepages
+ # 1G ram requires 512 pages
+ echo 512 | sudo tee /proc/sys/vm/nr_hugepages
  sudo chmod a+rwX /dev/hugepages
- /mnt/cloud-hypervisor --kernel /mnt/vmlinux --cmdline "console=hvc0 reboot=k panic=1 nomodules i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd root=PARTUUID=6fb4d1a8-6c8c-4dd7-9f7c-1fe0b9f2574c VFIOTAG" --disk path=/mnt/clear-31311-cloudguest.img path=/mnt/cloudinit.img --cpus boot=1 --memory size=512M,file=/dev/hugepages --device path=/sys/bus/pci/devices/0000:00:05.0/ path=/sys/bus/pci/devices/0000:00:06.0/ --api-socket /tmp/ch_api.sock
+ /mnt/cloud-hypervisor --kernel /mnt/vmlinux --cmdline "console=hvc0 reboot=k panic=1 nomodules i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd root=PARTUUID=6fb4d1a8-6c8c-4dd7-9f7c-1fe0b9f2574c VFIOTAG" --disk path=/mnt/clear-31311-cloudguest.img path=/mnt/cloudinit.img --cpus boot=1 --memory size=512M,hotplug_size=1G,file=/dev/hugepages --device path=/sys/bus/pci/devices/0000:00:05.0/ path=/sys/bus/pci/devices/0000:00:06.0/ --api-socket /tmp/ch_api.sock
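
The new page count follows from the hugepage size: assuming the default 2 MiB hugepages on x86_64 (the script does not request any other size), backing the L2 guest's full 1 GiB of RAM after the resize takes 1024 MiB / 2 MiB = 512 pages, whereas the previous 256 pages only covered the 512 MiB of boot memory. A quick way to double-check the reservation from inside the L1 guest:

    # reserve enough 2 MiB hugepages in L1 to back 1 GiB of L2 memory:
    #   1024 MiB / 2 MiB = 512 pages
    echo 512 | sudo tee /proc/sys/vm/nr_hugepages
    # confirm what the kernel actually set aside
    grep -E 'HugePages_Total|Hugepagesize' /proc/meminfo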

@@ -692,6 +692,14 @@ mod tests {
          .map_err(Error::Parsing)?)
  }

+ fn get_total_memory_l2(&self) -> Result<u32, Error> {
+     Ok(self
+         .ssh_command_l2_1("grep MemTotal /proc/meminfo | grep -o \"[0-9]*\"")?
+         .trim()
+         .parse()
+         .map_err(Error::Parsing)?)
+ }
+
  fn get_entropy(&self) -> Result<u32, Error> {
      Ok(self
          .ssh_command("cat /proc/sys/kernel/random/entropy_avail")?
@@ -2445,7 +2453,7 @@ mod tests {
  let mut child = GuestCommand::new(&guest)
      .args(&["--cpus", "boot=4"])
-     .args(&["--memory", "size=1G,file=/dev/hugepages"])
+     .args(&["--memory", "size=2G,file=/dev/hugepages"])
      .args(&["--kernel", kernel_path.to_str().unwrap()])
      .default_disks()
      .args(&[
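
The L1 guest itself grows from 1 GiB to 2 GiB here, presumably because L1 now pins 512 x 2 MiB = 1 GiB of hugepages for L2 (see the cloudinit change above), which would leave almost nothing for L1 in a 1 GiB guest. A rough sanity check from inside L1:

    # compare the hugepage reservation against L1's total memory (values in kB)
    grep -E 'MemTotal|HugePages_Total|Hugepagesize' /proc/meminfo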
@@ -2597,6 +2605,27 @@ mod tests {
      7,
  );

+ // Perform memory hotplug in L2 and validate the memory is showing
+ // up as expected. In order to check, we will use the virtio-net
+ // device already passed through L2 as a VFIO device, this will
+ // verify that VFIO devices are functional with memory hotplug.
+ aver!(
+     tb,
+     guest.get_total_memory_l2().unwrap_or_default() > 491_000
+ );
+ guest.ssh_command_l2_1(
+     "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'",
+ )?;
+ guest.ssh_command_l1(
+     "sudo /mnt/ch-remote \
+      --api-socket=/tmp/ch_api.sock \
+      resize --memory=1073741824",
+ )?;
+ aver!(
+     tb,
+     guest.get_total_memory_l2().unwrap_or_default() > 982_000
+ );
+
  let _ = child.kill();
  let _ = daemon_child.kill();
  let _ = child.wait();
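
The two MemTotal thresholds are loose lower bounds rather than exact sizes: /proc/meminfo reports memory in kB and the kernel keeps a share for itself, so anything above roughly 491,000 kB stands in for the initial 512 MiB (524,288 kB) and anything above roughly 982,000 kB for the full 1 GiB (1,048,576 kB) after the resize. The helper added above simply runs the following over the nested SSH connection into L2, a connection that is itself carried by the VFIO-assigned virtio-net device:

    # what get_total_memory_l2() executes in the L2 guest: MemTotal in kB, digits only
    grep MemTotal /proc/meminfo | grep -o "[0-9]*"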