tests: Add VFIO integration test

The VFIO integration test first boots a QEMU guest and then assigns one of
its virtio-pci networking devices to a nested cloud-hypervisor guest.
We then check that we can ssh into the nested guest and verify that it's
running with the right kernel command line.

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
Samuel Ortiz 2019-07-19 10:26:13 +02:00
parent 4d16ca8ae7
commit 5ae3144f5b
4 changed files with 235 additions and 13 deletions
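In outline, the hand-off inside the L1 (QEMU) guest goes as follows. This is only a condensed sketch of the cloud-init provisioned /usr/bin/cloud-hypervisor-vfio.sh added at the end of this commit; the PCI address, the 1af4:1041 virtio-net ID and the /mnt paths come from that script, and the kernel command line is abbreviated here.

# Condensed sketch of the L1-guest side of the test (full script in the
# cloud-init payload below; command line abbreviated).
modprobe vfio_iommu_type1 allow_unsafe_interrupts
modprobe vfio_pci
# Detach the second virtio-net device from its virtio driver and hand it to vfio-pci.
echo 0000:00:03.0 > /sys/bus/pci/devices/0000:00:03.0/driver/unbind
echo 1af4 1041 > /sys/bus/pci/drivers/vfio-pci/new_id
# Boot the nested (L2) cloud-hypervisor guest with the device passed through,
# tagging its command line with VFIOTAG so the test can tell L1 and L2 apart.
/mnt/cloud-hypervisor \
    --kernel /mnt/vmlinux \
    --cmdline "console=ttyS0 root=/dev/vda2 VFIOTAG" \
    --disk /mnt/clear-29810-cloud.img /mnt/cloudinit.img \
    --cpus 1 --memory size=512M --rng \
    --device /sys/bus/pci/devices/0000:00:03.0/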

Jenkinsfile

@@ -5,7 +5,7 @@ stage ("Builds") {
         }
         stage ('Install system packages') {
             sh "sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq build-essential mtools libssl-dev pkg-config"
-            sh "sudo apt-get install -yq flex bison libelf-dev qemu-utils"
+            sh "sudo apt-get install -yq flex bison libelf-dev qemu-utils qemu-system"
         }
         stage ('Install Rust') {
             sh "nohup curl https://sh.rustup.rs -sSf | sh -s -- -y"
@@ -16,7 +16,6 @@ stage ("Builds") {
         }
         stage ('Run integration tests') {
             sh "sudo mount -t tmpfs tmpfs /tmp"
-            sh "sudo chmod a+rw /dev/kvm"
             sh "scripts/run_integration_tests.sh"
         }
     }

scripts/run_integration_tests.sh

@@ -14,6 +14,14 @@ if [ ! -f "$FW" ]; then
     popd
 fi

+OVMF_URL="https://cdn.download.clearlinux.org/image/OVMF.fd"
+OVMF="$WORKLOADS_DIR/OVMF.fd"
+if [ ! -f "$OVMF" ]; then
+    pushd $WORKLOADS_DIR
+    wget --quiet $OVMF_URL
+    popd
+fi
+
 OS_IMAGE_NAME="clear-29810-cloud.img"
 OS_IMAGE_URL="https://cloudhypervisorstorage.blob.core.windows.net/images/$OS_IMAGE_NAME.xz"
 OS_IMAGE="$WORKLOADS_DIR/$OS_IMAGE_NAME"
@@ -86,7 +94,42 @@
     echo "bar" > "$SHARED_DIR/file3"
 fi

+VFIO_DIR="$WORKLOADS_DIR/vfio"
+if [ ! -d "$VFIO_DIR" ]; then
+    mkdir -p $VFIO_DIR
+    cp $OS_IMAGE $VFIO_DIR
+    cp $FW $VFIO_DIR
+    cp $VMLINUX_IMAGE $VFIO_DIR
+fi
+
+# VFIO test network setup.
+# We reserve a different IP class for it: 172.16.0.0/24.
+sudo ip link add name vfio-br0 type bridge
+sudo ip link set vfio-br0 up
+sudo ip addr add 172.16.0.1/24 dev vfio-br0
+
+sudo ip tuntap add vfio-tap0 mode tap
+sudo ip link set vfio-tap0 master vfio-br0
+sudo ip link set vfio-tap0 up
+
+sudo ip tuntap add vfio-tap1 mode tap
+sudo ip link set vfio-tap1 master vfio-br0
+sudo ip link set vfio-tap1 up
+
 cargo build
 sudo setcap cap_net_admin+ep target/debug/cloud-hypervisor

+# We always copy a fresh version of our binary for our L2 guest.
+cp target/debug/cloud-hypervisor $VFIO_DIR
+
+# We need qemu to have NET_ADMIN as well.
+sudo setcap cap_net_admin+ep /usr/bin/qemu-system-x86_64
+
+sudo adduser $USER kvm
+newgrp kvm << EOF
 cargo test --features "integration_tests"
+EOF
+
+# Tear VFIO test network down
+sudo ip link del vfio-br0
+sudo ip link del vfio-tap0
+sudo ip link del vfio-tap1
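The bridge and TAP wiring set up above can be sanity-checked on the host with plain iproute2 commands. This is only a debugging aid, assuming the interface names used by the script; it is not part of the commit:

# Not part of the commit: verify the VFIO test bridge wiring on the host.
ip -br link show master vfio-br0    # expect vfio-tap0 and vfio-tap1 listed
ip -br addr show dev vfio-br0       # expect 172.16.0.1/24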


@@ -196,8 +196,10 @@ mod tests {
         disks: Vec<String>,
         fw_path: String,
         guest_ip: String,
+        l2_guest_ip: String,
         host_ip: String,
         guest_mac: String,
+        l2_guest_mac: String,
     }

     fn prepare_virtiofsd(tmp_dir: &TempDir) -> (std::process::Child, String) {
@@ -230,20 +232,18 @@ mod tests {
     }

     impl Guest {
-        fn new() -> Self {
+        fn new_from_ip_range(class: &str, id: u8) -> Self {
             let tmp_dir = TempDir::new("ch").unwrap();

-            let mut guard = NEXT_VM_ID.lock().unwrap();
-            let id = *guard;
-            *guard = id + 1;
-
             let mut guest = Guest {
                 tmp_dir,
                 disks: Vec::new(),
                 fw_path: String::new(),
-                guest_ip: format!("192.168.{}.2", id),
-                host_ip: format!("192.168.{}.1", id),
+                guest_ip: format!("{}.{}.2", class, id),
+                l2_guest_ip: format!("{}.{}.3", class, id),
+                host_ip: format!("{}.{}.1", class, id),
                 guest_mac: format!("12:34:56:78:90:{:02x}", id),
+                l2_guest_mac: format!("de:ad:be:ef:12:{:02x}", id),
             };

             guest.prepare_files();
@@ -251,6 +251,14 @@ mod tests {
             guest
         }

+        fn new() -> Self {
+            let mut guard = NEXT_VM_ID.lock().unwrap();
+            let id = *guard;
+            *guard = id + 1;
+
+            Self::new_from_ip_range("192.168", id)
+        }
+
         fn prepare_cloudinit(&self) -> String {
             let cloudinit_file_path =
                 String::from(self.tmp_dir.path().join("cloudinit").to_str().unwrap());
@@ -282,7 +290,9 @@ mod tests {
             user_data_string = user_data_string.replace("192.168.2.1", &self.host_ip);
             user_data_string = user_data_string.replace("192.168.2.2", &self.guest_ip);
+            user_data_string = user_data_string.replace("192.168.2.3", &self.l2_guest_ip);
             user_data_string = user_data_string.replace("12:34:56:78:90:ab", &self.guest_mac);
+            user_data_string = user_data_string.replace("de:ad:be:ef:12:34", &self.l2_guest_mac);

             fs::File::create(cloud_init_directory.join("latest").join("user_data"))
                 .unwrap()
@@ -343,7 +353,7 @@ mod tests {
             )
         }

-        fn ssh_command(&self, command: &str) -> String {
+        fn ssh_command_ip(&self, command: &str, ip: &str) -> String {
             let mut s = String::new();
             #[derive(Debug)]
             enum Error {
@@ -355,8 +365,8 @@ mod tests {
             let mut counter = 0;
             loop {
                 match (|| -> Result<(), Error> {
-                    let tcp = TcpStream::connect(format!("{}:22", self.guest_ip))
-                        .map_err(|_| Error::Connection)?;
+                    let tcp =
+                        TcpStream::connect(format!("{}:22", ip)).map_err(|_| Error::Connection)?;
                     let mut sess = Session::new().unwrap();
                     sess.handshake(&tcp).map_err(|_| Error::Connection)?;
@@ -387,6 +397,18 @@ mod tests {
             s
         }

+        fn ssh_command(&self, command: &str) -> String {
+            self.ssh_command_ip(command, &self.guest_ip)
+        }
+
+        fn ssh_command_l1(&self, command: &str) -> String {
+            self.ssh_command_ip(command, &self.guest_ip)
+        }
+
+        fn ssh_command_l2(&self, command: &str) -> String {
+            self.ssh_command_ip(command, &self.l2_guest_ip)
+        }
+
         fn get_cpu_count(&self) -> u32 {
             self.ssh_command("grep -c processor /proc/cpuinfo")
                 .trim()
@@ -1095,4 +1117,123 @@ mod tests {
         });
     }
+
+    #[test]
+    // The VFIO integration test starts a qemu guest and then directly
+    // assigns one of its virtio-pci devices to a cloud-hypervisor nested
+    // guest. The test assigns one of the 2 virtio-pci networking
+    // interfaces, and thus the cloud-hypervisor guest will get a
+    // networking interface through that direct assignment.
+    // The test starts the QEMU guest with 2 TAP backed networking
+    // interfaces, bound through a simple bridge on the host. So if the
+    // nested cloud-hypervisor succeeds in getting a directly assigned
+    // interface from its QEMU host, we should be able to ssh into it,
+    // and verify that it's running with the right kernel command line
+    // (We tag the cloud-hypervisor command line for that purpose).
+    fn test_vfio() {
+        test_block!(tb, "", {
+            let guest = Guest::new_from_ip_range("172.16", 0);
+
+            let home = dirs::home_dir().unwrap();
+            let mut cloud_init_vfio_base_path = home.clone();
+            cloud_init_vfio_base_path.push("workloads");
+            cloud_init_vfio_base_path.push("vfio");
+            cloud_init_vfio_base_path.push("cloudinit.img");
+
+            // We copy our cloudinit into the vfio mount point, for the nested
+            // cloud-hypervisor guest to use.
+            fs::copy(&guest.disks[1], &cloud_init_vfio_base_path)
+                .expect("copying of cloud-init disk failed");
+
+            let vfio_9p_path = format!(
+                "local,id=shared,path={}/workloads/vfio/,security_model=none",
+                home.to_str().unwrap()
+            );
+
+            let ovmf_path = format!("{}/workloads/OVMF.fd", home.to_str().unwrap());
+            let os_disk = format!("file={},format=qcow2", guest.disks[0].as_str());
+            let cloud_init_disk = format!("file={},format=raw", guest.disks[1].as_str());
+
+            let vfio_tap0 = "vfio-tap0";
+            let vfio_tap1 = "vfio-tap1";
+
+            let ssh_net = "ssh-net";
+            let vfio_net = "vfio-net";
+
+            let netdev_ssh = format!(
+                "tap,ifname={},id={},script=no,downscript=no",
+                vfio_tap0, ssh_net
+            );
+            let netdev_ssh_device = format!(
+                "virtio-net-pci,netdev={},disable-legacy=on,iommu_platform=on,ats=on,mac={}",
+                ssh_net, guest.guest_mac
+            );
+
+            let netdev_vfio = format!(
+                "tap,ifname={},id={},script=no,downscript=no",
+                vfio_tap1, vfio_net
+            );
+            let netdev_vfio_device = format!(
+                "virtio-net-pci,netdev={},disable-legacy=on,iommu_platform=on,ats=on,mac={}",
+                vfio_net, guest.l2_guest_mac
+            );
+
+            let mut qemu_child = Command::new("qemu-system-x86_64")
+                .args(&["-machine", "q35,accel=kvm,kernel_irqchip=split"])
+                .args(&["-bios", &ovmf_path])
+                .args(&["-smp", "sockets=1,cpus=4,cores=2"])
+                .args(&["-cpu", "host"])
+                .args(&["-m", "1024"])
+                .args(&["-vga", "none"])
+                .args(&["-nographic"])
+                .args(&["-drive", &os_disk])
+                .args(&["-drive", &cloud_init_disk])
+                .args(&["-device", "virtio-rng-pci"])
+                .args(&["-netdev", &netdev_ssh])
+                .args(&["-device", &netdev_ssh_device])
+                .args(&["-netdev", &netdev_vfio])
+                .args(&["-device", &netdev_vfio_device])
+                .args(&[
+                    "-device",
+                    "intel-iommu,intremap=on,caching-mode=on,device-iotlb=on",
+                ])
+                .args(&["-fsdev", &vfio_9p_path])
+                .args(&[
+                    "-device",
+                    "virtio-9p-pci,fsdev=shared,mount_tag=cloud_hypervisor",
+                ])
+                .spawn()
+                .unwrap();
+
+            thread::sleep(std::time::Duration::new(30, 0));
+
+            guest.ssh_command_l1("sudo systemctl start vfio");
+            thread::sleep(std::time::Duration::new(30, 0));
+
+            // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag
+            // added to its kernel command line.
+            // Let's ssh into it and verify that it's there. If it is it means
+            // we're in the right guest (The L2 one) because the QEMU L1 guest
+            // does not have this command line tag.
+            aver_eq!(
+                tb,
+                guest
+                    .ssh_command_l2("cat /proc/cmdline | grep -c 'VFIOTAG'")
+                    .trim()
+                    .parse::<u32>()
+                    .unwrap(),
+                1
+            );
+
+            guest.ssh_command_l2("sudo reboot");
+            thread::sleep(std::time::Duration::new(10, 0));
+
+            guest.ssh_command_l1("sudo shutdown -h now");
+            thread::sleep(std::time::Duration::new(10, 0));
+
+            let _ = qemu_child.kill();
+            let _ = qemu_child.wait();
+
+            Ok(())
+        });
+    }
 }
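To iterate on this test alone, something along the following lines should work, assuming the workloads, the vfio-br0/tap wiring and the kvm group membership set up by scripts/run_integration_tests.sh are already in place (the test-name filter is standard cargo behaviour; this invocation is not part of the commit):

# Not part of the commit: run only the VFIO test once the environment is prepared.
cargo build
sudo setcap cap_net_admin+ep target/debug/cloud-hypervisor
cargo test --features "integration_tests" test_vfio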


@@ -6,7 +6,7 @@ users:
       - ALL=(ALL) NOPASSWD:ALL
 write_files:
   -
-    path: /etc/systemd/network/00-static.network
+    path: /etc/systemd/network/00-static-l1.network
     permissions: 0644
     content: |
         [Match]
@@ -15,3 +15,42 @@
         [Network]
         Address=192.168.2.2/24
         Gateway=192.168.2.1
+  -
+    path: /etc/systemd/network/00-static-l2.network
+    permissions: 0644
+    content: |
+        [Match]
+        MACAddress=de:ad:be:ef:12:34
+
+        [Network]
+        Address=192.168.2.3/24
+        Gateway=192.168.2.1
+  -
+    path: /etc/systemd/system/vfio.service
+    permissions: 0644
+    content: |
+        [Unit]
+        Description=VFIO test systemd service
+
+        [Service]
+        Type=simple
+        ExecStart=/bin/bash /usr/bin/cloud-hypervisor-vfio.sh
+
+        [Install]
+        WantedBy=multi-user.target
+  -
+    path: /usr/bin/cloud-hypervisor-vfio.sh
+    permissions: 0755
+    content: |
+        #!/bin/bash
+        mount -t 9p -o trans=virtio cloud_hypervisor /mnt -oversion=9p2000.L,posixacl,cache=loose
+        modprobe vfio_iommu_type1 allow_unsafe_interrupts
+        modprobe vfio_pci
+        bash -c "echo 0000:00:03.0 > /sys/bus/pci/devices/0000\:00\:03.0/driver/unbind"
+        bash -c "echo 1af4 1041 > /sys/bus/pci/drivers/vfio-pci/new_id"
+        /mnt/cloud-hypervisor --console off --serial tty --kernel /mnt/vmlinux --cmdline "console=ttyS0 reboot=k panic=1 nomodules i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd root=/dev/vda2 VFIOTAG" --disk /mnt/clear-29810-cloud.img /mnt/cloudinit.img --cpus 1 --memory size=512M --rng --device /sys/bus/pci/devices/0000:00:03.0/
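
Since both TAP interfaces are enslaved to vfio-br0 on the host, the L2 guest's statically configured address (172.16.0.3 for the id 0 this test uses) is reachable straight from the host, which is what ssh_command_l2() relies on. A rough manual equivalent is shown below; the login user is left as a placeholder because the cloud-init users: section is not shown in this hunk:

# Not part of the commit: manual equivalent of ssh_command_l2().
# TEST_USER is whatever account the cloud-init users: section provisions.
ssh $TEST_USER@172.16.0.3 "cat /proc/cmdline"    # should contain VFIOTAG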